diff --git a/.mailmap b/.mailmap index 45ac9d3125396..6cecc35f6e0db 100644 --- a/.mailmap +++ b/.mailmap @@ -1,7 +1,7 @@ Adrian-Constantin Popescu Alex Blewitt Alex Hoppen -Alex Hoppen > +Alex Hoppen Alexis Beingessner Alper Çugun Amr Aboelela @@ -79,6 +79,7 @@ Joe Shajrawi Johannes Weiß John Regner Karoy Lorentey +Kavon Farvardin Keith Smiley Kevin Ballard Kevin Saldaña diff --git a/benchmark/CMakeLists.txt b/benchmark/CMakeLists.txt index 0db48967fad2a..08dded4dd11c4 100644 --- a/benchmark/CMakeLists.txt +++ b/benchmark/CMakeLists.txt @@ -80,6 +80,7 @@ set(SWIFT_BENCH_MODULES single-source/DictionaryRemove single-source/DictionarySubscriptDefault single-source/DictionarySwap + single-source/Differentiation single-source/Diffing single-source/DiffingMyers single-source/DropFirst diff --git a/benchmark/single-source/Differentiation.swift b/benchmark/single-source/Differentiation.swift new file mode 100644 index 0000000000000..f15f64348bec6 --- /dev/null +++ b/benchmark/single-source/Differentiation.swift @@ -0,0 +1,73 @@ +//===--- Differentiation.swift -------------------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2020 Apple Inc. 
and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +#if canImport(_Differentiation) + +import TestsUtils +import _Differentiation + +public let Differentiation = [ + BenchmarkInfo( + name: "DifferentiationIdentity", + runFunction: run_DifferentiationIdentity, + tags: [.regression, .differentiation] + ), + BenchmarkInfo( + name: "DifferentiationSquare", + runFunction: run_DifferentiationSquare, + tags: [.regression, .differentiation] + ), + BenchmarkInfo( + name: "DifferentiationArraySum", + runFunction: run_DifferentiationArraySum, + tags: [.regression, .differentiation], + setUpFunction: { blackHole(onesArray) } + ), +] + +@inline(never) +public func run_DifferentiationIdentity(N: Int) { + func f(_ x: Float) -> Float { + x + } + for _ in 0..<1000*N { + blackHole(valueWithGradient(at: 1, in: f)) + } +} + +@inline(never) +public func run_DifferentiationSquare(N: Int) { + func f(_ x: Float) -> Float { + x * x + } + for _ in 0..<1000*N { + blackHole(valueWithGradient(at: 1, in: f)) + } +} + +let onesArray: [Float] = Array(repeating: 1, count: 50) + +@inline(never) +public func run_DifferentiationArraySum(N: Int) { + func sum(_ array: [Float]) -> Float { + var result: Float = 0 + for i in withoutDerivative(at: 0.. 
type ::= 'Bi' NATURAL '_' // Builtin.Int type ::= 'BI' // Builtin.IntLiteral + type ::= 'Bj' // Builtin.Job type ::= 'BO' // Builtin.UnknownObject (no longer a distinct type, but still used for AnyObject) type ::= 'Bo' // Builtin.NativeObject type ::= 'Bp' // Builtin.RawPointer @@ -1006,6 +1008,8 @@ Function Specializations :: specialization ::= type '_' type* 'Tg' SPEC-INFO // Generic re-abstracted specialization + specialization ::= type '_' type* 'TB' SPEC-INFO // Alternative mangling for generic re-abstracted specializations, + // used for functions with re-abstracted resilient parameter types. specialization ::= type '_' type* 'Ts' SPEC-INFO // Generic re-abstracted prespecialization specialization ::= type '_' type* 'TG' SPEC-INFO // Generic not re-abstracted specialization specialization ::= type '_' type* 'Ti' SPEC-INFO // Inlined function with generic substitutions. diff --git a/docs/Android.md b/docs/Android.md index 77d6b46479d2d..69a8741c85d7e 100644 --- a/docs/Android.md +++ b/docs/Android.md @@ -103,7 +103,7 @@ $ NDK_PATH="path/to/android-ndk21" $ build/Ninja-ReleaseAssert/swift-linux-x86_64/bin/swiftc \ # The Swift compiler built in the previous step. # The location of the tools used to build Android binaries -tools-directory ${NDK_PATH}/toolchains/llvm/prebuilt/linux-x86_64/bin/ \ - -target armv7a-none-linux-androideabi \ # Targeting android-armv7, and supply the path to libgcc. + -target armv7a-unknown-linux-androideabi \ # Targeting android-armv7, and supply the path to libgcc. -L ${NDK_PATH}/toolchains/arm-linux-androideabi-4.9/prebuilt/linux-x86_64/lib/gcc/arm-linux-androideabi/4.9.x/armv7-a \ -sdk ${NDK_PATH}/platforms/android-21/arch-arm \ # Use the same architecture and API version as you used to build the stdlib in the previous step. 
hello.swift diff --git a/docs/ContinuousIntegration.md b/docs/ContinuousIntegration.md index 7375f093445c9..7e55056cabe7b 100644 --- a/docs/ContinuousIntegration.md +++ b/docs/ContinuousIntegration.md @@ -215,7 +215,7 @@ preset=buildbot,tools=RA,stdlib=RD,test=non_executable ### Build and Test the Minimal Freestanding Stdlib using Toolchain Specific Preset Testing -To test the minimal freestanding stdlib on macho, you can use the support for running a miscellanous preset against a snapshot toolchain. +To test the minimal freestanding stdlib on macho, you can use the support for running a miscellaneous preset against a snapshot toolchain. ``` preset=stdlib_S_standalone_minimal_macho_x86_64,build,test diff --git a/docs/DifferentiableProgramming.md b/docs/DifferentiableProgramming.md index 57d2fb0934c82..0216fce54900b 100644 --- a/docs/DifferentiableProgramming.md +++ b/docs/DifferentiableProgramming.md @@ -1273,6 +1273,12 @@ The synthesized `TangentVector` has the same effective access level as the original type declaration. Properties in the synthesized `TangentVector` have the same effective access level as their corresponding original properties. +The synthesized `TangentVector` adopts protocols from all `TangentVector` +conformance constraints implied by the declaration that triggers synthesis. For +example, synthesized `TangentVector`s always adopt the `AdditiveArithmetic` and +`Differentiable` protocols because the `Differentiable` protocol requires that +`TangentVector` conforms to `AdditiveArithmetic` and `Differentiable`. + The synthesized `move(along:)` method calls `move(along:)` for each pair of a differentiable variable and its corresponding property in `TangentVector`. @@ -1879,7 +1885,7 @@ Since complex numbers are not yet defined in the standard library, we extended the complex number type defined in the [NumericAnnex](https://github.com/xwu/NumericAnnex) library to be differentiable. 
-[The full implementation is here](https://github.com/tensorflow/swift-apis/blob/master/Sources/third_party/Experimental/Complex.swift). +[The full implementation is here](https://github.com/tensorflow/swift-apis/blob/main/Sources/third_party/Experimental/Complex.swift). The implementation adopts the [Autograd convention](https://github.com/HIPS/autograd/blob/master/docs/tutorial.md#complex-numbers) for derivatives of functions with complex arguments or results, so that we can diff --git a/include/swift/ABI/Actor.h b/include/swift/ABI/Actor.h new file mode 100644 index 0000000000000..442383343ee87 --- /dev/null +++ b/include/swift/ABI/Actor.h @@ -0,0 +1,44 @@ +//===--- Actor.h - ABI structures for actors --------------------*- C++ -*-===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// +// +// Swift ABI describing actors. +// +//===----------------------------------------------------------------------===// + +#ifndef SWIFT_ABI_ACTOR_H +#define SWIFT_ABI_ACTOR_H + +#include "swift/ABI/HeapObject.h" +#include "swift/ABI/MetadataValues.h" + +namespace swift { + +/// The default actor implementation. This is the layout of both +/// the DefaultActor and NSDefaultActor classes. +class alignas(Alignment_DefaultActor) DefaultActor : public HeapObject { +public: + // These constructors do not initialize the actor instance, and the + // destructor does not destroy the actor instance; you must call + // swift_defaultActor_{initialize,destroy} yourself. 
+ constexpr DefaultActor(const HeapMetadata *metadata) + : HeapObject(metadata), PrivateData{} {} + + constexpr DefaultActor(const HeapMetadata *metadata, + InlineRefCounts::Immortal_t immortal) + : HeapObject(metadata, immortal), PrivateData{} {} + + void *PrivateData[NumWords_DefaultActor]; +}; + +} // end namespace swift + +#endif \ No newline at end of file diff --git a/include/swift/ABI/Class.h b/include/swift/ABI/Class.h index 421dd7fae21dd..08e64d5f3b92e 100644 --- a/include/swift/ABI/Class.h +++ b/include/swift/ABI/Class.h @@ -53,7 +53,12 @@ enum class ObjCClassFlags : uint32_t { /// This class provides a non-trivial .cxx_destruct method, but /// its .cxx_construct is trivial. For backwards compatibility, /// when setting this flag, HasCXXStructors must be set as well. - HasCXXDestructorOnly = 0x00100 + HasCXXDestructorOnly = 0x00100, + + /// This class does not allow associated objects on instances. + /// + /// Will cause the objc runtime to trap in objc_setAssociatedObject. + ForbidsAssociatedObjects = 0x00400, }; inline ObjCClassFlags &operator|=(ObjCClassFlags &lhs, ObjCClassFlags rhs) { lhs = ObjCClassFlags(uint32_t(lhs) | uint32_t(rhs)); diff --git a/include/swift/ABI/Executor.h b/include/swift/ABI/Executor.h new file mode 100644 index 0000000000000..04835a048ae6c --- /dev/null +++ b/include/swift/ABI/Executor.h @@ -0,0 +1,119 @@ +//===--- Executor.h - ABI structures for executors --------------*- C++ -*-===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// +// +// Swift ABI describing executors. 
+// +//===----------------------------------------------------------------------===// + +#ifndef SWIFT_ABI_EXECUTOR_H +#define SWIFT_ABI_EXECUTOR_H + +#include + +namespace swift { +class AsyncContext; +class AsyncTask; +class DefaultActor; +class Job; + +/// An ExecutorRef isn't necessarily just a pointer to an executor +/// object; it may have other bits set. +class ExecutorRef { + static constexpr uintptr_t IsDefaultActor = 1; + static constexpr uintptr_t PointerMask = 7; + + uintptr_t Value; + + constexpr ExecutorRef(uintptr_t value) : Value(value) {} + +public: + /// A generic execution environment. When running in a generic + /// environment, it's presumed to be okay to switch synchronously + /// to an actor. As an executor request, this represents a request + /// to drop whatever the current actor is. + constexpr static ExecutorRef generic() { + return ExecutorRef(0); + } + + /// Given a pointer to a default actor, return an executor reference + /// for it. + static ExecutorRef forDefaultActor(DefaultActor *actor) { + assert(actor); + return ExecutorRef(reinterpret_cast(actor) | IsDefaultActor); + } + + /// Is this the generic executor reference? + bool isGeneric() const { + return Value == 0; + } + + /// Is this a default-actor executor reference? + bool isDefaultActor() const { + return Value & IsDefaultActor; + } + DefaultActor *getDefaultActor() const { + assert(isDefaultActor()); + return reinterpret_cast(Value & ~PointerMask); + } + + /// Do we have to do any work to start running as the requested + /// executor? 
+ bool mustSwitchToRun(ExecutorRef newExecutor) const { + return *this != newExecutor; + } + + bool operator==(ExecutorRef other) const { + return Value == other.Value; + } + bool operator!=(ExecutorRef other) const { + return Value != other.Value; + } +}; + +using JobInvokeFunction = + SWIFT_CC(swiftasync) + void (Job *, ExecutorRef); + +using TaskContinuationFunction = + SWIFT_CC(swiftasync) + void (AsyncTask *, ExecutorRef, AsyncContext *); + +template +struct AsyncFunctionTypeImpl; +template +struct AsyncFunctionTypeImpl { + // TODO: expand and include the arguments in the parameters. + using type = TaskContinuationFunction; +}; + +template +using AsyncFunctionType = typename AsyncFunctionTypeImpl::type; + +/// A "function pointer" for an async function. +/// +/// Eventually, this will always be signed with the data key +/// using a type-specific discriminator. +template +class AsyncFunctionPointer { +public: + /// The function to run. + RelativeDirectPointer, + /*nullable*/ false, + int32_t> Function; + + /// The expected size of the context. + uint32_t ExpectedContextSize; +}; + +} + +#endif diff --git a/include/swift/ABI/MetadataValues.h b/include/swift/ABI/MetadataValues.h index 76c5fd75a3845..313049e5f34b3 100644 --- a/include/swift/ABI/MetadataValues.h +++ b/include/swift/ABI/MetadataValues.h @@ -42,6 +42,10 @@ enum { /// The number of words in a yield-many coroutine buffer. NumWords_YieldManyBuffer = 8, + + /// The number of words (in addition to the heap-object header) + /// in a default actor. + NumWords_DefaultActor = 10, }; struct InProcess; @@ -114,6 +118,9 @@ enum class NominalTypeKind : uint32_t { /// The maximum supported type alignment. const size_t MaximumAlignment = 16; +/// The alignment of a DefaultActor. +const size_t Alignment_DefaultActor = MaximumAlignment; + /// Flags stored in the value-witness table. 
template class TargetValueWitnessFlags { @@ -1885,7 +1892,11 @@ enum class JobKind : size_t { Task = 0, /// Job kinds >= 192 are private to the implementation. - First_Reserved = 192 + First_Reserved = 192, + + DefaultActorInline = First_Reserved, + DefaultActorSeparate, + DefaultActorOverride }; /// The priority of a job. Higher priorities are larger values. @@ -1920,6 +1931,10 @@ class JobFlags : public FlagSet { explicit JobFlags(size_t bits) : FlagSet(bits) {} JobFlags(JobKind kind) { setKind(kind); } + JobFlags(JobKind kind, JobPriority priority) { + setKind(kind); + setPriority(priority); + } constexpr JobFlags() {} FLAGSET_DEFINE_FIELD_ACCESSORS(Kind, Kind_width, JobKind, diff --git a/include/swift/ABI/Task.h b/include/swift/ABI/Task.h index 4cfee38b92ca6..db1c2debee6ed 100644 --- a/include/swift/ABI/Task.h +++ b/include/swift/ABI/Task.h @@ -18,6 +18,7 @@ #define SWIFT_ABI_TASK_H #include "swift/Basic/RelativePointer.h" +#include "swift/ABI/Executor.h" #include "swift/ABI/HeapObject.h" #include "swift/ABI/Metadata.h" #include "swift/ABI/MetadataValues.h" @@ -25,68 +26,13 @@ #include "swift/Basic/STLExtras.h" namespace swift { - class AsyncTask; class AsyncContext; -class Executor; class Job; struct OpaqueValue; struct SwiftError; class TaskStatusRecord; -/// An ExecutorRef isn't necessarily just a pointer to an executor -/// object; it may have other bits set. -struct ExecutorRef { - Executor *Pointer; - - /// Get an executor ref that represents a lack of preference about - /// where execution resumes. This is only valid in continuations, - /// return contexts, and so on; it is not generally passed to - /// executing functions. 
- static ExecutorRef noPreference() { - return { nullptr }; - } - - bool operator==(ExecutorRef other) const { - return Pointer == other.Pointer; - } -}; - -using JobInvokeFunction = - SWIFT_CC(swift) - void (Job *, ExecutorRef); - -using TaskContinuationFunction = - SWIFT_CC(swift) - void (AsyncTask *, ExecutorRef, AsyncContext *); - -template -struct AsyncFunctionTypeImpl; -template -struct AsyncFunctionTypeImpl { - // TODO: expand and include the arguments in the parameters. - using type = TaskContinuationFunction; -}; - -template -using AsyncFunctionType = typename AsyncFunctionTypeImpl::type; - -/// A "function pointer" for an async function. -/// -/// Eventually, this will always be signed with the data key -/// using a type-specific discriminator. -template -class AsyncFunctionPointer { -public: - /// The function to run. - RelativeDirectPointer, - /*nullable*/ false, - int32_t> Function; - - /// The expected size of the context. - uint32_t ExpectedContextSize; -}; - /// A schedulable job. class alignas(2 * alignof(void*)) Job { protected: @@ -436,6 +382,16 @@ class alignas(MaximumAlignment) AsyncContext { AsyncContext(const AsyncContext &) = delete; AsyncContext &operator=(const AsyncContext &) = delete; + + /// Perform a return from this context. + /// + /// Generally this should be tail-called. + SWIFT_CC(swiftasync) + void resumeParent(AsyncTask *task, ExecutorRef executor) { + // TODO: destroy context before returning? + // FIXME: force tail call + return ResumeParent(task, executor, Parent); + } }; /// An async context that supports yielding. diff --git a/include/swift/AST/ASTContext.h b/include/swift/AST/ASTContext.h index 5e536c5aaaea1..46125e348f464 100644 --- a/include/swift/AST/ASTContext.h +++ b/include/swift/AST/ASTContext.h @@ -580,7 +580,7 @@ class ASTContext final { // Retrieve the declaration of Swift._stdlib_isOSVersionAtLeast. 
FuncDecl *getIsOSVersionAtLeastDecl() const; - + /// Look for the declaration with the given name within the /// Swift module. void lookupInSwiftModule(StringRef name, @@ -713,6 +713,9 @@ class ASTContext final { /// Get the runtime availability of support for concurrency. AvailabilityContext getConcurrencyAvailability(); + /// Get the runtime availability of support for differentiation. + AvailabilityContext getDifferentiationAvailability(); + /// Get the runtime availability of features introduced in the Swift 5.2 /// compiler for the target platform. AvailabilityContext getSwift52Availability(); @@ -745,13 +748,10 @@ class ASTContext final { const CanType TheUnresolvedType; /// This is the UnresolvedType singleton. const CanType TheEmptyTupleType; /// This is '()', aka Void const CanType TheAnyType; /// This is 'Any', the empty protocol composition - const CanType TheNativeObjectType; /// Builtin.NativeObject - const CanType TheBridgeObjectType; /// Builtin.BridgeObject - const CanType TheRawPointerType; /// Builtin.RawPointer - const CanType TheUnsafeValueBufferType; /// Builtin.UnsafeValueBuffer - const CanType TheSILTokenType; /// Builtin.SILToken - const CanType TheIntegerLiteralType; /// Builtin.IntegerLiteralType - +#define SINGLETON_TYPE(SHORT_ID, ID) \ + const CanType The##SHORT_ID##Type; +#include "swift/AST/TypeNodes.def" + const CanType TheIEEE32Type; /// 32-bit IEEE floating point const CanType TheIEEE64Type; /// 64-bit IEEE floating point diff --git a/include/swift/AST/ASTMangler.h b/include/swift/AST/ASTMangler.h index 6b69083c97de4..9b79375872d65 100644 --- a/include/swift/AST/ASTMangler.h +++ b/include/swift/AST/ASTMangler.h @@ -77,6 +77,7 @@ class ASTMangler : public Mangler { public: enum class SymbolKind { Default, + AsyncHandlerBody, DynamicThunk, SwiftAsObjCThunk, ObjCAsSwiftThunk, @@ -323,8 +324,15 @@ class ASTMangler : public Mangler { void appendAnyGenericType(const GenericTypeDecl *decl); - void appendFunction(AnyFunctionType *fn, bool 
isFunctionMangling = false, - const ValueDecl *forDecl = nullptr); + enum FunctionManglingKind { + NoFunctionMangling, + FunctionMangling, + AsyncHandlerBodyMangling + }; + + void appendFunction(AnyFunctionType *fn, + FunctionManglingKind functionMangling = NoFunctionMangling, + const ValueDecl *forDecl = nullptr); void appendFunctionType(AnyFunctionType *fn, bool isAutoClosure = false, const ValueDecl *forDecl = nullptr); void appendClangType(AnyFunctionType *fn); @@ -332,7 +340,8 @@ class ASTMangler : public Mangler { void appendClangType(FnType *fn, llvm::raw_svector_ostream &os); void appendFunctionSignature(AnyFunctionType *fn, - const ValueDecl *forDecl = nullptr); + const ValueDecl *forDecl, + FunctionManglingKind functionMangling); void appendFunctionInputType(ArrayRef params, const ValueDecl *forDecl = nullptr); @@ -383,7 +392,10 @@ class ASTMangler : public Mangler { GenericSignature &genericSig, GenericSignature &parentGenericSig); - void appendDeclType(const ValueDecl *decl, bool isFunctionMangling = false); + + + void appendDeclType(const ValueDecl *decl, + FunctionManglingKind functionMangling = NoFunctionMangling); bool tryAppendStandardSubstitution(const GenericTypeDecl *type); @@ -400,7 +412,7 @@ class ASTMangler : public Mangler { void appendEntity(const ValueDecl *decl, StringRef EntityOp, bool isStatic); - void appendEntity(const ValueDecl *decl); + void appendEntity(const ValueDecl *decl, bool isAsyncHandlerBody = false); void appendProtocolConformance(const ProtocolConformance *conformance); void appendProtocolConformanceRef(const RootProtocolConformance *conformance); diff --git a/include/swift/AST/Builtins.def b/include/swift/AST/Builtins.def index 457b4c1792eed..93a58f75ae639 100644 --- a/include/swift/AST/Builtins.def +++ b/include/swift/AST/Builtins.def @@ -504,6 +504,20 @@ BUILTIN_SIL_OPERATION(DifferentiableFunction, "differentiableFunction", Special) /// linearFunction BUILTIN_SIL_OPERATION(LinearFunction, "linearFunction", Special) 
+/// withUnsafeContinuation : (Builtin.RawUnsafeContinuation -> ()) async -> T +/// +/// Unsafely capture the current continuation and pass it to the given +/// function value. Returns a value of type T when the continuation is +/// resumed. +BUILTIN_SIL_OPERATION(WithUnsafeContinuation, "withUnsafeContinuation", Special) + +/// withUnsafeThrowingContinuation : (Builtin.RawUnsafeContinuation -> ()) async throws -> T +/// +/// Unsafely capture the current continuation and pass it to the given +/// function value. Returns a value of type T or throws an error when +/// the continuation is resumed. +BUILTIN_SIL_OPERATION(WithUnsafeThrowingContinuation, "withUnsafeThrowingContinuation", Special) + #undef BUILTIN_SIL_OPERATION // BUILTIN_RUNTIME_CALL - A call into a runtime function. @@ -752,6 +766,15 @@ BUILTIN_MISC_OPERATION_WITH_SILGEN(CreateAsyncTaskFuture, /// is a pure value and therefore we can consider it as readnone). BUILTIN_MISC_OPERATION_WITH_SILGEN(GlobalStringTablePointer, "globalStringTablePointer", "n", Special) +// autoDiffCreateLinearMapContext: (Builtin.Word) -> Builtin.NativeObject +BUILTIN_MISC_OPERATION_WITH_SILGEN(AutoDiffCreateLinearMapContext, "autoDiffCreateLinearMapContext", "n", Special) + +// autoDiffProjectTopLevelSubcontext: (Builtin.NativeObject) -> Builtin.RawPointer +BUILTIN_MISC_OPERATION_WITH_SILGEN(AutoDiffProjectTopLevelSubcontext, "autoDiffProjectTopLevelSubcontext", "n", Special) + +// autoDiffAllocateSubcontext: (Builtin.NativeObject, Builtin.Word) -> Builtin.RawPointer +BUILTIN_MISC_OPERATION_WITH_SILGEN(AutoDiffAllocateSubcontext, "autoDiffAllocateSubcontext", "", Special) + #undef BUILTIN_MISC_OPERATION_WITH_SILGEN #undef BUILTIN_MISC_OPERATION diff --git a/include/swift/AST/Decl.h b/include/swift/AST/Decl.h index 4ca710c2c7b54..6d4bf77e8ae8e 100644 --- a/include/swift/AST/Decl.h +++ b/include/swift/AST/Decl.h @@ -242,14 +242,10 @@ struct OverloadSignature { /// Whether this declaration has an opaque return type. 
unsigned HasOpaqueReturnType : 1; - /// Whether this declaration is 'async' - unsigned HasAsync : 1; - OverloadSignature() : UnaryOperator(UnaryOperatorKind::None), IsInstanceMember(false), IsVariable(false), IsFunction(false), InProtocolExtension(false), - InExtensionOfGenericType(false), HasOpaqueReturnType(false), - HasAsync(false) {} + InExtensionOfGenericType(false), HasOpaqueReturnType(false) { } }; /// Determine whether two overload signatures conflict. @@ -3459,6 +3455,10 @@ enum class AncestryFlags : uint8_t { /// The class or one of its superclasses requires stored property initializers. RequiresStoredPropertyInits = (1<<6), + + /// The class uses the ObjC object model (reference counting, + /// isa encoding, etc.). + ObjCObjectModel = (1<<7), }; /// Return type of ClassDecl::checkAncestry(). Describes a set of interesting @@ -3625,6 +3625,30 @@ class ClassDecl final : public NominalTypeDecl { /// Whether the class is an actor. bool isActor() const; + /// Whether the class is (known to be) a default actor. + bool isDefaultActor() const; + + /// Whether the class is known to be a *root* default actor, + /// i.e. the first class in its hierarchy that is a default actor. + bool isRootDefaultActor() const; + + /// Does this class explicitly declare any of the methods that + /// would prevent it from being a default actor? + bool hasExplicitCustomActorMethods() const; + + /// Is this the NSObject class type? + bool isNSObject() const; + + /// Whether the class directly inherits from NSObject but should use + /// Swift's native object model. + bool isNativeNSObjectSubclass() const; + + /// Whether the class uses the ObjC object model (reference counting, + /// allocation, etc.) instead of the Swift model. + bool usesObjCObjectModel() const { + return checkAncestry(AncestryFlags::ObjCObjectModel); + } + /// Returns true if the class has designated initializers that are not listed /// in its members. 
/// diff --git a/include/swift/AST/DiagnosticsCommon.def b/include/swift/AST/DiagnosticsCommon.def index 72d8d64bbecb7..d79dfbc137669 100644 --- a/include/swift/AST/DiagnosticsCommon.def +++ b/include/swift/AST/DiagnosticsCommon.def @@ -103,6 +103,10 @@ ERROR(generic_signature_not_equal,none, "generic signature %0 is not equal to new signature %1", (StringRef, StringRef)) +// Used in diagnostics that are split across requests implemented in several places. +ERROR(concurrency_default_actor_not_found,none, + "broken standard library: cannot find default actor type '%0'", (StringRef)) + // FIXME: Used by swift-api-digester. Don't want to set up a separate diagnostics // file just for a few errors. ERROR(sdk_node_unrecognized_key,none, diff --git a/include/swift/AST/DiagnosticsDriver.def b/include/swift/AST/DiagnosticsDriver.def index ccce85380c3f7..f1c1fe53d07f3 100644 --- a/include/swift/AST/DiagnosticsDriver.def +++ b/include/swift/AST/DiagnosticsDriver.def @@ -193,8 +193,5 @@ WARNING(warn_drv_darwin_sdk_invalid_settings, none, REMARK(remark_forwarding_to_new_driver, none, "new Swift driver at '%0' will be used", (StringRef)) -REMARK(remark_forwarding_driver_not_there, none, - "new Swift driver at '%0' cannot be found; C++ driver will be used", (StringRef)) - #define UNDEFINE_DIAGNOSTIC_MACROS #include "DefineDiagnosticMacros.h" diff --git a/include/swift/AST/DiagnosticsParse.def b/include/swift/AST/DiagnosticsParse.def index b2398cc9dd0c3..71b6ae0e3b5f8 100644 --- a/include/swift/AST/DiagnosticsParse.def +++ b/include/swift/AST/DiagnosticsParse.def @@ -1222,6 +1222,8 @@ ERROR(super_in_closure_with_capture,none, NOTE(super_in_closure_with_capture_here,none, "'self' explicitly captured here", ()) +ERROR(try_before_await,none, "'await' must precede 'try'", ()) + // Tuples and parenthesized expressions ERROR(expected_expr_in_expr_list,none, "expected expression in list of expressions", ()) @@ -1567,6 +1569,9 @@ ERROR(attr_specialize_missing_comma,none, 
ERROR(attr_specialize_unknown_parameter_name,none, "unknown parameter %0 in '_specialize attribute'", (StringRef)) +ERROR(attr_specialize_unsupported_exported_true ,none, + "'exported: true' has no effect in '_specialize' attribute", (StringRef)) + ERROR(attr_specialize_expected_bool_value,none, "expected a boolean true or false value in '_specialize' attribute", ()) diff --git a/include/swift/AST/DiagnosticsSema.def b/include/swift/AST/DiagnosticsSema.def index 729fc27df58a7..282df4bd0ec3a 100644 --- a/include/swift/AST/DiagnosticsSema.def +++ b/include/swift/AST/DiagnosticsSema.def @@ -551,6 +551,10 @@ ERROR(throws_functiontype_mismatch,none, "invalid conversion from throwing function of type %0 to " "non-throwing function type %1", (Type, Type)) +ERROR(async_functiontype_mismatch,none, + "invalid conversion from 'async' function of type %0 to " + "synchronous function type %1", (Type, Type)) + // Key-path expressions. ERROR(expr_keypath_no_objc_runtime,none, "'#keyPath' can only be used with the Objective-C runtime", ()) @@ -908,6 +912,10 @@ REMARK(cross_import_added,none, "import of %0 and %1 triggered a cross-import of %2", (Identifier, Identifier, Identifier)) +REMARK(module_loaded,none, + "loaded module at %0", + (StringRef)) + // Operator decls ERROR(ambiguous_operator_decls,none, "ambiguous operator declarations found for operator", ()) @@ -995,7 +1003,7 @@ ERROR(did_not_call_method,none, ERROR(init_not_instance_member_use_assignment,none, "'init' is a member of the type; use assignment " - "to initalize the value instead", ()) + "to initialize the value instead", ()) ERROR(init_not_instance_member,none, "'init' is a member of the type; use 'type(of: ...)' to initialize " @@ -1108,9 +1116,13 @@ ERROR(missing_unwrap_optional_try,none, "value of optional type %0 not unwrapped; did you mean to use 'try!' " "or chain with '?'?", (Type)) -ERROR(missing_forced_downcast,none, - "%0 is not convertible to %1; " - "did you mean to use 'as!' 
to force downcast?", (Type, Type)) +ERROR(cannot_coerce_to_type, none, + "%0 is not convertible to %1", (Type, Type)) +NOTE(missing_forced_downcast, none, + "did you mean to use 'as!' to force downcast?", ()) +NOTE(missing_optional_downcast, none, + "did you mean to use 'as?' to conditionally downcast?", ()) + WARNING(coercion_may_fail_warning,none, "coercion from %0 to %1 may fail; use 'as?' or 'as!' instead", (Type, Type)) @@ -1784,6 +1796,10 @@ ERROR(spi_attribute_on_protocol_requirement,none, ERROR(spi_attribute_on_frozen_stored_properties,none, "stored property %0 cannot be declared '@_spi' in a '@frozen' struct", (DeclName)) +WARNING(spi_attribute_on_import_of_public_module,none, + "'@_spi' import of %0 will not include any SPI symbols; " + "%0 was built from the public interface at %1", + (DeclName, StringRef)) // Opaque return types ERROR(opaque_type_invalid_constraint,none, @@ -2611,6 +2627,9 @@ WARNING(duplicate_anyobject_class_inheritance,none, "redundant inheritance from 'AnyObject' and Swift 3 'class' keyword", ()) ERROR(inheritance_from_protocol_with_superclass,none, "inheritance from class-constrained protocol composition type %0", (Type)) +WARNING(anyobject_class_inheritance_deprecated,none, + "using 'class' keyword for protocol inheritance is deprecated; " + "use 'AnyObject' instead", ()) ERROR(multiple_inheritance,none, "multiple inheritance from classes %0 and %1", (Type, Type)) ERROR(inheritance_from_non_protocol_or_class,none, @@ -5467,7 +5486,10 @@ NOTE(previous_result_builder_here, none, "previous result builder specified here", ()) ERROR(result_builder_arguments, none, "result builder attributes cannot have arguments", ()) -WARNING(result_builder_disabled_by_return, none, +ERROR(result_builder_disabled_by_return, none, + "cannot use explicit 'return' statement in the body of result builder %0", + (Type)) +WARNING(result_builder_disabled_by_return_warn, none, "application of result builder %0 disabled by explicit 'return' " "statement", 
(Type)) NOTE(result_builder_remove_attr, none, diff --git a/include/swift/AST/ExtInfo.h b/include/swift/AST/ExtInfo.h index 0620c14285a0c..98e4208098ce2 100644 --- a/include/swift/AST/ExtInfo.h +++ b/include/swift/AST/ExtInfo.h @@ -827,6 +827,11 @@ class SILExtInfo { SILExtInfo withNoEscape(bool noEscape = true) const { return builder.withNoEscape(noEscape).build(); } + + + SILExtInfo withAsync(bool isAsync = true) const { + return builder.withAsync(isAsync).build(); + } bool isEqualTo(SILExtInfo other, bool useClangTypes) const { return builder.isEqualTo(other.builder, useClangTypes); diff --git a/include/swift/AST/IRGenOptions.h b/include/swift/AST/IRGenOptions.h index 5e6e68963fa76..bf227b569f2c2 100644 --- a/include/swift/AST/IRGenOptions.h +++ b/include/swift/AST/IRGenOptions.h @@ -140,6 +140,9 @@ struct PointerAuthOptions : clang::PointerAuthOptions { /// The parent async context stored within a child async context. PointerAuthSchema AsyncContextParent; + + /// The function to call to resume running in the parent context. 
+ PointerAuthSchema AsyncContextResume; }; enum class JITDebugArtifact : unsigned { diff --git a/include/swift/AST/KnownIdentifiers.def b/include/swift/AST/KnownIdentifiers.def index 261683cb7bf43..4f77e9a98b409 100644 --- a/include/swift/AST/KnownIdentifiers.def +++ b/include/swift/AST/KnownIdentifiers.def @@ -127,6 +127,7 @@ IDENTIFIER(super) IDENTIFIER(superDecoder) IDENTIFIER(superEncoder) IDENTIFIER_WITH_NAME(SwiftObject, "_TtCs12_SwiftObject") +IDENTIFIER(SwiftNativeNSObject) IDENTIFIER(to) IDENTIFIER(toRaw) IDENTIFIER(Type) @@ -142,7 +143,6 @@ IDENTIFIER(withKeywordArguments) IDENTIFIER(wrapped) IDENTIFIER(wrappedValue) IDENTIFIER(wrapperValue) -IDENTIFIER_WITH_NAME(actorStorage, "$__actor_storage") // Kinds of layout constraints IDENTIFIER_WITH_NAME(UnknownLayout, "_UnknownLayout") @@ -206,6 +206,10 @@ IDENTIFIER(arrayLiteral) IDENTIFIER(dictionaryLiteral) IDENTIFIER(className) +IDENTIFIER(_defaultActorInitialize) +IDENTIFIER(_defaultActorDestroy) +IDENTIFIER(_defaultActorEnqueue) + IDENTIFIER_(ErrorType) IDENTIFIER(Code) IDENTIFIER_(nsError) diff --git a/include/swift/AST/SemanticAttrs.def b/include/swift/AST/SemanticAttrs.def index ddb5a42e34c1f..5854a49582960 100644 --- a/include/swift/AST/SemanticAttrs.def +++ b/include/swift/AST/SemanticAttrs.def @@ -106,5 +106,10 @@ SEMANTICS_ATTR(KEYPATH_KVC_KEY_PATH_STRING, "keypath.kvcKeyPathString") /// consider inlining where to put these. SEMANTICS_ATTR(FORCE_EMIT_OPT_REMARK_PREFIX, "optremark") +/// An attribute that when attached to a class causes instances of the class to +/// be forbidden from having associated objects set upon them. This is only used +/// for testing purposes. 
+SEMANTICS_ATTR(OBJC_FORBID_ASSOCIATED_OBJECTS, "objc.forbidAssociatedObjects") + #undef SEMANTICS_ATTR diff --git a/include/swift/AST/TypeCheckRequests.h b/include/swift/AST/TypeCheckRequests.h index d053f58890e61..d705aa65fae29 100644 --- a/include/swift/AST/TypeCheckRequests.h +++ b/include/swift/AST/TypeCheckRequests.h @@ -853,6 +853,24 @@ class IsActorRequest : bool isCached() const { return true; } }; +/// Determine whether the given class is a default actor. +class IsDefaultActorRequest : + public SimpleRequest { +public: + using SimpleRequest::SimpleRequest; + +private: + friend SimpleRequest; + + bool evaluate(Evaluator &evaluator, ClassDecl *classDecl) const; + +public: + // Caching + bool isCached() const { return true; } +}; + /// Retrieve the static "shared" property within a global actor that provides /// the actor instance representing the global actor. /// diff --git a/include/swift/AST/TypeCheckerTypeIDZone.def b/include/swift/AST/TypeCheckerTypeIDZone.def index ebfafc30dd6c5..4c24a181a1888 100644 --- a/include/swift/AST/TypeCheckerTypeIDZone.def +++ b/include/swift/AST/TypeCheckerTypeIDZone.def @@ -89,6 +89,8 @@ SWIFT_REQUEST(TypeChecker, CanBeAsyncHandlerRequest, bool(FuncDecl *), Cached, NoLocationInfo) SWIFT_REQUEST(TypeChecker, IsActorRequest, bool(ClassDecl *), Cached, NoLocationInfo) +SWIFT_REQUEST(TypeChecker, IsDefaultActorRequest, bool(ClassDecl *), + Cached, NoLocationInfo) SWIFT_REQUEST(TypeChecker, GlobalActorInstanceRequest, VarDecl *(NominalTypeDecl *), Cached, NoLocationInfo) diff --git a/include/swift/AST/TypeDifferenceVisitor.h b/include/swift/AST/TypeDifferenceVisitor.h index bdad0415948bc..0e914ebaac282 100644 --- a/include/swift/AST/TypeDifferenceVisitor.h +++ b/include/swift/AST/TypeDifferenceVisitor.h @@ -107,17 +107,11 @@ class CanTypeDifferenceVisitor : public CanTypePairVisitor { // non-identical types. // These types are singleton and can't actually differ. 
-#define SINGLETON_TYPE(TYPE) \ - bool visit##TYPE(Can##TYPE type1, Can##TYPE type2) { \ +#define SINGLETON_TYPE(SHORT_ID, ID) \ + bool visit##ID##Type(Can##ID##Type type1, Can##ID##Type type2) {\ llvm_unreachable("singleton type that wasn't identical"); \ } - SINGLETON_TYPE(BuiltinIntegerLiteralType) - SINGLETON_TYPE(BuiltinRawPointerType) - SINGLETON_TYPE(BuiltinNativeObjectType) - SINGLETON_TYPE(BuiltinBridgeObjectType) - SINGLETON_TYPE(BuiltinUnsafeValueBufferType) - SINGLETON_TYPE(SILTokenType) -#undef SINGLETON_TYPE +#include "swift/AST/TypeNodes.def" bool visitBuiltinIntegerType(CanBuiltinIntegerType type1, CanBuiltinIntegerType type2) { diff --git a/include/swift/AST/TypeExpansionContext.h b/include/swift/AST/TypeExpansionContext.h index 761fa53217bd5..6e11947f53a35 100644 --- a/include/swift/AST/TypeExpansionContext.h +++ b/include/swift/AST/TypeExpansionContext.h @@ -89,6 +89,10 @@ class TypeExpansionContext { other.expansion == this->expansion; } + bool operator!=(const TypeExpansionContext &other) const { + return !operator==(other); + } + bool operator<(const TypeExpansionContext other) const { assert(other.inContext != this->inContext || other.isContextWholeModule == this->isContextWholeModule); diff --git a/include/swift/AST/TypeMatcher.h b/include/swift/AST/TypeMatcher.h index 05fdd68f26ce8..21f1d71f9f18b 100644 --- a/include/swift/AST/TypeMatcher.h +++ b/include/swift/AST/TypeMatcher.h @@ -102,14 +102,10 @@ class TypeMatcher { TRIVIAL_CASE(ErrorType) TRIVIAL_CASE(BuiltinIntegerType) - TRIVIAL_CASE(BuiltinIntegerLiteralType) TRIVIAL_CASE(BuiltinFloatType) - TRIVIAL_CASE(BuiltinRawPointerType) - TRIVIAL_CASE(BuiltinNativeObjectType) - TRIVIAL_CASE(BuiltinBridgeObjectType) - TRIVIAL_CASE(BuiltinUnsafeValueBufferType) TRIVIAL_CASE(BuiltinVectorType) - TRIVIAL_CASE(SILTokenType) +#define SINGLETON_TYPE(SHORT_ID, ID) TRIVIAL_CASE(ID##Type) +#include "swift/AST/TypeNodes.def" bool visitUnresolvedType(CanUnresolvedType firstType, Type secondType, Type 
sugaredFirstType) { diff --git a/include/swift/AST/TypeNodes.def b/include/swift/AST/TypeNodes.def index f827339dd5064..aea2a4ad67342 100644 --- a/include/swift/AST/TypeNodes.def +++ b/include/swift/AST/TypeNodes.def @@ -47,6 +47,12 @@ /// programs and it cannot be the type of an expression. /// The default behavior is TYPE(id, parent). +/// SINGLETON_TYPE(SHORT_ID, id) +/// This type is a singleton, i.e. there is exactly one instance of +/// it, which can be found as ASTContext.The#SHORT_ID#Type. +/// This is only expanded if SINGLETON_TYPE is defined, and in this +/// case no other macros are expanded. + #ifndef ALWAYS_CANONICAL_TYPE #define ALWAYS_CANONICAL_TYPE(id, parent) TYPE(id, parent) #endif @@ -87,6 +93,8 @@ #define LAST_TYPE(Id) #endif +#if !defined(SINGLETON_TYPE) + TYPE(Error, Type) UNCHECKED_TYPE(Unresolved, Type) UNCHECKED_TYPE(Hole, Type) @@ -96,7 +104,9 @@ ABSTRACT_TYPE(Builtin, Type) BUILTIN_TYPE(BuiltinIntegerLiteral, AnyBuiltinIntegerType) TYPE_RANGE(AnyBuiltinInteger, BuiltinInteger, BuiltinIntegerLiteral) BUILTIN_TYPE(BuiltinFloat, BuiltinType) + BUILTIN_TYPE(BuiltinJob, BuiltinType) BUILTIN_TYPE(BuiltinRawPointer, BuiltinType) + BUILTIN_TYPE(BuiltinRawUnsafeContinuation, BuiltinType) BUILTIN_TYPE(BuiltinNativeObject, BuiltinType) BUILTIN_TYPE(BuiltinBridgeObject, BuiltinType) BUILTIN_TYPE(BuiltinUnsafeValueBuffer, BuiltinType) @@ -165,6 +175,20 @@ ABSTRACT_SUGARED_TYPE(Sugar, Type) TYPE_RANGE(Sugar, Paren, Dictionary) LAST_TYPE(Dictionary) // Sugared types are last to make isa() fast. 
+#endif + +#ifdef SINGLETON_TYPE +SINGLETON_TYPE(IntegerLiteral, BuiltinIntegerLiteral) +SINGLETON_TYPE(Job, BuiltinJob) +SINGLETON_TYPE(RawPointer, BuiltinRawPointer) +SINGLETON_TYPE(RawUnsafeContinuation, BuiltinRawUnsafeContinuation) +SINGLETON_TYPE(NativeObject, BuiltinNativeObject) +SINGLETON_TYPE(BridgeObject, BuiltinBridgeObject) +SINGLETON_TYPE(UnsafeValueBuffer, BuiltinUnsafeValueBuffer) +SINGLETON_TYPE(SILToken, SILToken) +#undef SINGLETON_TYPE +#endif + #undef TYPE_RANGE #undef ABSTRACT_SUGARED_TYPE #undef ABSTRACT_TYPE diff --git a/include/swift/AST/Types.h b/include/swift/AST/Types.h index 1a66121f58c56..a89e3184e64bf 100644 --- a/include/swift/AST/Types.h +++ b/include/swift/AST/Types.h @@ -1391,6 +1391,35 @@ class BuiltinRawPointerType : public BuiltinType { }; DEFINE_EMPTY_CAN_TYPE_WRAPPER(BuiltinRawPointerType, BuiltinType); +/// BuiltinRawContinuationType - The builtin raw unsafe continuation type. +/// In C, this is a non-null AsyncTask*. This pointer is completely +/// unmanaged (the unresumed task is self-owning), but has more spare bits +/// than Builtin.RawPointer. +class BuiltinRawUnsafeContinuationType : public BuiltinType { + friend class ASTContext; + BuiltinRawUnsafeContinuationType(const ASTContext &C) + : BuiltinType(TypeKind::BuiltinRawUnsafeContinuation, C) {} +public: + static bool classof(const TypeBase *T) { + return T->getKind() == TypeKind::BuiltinRawUnsafeContinuation; + } +}; +DEFINE_EMPTY_CAN_TYPE_WRAPPER(BuiltinRawUnsafeContinuationType, BuiltinType); + +/// BuiltinJobType - The builtin job type. In C, this is a +/// non-null Job*. This pointer is completely unmanaged (the unscheduled +/// job is self-owning), but has more spare bits than Builtin.RawPointer. 
+class BuiltinJobType : public BuiltinType { + friend class ASTContext; + BuiltinJobType(const ASTContext &C) + : BuiltinType(TypeKind::BuiltinJob, C) {} +public: + static bool classof(const TypeBase *T) { + return T->getKind() == TypeKind::BuiltinJob; + } +}; +DEFINE_EMPTY_CAN_TYPE_WRAPPER(BuiltinJobType, BuiltinType); + /// BuiltinNativeObjectType - The builtin opaque object-pointer type. /// Useful for keeping an object alive when it is otherwise being /// manipulated via an unsafe pointer type. diff --git a/include/swift/Basic/InlineBitfield.h b/include/swift/Basic/InlineBitfield.h index b5862919944df..b41ae018d4edf 100644 --- a/include/swift/Basic/InlineBitfield.h +++ b/include/swift/Basic/InlineBitfield.h @@ -81,6 +81,32 @@ namespace swift { LLVM_PACKED_END \ static_assert(sizeof(T##Bitfield) <= 8, "Bitfield overflow") +/// Define a full bitfield for type 'T' that uses all of the remaining bits in +/// the inline bitfield. We allow for 'T' to have a single generic parameter. +/// +/// For optimal code gen, place naturally sized fields at the end, with the +/// largest naturally sized field at the very end. For example: +/// +/// SWIFT_INLINE_BITFIELD_FULL(Foo, Bar, 1+8+16, +/// flag : 1, +/// : NumPadBits, // pad the center, not the end +/// x : 8, +/// y : 16 +/// ); +/// +/// NOTE: All instances of Foo will access via the same bitfield entry even if +/// they differ in the templated value! +#define SWIFT_INLINE_BITFIELD_FULL_TEMPLATE(T, U, C, ...) \ + LLVM_PACKED_START \ + class T##Bitfield { \ + template \ + friend class T; \ + enum { NumPadBits = 64 - (Num##U##Bits + (C)) }; \ + uint64_t : Num##U##Bits, __VA_ARGS__; \ + } T; \ + LLVM_PACKED_END \ + static_assert(sizeof(T##Bitfield) <= 8, "Bitfield overflow") + /// Define an empty bitfield for type 'T'. 
#define SWIFT_INLINE_BITFIELD_EMPTY(T, U) \ enum { Num##T##Bits = Num##U##Bits } diff --git a/include/swift/Basic/LangOptions.h b/include/swift/Basic/LangOptions.h index b79cbf8c1be35..315e1c2908390 100644 --- a/include/swift/Basic/LangOptions.h +++ b/include/swift/Basic/LangOptions.h @@ -129,6 +129,9 @@ namespace swift { /// overlay. bool EnableCrossImportRemarks = false; + /// Emit a remark after loading a module. + bool EnableModuleLoadingRemarks = false; + /// /// Support for alternate usage modes /// @@ -248,6 +251,9 @@ namespace swift { /// Disable the implicit import of the _Concurrency module. bool DisableImplicitConcurrencyModuleImport = false; + /// Enable experimental support for `@_specialize(exported: true,...)` . + bool EnableExperimentalPrespecialization = false; + /// Should we check the target OSs of serialized modules to see that they're /// new enough? bool EnableTargetOSChecking = true; diff --git a/include/swift/Demangling/DemangleNodes.def b/include/swift/Demangling/DemangleNodes.def index 1d464be0d86cf..6d551cb793c21 100644 --- a/include/swift/Demangling/DemangleNodes.def +++ b/include/swift/Demangling/DemangleNodes.def @@ -104,6 +104,7 @@ NODE(GenericProtocolWitnessTableInstantiationFunction) NODE(ResilientProtocolWitnessTable) NODE(GenericSpecialization) NODE(GenericSpecializationNotReAbstracted) +NODE(GenericSpecializationInResilienceDomain) NODE(GenericSpecializationParam) NODE(GenericSpecializationPrespecialized) NODE(InlinedGenericFunction) diff --git a/include/swift/Driver/Compilation.h b/include/swift/Driver/Compilation.h index 2dd41bfb57582..c07537bcfe8a1 100644 --- a/include/swift/Driver/Compilation.h +++ b/include/swift/Driver/Compilation.h @@ -24,6 +24,7 @@ #include "swift/Basic/OutputFileMap.h" #include "swift/Basic/Statistic.h" #include "swift/Driver/Driver.h" +#include "swift/Driver/FineGrainedDependencyDriverGraph.h" #include "swift/Driver/Job.h" #include "swift/Driver/Util.h" #include "llvm/ADT/StringRef.h" @@ -78,6 +79,29 @@ 
using CommandSet = llvm::SmallPtrSet; class Compilation { public: + struct Result { + /// Set to true if any job exits abnormally (i.e. crashes). + bool hadAbnormalExit; + /// The exit code of this driver process. + int exitCode; + /// The dependency graph built up during the compilation of this module. + /// + /// This data is used for cross-module module dependencies. + fine_grained_dependencies::ModuleDepGraph depGraph; + + Result(const Result &) = delete; + Result &operator=(const Result &) = delete; + + Result(Result &&) = default; + Result &operator=(Result &&) = default; + + /// Construct a \c Compilation::Result from just an exit code. + static Result code(int code) { + return Compilation::Result{false, code, + fine_grained_dependencies::ModuleDepGraph()}; + } + }; + class IncrementalSchemeComparator { const bool EnableIncrementalBuildWhenConstructed; const bool &EnableIncrementalBuild; @@ -490,7 +514,7 @@ class Compilation { /// /// \returns result code for the Compilation's Jobs; 0 indicates success and /// -2 indicates that one of the Compilation's Jobs crashed during execution - int performJobs(std::unique_ptr &&TQ); + Compilation::Result performJobs(std::unique_ptr &&TQ); /// Returns whether the callee is permitted to pass -emit-loaded-module-trace /// to a frontend job. @@ -534,13 +558,11 @@ class Compilation { private: /// Perform all jobs. /// - /// \param[out] abnormalExit Set to true if any job exits abnormally (i.e. - /// crashes). /// \param TQ The task queue on which jobs will be scheduled. /// /// \returns exit code of the first failed Job, or 0 on success. If a Job /// crashes during execution, a negative value will be returned. - int performJobsImpl(bool &abnormalExit, std::unique_ptr &&TQ); + Compilation::Result performJobsImpl(std::unique_ptr &&TQ); /// Performs a single Job by executing in place, if possible. 
/// @@ -550,7 +572,7 @@ class Compilation { /// will no longer exist, or it will call exit() if the program was /// successfully executed. In the event of an error, this function will return /// a negative value indicating a failure to execute. - int performSingleCommand(const Job *Cmd); + Compilation::Result performSingleCommand(const Job *Cmd); }; } // end namespace driver diff --git a/include/swift/Driver/FineGrainedDependencyDriverGraph.h b/include/swift/Driver/FineGrainedDependencyDriverGraph.h index ddbf729f589db..b09288068b116 100644 --- a/include/swift/Driver/FineGrainedDependencyDriverGraph.h +++ b/include/swift/Driver/FineGrainedDependencyDriverGraph.h @@ -188,8 +188,8 @@ class ModuleDepGraph { std::unordered_map dotFileSequenceNumber; public: - const bool verifyFineGrainedDependencyGraphAfterEveryImport; - const bool emitFineGrainedDependencyDotFileAfterEveryImport; + bool verifyFineGrainedDependencyGraphAfterEveryImport; + bool emitFineGrainedDependencyDotFileAfterEveryImport; private: /// If tracing dependencies, holds a vector used to hold the current path @@ -203,7 +203,7 @@ class ModuleDepGraph { dependencyPathsToJobs; /// For helping with performance tuning, may be null: - UnifiedStatsReporter *const stats; + UnifiedStatsReporter *stats; //============================================================================== // MARK: ModuleDepGraph - mutating dependencies @@ -493,7 +493,12 @@ class ModuleDepGraph { template std::vector - findJobsToRecompileWhenNodesChange(const Nodes &); + findJobsToRecompileWhenNodesChange(const Nodes &nodes) { + std::vector foundDependents; + for (ModuleDepGraphNode *n : nodes) + findPreviouslyUntracedDependents(foundDependents, n); + return jobsContaining(foundDependents); + } private: std::vector diff --git a/include/swift/Frontend/Frontend.h b/include/swift/Frontend/Frontend.h index 8e13b48b5e6a8..ec9f5b44d85c3 100644 --- a/include/swift/Frontend/Frontend.h +++ b/include/swift/Frontend/Frontend.h @@ -572,7 +572,9 @@ 
class CompilerInstance { /// Return the buffer ID if it is not already compiled, or None if so. /// Set failed on failure. - Optional getRecordedBufferID(const InputFile &input, bool &failed); + Optional getRecordedBufferID(const InputFile &input, + const bool shouldRecover, + bool &failed); /// Given an input file, return a buffer to use for its contents, /// and a buffer for the corresponding module doc file if one exists. diff --git a/include/swift/Frontend/FrontendInputsAndOutputs.h b/include/swift/Frontend/FrontendInputsAndOutputs.h index f739f9f511aaa..8fc939c483217 100644 --- a/include/swift/Frontend/FrontendInputsAndOutputs.h +++ b/include/swift/Frontend/FrontendInputsAndOutputs.h @@ -46,6 +46,9 @@ class FrontendInputsAndOutputs { /// Punt where needed to enable batch mode experiments. bool AreBatchModeChecksBypassed = false; + /// Recover missing inputs. Note that recovery itself is users responsibility. + bool ShouldRecoverMissingInputs = false; + public: bool areBatchModeChecksBypassed() const { return AreBatchModeChecksBypassed; } void setBypassBatchModeChecks(bool bbc) { AreBatchModeChecksBypassed = bbc; } @@ -66,6 +69,9 @@ class FrontendInputsAndOutputs { bool isWholeModule() const { return !hasPrimaryInputs(); } + bool shouldRecoverMissingInputs() { return ShouldRecoverMissingInputs; } + void setShouldRecoverMissingInputs() { ShouldRecoverMissingInputs = true; } + // Readers: // All inputs: diff --git a/include/swift/Option/FrontendOptions.td b/include/swift/Option/FrontendOptions.td index 2a8a412f046a1..5a8cfcfaecb5f 100644 --- a/include/swift/Option/FrontendOptions.td +++ b/include/swift/Option/FrontendOptions.td @@ -223,6 +223,10 @@ let Flags = [FrontendOption, NoDriverOption, HelpHidden, ModuleInterfaceOption] HelpText<"Disable requiring uses of @objc to require importing the " "Foundation module">; + def enable_experimental_prespecialization : + Flag<["-"], "enable-experimental-prespecialization">, + HelpText<"Enable experimental 
pre-specialization support">; + def enable_experimental_concurrency : Flag<["-"], "enable-experimental-concurrency">, HelpText<"Enable experimental concurrency model">; diff --git a/include/swift/Option/Options.td b/include/swift/Option/Options.td index d8ea67548061d..39983bb00fe6d 100644 --- a/include/swift/Option/Options.td +++ b/include/swift/Option/Options.td @@ -336,6 +336,10 @@ def emit_cross_import_remarks : Flag<["-"], "Rcross-import">, Flags<[FrontendOption, DoesNotAffectIncrementalBuild]>, HelpText<"Emit a remark if a cross-import of a module is triggered.">; +def remark_loading_module : Flag<["-"], "Rmodule-loading">, + Flags<[FrontendOption, DoesNotAffectIncrementalBuild]>, + HelpText<"Emit a remark and file path of each loaded module">; + def emit_tbd : Flag<["-"], "emit-tbd">, HelpText<"Emit a TBD file">, Flags<[FrontendOption, NoInteractiveOption, SupplementaryOutput]>; diff --git a/include/swift/Parse/Parser.h b/include/swift/Parse/Parser.h index 7de4f4c4a7cd7..63b9a874db5b1 100644 --- a/include/swift/Parse/Parser.h +++ b/include/swift/Parse/Parser.h @@ -553,14 +553,13 @@ class Parser { return Tok.getLoc().getAdvancedLoc(-LeadingTrivia.getLength()); } - SourceLoc consumeIdentifier(Identifier *Result = nullptr, - bool allowDollarIdentifier = false) { + SourceLoc consumeIdentifier(Identifier &Result, bool diagnoseDollarPrefix) { assert(Tok.isAny(tok::identifier, tok::kw_self, tok::kw_Self)); - if (Result) - *Result = Context.getIdentifier(Tok.getText()); + assert(Result.empty()); + Result = Context.getIdentifier(Tok.getText()); if (Tok.getText()[0] == '$') - diagnoseDollarIdentifier(Tok, allowDollarIdentifier); + diagnoseDollarIdentifier(Tok, diagnoseDollarPrefix); return consumeToken(); } @@ -573,15 +572,17 @@ class Parser { Result = Context.getIdentifier(Tok.getText()); if (Tok.getText()[0] == '$') - diagnoseDollarIdentifier(Tok); + diagnoseDollarIdentifier(Tok, /*diagnoseDollarPrefix=*/true); } return consumeToken(); } /// When we have a token that 
is an identifier starting with '$', /// diagnose it if not permitted in this mode. + /// \param diagnoseDollarPrefix Whether to diagnose dollar-prefixed + /// identifiers in addition to a standalone '$'. void diagnoseDollarIdentifier(const Token &tok, - bool allowDollarIdentifier = false) { + bool diagnoseDollarPrefix) { assert(tok.getText()[0] == '$'); // If '$' is not guarded by backticks, offer @@ -592,7 +593,7 @@ class Parser { return; } - if (allowDollarIdentifier) + if (!diagnoseDollarPrefix) return; if (tok.getText().size() == 1 || Context.LangOpts.EnableDollarIdentifiers || @@ -790,24 +791,20 @@ class Parser { /// its name in \p Result. Otherwise, emit an error. /// /// \returns false on success, true on error. - bool parseIdentifier(Identifier &Result, SourceLoc &Loc, const Diagnostic &D); - + bool parseIdentifier(Identifier &Result, SourceLoc &Loc, const Diagnostic &D, + bool diagnoseDollarPrefix); + /// Consume an identifier with a specific expected name. This is useful for /// contextually sensitive keywords that must always be present. bool parseSpecificIdentifier(StringRef expected, SourceLoc &Loc, const Diagnostic &D); - template - bool parseIdentifier(Identifier &Result, Diag ID, - ArgTypes... Args) { - SourceLoc L; - return parseIdentifier(Result, L, Diagnostic(ID, Args...)); - } - template bool parseIdentifier(Identifier &Result, SourceLoc &L, - Diag ID, ArgTypes... Args) { - return parseIdentifier(Result, L, Diagnostic(ID, Args...)); + bool diagnoseDollarPrefix, Diag ID, + ArgTypes... Args) { + return parseIdentifier(Result, L, Diagnostic(ID, Args...), + diagnoseDollarPrefix); } template @@ -820,19 +817,14 @@ class Parser { /// Consume an identifier or operator if present and return its name /// in \p Result. Otherwise, emit an error and return true. 
bool parseAnyIdentifier(Identifier &Result, SourceLoc &Loc, - const Diagnostic &D); + const Diagnostic &D, bool diagnoseDollarPrefix); template - bool parseAnyIdentifier(Identifier &Result, Diag ID, - ArgTypes... Args) { - SourceLoc L; - return parseAnyIdentifier(Result, L, Diagnostic(ID, Args...)); - } - - template - bool parseAnyIdentifier(Identifier &Result, SourceLoc &L, + bool parseAnyIdentifier(Identifier &Result, bool diagnoseDollarPrefix, Diag ID, ArgTypes... Args) { - return parseAnyIdentifier(Result, L, Diagnostic(ID, Args...)); + SourceLoc L; + return parseAnyIdentifier(Result, L, Diagnostic(ID, Args...), + diagnoseDollarPrefix); } /// \brief Parse an unsigned integer and returns it in \p Result. On failure diff --git a/include/swift/Reflection/Records.h b/include/swift/Reflection/Records.h index c43ade467c6b0..80478ccd3aea2 100644 --- a/include/swift/Reflection/Records.h +++ b/include/swift/Reflection/Records.h @@ -92,6 +92,10 @@ class FieldRecord { bool isIndirectCase() const { return Flags.isIndirectCase(); } + + bool isVar() const { + return Flags.isVar(); + } }; struct FieldRecordIterator { diff --git a/include/swift/Runtime/Atomic.h b/include/swift/Runtime/Atomic.h index 655078a7d6af4..67d0bca1018b7 100644 --- a/include/swift/Runtime/Atomic.h +++ b/include/swift/Runtime/Atomic.h @@ -17,6 +17,10 @@ #ifndef SWIFT_RUNTIME_ATOMIC_H #define SWIFT_RUNTIME_ATOMIC_H +#include "swift/Runtime/Config.h" +#include +#include + // FIXME: Workaround for rdar://problem/18889711. 'Consume' does not require // a barrier on ARM64, but LLVM doesn't know that. 
Although 'relaxed' // is formally UB by C++11 language rules, we should be OK because neither @@ -28,4 +32,139 @@ # define SWIFT_MEMORY_ORDER_CONSUME (std::memory_order_consume) #endif +#if defined(_M_ARM) || defined(__arm__) || defined(__aarch64__) +#define SWIFT_HAS_MSVC_ARM_ATOMICS 1 +#else +#define SWIFT_HAS_MSVC_ARM_ATOMICS 0 +#endif + +namespace swift { +namespace impl { + +/// The default implementation for swift::atomic, which just wraps +/// std::atomic with minor differences. +/// +/// TODO: should we make this use non-atomic operations when the runtime +/// is single-threaded? +template +class alignas(Size) atomic_impl { + std::atomic value; +public: + constexpr atomic_impl(Value value) : value(value) {} + + /// Force clients to always pass an order. + Value load(std::memory_order order) { + return value.load(order); + } + + /// Force clients to always pass an order. + bool compare_exchange_weak(Value &oldValue, Value newValue, + std::memory_order successOrder, + std::memory_order failureOrder) { + return value.compare_exchange_weak(oldValue, newValue, successOrder, + failureOrder); + } +}; + +#if defined(_WIN64) +#include + +/// MSVC's std::atomic uses an inline spin lock for 16-byte atomics, +/// which is not only unnecessarily inefficient but also doubles the size +/// of the atomic object. We don't care about supporting ancient +/// AMD processors that lack cmpxchg16b, so we just use the intrinsic. 
+template +class alignas(2 * sizeof(void*)) atomic_impl { + volatile Value atomicValue; +public: + constexpr atomic_impl(Value initialValue) : atomicValue(initialValue) {} + + atomic_impl(const atomic_impl &) = delete; + atomic_impl &operator=(const atomic_impl &) = delete; + + Value load(std::memory_order order) { + assert(order == std::memory_order_relaxed || + order == std::memory_order_acquire || + order == std::memory_order_consume); + // Aligned SSE loads are atomic on every known processor, but + // the only 16-byte access that's architecturally guaranteed to be + // atomic is lock cmpxchg16b, so we do that with identical comparison + // and new values purely for the side-effect of updating the old value. + __int64 resultArray[2] = {}; +#if SWIFT_HAS_MSVC_ARM_ATOMICS + if (order != std::memory_order_acquire) { + (void) _InterlockedCompareExchange128_nf( + reinterpret_cast(&atomicValue), + 0, 0, resultArray); + } else { +#endif + (void) _InterlockedCompareExchange128( + reinterpret_cast(&atomicValue), + 0, 0, resultArray); +#if SWIFT_HAS_MSVC_ARM_ATOMICS + } +#endif + return reinterpret_cast(resultArray); + } + + bool compare_exchange_weak(Value &oldValue, Value newValue, + std::memory_order successOrder, + std::memory_order failureOrder) { + assert(failureOrder == std::memory_order_relaxed || + failureOrder == std::memory_order_acquire || + failureOrder == std::memory_order_consume); + assert(successOrder == std::memory_order_relaxed || + successOrder == std::memory_order_release); +#if SWIFT_HAS_MSVC_ARM_ATOMICS + if (successOrder == std::memory_order_relaxed && + failureOrder != std::memory_order_acquire) { + return _InterlockedCompareExchange128_nf( + reinterpret_cast(&atomicValue), + reinterpret_cast(&newValue)[1], + reinterpret_cast(&newValue)[0], + reinterpret_cast<__int64*>(&oldValue)); + } else if (successOrder == std::memory_order_relaxed) { + return _InterlockedCompareExchange128_acq( + reinterpret_cast(&atomicValue), + 
reinterpret_cast(&newValue)[1], + reinterpret_cast(&newValue)[0], + reinterpret_cast<__int64*>(&oldValue)); + } else if (failureOrder != std::memory_order_acquire) { + return _InterlockedCompareExchange128_rel( + reinterpret_cast(&atomicValue), + reinterpret_cast(&newValue)[1], + reinterpret_cast(&newValue)[0], + reinterpret_cast<__int64*>(&oldValue)); + } else { +#endif + return _InterlockedCompareExchange128( + reinterpret_cast(&atomicValue), + reinterpret_cast(&newValue)[1], + reinterpret_cast(&newValue)[0], + reinterpret_cast<__int64*>(&oldValue)); +#if SWIFT_HAS_MSVC_ARM_ATOMICS + } +#endif + } +}; + +#endif + +} // end namespace swift::impl + +/// A simple wrapper for std::atomic that provides the most important +/// interfaces and fixes the API bug where all of the orderings dafault +/// to sequentially-consistent. +/// +/// It also sometimes uses a different implementation in cases where +/// std::atomic has made unfortunate choices; our uses of this broadly +/// don't have the ABI-compatibility issues that std::atomic faces. 
+template +class atomic : public impl::atomic_impl { +public: + atomic(T value) : impl::atomic_impl(value) {} +}; + +} // end namespace swift + #endif diff --git a/include/swift/Runtime/Concurrency.h b/include/swift/Runtime/Concurrency.h index a19994da0989e..9a5d8ecf73905 100644 --- a/include/swift/Runtime/Concurrency.h +++ b/include/swift/Runtime/Concurrency.h @@ -20,6 +20,9 @@ #include "swift/ABI/TaskStatus.h" namespace swift { +class DefaultActor; + +struct SwiftError; struct AsyncTaskAndContext { AsyncTask *Task; @@ -174,8 +177,7 @@ bool swift_task_removeStatusRecord(AsyncTask *task, TaskStatusRecord *record); SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift) -JobFlags -swift_task_getJobFlags(AsyncTask* task); +size_t swift_task_getJobFlags(AsyncTask* task); /// This should have the same representation as an enum like this: /// enum NearestTaskDeadline { @@ -206,6 +208,88 @@ swift_task_getNearestDeadline(AsyncTask *task); SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift) void swift_task_run(AsyncTask *taskToRun); +/// Switch the current task to a new executor if we aren't already +/// running on a compatible executor. +/// +/// The resumption function pointer and continuation should be set +/// appropriately in the task. +/// +/// Generally the compiler should inline a fast-path compatible-executor +/// check to avoid doing the suspension work. This function should +/// generally be tail-called, as it may continue executing the task +/// synchronously if possible. +SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swiftasync) +void swift_task_switch(AsyncTask *task, + ExecutorRef currentExecutor, + ExecutorRef newExecutor); + +/// Enqueue the given job to run asynchronously on the given executor. +/// +/// The resumption function pointer and continuation should be set +/// appropriately in the task. +/// +/// Generally you should call swift_task_switch to switch execution +/// synchronously when possible. 
+SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift) +void swift_task_enqueue(Job *job, ExecutorRef executor); + +/// Enqueue the given job to run asynchronously on the global +/// execution pool. +/// +/// The resumption function pointer and continuation should be set +/// appropriately in the task. +/// +/// Generally you should call swift_task_switch to switch execution +/// synchronously when possible. +SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift) +void swift_task_enqueueGlobal(Job *job); + +/// A hook to take over global enqueuing. +/// TODO: figure out a better abstraction plan than this. +SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift) +void (*swift_task_enqueueGlobal_hook)(Job *job); + +/// Initialize the runtime storage for a default actor. +SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift) +void swift_defaultActor_initialize(DefaultActor *actor); + +/// Destroy the runtime storage for a default actor. +SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift) +void swift_defaultActor_destroy(DefaultActor *actor); + +/// Enqueue a job on the default actor implementation. +/// +/// The job must be ready to run. Notably, if it's a task, that +/// means that the resumption function and context should have been +/// set appropriately. +/// +/// Jobs are assumed to be "self-consuming": once it starts running, +/// the job memory is invalidated and the executor should not access it +/// again. +/// +/// Jobs are generally expected to keep the actor alive during their +/// execution. +SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift) +void swift_defaultActor_enqueue(Job *job, DefaultActor *actor); + +/// Resume a task from its continuation, given a normal result value. +SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift) +void swift_continuation_resume(/* +1 */ OpaqueValue *result, + void *continuation, + const Metadata *resumeType); + +/// Resume a task from its throwing continuation, given a normal result value. 
+SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift) +void swift_continuation_throwingResume(/* +1 */ OpaqueValue *result, + void *continuation, + const Metadata *resumeType); + +/// Resume a task from its throwing continuation by throwing an error. +SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift) +void swift_continuation_throwingResumeWithError(/* +1 */ SwiftError *error, + void *continuation, + const Metadata *resumeType); + } #endif diff --git a/include/swift/Runtime/Config.h b/include/swift/Runtime/Config.h index 64c726311e455..ec67afbf8f6c9 100644 --- a/include/swift/Runtime/Config.h +++ b/include/swift/Runtime/Config.h @@ -176,6 +176,14 @@ extern uintptr_t __COMPATIBILITY_LIBRARIES_CANNOT_CHECK_THE_IS_SWIFT_BIT_DIRECTL #define SWIFT_INDIRECT_RESULT #endif +// SWIFT_CC(swiftasync) is the Swift async calling convention. +// We assume that it supports mandatory tail call elimination. +#if __has_attribute(swiftasynccall) +#define SWIFT_CC_swiftasync __attribute__((swiftasynccall)) +#else +#define SWIFT_CC_swiftasync SWIFT_CC_swift +#endif + // SWIFT_CC(PreserveMost) is used in the runtime implementation to prevent // register spills on the hot path. 
// It is not safe to use for external calls; the loader's lazy function diff --git a/include/swift/Runtime/RuntimeFunctions.def b/include/swift/Runtime/RuntimeFunctions.def index 32c378505727e..266cd1c1c9548 100644 --- a/include/swift/Runtime/RuntimeFunctions.def +++ b/include/swift/Runtime/RuntimeFunctions.def @@ -1518,6 +1518,40 @@ FUNCTION(TaskCreateFutureFunc, TaskContinuationFunctionPtrTy, SizeTy), ATTRS(NoUnwind, ArgMemOnly)) +// void swift_task_switch(AsyncTask *task, +// ExecutorRef currentExecutor, +// ExecutorRef newExecutor); +FUNCTION(TaskSwitchFunc, + swift_task_switch, SwiftCC, + ConcurrencyAvailability, + RETURNS(VoidTy), + ARGS(SwiftTaskPtrTy, SwiftExecutorPtrTy, SwiftExecutorPtrTy), + ATTRS(NoUnwind)) + +// AutoDiffLinearMapContext *swift_autoDiffCreateLinearMapContext(size_t); +FUNCTION(AutoDiffCreateLinearMapContext, + swift_autoDiffCreateLinearMapContext, SwiftCC, + DifferentiationAvailability, + RETURNS(RefCountedPtrTy), + ARGS(SizeTy), + ATTRS(NoUnwind, ArgMemOnly)) + +// void *swift_autoDiffProjectTopLevelSubcontext(AutoDiffLinearMapContext *); +FUNCTION(AutoDiffProjectTopLevelSubcontext, + swift_autoDiffProjectTopLevelSubcontext, SwiftCC, + DifferentiationAvailability, + RETURNS(Int8PtrTy), + ARGS(RefCountedPtrTy), + ATTRS(NoUnwind, ArgMemOnly)) + +// void *swift_autoDiffAllocateSubcontext(AutoDiffLinearMapContext *, size_t); +FUNCTION(AutoDiffAllocateSubcontext, + swift_autoDiffAllocateSubcontext, SwiftCC, + DifferentiationAvailability, + RETURNS(Int8PtrTy), + ARGS(RefCountedPtrTy, SizeTy), + ATTRS(NoUnwind, ArgMemOnly)) + #undef RETURNS #undef ARGS #undef ATTRS diff --git a/include/swift/Runtime/ThreadLocal.h b/include/swift/Runtime/ThreadLocal.h new file mode 100644 index 0000000000000..1af2eb9cb38a2 --- /dev/null +++ b/include/swift/Runtime/ThreadLocal.h @@ -0,0 +1,136 @@ +//===--- ThreadLocal.h - Thread-local storage -------------------*- C++ -*-===// +// +// This source file is part of the Swift.org open source project +// +// 
Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// +// +// Declarations and macros for working with thread-local storage in the +// Swift runtime. +// +//===----------------------------------------------------------------------===// + +#ifndef SWIFT_RUNTIME_THREADLOCAL_H +#define SWIFT_RUNTIME_THREADLOCAL_H + +/// SWIFT_RUNTIME_SUPPORTS_THREAD_LOCAL - Does the current configuration +/// allow the use of SWIFT_RUNTIME_ATTRIBUTE_THREAD_LOCAL? +#if defined(SWIFT_STDLIB_SINGLE_THREADED_RUNTIME) +// We define SWIFT_RUNTIME_ATTRIBUTE_THREAD_LOCAL to nothing in this +// configuration and just use a global variable, so this is okay. +#define SWIFT_RUNTIME_SUPPORTS_THREAD_LOCAL 1 +#elif __has_feature(tls) +// If __has_feature reports that TLS is available, use it. +#define SWIFT_RUNTIME_SUPPORTS_THREAD_LOCAL 1 +#elif !defined(__clang__) +// If we're not using Clang, assume that __has_feature is unreliable +// and that we can safely use TLS. +#define SWIFT_RUNTIME_SUPPORTS_THREAD_LOCAL 1 +#else +// Otherwise we can't use TLS and have to fall back on something else. +#define SWIFT_RUNTIME_SUPPORTS_THREAD_LOCAL 0 +#endif + +/// SWIFT_RUNTIME_THREAD_LOCAL - Declare that something is a +/// thread-local variable in the runtime. +#if defined(SWIFT_STDLIB_SINGLE_THREADED_RUNTIME) +// In a single-threaded runtime, thread-locals are global. +#define SWIFT_RUNTIME_ATTRIBUTE_THREAD_LOCAL +#elif defined(__GNUC__) +// In GCC-compatible compilers, we prefer __thread because it's understood +// to guarantee a constant initializer, which permits more efficient access +// patterns. +#define SWIFT_RUNTIME_ATTRIBUTE_THREAD_LOCAL __thread +#else +// Otherwise, just fall back on the standard C++ feature. 
+#define SWIFT_RUNTIME_ATTRIBUTE_THREAD_LOCAL thread_local +#endif + +// Implementation of SWIFT_RUNTIME_DECLARE_THREAD_LOCAL +#if !SWIFT_RUNTIME_SUPPORTS_THREAD_LOCAL +#include <dispatch/dispatch.h> +#include <pthread.h> +#endif + +namespace swift { +// A wrapper class for thread-local storage. +// +// - On platforms that report SWIFT_RUNTIME_SUPPORTS_THREAD_LOCAL +// above, an object of this type is declared with +// SWIFT_RUNTIME_ATTRIBUTE_THREAD_LOCAL. This makes the object +// itself thread-local, and no internal support is required. +// +// Note that this includes platforms that set +// SWIFT_STDLIB_SINGLE_THREADED_RUNTIME, for which +// SWIFT_RUNTIME_ATTRIBUTE_THREAD_LOCAL is empty; +// thread-local declarations then create an ordinary global. +// +// - On platforms that don't report SWIFT_RUNTIME_SUPPORTS_THREAD_LOCAL, +// we have to simulate thread-local storage. Fortunately, all of +// these platforms (at least for now) support pthread_getspecific. +template <class T> +class ThreadLocal { + static_assert(sizeof(T) <= sizeof(void*), "cannot store more than a pointer"); + +#if SWIFT_RUNTIME_SUPPORTS_THREAD_LOCAL + T value; +#else + // We rely on the zero-initialization of objects with static storage + // duration. 
+ dispatch_once_t once; + pthread_key_t key; + + pthread_key_t getKey() { + dispatch_once_f(&once, this, [](void *ctx) { + pthread_key_create(&reinterpret_cast<ThreadLocal *>(ctx)->key, nullptr); + }); + return key; + } +#endif + +public: + constexpr ThreadLocal() {} + + T get() { +#if SWIFT_RUNTIME_SUPPORTS_THREAD_LOCAL + return value; +#else + void *storedValue = pthread_getspecific(getKey()); + T value; + memcpy(&value, &storedValue, sizeof(T)); + return value; +#endif + } + + void set(T newValue) { +#if SWIFT_RUNTIME_SUPPORTS_THREAD_LOCAL + value = newValue; +#else + void *storedValue; + memcpy(&storedValue, &newValue, sizeof(T)); + pthread_setspecific(getKey(), storedValue); +#endif + } +}; +} // end namespace swift + +/// SWIFT_RUNTIME_DECLARE_THREAD_LOCAL(TYPE, NAME) - Declare a variable +/// to be a thread-local variable. The declaration must have static +/// storage duration; it may be prefixed with "static". +/// +/// Because of the fallback path, the default-initialization of the +/// type must be equivalent to a bitwise zero-initialization, and the +/// type must be small and trivially copyable. 
+#if SWIFT_RUNTIME_SUPPORTS_THREAD_LOCAL +#define SWIFT_RUNTIME_DECLARE_THREAD_LOCAL(TYPE, NAME) \ + SWIFT_RUNTIME_ATTRIBUTE_THREAD_LOCAL swift::ThreadLocal<TYPE> NAME +#else +#define SWIFT_RUNTIME_DECLARE_THREAD_LOCAL(TYPE, NAME) \ + swift::ThreadLocal<TYPE> NAME +#endif + +#endif diff --git a/include/swift/SIL/BasicBlockUtils.h b/include/swift/SIL/BasicBlockUtils.h index 3cb7dc4be2102..98983fe7b5219 100644 --- a/include/swift/SIL/BasicBlockUtils.h +++ b/include/swift/SIL/BasicBlockUtils.h @@ -10,11 +10,12 @@ // //===----------------------------------------------------------------------===// -#ifndef SWIFT_SIL_DEADENDBLOCKS_H -#define SWIFT_SIL_DEADENDBLOCKS_H +#ifndef SWIFT_SIL_BASICBLOCKUTILS_H +#define SWIFT_SIL_BASICBLOCKUTILS_H #include "swift/SIL/SILValue.h" #include "llvm/ADT/SetVector.h" +#include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" namespace swift { @@ -88,6 +89,92 @@ class DeadEndBlocks { } return ReachableBlocks.empty(); } + + const SILFunction *getFunction() const { return F; } +}; + +/// A struct that contains the intermediate state used in computing +/// joint-dominance sets. Enables a pass to easily reuse the same small data +/// structures with clearing (noting that clearing our internal state does not +/// cause us to shrink meaning that once we malloc, we keep the malloced +/// memory). +struct JointPostDominanceSetComputer { + /// The worklist that drives the algorithm. + SmallVector<SILBasicBlock *, 32> worklist; + + /// A set that guards our worklist. Any block before it is added to worklist + /// should be checked against visitedBlocks. + SmallPtrSet<SILBasicBlock *, 32> visitedBlocks; + + /// The set of blocks where we begin our walk. + SmallPtrSet<SILBasicBlock *, 8> initialBlocks; + + /// A subset of our initial blocks that we found as a predecessor of another + /// block along our walk. + SmallVector<SILBasicBlock *, 32> reachableInputBlocks; + + /// As we process the worklist, any successors that we see that have not been + /// visited yet are placed in here. 
At the end of our worklist, any blocks + /// that remain here are "leaking blocks" that together with our initial set + /// would provide a jointly-postdominating set of our dominating value. + SmallSetVector<SILBasicBlock *, 8> blocksThatLeakIfNeverVisited; + + DeadEndBlocks &deadEndBlocks; + + JointPostDominanceSetComputer(DeadEndBlocks &deadEndBlocks) + : deadEndBlocks(deadEndBlocks) {} + + void clear() { + worklist.clear(); + visitedBlocks.clear(); + initialBlocks.clear(); + reachableInputBlocks.clear(); + blocksThatLeakIfNeverVisited.clear(); + } + + /// Compute joint-postdominating set for \p dominatingBlock and \p + /// dominatedBlockSet found by walking up the CFG from the latter to the + /// former. + /// + /// We pass back the following information via callbacks so our callers can + /// use whatever container they need to: + /// + /// * inputBlocksFoundDuringWalk: Any blocks from the "dominated + /// block set" that was found as a predecessor block during our traversal is + /// passed to this callback. These can occur for two reasons: + /// + /// 1. We actually had a block in \p dominatedBlockSet that was reachable + /// from another block in said set. This is a valid usage of the API + /// since it could be that the user does not care about such uses and + /// leave this callback empty. + /// + /// 2. We had a block in \p dominatedBlockSet that is in a sub-loop in the + /// loop-nest relative to \p dominatingBlock causing us to go around a + /// backedge and hit the block during our traversal. In this case, we + /// have already during the traversal passed the exiting blocks of the + /// sub-loop as joint postdominance completion set blocks. This is useful + /// if one is using this API for lifetime extension purposes of lifetime + /// ending uses and one needs to insert compensating copy_value at these + /// locations due to the lack of strong control-equivalence in between + /// the block and \p dominatingBlock. 
+ /// + /// + /// * foundJointPostDomSetCompletionBlocks: The set of blocks not in \p + /// dominatedBlockSet that together with \p dominatedBlockSet + /// jointly-postdominate \p dominatedBlock. This is "completing" the joint + /// post-dominance set. + /// + /// * inputBlocksInJointPostDomSet: Any of our input blocks that were never + /// found as a predecessor is passed to this callback. This block is in the + /// final minimal joint-postdominance set and is passed to this + /// callback. This is optional and we will avoid doing work if it is not + /// set. + void findJointPostDominatingSet( + SILBasicBlock *dominatingBlock, + ArrayRef<SILBasicBlock *> dominatedBlockSet, + function_ref<void(SILBasicBlock *)> inputBlocksFoundDuringWalk, + function_ref<void(SILBasicBlock *)> foundJointPostDomSetCompletionBlocks, + function_ref<void(SILBasicBlock *)> inputBlocksInJointPostDomSet = {}); }; } // namespace swift diff --git a/include/swift/SIL/GenericSpecializationMangler.h b/include/swift/SIL/GenericSpecializationMangler.h index 9af0dc34b46d5..3a94d0ffc1626 100644 --- a/include/swift/SIL/GenericSpecializationMangler.h +++ b/include/swift/SIL/GenericSpecializationMangler.h @@ -69,31 +69,36 @@ class SpecializationMangler : public Mangle::ASTMangler { // The mangler for specialized generic functions. 
class GenericSpecializationMangler : public SpecializationMangler { - SubstitutionMap SubMap; - bool isReAbstracted; - bool isInlined; - bool isPrespecializaton; + GenericSpecializationMangler(std::string origFuncName) + : SpecializationMangler(SpecializationPass::GenericSpecializer, + IsNotSerialized, origFuncName) {} + + GenericSignature getGenericSignature() { + assert(Function && "Need a SIL function to get a generic signature"); + return Function->getLoweredFunctionType()->getInvocationGenericSignature(); + } + + void appendSubstitutions(GenericSignature sig, SubstitutionMap subs); + + std::string manglePrespecialized(GenericSignature sig, + SubstitutionMap subs); public: - GenericSpecializationMangler(SILFunction *F, SubstitutionMap SubMap, - IsSerialized_t Serialized, bool isReAbstracted, - bool isInlined = false, - bool isPrespecializaton = false) + GenericSpecializationMangler(SILFunction *F, IsSerialized_t Serialized) : SpecializationMangler(SpecializationPass::GenericSpecializer, - Serialized, F), - SubMap(SubMap), isReAbstracted(isReAbstracted), isInlined(isInlined), - isPrespecializaton(isPrespecializaton) {} + Serialized, F) {} - GenericSpecializationMangler(std::string origFuncName, SubstitutionMap SubMap) - : SpecializationMangler(SpecializationPass::GenericSpecializer, - IsNotSerialized, origFuncName), - SubMap(SubMap), isReAbstracted(true), isInlined(false), - isPrespecializaton(true) {} + std::string mangleNotReabstracted(SubstitutionMap subs); - std::string mangle(GenericSignature Sig = GenericSignature()); + std::string mangleReabstracted(SubstitutionMap subs, bool alternativeMangling); - // TODO: This utility should move from the libswiftSILOptimizer to - // libswiftSIL. 
+ std::string mangleForDebugInfo(GenericSignature sig, SubstitutionMap subs, + bool forInlining); + + std::string manglePrespecialized(SubstitutionMap subs) { + return manglePrespecialized(getGenericSignature(), subs); + } + static std::string manglePrespecialization(std::string unspecializedName, GenericSignature genericSig, GenericSignature specializedSig); diff --git a/include/swift/SIL/OwnershipUtils.h b/include/swift/SIL/OwnershipUtils.h index 798f9b4617c08..673f144ca1450 100644 --- a/include/swift/SIL/OwnershipUtils.h +++ b/include/swift/SIL/OwnershipUtils.h @@ -32,33 +32,22 @@ class DeadEndBlocks; /// Returns true if v is an address or trivial. bool isValueAddressOrTrivial(SILValue v); -/// These operations forward both owned and guaranteed ownership. -bool isOwnershipForwardingValueKind(SILNodeKind kind); - -/// Is this an operand that can forward both owned and guaranteed ownership -/// kinds. +/// Is this an operand that can forward both owned and guaranteed ownership into +/// one of the operand's owner instruction's result. bool isOwnershipForwardingUse(Operand *op); -/// Is this an operand that forwards guaranteed ownership from its value to a -/// result of the using instruction. +/// Is this an operand that can forward guaranteed ownership into one of the +/// operand's owner instruction's result. bool isGuaranteedForwardingUse(Operand *op); -/// These operations forward guaranteed ownership, but don't necessarily forward -/// owned values. -bool isGuaranteedForwardingValueKind(SILNodeKind kind); +/// Is this an operand that can forward owned ownership into one of the +/// operand's owner instruction's result. +bool isOwnedForwardingUse(Operand *use); -/// Is this a value that is the result of an operation that forwards owned -/// ownership. +/// Is this a value that is the result of an instruction that forwards +/// guaranteed ownership from one of its operands. 
bool isGuaranteedForwardingValue(SILValue value); -/// Is this a node kind that can forward owned ownership, but may not be able to -/// forward guaranteed ownership. -bool isOwnedForwardingValueKind(SILNodeKind kind); - -/// Does this operand 'forward' owned ownership, but may not be able to forward -/// guaranteed ownership. -bool isOwnedForwardingUse(Operand *use); - /// Is this value the result of an instruction that 'forward's owned ownership, /// but may not be able to forward guaranteed ownership. /// @@ -75,14 +64,39 @@ class ForwardingOperand { public: static Optional get(Operand *use); + Operand *getUse() const { return use; } + OwnershipConstraint getOwnershipConstraint() const { + // We use a force unwrap since a ForwardingOperand should always have an + // ownership constraint. + return *use->getOwnershipConstraint(); + } ValueOwnershipKind getOwnershipKind() const; void setOwnershipKind(ValueOwnershipKind newKind) const; void replaceOwnershipKind(ValueOwnershipKind oldKind, ValueOwnershipKind newKind) const; - OwnershipForwardingInst *getUser() const { + const OwnershipForwardingInst *operator->() const { + return cast(use->getUser()); + } + OwnershipForwardingInst *operator->() { return cast(use->getUser()); } + const OwnershipForwardingInst &operator*() const { + return *cast(use->getUser()); + } + OwnershipForwardingInst &operator*() { + return *cast(use->getUser()); + } + + /// Call \p visitor with each value that contains the final forwarded + /// ownership of. E.x.: result of a unchecked_ref_cast, phi arguments of a + /// switch_enum. + bool visitForwardedValues(function_ref visitor); + + /// If statically this forwarded operand has a single forwarded value that the + /// operand forwards ownership into, return that value. Return false + /// otherwise. + SILValue getSingleForwardedValue() const; }; /// Returns true if the instruction is a 'reborrow'. 
@@ -156,6 +170,15 @@ struct BorrowingOperand { return *this; } + // A set of operators so that a BorrowingOperand can be used like a normal + // operand in a light weight way. + operator const Operand *() const { return op; } + operator Operand *() { return op; } + const Operand *operator*() const { return op; } + Operand *operator*() { return op; } + const Operand *operator->() const { return op; } + Operand *operator->() { return op; } + /// If \p op is a borrow introducing operand return it after doing some /// checks. static Optional get(Operand *op) { @@ -179,7 +202,10 @@ struct BorrowingOperand { /// Example: An apply performs an instantaneous recursive borrow of a /// guaranteed value but a begin_apply borrows the value over the entire /// region of code corresponding to the coroutine. - void visitLocalEndScopeInstructions(function_ref func) const; + /// + /// NOTE: Return false from func to stop iterating. Returns false if the + /// closure requested to stop early. + bool visitLocalEndScopeUses(function_ref func) const; /// Returns true if this borrow scope operand consumes guaranteed /// values and produces a new scope afterwards. @@ -408,7 +434,7 @@ struct BorrowedValue { /// /// NOTE: Scratch space is used internally to this method to store the end /// borrow scopes if needed. - bool areUsesWithinScope(ArrayRef instructions, + bool areUsesWithinScope(ArrayRef uses, SmallVectorImpl &scratchSpace, SmallPtrSetImpl &visitedBlocks, DeadEndBlocks &deadEndBlocks) const; @@ -430,6 +456,24 @@ struct BorrowedValue { bool visitInteriorPointerOperands( function_ref func) const; + /// Visit all immediate uses of this borrowed value and if any of them are + /// reborrows, place them in BorrowingOperand form into \p + /// foundReborrows. Returns true if we appended any such reborrows to + /// foundReborrows... false otherwise. 
+ bool + gatherReborrows(SmallVectorImpl &foundReborrows) const { + bool foundAnyReborrows = false; + for (auto *op : value->getUses()) { + if (auto borrowingOperand = BorrowingOperand::get(op)) { + if (borrowingOperand->isReborrow()) { + foundReborrows.push_back(*borrowingOperand); + foundAnyReborrows = true; + } + } + } + return foundAnyReborrows; + } + private: /// Internal constructor for failable static constructor. Please do not expand /// its usage since it assumes the code passed in is well formed. diff --git a/include/swift/SIL/SILArgumentConvention.h b/include/swift/SIL/SILArgumentConvention.h index 429ec381a38e1..2ea29de2684ce 100644 --- a/include/swift/SIL/SILArgumentConvention.h +++ b/include/swift/SIL/SILArgumentConvention.h @@ -32,7 +32,6 @@ struct SILArgumentConvention { Indirect_Out, Direct_Owned, Direct_Unowned, - Direct_Deallocating, Direct_Guaranteed, } Value; @@ -86,7 +85,6 @@ struct SILArgumentConvention { case SILArgumentConvention::Indirect_Out: case SILArgumentConvention::Direct_Unowned: case SILArgumentConvention::Direct_Owned: - case SILArgumentConvention::Direct_Deallocating: case SILArgumentConvention::Direct_Guaranteed: return false; } @@ -105,7 +103,6 @@ struct SILArgumentConvention { case SILArgumentConvention::Indirect_Out: case SILArgumentConvention::Indirect_InoutAliasable: case SILArgumentConvention::Direct_Unowned: - case SILArgumentConvention::Direct_Deallocating: return false; } llvm_unreachable("covered switch isn't covered?!"); @@ -123,7 +120,6 @@ struct SILArgumentConvention { case SILArgumentConvention::Indirect_InoutAliasable: case SILArgumentConvention::Direct_Unowned: case SILArgumentConvention::Direct_Owned: - case SILArgumentConvention::Direct_Deallocating: return false; } llvm_unreachable("covered switch isn't covered?!"); @@ -143,7 +139,6 @@ struct SILArgumentConvention { case SILArgumentConvention::Direct_Unowned: case SILArgumentConvention::Direct_Guaranteed: case SILArgumentConvention::Direct_Owned: - case 
SILArgumentConvention::Direct_Deallocating: return false; } llvm_unreachable("covered switch isn't covered?!"); diff --git a/include/swift/SIL/SILBuilder.h b/include/swift/SIL/SILBuilder.h index 1dd7822952a5c..724aac10fa4b0 100644 --- a/include/swift/SIL/SILBuilder.h +++ b/include/swift/SIL/SILBuilder.h @@ -782,7 +782,7 @@ class SILBuilder { } void emitEndBorrowOperation(SILLocation loc, SILValue v) { - if (!hasOwnership()) + if (!hasOwnership() || v.getOwnershipKind() == OwnershipKind::None) return; createEndBorrow(loc, v); } @@ -1939,17 +1939,27 @@ class SILBuilder { //===--------------------------------------------------------------------===// GetAsyncContinuationInst *createGetAsyncContinuation(SILLocation Loc, - SILType ContinuationTy) { + CanType ResumeType, + bool Throws) { + auto ContinuationType = SILType::getPrimitiveObjectType( + getASTContext().TheRawUnsafeContinuationType); return insert(new (getModule()) GetAsyncContinuationInst(getSILDebugLocation(Loc), - ContinuationTy)); + ContinuationType, + ResumeType, + Throws)); } GetAsyncContinuationAddrInst *createGetAsyncContinuationAddr(SILLocation Loc, SILValue Operand, - SILType ContinuationTy) { + CanType ResumeType, + bool Throws) { + auto ContinuationType = SILType::getPrimitiveObjectType( + getASTContext().TheRawUnsafeContinuationType); return insert(new (getModule()) GetAsyncContinuationAddrInst(getSILDebugLocation(Loc), Operand, - ContinuationTy)); + ContinuationType, + ResumeType, + Throws)); } HopToExecutorInst *createHopToExecutor(SILLocation Loc, SILValue Actor) { diff --git a/include/swift/SIL/SILCloner.h b/include/swift/SIL/SILCloner.h index e80c13ccfcd34..9e79efc992d0a 100644 --- a/include/swift/SIL/SILCloner.h +++ b/include/swift/SIL/SILCloner.h @@ -2961,7 +2961,8 @@ ::visitGetAsyncContinuationInst(GetAsyncContinuationInst *Inst) { recordClonedInstruction(Inst, getBuilder().createGetAsyncContinuation( getOpLocation(Inst->getLoc()), - getOpType(Inst->getType()))); + 
getOpASTType(Inst->getFormalResumeType()), + Inst->throws())); } template @@ -2972,7 +2973,8 @@ ::visitGetAsyncContinuationAddrInst(GetAsyncContinuationAddrInst *Inst) { getBuilder().createGetAsyncContinuationAddr( getOpLocation(Inst->getLoc()), getOpValue(Inst->getOperand()), - getOpType(Inst->getType()))); + getOpASTType(Inst->getFormalResumeType()), + Inst->throws())); } template diff --git a/include/swift/SIL/SILDeclRef.h b/include/swift/SIL/SILDeclRef.h index b3f6b0d8f5551..1a0ca754c7a95 100644 --- a/include/swift/SIL/SILDeclRef.h +++ b/include/swift/SIL/SILDeclRef.h @@ -221,6 +221,7 @@ struct SILDeclRef { enum class ManglingKind { Default, DynamicThunk, + AsyncHandlerBody }; /// Produce a mangled form of this constant. @@ -427,6 +428,8 @@ struct SILDeclRef { assert(isAutoDiffDerivativeFunction()); return pointer.get(); } + + bool hasAsync() const; private: friend struct llvm::DenseMapInfo; diff --git a/include/swift/SIL/SILInstruction.h b/include/swift/SIL/SILInstruction.h index 52dc61895a533..5d3787b6f3743 100644 --- a/include/swift/SIL/SILInstruction.h +++ b/include/swift/SIL/SILInstruction.h @@ -905,12 +905,14 @@ class OwnershipForwardingInst { /// /// The ownership kind is set on construction and afterwards must be changed /// explicitly using setOwnershipKind(). 
-class OwnershipForwardingSingleValueInst : public SingleValueInstruction, - public OwnershipForwardingInst { +class FirstArgOwnershipForwardingSingleValueInst + : public SingleValueInstruction, + public OwnershipForwardingInst { protected: - OwnershipForwardingSingleValueInst(SILInstructionKind kind, - SILDebugLocation debugLoc, SILType ty, - ValueOwnershipKind ownershipKind) + FirstArgOwnershipForwardingSingleValueInst(SILInstructionKind kind, + SILDebugLocation debugLoc, + SILType ty, + ValueOwnershipKind ownershipKind) : SingleValueInstruction(kind, debugLoc, ty), OwnershipForwardingInst(kind, ownershipKind) { assert(classof(kind) && "classof missing new subclass?!"); @@ -923,18 +925,142 @@ class OwnershipForwardingSingleValueInst : public SingleValueInstruction, return false; } + static bool classof(SILInstructionKind kind); + + static bool classof(const SILInstruction *inst) { + return classof(inst->getKind()); + } +}; + +/// An ownership forwarding single value that has a preferred operand of owned +/// but if its inputs are all none can have OwnershipKind::None as a result. We +/// assume that we always forward from operand 0. 
+class OwnedFirstArgForwardingSingleValueInst + : public FirstArgOwnershipForwardingSingleValueInst { +protected: + OwnedFirstArgForwardingSingleValueInst(SILInstructionKind kind, + SILDebugLocation debugLoc, SILType ty, + ValueOwnershipKind resultOwnershipKind) + : FirstArgOwnershipForwardingSingleValueInst(kind, debugLoc, ty, + resultOwnershipKind) { + assert(resultOwnershipKind.isCompatibleWith(OwnershipKind::Owned)); + assert(classof(kind) && "classof missing new subclass?!"); + } + +public: + ValueOwnershipKind getPreferredOwnership() const { + return OwnershipKind::Owned; + } + + static bool classof(const SILNode *node) { + if (auto *i = dyn_cast(node)) + return classof(i); + return false; + } + static bool classof(SILInstructionKind kind) { switch (kind) { case SILInstructionKind::MarkUninitializedInst: + return true; + default: + return false; + } + } + + static bool classof(const SILInstruction *inst) { + return classof(inst->getKind()); + } +}; + +/// An instruction that forwards guaranteed or none ownership. Assumed to always +/// forward from Operand(0) -> Result(0). 
+class GuaranteedFirstArgForwardingSingleValueInst + : public FirstArgOwnershipForwardingSingleValueInst { +protected: + GuaranteedFirstArgForwardingSingleValueInst( + SILInstructionKind kind, SILDebugLocation debugLoc, SILType ty, + ValueOwnershipKind resultOwnershipKind) + : FirstArgOwnershipForwardingSingleValueInst(kind, debugLoc, ty, + resultOwnershipKind) { + assert(resultOwnershipKind.isCompatibleWith(OwnershipKind::Guaranteed)); + assert(classof(kind) && "classof missing new subclass?!"); + } + +public: + ValueOwnershipKind getPreferredOwnership() const { + return OwnershipKind::Guaranteed; + } + + static bool classof(const SILNode *node) { + if (auto *i = dyn_cast(node)) + return classof(i); + return false; + } + + static bool classof(SILInstructionKind kind) { + switch (kind) { + case SILInstructionKind::TupleExtractInst: + case SILInstructionKind::StructExtractInst: + case SILInstructionKind::DifferentiableFunctionExtractInst: + case SILInstructionKind::LinearFunctionExtractInst: + case SILInstructionKind::OpenExistentialValueInst: + case SILInstructionKind::OpenExistentialBoxValueInst: + return true; + default: + return false; + } + } + + static bool classof(const SILInstruction *inst) { + return classof(inst->getKind()); + } +}; + +inline bool +FirstArgOwnershipForwardingSingleValueInst::classof(SILInstructionKind kind) { + if (OwnedFirstArgForwardingSingleValueInst::classof(kind)) + return true; + if (GuaranteedFirstArgForwardingSingleValueInst::classof(kind)) + return true; + + switch (kind) { + case SILInstructionKind::ObjectInst: + case SILInstructionKind::EnumInst: + case SILInstructionKind::UncheckedEnumDataInst: + case SILInstructionKind::SelectValueInst: + case SILInstructionKind::OpenExistentialRefInst: + case SILInstructionKind::InitExistentialRefInst: + case SILInstructionKind::MarkDependenceInst: + return true; + default: + return false; + } +} + +class AllArgOwnershipForwardingSingleValueInst + : public SingleValueInstruction, + public 
OwnershipForwardingInst { +protected: + AllArgOwnershipForwardingSingleValueInst(SILInstructionKind kind, + SILDebugLocation debugLoc, + SILType ty, + ValueOwnershipKind ownershipKind) + : SingleValueInstruction(kind, debugLoc, ty), + OwnershipForwardingInst(kind, ownershipKind) { + assert(classof(kind) && "classof missing new subclass?!"); + } + +public: + static bool classof(const SILNode *node) { + if (auto *i = dyn_cast(node)) + return classof(i); + return false; + } + + static bool classof(SILInstructionKind kind) { + switch (kind) { case SILInstructionKind::StructInst: - case SILInstructionKind::ObjectInst: case SILInstructionKind::TupleInst: - case SILInstructionKind::EnumInst: - case SILInstructionKind::UncheckedEnumDataInst: - case SILInstructionKind::SelectValueInst: - case SILInstructionKind::OpenExistentialRefInst: - case SILInstructionKind::InitExistentialRefInst: - case SILInstructionKind::MarkDependenceInst: case SILInstructionKind::LinearFunctionInst: case SILInstructionKind::DifferentiableFunctionInst: return true; @@ -3121,15 +3247,22 @@ class GetAsyncContinuationInstBase : public SingleValueInstruction { protected: - using SingleValueInstruction::SingleValueInstruction; - + CanType ResumeType; + bool Throws; + + GetAsyncContinuationInstBase(SILInstructionKind Kind, SILDebugLocation Loc, + SILType ContinuationType, CanType ResumeType, + bool Throws) + : SingleValueInstruction(Kind, Loc, ContinuationType), + ResumeType(ResumeType), Throws(Throws) {} + public: /// Get the type of the value the async task receives on a resume. - CanType getFormalResumeType() const; + CanType getFormalResumeType() const { return ResumeType; } SILType getLoweredResumeType() const; /// True if the continuation can be used to resume the task by throwing an error. 
- bool throws() const; + bool throws() const { return Throws; } static bool classof(const SILNode *I) { return I->getKind() >= SILNodeKind::First_GetAsyncContinuationInstBase && @@ -3145,8 +3278,9 @@ class GetAsyncContinuationInst final friend SILBuilder; GetAsyncContinuationInst(SILDebugLocation Loc, - SILType ContinuationTy) - : InstructionBase(Loc, ContinuationTy) + SILType ContinuationType, CanType ResumeType, + bool Throws) + : InstructionBase(Loc, ContinuationType, ResumeType, Throws) {} public: @@ -3166,9 +3300,10 @@ class GetAsyncContinuationAddrInst final { friend SILBuilder; GetAsyncContinuationAddrInst(SILDebugLocation Loc, - SILValue Operand, - SILType ContinuationTy) - : UnaryInstructionBase(Loc, Operand, ContinuationTy) + SILValue ResumeBuf, + SILType ContinuationType, CanType ResumeType, + bool Throws) + : UnaryInstructionBase(Loc, ResumeBuf, ContinuationType, ResumeType, Throws) {} }; @@ -4140,7 +4275,7 @@ class AssignByWrapperInst /// this instruction. This is only valid in Raw SIL. class MarkUninitializedInst : public UnaryInstructionBase { + OwnedFirstArgForwardingSingleValueInst> { friend SILBuilder; public: @@ -5052,7 +5187,7 @@ class UnconditionalCheckedCastValueInst final /// StructInst - Represents a constructed loadable struct. class StructInst final : public InstructionBaseWithTrailingOperands< SILInstructionKind::StructInst, StructInst, - OwnershipForwardingSingleValueInst> { + AllArgOwnershipForwardingSingleValueInst> { friend SILBuilder; /// Because of the storage requirements of StructInst, object @@ -5305,7 +5440,7 @@ class SetDeallocatingInst /// static initializer list. 
class ObjectInst final : public InstructionBaseWithTrailingOperands< SILInstructionKind::ObjectInst, ObjectInst, - OwnershipForwardingSingleValueInst> { + FirstArgOwnershipForwardingSingleValueInst> { friend SILBuilder; /// Because of the storage requirements of ObjectInst, object @@ -5352,7 +5487,7 @@ class ObjectInst final : public InstructionBaseWithTrailingOperands< /// TupleInst - Represents a constructed loadable tuple. class TupleInst final : public InstructionBaseWithTrailingOperands< SILInstructionKind::TupleInst, TupleInst, - OwnershipForwardingSingleValueInst> { + AllArgOwnershipForwardingSingleValueInst> { friend SILBuilder; /// Because of the storage requirements of TupleInst, object @@ -5427,8 +5562,9 @@ class TupleInst final : public InstructionBaseWithTrailingOperands< /// Represents a loadable enum constructed from one of its /// elements. -class EnumInst : public InstructionBase { +class EnumInst + : public InstructionBase { friend SILBuilder; Optional> OptionalOperand; @@ -5467,7 +5603,7 @@ class EnumInst : public InstructionBase { + FirstArgOwnershipForwardingSingleValueInst> { friend SILBuilder; EnumElementDecl *Element; @@ -5654,7 +5790,7 @@ class SelectEnumInstBase return std::make_pair(getEnumElementDeclStorage()[i], getAllOperands()[i+1].get()); } - + /// Return the value that will be used as the result for the specified enum /// case. SILValue getCaseResult(EnumElementDecl *D) { @@ -5795,7 +5931,7 @@ class SelectValueInst final : public InstructionBaseWithTrailingOperands< SILInstructionKind::SelectValueInst, SelectValueInst, SelectInstBase> { + FirstArgOwnershipForwardingSingleValueInst>> { friend SILBuilder; SelectValueInst(SILDebugLocation DebugLoc, SILValue Operand, SILType Type, @@ -5887,14 +6023,14 @@ class ExistentialMetatypeInst /// Extract a numbered element out of a value of tuple type. 
class TupleExtractInst - : public UnaryInstructionBase -{ + : public UnaryInstructionBase { friend SILBuilder; TupleExtractInst(SILDebugLocation DebugLoc, SILValue Operand, unsigned FieldNo, SILType ResultTy) - : UnaryInstructionBase(DebugLoc, Operand, ResultTy) { + : UnaryInstructionBase(DebugLoc, Operand, ResultTy, + Operand.getOwnershipKind()) { SILInstruction::Bits.TupleExtractInst.FieldNo = FieldNo; } @@ -5976,19 +6112,22 @@ VarDecl *getIndexedField(NominalTypeDecl *decl, unsigned index); /// because it would allow constant time lookup of either the VarDecl or the /// index from a single pointer without referring back to a projection /// instruction. -class FieldIndexCacheBase : public SingleValueInstruction { +template +class FieldIndexCacheBase : public ParentTy { enum : unsigned { InvalidFieldIndex = ~unsigned(0) }; VarDecl *field; public: + template FieldIndexCacheBase(SILInstructionKind kind, SILDebugLocation loc, - SILType type, VarDecl *field) - : SingleValueInstruction(kind, loc, type), field(field) { + SILType type, VarDecl *field, ArgTys &&... extraArgs) + : ParentTy(kind, loc, type, std::forward(extraArgs)...), + field(field) { SILInstruction::Bits.FieldIndexCacheBase.FieldIndex = InvalidFieldIndex; // This needs to be a concrete class to hold bitfield information. However, // it should only be extended by UnaryInstructions. 
- assert(getNumOperands() == 1); + assert(ParentTy::getNumOperands() == 1); } VarDecl *getField() const { return field; } @@ -6002,7 +6141,8 @@ class FieldIndexCacheBase : public SingleValueInstruction { } NominalTypeDecl *getParentDecl() const { - auto s = getOperand(0)->getType().getNominalOrBoundGenericNominal(); + auto s = + ParentTy::getOperand(0)->getType().getNominalOrBoundGenericNominal(); assert(s); return s; } @@ -6015,18 +6155,24 @@ class FieldIndexCacheBase : public SingleValueInstruction { } private: - unsigned cacheFieldIndex(); + unsigned cacheFieldIndex() { + unsigned index = swift::getFieldIndex(getParentDecl(), getField()); + SILInstruction::Bits.FieldIndexCacheBase.FieldIndex = index; + return index; + } }; /// Extract a physical, fragile field out of a value of struct type. class StructExtractInst - : public UnaryInstructionBase { + : public UnaryInstructionBase< + SILInstructionKind::StructExtractInst, + FieldIndexCacheBase> { friend SILBuilder; StructExtractInst(SILDebugLocation DebugLoc, SILValue Operand, VarDecl *Field, SILType ResultTy) - : UnaryInstructionBase(DebugLoc, Operand, ResultTy, Field) {} + : UnaryInstructionBase(DebugLoc, Operand, ResultTy, Field, + Operand.getOwnershipKind()) {} public: StructDecl *getStructDecl() const { @@ -6046,7 +6192,7 @@ class StructExtractInst /// Derive the address of a physical field from the address of a struct. class StructElementAddrInst : public UnaryInstructionBase { + FieldIndexCacheBase> { friend SILBuilder; StructElementAddrInst(SILDebugLocation DebugLoc, SILValue Operand, @@ -6063,7 +6209,7 @@ class StructElementAddrInst /// type instance. class RefElementAddrInst : public UnaryInstructionBase { + FieldIndexCacheBase> { friend SILBuilder; RefElementAddrInst(SILDebugLocation DebugLoc, SILValue Operand, @@ -6282,11 +6428,11 @@ class OpenExistentialAddrInst /// captures the (dynamic) conformances. 
class OpenExistentialValueInst : public UnaryInstructionBase { + GuaranteedFirstArgForwardingSingleValueInst> { friend SILBuilder; - OpenExistentialValueInst(SILDebugLocation DebugLoc, SILValue Operand, - SILType SelfTy); + OpenExistentialValueInst(SILDebugLocation debugLoc, SILValue operand, + SILType selfTy); }; /// Given a class existential, "opens" the @@ -6294,7 +6440,7 @@ class OpenExistentialValueInst /// captures the (dynamic) conformances. class OpenExistentialRefInst : public UnaryInstructionBase { + FirstArgOwnershipForwardingSingleValueInst> { friend SILBuilder; OpenExistentialRefInst(SILDebugLocation DebugLoc, SILValue Operand, @@ -6331,9 +6477,9 @@ class OpenExistentialBoxInst /// Given a boxed existential container, "opens" the existential by returning a /// fresh archetype T, which also captures the (dynamic) conformances. class OpenExistentialBoxValueInst - : public UnaryInstructionBase -{ + : public UnaryInstructionBase< + SILInstructionKind::OpenExistentialBoxValueInst, + GuaranteedFirstArgForwardingSingleValueInst> { friend SILBuilder; OpenExistentialBoxValueInst(SILDebugLocation DebugLoc, SILValue operand, @@ -6425,7 +6571,7 @@ class InitExistentialValueInst final class InitExistentialRefInst final : public UnaryInstructionWithTypeDependentOperandsBase< SILInstructionKind::InitExistentialRefInst, InitExistentialRefInst, - OwnershipForwardingSingleValueInst> { + FirstArgOwnershipForwardingSingleValueInst> { friend SILBuilder; CanType ConcreteType; @@ -6729,7 +6875,7 @@ class UncheckedOwnershipConversionInst /// "value"'. 
class MarkDependenceInst : public InstructionBase { + FirstArgOwnershipForwardingSingleValueInst> { friend SILBuilder; FixedOperandList<2> Operands; @@ -8603,7 +8749,8 @@ class TryApplyInst final class DifferentiableFunctionInst final : public InstructionBaseWithTrailingOperands< SILInstructionKind::DifferentiableFunctionInst, - DifferentiableFunctionInst, OwnershipForwardingSingleValueInst> { + DifferentiableFunctionInst, + AllArgOwnershipForwardingSingleValueInst> { private: friend SILBuilder; /// Differentiability parameter indices. @@ -8686,10 +8833,10 @@ class DifferentiableFunctionInst final /// LinearFunctionInst - given a function, its derivative and traspose functions, /// create an `@differentiable(linear)` function that represents a bundle of these. -class LinearFunctionInst final : - public InstructionBaseWithTrailingOperands< - SILInstructionKind::LinearFunctionInst, - LinearFunctionInst, OwnershipForwardingSingleValueInst> { +class LinearFunctionInst final + : public InstructionBaseWithTrailingOperands< + SILInstructionKind::LinearFunctionInst, LinearFunctionInst, + AllArgOwnershipForwardingSingleValueInst> { private: friend SILBuilder; /// Parameters to differentiate with respect to. @@ -8728,7 +8875,7 @@ class LinearFunctionInst final : class DifferentiableFunctionExtractInst : public UnaryInstructionBase< SILInstructionKind::DifferentiableFunctionExtractInst, - SingleValueInstruction> { + GuaranteedFirstArgForwardingSingleValueInst> { private: /// The extractee. NormalDifferentiableFunctionTypeComponent Extractee; @@ -8765,7 +8912,7 @@ class DifferentiableFunctionExtractInst /// extract the specified function. class LinearFunctionExtractInst : public UnaryInstructionBase { + GuaranteedFirstArgForwardingSingleValueInst> { private: /// The extractee. 
LinearDifferentiableFunctionTypeComponent extractee; @@ -9083,7 +9230,8 @@ inline bool Operand::isTypeDependent() const { } inline bool OwnershipForwardingInst::classof(const SILInstruction *inst) { - return OwnershipForwardingSingleValueInst::classof(inst) || + return FirstArgOwnershipForwardingSingleValueInst::classof(inst) || + AllArgOwnershipForwardingSingleValueInst::classof(inst) || OwnershipForwardingTermInst::classof(inst) || OwnershipForwardingConversionInst::classof(inst) || OwnershipForwardingSelectEnumInstBase::classof(inst) || @@ -9091,7 +9239,8 @@ inline bool OwnershipForwardingInst::classof(const SILInstruction *inst) { } inline bool OwnershipForwardingInst::classof(SILInstructionKind kind) { - return OwnershipForwardingSingleValueInst::classof(kind) || + return FirstArgOwnershipForwardingSingleValueInst::classof(kind) || + AllArgOwnershipForwardingSingleValueInst::classof(kind) || OwnershipForwardingTermInst::classof(kind) || OwnershipForwardingConversionInst::classof(kind) || OwnershipForwardingSelectEnumInstBase::classof(kind) || diff --git a/include/swift/SIL/SILNode.h b/include/swift/SIL/SILNode.h index e6cce751c6ce3..5cab8b05686dc 100644 --- a/include/swift/SIL/SILNode.h +++ b/include/swift/SIL/SILNode.h @@ -317,9 +317,11 @@ class alignas(8) SILNode { KeepUnique : 1 ); - SWIFT_INLINE_BITFIELD_FULL(FieldIndexCacheBase, SingleValueInstruction, 32, - : NumPadBits, - FieldIndex : 32); + SWIFT_INLINE_BITFIELD_FULL_TEMPLATE(FieldIndexCacheBase, + SingleValueInstruction, 32, + : NumPadBits, + FieldIndex : 32 + ); SWIFT_INLINE_BITFIELD_EMPTY(MethodInst, SingleValueInstruction); // Ensure that WitnessMethodInst bitfield does not overflow. 
diff --git a/include/swift/SIL/TypeSubstCloner.h b/include/swift/SIL/TypeSubstCloner.h index 06ec63403593a..75479af62b689 100644 --- a/include/swift/SIL/TypeSubstCloner.h +++ b/include/swift/SIL/TypeSubstCloner.h @@ -428,9 +428,10 @@ class TypeSubstCloner : public SILClonerWithScopes { return ParentFunction; // Clone the function with the substituted type for the debug info. - Mangle::GenericSpecializationMangler Mangler( - ParentFunction, SubsMap, IsNotSerialized, false, ForInlining); - std::string MangledName = Mangler.mangle(RemappedSig); + Mangle::GenericSpecializationMangler Mangler(ParentFunction, + IsNotSerialized); + std::string MangledName = + Mangler.mangleForDebugInfo(RemappedSig, SubsMap, ForInlining); if (ParentFunction->getName() == MangledName) return ParentFunction; diff --git a/include/swift/SILOptimizer/Analysis/AliasAnalysis.h b/include/swift/SILOptimizer/Analysis/AliasAnalysis.h index a1794a3f01ca5..af78f997c30c4 100644 --- a/include/swift/SILOptimizer/Analysis/AliasAnalysis.h +++ b/include/swift/SILOptimizer/Analysis/AliasAnalysis.h @@ -164,7 +164,16 @@ class AliasAnalysis : public SILAnalysis { } virtual void initialize(SILPassManager *PM) override; - + + /// Explicitly invalidate an instruction. + /// + /// This can be useful to update the alias analysis within a pass. + /// It's needed if e.g. \p inst is an address projection and its operand gets + /// replaced with a different underlying object. + void invalidateInstruction(SILInstruction *inst) { + handleDeleteNotification(inst); + } + /// Perform an alias query to see if V1, V2 refer to the same values. 
AliasResult alias(SILValue V1, SILValue V2, SILType TBAAType1 = SILType(), SILType TBAAType2 = SILType()); diff --git a/include/swift/SILOptimizer/Analysis/SimplifyInstruction.h b/include/swift/SILOptimizer/Analysis/SimplifyInstruction.h index 3997a118aae15..fa7aeede472fe 100644 --- a/include/swift/SILOptimizer/Analysis/SimplifyInstruction.h +++ b/include/swift/SILOptimizer/Analysis/SimplifyInstruction.h @@ -30,6 +30,8 @@ class SILInstruction; /// analysis of the operands of the instruction, without looking at its uses /// (e.g. constant folding). If a simpler result can be found, it is /// returned, otherwise a null SILValue is returned. +/// +/// This is assumed to implement read-none transformations. SILValue simplifyInstruction(SILInstruction *I); /// Replace an instruction with a simplified result and erase it. If the @@ -38,9 +40,17 @@ SILValue simplifyInstruction(SILInstruction *I); /// /// If it is nonnull, eraseNotify will be called before each instruction is /// deleted. +/// +/// If it is nonnull and inst is in OSSA, newInstNotify will be called with each +/// new instruction inserted to compensate for ownership. +/// +/// NOTE: When OSSA is enabled this API assumes OSSA is properly formed and will +/// insert compensating instructions. SILBasicBlock::iterator replaceAllSimplifiedUsesAndErase( SILInstruction *I, SILValue result, - std::function eraseNotify = nullptr); + std::function eraseNotify = nullptr, + std::function newInstNotify = nullptr, + DeadEndBlocks *deadEndBlocks = nullptr); /// Simplify invocations of builtin operations that may overflow. /// All such operations return a tuple (result, overflow_flag). 
diff --git a/include/swift/SILOptimizer/Differentiation/Common.h b/include/swift/SILOptimizer/Differentiation/Common.h index 90e3c26690928..250f836b0fed3 100644 --- a/include/swift/SILOptimizer/Differentiation/Common.h +++ b/include/swift/SILOptimizer/Differentiation/Common.h @@ -22,6 +22,7 @@ #include "swift/AST/SemanticAttrs.h" #include "swift/SIL/SILDifferentiabilityWitness.h" #include "swift/SIL/SILFunction.h" +#include "swift/SIL/Projection.h" #include "swift/SIL/SILModule.h" #include "swift/SIL/TypeSubstCloner.h" #include "swift/SILOptimizer/Analysis/ArraySemantic.h" @@ -166,8 +167,11 @@ VarDecl *getTangentStoredProperty(ADContext &context, VarDecl *originalField, /// Returns the tangent stored property of the original stored property /// referenced by the given projection instruction with the given base type. /// On error, emits diagnostic and returns nullptr. +/// +/// NOTE: Asserts if \p projectionInst is not one of: struct_extract, +/// struct_element_addr, or ref_element_addr. VarDecl *getTangentStoredProperty(ADContext &context, - FieldIndexCacheBase *projectionInst, + SingleValueInstruction *projectionInst, CanType baseType, DifferentiationInvoker invoker); @@ -192,6 +196,16 @@ void extractAllElements(SILValue value, SILBuilder &builder, void emitZeroIntoBuffer(SILBuilder &builder, CanType type, SILValue bufferAccess, SILLocation loc); +/// Emit a `Builtin.Word` value that represents the given type's memory layout +/// size. +SILValue emitMemoryLayoutSize( + SILBuilder &builder, SILLocation loc, CanType type); + +/// Emit a projection of the top-level subcontext from the context object. 
+SILValue emitProjectTopLevelSubcontext( + SILBuilder &builder, SILLocation loc, SILValue context, + SILType subcontextType); + //===----------------------------------------------------------------------===// // Utilities for looking up derivatives of functions //===----------------------------------------------------------------------===// diff --git a/include/swift/SILOptimizer/Differentiation/JVPCloner.h b/include/swift/SILOptimizer/Differentiation/JVPCloner.h index 5a07003bc4f33..79c84f7026a87 100644 --- a/include/swift/SILOptimizer/Differentiation/JVPCloner.h +++ b/include/swift/SILOptimizer/Differentiation/JVPCloner.h @@ -47,6 +47,8 @@ class JVPCloner final { /// Performs JVP generation on the empty JVP function. Returns true if any /// error occurs. bool run(); + + SILFunction &getJVP() const; }; } // end namespace autodiff diff --git a/include/swift/SILOptimizer/Differentiation/LinearMapInfo.h b/include/swift/SILOptimizer/Differentiation/LinearMapInfo.h index 4a643d304597f..5a392313a5cf1 100644 --- a/include/swift/SILOptimizer/Differentiation/LinearMapInfo.h +++ b/include/swift/SILOptimizer/Differentiation/LinearMapInfo.h @@ -63,6 +63,9 @@ class LinearMapInfo { /// Activity info of the original function. const DifferentiableActivityInfo &activityInfo; + /// The original function's loop info. + SILLoopInfo *loopInfo; + /// Differentiation indices of the function. const SILAutoDiffIndices indices; @@ -86,6 +89,9 @@ class LinearMapInfo { /// Mapping from linear map structs to their branching trace enum fields. llvm::DenseMap linearMapStructEnumFields; + /// Blocks in a loop. + llvm::SmallSetVector blocksInLoop; + /// A synthesized file unit. 
SynthesizedFileUnit &synthesizedFile; @@ -144,7 +150,8 @@ class LinearMapInfo { explicit LinearMapInfo(ADContext &context, AutoDiffLinearMapKind kind, SILFunction *original, SILFunction *derivative, SILAutoDiffIndices indices, - const DifferentiableActivityInfo &activityInfo); + const DifferentiableActivityInfo &activityInfo, + SILLoopInfo *loopInfo); /// Returns the linear map struct associated with the given original block. StructDecl *getLinearMapStruct(SILBasicBlock *origBB) const { @@ -200,20 +207,28 @@ class LinearMapInfo { /// Returns the branching trace enum field for the linear map struct of the /// given original block. - VarDecl *lookUpLinearMapStructEnumField(SILBasicBlock *origBB) { + VarDecl *lookUpLinearMapStructEnumField(SILBasicBlock *origBB) const { auto *linearMapStruct = getLinearMapStruct(origBB); return linearMapStructEnumFields.lookup(linearMapStruct); } /// Finds the linear map declaration in the pullback struct for the given /// `apply` instruction in the original function. 
- VarDecl *lookUpLinearMapDecl(ApplyInst *ai) { + VarDecl *lookUpLinearMapDecl(ApplyInst *ai) const { assert(ai->getFunction() == original); auto lookup = linearMapFieldMap.find(ai); assert(lookup != linearMapFieldMap.end() && "No linear map field corresponding to the given `apply`"); return lookup->getSecond(); } + + bool hasLoops() const { + return !blocksInLoop.empty(); + } + + ArrayRef getBlocksInLoop() const { + return blocksInLoop.getArrayRef(); + } }; } // end namespace autodiff diff --git a/include/swift/SILOptimizer/Differentiation/VJPCloner.h b/include/swift/SILOptimizer/Differentiation/VJPCloner.h index ff8ce874021e6..d0b722e5fd29e 100644 --- a/include/swift/SILOptimizer/Differentiation/VJPCloner.h +++ b/include/swift/SILOptimizer/Differentiation/VJPCloner.h @@ -21,6 +21,7 @@ #include "swift/SILOptimizer/Analysis/DifferentiableActivityAnalysis.h" #include "swift/SILOptimizer/Differentiation/DifferentiationInvoker.h" #include "swift/SILOptimizer/Differentiation/LinearMapInfo.h" +#include "swift/SIL/LoopInfo.h" namespace swift { namespace autodiff { @@ -52,6 +53,7 @@ class VJPCloner final { const SILAutoDiffIndices getIndices() const; DifferentiationInvoker getInvoker() const; LinearMapInfo &getPullbackInfo() const; + SILLoopInfo *getLoopInfo() const; const DifferentiableActivityInfo &getActivityInfo() const; /// Performs VJP generation on the empty VJP function. 
Returns true if any diff --git a/include/swift/SILOptimizer/Utils/CFGOptUtils.h b/include/swift/SILOptimizer/Utils/CFGOptUtils.h index a77c48b3be292..8ee1b7a743fc1 100644 --- a/include/swift/SILOptimizer/Utils/CFGOptUtils.h +++ b/include/swift/SILOptimizer/Utils/CFGOptUtils.h @@ -25,6 +25,7 @@ #include "swift/SIL/SILBuilder.h" #include "swift/SIL/SILInstruction.h" +#include "swift/SILOptimizer/Utils/InstOptUtils.h" namespace llvm { template class TinyPtrVector; @@ -35,9 +36,10 @@ namespace swift { class DominanceInfo; class SILLoop; class SILLoopInfo; +struct InstModCallbacks; /// Adds a new argument to an edge between a branch and a destination -/// block. +/// block. Allows for user injected callbacks via \p callbacks. /// /// \param branch The terminator to add the argument to. /// \param dest The destination block of the edge. @@ -45,7 +47,21 @@ class SILLoopInfo; /// \return The created branch. The old branch is deleted. /// The argument is appended at the end of the argument tuple. TermInst *addNewEdgeValueToBranch(TermInst *branch, SILBasicBlock *dest, - SILValue val); + SILValue val, + const InstModCallbacks &callbacks); + +/// Adds a new argument to an edge between a branch and a destination +/// block. +/// +/// \param branch The terminator to add the argument to. +/// \param dest The destination block of the edge. +/// \param val The value to the arguments of the branch. +/// \return The created branch. The old branch is deleted. +/// The argument is appended at the end of the argument tuple. +inline TermInst *addNewEdgeValueToBranch(TermInst *branch, SILBasicBlock *dest, + SILValue val) { + return addNewEdgeValueToBranch(branch, dest, val, InstModCallbacks()); +} /// Changes the edge value between a branch and destination basic block /// at the specified index. 
Changes all edges from \p Branch to \p Dest to carry diff --git a/include/swift/SILOptimizer/Utils/CanonicalizeInstruction.h b/include/swift/SILOptimizer/Utils/CanonicalizeInstruction.h index a82b117970a23..0cd63b10e5f51 100644 --- a/include/swift/SILOptimizer/Utils/CanonicalizeInstruction.h +++ b/include/swift/SILOptimizer/Utils/CanonicalizeInstruction.h @@ -26,6 +26,7 @@ #ifndef SWIFT_SILOPTIMIZER_UTILS_CANONICALIZEINSTRUCTION_H #define SWIFT_SILOPTIMIZER_UTILS_CANONICALIZEINSTRUCTION_H +#include "swift/SIL/BasicBlockUtils.h" #include "swift/SIL/SILBasicBlock.h" #include "swift/SIL/SILInstruction.h" #include "llvm/Support/Debug.h" @@ -38,8 +39,11 @@ struct CanonicalizeInstruction { // May be overriden by passes. static constexpr const char *defaultDebugType = "sil-canonicalize"; const char *debugType = defaultDebugType; + DeadEndBlocks &deadEndBlocks; - CanonicalizeInstruction(const char *passDebugType) { + CanonicalizeInstruction(const char *passDebugType, + DeadEndBlocks &deadEndBlocks) + : deadEndBlocks(deadEndBlocks) { #ifndef NDEBUG if (llvm::DebugFlag && !llvm::isCurrentDebugType(debugType)) debugType = passDebugType; @@ -48,6 +52,8 @@ struct CanonicalizeInstruction { virtual ~CanonicalizeInstruction(); + const SILFunction *getFunction() const { return deadEndBlocks.getFunction(); } + /// Rewrite this instruction, based on its operands and uses, into a more /// canonical representation. /// diff --git a/include/swift/SILOptimizer/Utils/Generics.h b/include/swift/SILOptimizer/Utils/Generics.h index 17cb6656ccf27..0589330ba51e7 100644 --- a/include/swift/SILOptimizer/Utils/Generics.h +++ b/include/swift/SILOptimizer/Utils/Generics.h @@ -61,6 +61,19 @@ class ReabstractionInfo { /// argument has a trivial type. SmallBitVector TrivialArgs; + /// Set to true if the function has a re-abstracted (= converted from + /// indirect to direct) resilient argument or return type. This can happen if + /// the function is compiled within the type's resilience domain, i.e. 
in + /// its module (where the type is loadable). + /// In this case we need to generate a different mangled name for the + /// function to distinguish it from functions in other modules, which cannot + /// re-abstract this resilient type. + /// Fortunately, a flag is sufficient to describe this: either a function has + /// re-abstracted resilient types or not. It cannot happen that two + /// functions have two different subsets of re-abstracted resilient parameter + /// types. + bool hasConvertedResilientParams = false; + /// If set, indirect to direct conversions should be performed by the generic /// specializer. bool ConvertIndirectToDirect; @@ -128,6 +141,12 @@ class ReabstractionInfo { // Is the generated specialization going to be serialized? IsSerialized_t Serialized; + enum TypeCategory { + NotLoadable, + Loadable, + LoadableAndTrivial + }; + unsigned param2ArgIndex(unsigned ParamIdx) const { return ParamIdx + NumFormalIndirectResults; } @@ -138,6 +157,15 @@ class ReabstractionInfo { bool HasUnboundGenericParams); void createSubstitutedAndSpecializedTypes(); + + TypeCategory getReturnTypeCategory(const SILResultInfo &RI, + const SILFunctionConventions &substConv, + TypeExpansionContext typeExpansion); + + TypeCategory getParamTypeCategory(const SILParameterInfo &PI, + const SILFunctionConventions &substConv, + TypeExpansionContext typeExpansion); + bool prepareAndCheck(ApplySite Apply, SILFunction *Callee, SubstitutionMap ParamSubs, OptRemark::Emitter *ORE = nullptr); @@ -175,6 +203,12 @@ class ReabstractionInfo { IsSerialized_t isSerialized() const { return Serialized; } + + /// Returns true if the specialized function needs an alternative mangling. + /// See hasConvertedResilientParams. + bool needAlternativeMangling() const { + return hasConvertedResilientParams; + } TypeExpansionContext getResilienceExpansion() const { auto resilience = (Serialized ? 
ResilienceExpansion::Minimal diff --git a/include/swift/SILOptimizer/Utils/InstOptUtils.h b/include/swift/SILOptimizer/Utils/InstOptUtils.h index fcc723adbb9ab..a6138c753a242 100644 --- a/include/swift/SILOptimizer/Utils/InstOptUtils.h +++ b/include/swift/SILOptimizer/Utils/InstOptUtils.h @@ -35,13 +35,13 @@ namespace swift { class DominanceInfo; template class NullablePtr; -/// Transform a Use Range (Operand*) into a User Range (SILInstruction*) +/// Transform a Use Range (Operand*) into a User Range (SILInstruction *) using UserTransform = std::function; using ValueBaseUserRange = TransformRange, UserTransform>; -inline ValueBaseUserRange -makeUserRange(iterator_range range) { +template +inline TransformRange makeUserRange(Range range) { auto toUser = [](Operand *operand) { return operand->getUser(); }; return makeTransformRange(range, UserTransform(toUser)); } diff --git a/include/swift/SILOptimizer/Utils/OwnershipOptUtils.h b/include/swift/SILOptimizer/Utils/OwnershipOptUtils.h new file mode 100644 index 0000000000000..b7ee89810344b --- /dev/null +++ b/include/swift/SILOptimizer/Utils/OwnershipOptUtils.h @@ -0,0 +1,51 @@ +//===--- OwnershipOptUtils.h ----------------------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// +/// Ownership Utilities that rely on SILOptimizer functionality. 
+/// +//===----------------------------------------------------------------------===// + +#ifndef SWIFT_SILOPTIMIZER_UTILS_OWNERSHIPOPTUTILS_H +#define SWIFT_SILOPTIMIZER_UTILS_OWNERSHIPOPTUTILS_H + +#include "swift/SIL/OwnershipUtils.h" +#include "swift/SIL/SILModule.h" + +namespace swift { + +// Defined in BasicBlockUtils.h +struct JointPostDominanceSetComputer; + +struct OwnershipFixupContext { + std::function eraseNotify; + std::function newInstNotify; + DeadEndBlocks &deBlocks; + JointPostDominanceSetComputer &jointPostDomSetComputer; + + SILBasicBlock::iterator + replaceAllUsesAndEraseFixingOwnership(SingleValueInstruction *oldValue, + SILValue newValue); + + /// We can not RAUW all old values with new values. + /// + /// Namely, we do not support RAUWing values with ValueOwnershipKind::None + /// that have uses that do not require ValueOwnershipKind::None or + /// ValueOwnershipKind::Any. + static bool canFixUpOwnershipForRAUW(SingleValueInstruction *oldValue, + SILValue newValue); +}; + +} // namespace swift + +#endif diff --git a/include/swift/Sema/CSFix.h b/include/swift/Sema/CSFix.h index 7f070c85deee9..a182b25e4e24c 100644 --- a/include/swift/Sema/CSFix.h +++ b/include/swift/Sema/CSFix.h @@ -46,6 +46,7 @@ class ConstraintLocator; class ConstraintLocatorBuilder; enum class ConversionRestrictionKind; class Solution; +struct MemberLookupResult; /// Describes the kind of fix to apply to the given constraint before /// visiting it. @@ -289,6 +290,10 @@ enum class FixKind : uint8_t { /// Treat empty and single-element array literals as if they were incomplete /// dictionary literals when used as such. TreatArrayLiteralAsDictionary, + + /// Explicitly specify the type to disambiguate between possible member base + /// types. 
+ SpecifyBaseTypeForOptionalUnresolvedMember, }; class ConstraintFix { @@ -630,6 +635,26 @@ class DropThrowsAttribute final : public ContextualMismatch { ConstraintLocator *locator); }; +/// This is a contextual mismatch between async and non-async +/// function types, repair it by dropping `async` attribute. +class DropAsyncAttribute final : public ContextualMismatch { + DropAsyncAttribute(ConstraintSystem &cs, FunctionType *fromType, + FunctionType *toType, ConstraintLocator *locator) + : ContextualMismatch(cs, fromType, toType, locator) { + assert(fromType->isAsync() != toType->isAsync()); + } + +public: + std::string getName() const override { return "drop 'async' attribute"; } + + bool diagnose(const Solution &solution, bool asNote = false) const override; + + static DropAsyncAttribute *create(ConstraintSystem &cs, + FunctionType *fromType, + FunctionType *toType, + ConstraintLocator *locator); +}; + /// Append 'as! T' to force a downcast to the specified type. class ForceDowncast final : public ContextualMismatch { ForceDowncast(ConstraintSystem &cs, Type fromType, Type toType, @@ -1728,20 +1753,25 @@ class UseRawValue final : public ConstraintFix { Type expectedType, ConstraintLocator *locator); }; -/// Replace a coercion ('as') with a forced checked cast ('as!'). +/// Replace a coercion ('as') with runtime checked cast ('as!' or 'as?'). class CoerceToCheckedCast final : public ContextualMismatch { CoerceToCheckedCast(ConstraintSystem &cs, Type fromType, Type toType, - ConstraintLocator *locator) + bool useConditionalCast, ConstraintLocator *locator) : ContextualMismatch(cs, FixKind::CoerceToCheckedCast, fromType, toType, - locator) {} + locator), + UseConditionalCast(useConditionalCast) {} + bool UseConditionalCast = false; public: - std::string getName() const override { return "as to as!"; } + std::string getName() const override { + return UseConditionalCast ? "as to as?" 
: "as to as!"; + } bool diagnose(const Solution &solution, bool asNote = false) const override; static CoerceToCheckedCast *attempt(ConstraintSystem &cs, Type fromType, - Type toType, ConstraintLocator *locator); + Type toType, bool useConditionalCast, + ConstraintLocator *locator); }; class RemoveInvalidCall final : public ConstraintFix { @@ -2023,7 +2053,8 @@ class AllowKeyPathWithoutComponents final : public ConstraintFix { ConstraintLocator *locator); }; -class IgnoreInvalidResultBuilderBody final : public ConstraintFix { +class IgnoreInvalidResultBuilderBody : public ConstraintFix { +protected: enum class ErrorInPhase { PreCheck, ConstraintGeneration, @@ -2062,6 +2093,22 @@ class IgnoreInvalidResultBuilderBody final : public ConstraintFix { create(ConstraintSystem &cs, ErrorInPhase phase, ConstraintLocator *locator); }; +class IgnoreResultBuilderWithReturnStmts final + : public IgnoreInvalidResultBuilderBody { + Type BuilderType; + + IgnoreResultBuilderWithReturnStmts(ConstraintSystem &cs, Type builderTy, + ConstraintLocator *locator) + : IgnoreInvalidResultBuilderBody(cs, ErrorInPhase::PreCheck, locator), + BuilderType(builderTy) {} + +public: + bool diagnose(const Solution &solution, bool asNote = false) const override; + + static IgnoreResultBuilderWithReturnStmts * + create(ConstraintSystem &cs, Type builderTy, ConstraintLocator *locator); +}; + class SpecifyContextualTypeForNil final : public ConstraintFix { SpecifyContextualTypeForNil(ConstraintSystem &cs, ConstraintLocator *locator) @@ -2101,6 +2148,34 @@ class AllowRefToInvalidDecl final : public ConstraintFix { ConstraintLocator *locator); }; +/// Diagnose if the base type is optional, we're referring to a nominal +/// type member via the dot syntax and the member name matches +/// Optional.{member} or a .none member inferred as non-optional static +/// member e.g. let _ : Foo? = .none where Foo has a static member none. 
+class SpecifyBaseTypeForOptionalUnresolvedMember final : public ConstraintFix { + SpecifyBaseTypeForOptionalUnresolvedMember(ConstraintSystem &cs, + DeclNameRef memberName, + ConstraintLocator *locator) + : ConstraintFix(cs, FixKind::SpecifyBaseTypeForOptionalUnresolvedMember, + locator, /*isWarning=*/true), + MemberName(memberName) {} + DeclNameRef MemberName; + +public: + std::string getName() const override { + const auto name = MemberName.getBaseName(); + return "specify unresolved member optional base type explicitly '" + + name.userFacingName().str() + "'"; + } + + bool diagnose(const Solution &solution, bool asNote = false) const override; + + static SpecifyBaseTypeForOptionalUnresolvedMember * + attempt(ConstraintSystem &cs, ConstraintKind kind, Type baseTy, + DeclNameRef memberName, FunctionRefKind functionRefKind, + MemberLookupResult result, ConstraintLocator *locator); +}; + } // end namespace constraints } // end namespace swift diff --git a/include/swift/Sema/ConstraintSystem.h b/include/swift/Sema/ConstraintSystem.h index 1a9b32a0ed536..c787640c1146e 100644 --- a/include/swift/Sema/ConstraintSystem.h +++ b/include/swift/Sema/ConstraintSystem.h @@ -785,6 +785,8 @@ enum ScoreKind { SK_ForwardTrailingClosure, /// A use of a disfavored overload. SK_DisfavoredOverload, + /// A member for an \c UnresolvedMemberExpr found via unwrapped optional base. + SK_UnresolvedMemberViaOptional, /// An implicit force of an implicitly unwrapped optional value. SK_ForceUnchecked, /// A user-defined conversion. @@ -4706,6 +4708,11 @@ class ConstraintSystem { return {type, kind, BindingSource}; } + /// Determine whether this binding could be a viable candidate + /// to be "joined" with some other binding. It has to be at least + /// a non-default r-value supertype binding with no type variables. 
+ bool isViableForJoin() const; + static PotentialBinding forHole(TypeVariableType *typeVar, ConstraintLocator *locator) { return {HoleType::get(typeVar->getASTContext(), typeVar), @@ -4733,21 +4740,22 @@ class ConstraintSystem { /// The set of constraints which would be used to infer default types. llvm::TinyPtrVector Defaults; - /// Whether these bindings should be delayed until the rest of the - /// constraint system is considered "fully bound". - bool FullyBound = false; + /// The set of constraints which delay attempting this type variable. + llvm::TinyPtrVector DelayedBy; - /// Whether the bindings of this type involve other type variables. - bool InvolvesTypeVariables = false; + /// The set of type variables adjacent to the current one. + /// + /// Type variables contained here are either related through the + /// bindings (contained in the binding type e.g. `Foo<$T0>`), or + /// reachable through subtype/conversion relationship e.g. + /// `$T0 subtype of $T1` or `$T0 arg conversion $T1`. + llvm::SmallPtrSet AdjacentVars; ASTNode AssociatedCodeCompletionToken = ASTNode(); /// Whether this type variable has literal bindings. LiteralBindingKind LiteralBinding = LiteralBindingKind::None; - /// Tracks the position of the last known supertype in the group. - Optional lastSupertypeIndex; - /// A set of all not-yet-resolved type variables this type variable /// is a subtype of, supertype of or is equivalent to. This is used /// to determine ordering inside of a chain of subtypes to help infer @@ -4761,6 +4769,22 @@ class ConstraintSystem { /// Determine whether the set of bindings is non-empty. 
explicit operator bool() const { return !Bindings.empty(); } + /// Determine whether attempting this type variable should be + /// delayed until the rest of the constraint system is considered + /// "fully bound" meaning constraints, which affect completeness + /// of the binding set, for this type variable such as - member + /// constraint, disjunction, function application etc. - are simplified. + /// + /// Note that in some situations i.e. when there are no more + /// disjunctions or type variables left to attempt, it's still + /// okay to attempt "delayed" type variable to make forward progress. + bool isDelayed() const; + + /// Whether the bindings of this type involve other type variables, + /// or the type variable itself is adjacent to other type variables + /// that could become valid bindings in the future. + bool involvesTypeVariables() const; + /// Whether the bindings represent (potentially) incomplete set, /// there is no way to say with absolute certainty if that's the /// case, but that could happen when certain constraints like @@ -4803,9 +4827,9 @@ class ConstraintSystem { return std::make_tuple(b.isHole(), !hasNoDefaultableBindings, - b.FullyBound, + b.isDelayed(), b.isSubtypeOfExistentialType(), - b.InvolvesTypeVariables, + b.involvesTypeVariables(), static_cast(b.LiteralBinding), -(b.Bindings.size() - numDefaults)); } @@ -4950,13 +4974,13 @@ class ConstraintSystem { out.indent(indent); if (isPotentiallyIncomplete()) out << "potentially_incomplete "; - if (FullyBound) - out << "fully_bound "; + if (isDelayed()) + out << "delayed "; if (isSubtypeOfExistentialType()) out << "subtype_of_existential "; if (LiteralBinding != LiteralBindingKind::None) out << "literal=" << static_cast(LiteralBinding) << " "; - if (InvolvesTypeVariables) + if (involvesTypeVariables()) out << "involves_type_vars "; auto numDefaultable = getNumDefaultableBindings(); diff --git a/include/swift/Strings.h b/include/swift/Strings.h index 4fa1ca1e20231..a869c425f3109 100644 
--- a/include/swift/Strings.h +++ b/include/swift/Strings.h @@ -110,9 +110,15 @@ constexpr static BuiltinNameStringLiteral BUILTIN_TYPE_NAME_BRIDGEOBJECT = { /// The name of the Builtin type for RawPointer constexpr static BuiltinNameStringLiteral BUILTIN_TYPE_NAME_RAWPOINTER = { "Builtin.RawPointer"}; +/// The name of the Builtin type for RawUnsafeContinuation +constexpr static BuiltinNameStringLiteral BUILTIN_TYPE_NAME_RAWUNSAFECONTINUATION = { + "Builtin.RawUnsafeContinuation"}; /// The name of the Builtin type for UnsafeValueBuffer constexpr static BuiltinNameStringLiteral BUILTIN_TYPE_NAME_UNSAFEVALUEBUFFER = {"Builtin.UnsafeValueBuffer"}; +/// The name of the Builtin type for Job +constexpr static BuiltinNameStringLiteral BUILTIN_TYPE_NAME_JOB = { + "Builtin.Job"}; /// The name of the Builtin type for UnknownObject /// /// This no longer exists as an AST-accessible type, but it's still used for diff --git a/lib/AST/ASTContext.cpp b/lib/AST/ASTContext.cpp index 7eb3d255e3a6b..d4126807c62d3 100644 --- a/lib/AST/ASTContext.cpp +++ b/lib/AST/ASTContext.cpp @@ -227,6 +227,12 @@ struct ASTContext::Implementation { /// The declaration of Swift.AutoreleasingUnsafeMutablePointer.memory. VarDecl *AutoreleasingUnsafeMutablePointerMemoryDecl = nullptr; + /// The declaration of _Concurrency.DefaultActor. + ClassDecl *DefaultActorDecl = nullptr; + + /// The declaration of _Concurrency.NSObjectDefaultActor. + ClassDecl *NSObjectDefaultActorDecl = nullptr; + // Declare cached declarations for each of the known declarations. 
#define FUNC_DECL(Name, Id) FuncDecl *Get##Name = nullptr; #include "swift/AST/KnownDecls.def" @@ -589,18 +595,10 @@ ASTContext::ASTContext(LangOptions &langOpts, TypeCheckerOptions &typeckOpts, TheEmptyTupleType(TupleType::get(ArrayRef(), *this)), TheAnyType(ProtocolCompositionType::get(*this, ArrayRef(), /*HasExplicitAnyObject=*/false)), - TheNativeObjectType(new (*this, AllocationArena::Permanent) - BuiltinNativeObjectType(*this)), - TheBridgeObjectType(new (*this, AllocationArena::Permanent) - BuiltinBridgeObjectType(*this)), - TheRawPointerType(new (*this, AllocationArena::Permanent) - BuiltinRawPointerType(*this)), - TheUnsafeValueBufferType(new (*this, AllocationArena::Permanent) - BuiltinUnsafeValueBufferType(*this)), - TheSILTokenType(new (*this, AllocationArena::Permanent) - SILTokenType(*this)), - TheIntegerLiteralType(new (*this, AllocationArena::Permanent) - BuiltinIntegerLiteralType(*this)), +#define SINGLETON_TYPE(SHORT_ID, ID) \ + The##SHORT_ID##Type(new (*this, AllocationArena::Permanent) \ + ID##Type(*this)), +#include "swift/AST/TypeNodes.def" TheIEEE32Type(new (*this, AllocationArena::Permanent) BuiltinFloatType(BuiltinFloatType::IEEE32,*this)), TheIEEE64Type(new (*this, AllocationArena::Permanent) @@ -1921,6 +1919,11 @@ ASTContext::getModule(ImportPath::Module ModulePath) { auto moduleID = ModulePath[0]; for (auto &importer : getImpl().ModuleLoaders) { if (ModuleDecl *M = importer->loadModule(moduleID.Loc, ModulePath)) { + if (LangOpts.EnableModuleLoadingRemarks) { + Diags.diagnose(ModulePath.getSourceRange().Start, + diag::module_loaded, + M->getModuleFilename()); + } return M; } } diff --git a/lib/AST/ASTDumper.cpp b/lib/AST/ASTDumper.cpp index 480f8031acab2..2b529ac8b61cf 100644 --- a/lib/AST/ASTDumper.cpp +++ b/lib/AST/ASTDumper.cpp @@ -3560,7 +3560,9 @@ namespace { } TRIVIAL_TYPE_PRINTER(BuiltinIntegerLiteral, builtin_integer_literal) + TRIVIAL_TYPE_PRINTER(BuiltinJob, builtin_job) TRIVIAL_TYPE_PRINTER(BuiltinRawPointer, 
builtin_raw_pointer) + TRIVIAL_TYPE_PRINTER(BuiltinRawUnsafeContinuation, builtin_raw_unsafe_continuation) TRIVIAL_TYPE_PRINTER(BuiltinNativeObject, builtin_native_object) TRIVIAL_TYPE_PRINTER(BuiltinBridgeObject, builtin_bridge_object) TRIVIAL_TYPE_PRINTER(BuiltinUnsafeValueBuffer, builtin_unsafe_value_buffer) diff --git a/lib/AST/ASTMangler.cpp b/lib/AST/ASTMangler.cpp index b6433344dc0e4..955c7bca084f8 100644 --- a/lib/AST/ASTMangler.cpp +++ b/lib/AST/ASTMangler.cpp @@ -88,7 +88,7 @@ std::string ASTMangler::mangleClosureEntity(const AbstractClosureExpr *closure, std::string ASTMangler::mangleEntity(const ValueDecl *decl, SymbolKind SKind) { beginMangling(); - appendEntity(decl); + appendEntity(decl, SKind == SymbolKind::AsyncHandlerBody); appendSymbolKind(SKind); return finalize(); } @@ -657,7 +657,7 @@ std::string ASTMangler::mangleTypeAsUSR(Type Ty) { Ty = getTypeForDWARFMangling(Ty); if (auto *fnType = Ty->getAs()) { - appendFunction(fnType, false); + appendFunction(fnType); } else { appendType(Ty); } @@ -744,6 +744,7 @@ std::string ASTMangler::mangleOpaqueTypeDecl(const ValueDecl *decl) { void ASTMangler::appendSymbolKind(SymbolKind SKind) { switch (SKind) { case SymbolKind::Default: return; + case SymbolKind::AsyncHandlerBody: return; case SymbolKind::DynamicThunk: return appendOperator("TD"); case SymbolKind::SwiftAsObjCThunk: return appendOperator("To"); case SymbolKind::ObjCAsSwiftThunk: return appendOperator("TO"); @@ -785,8 +786,8 @@ static StringRef getPrivateDiscriminatorIfNecessary(const ValueDecl *decl) { // Mangle non-local private declarations with a textual discriminator // based on their enclosing file. 
- auto topLevelContext = decl->getDeclContext()->getModuleScopeContext(); - auto fileUnit = cast(topLevelContext); + auto topLevelSubcontext = decl->getDeclContext()->getModuleScopeContext(); + auto fileUnit = cast(topLevelSubcontext); Identifier discriminator = fileUnit->getDiscriminatorForPrivateValue(decl); @@ -972,8 +973,12 @@ void ASTMangler::appendType(Type type, const ValueDecl *forDecl) { } case TypeKind::BuiltinIntegerLiteral: return appendOperator("BI"); + case TypeKind::BuiltinJob: + return appendOperator("Bj"); case TypeKind::BuiltinRawPointer: return appendOperator("Bp"); + case TypeKind::BuiltinRawUnsafeContinuation: + return appendOperator("Bc"); case TypeKind::BuiltinNativeObject: return appendOperator("Bo"); case TypeKind::BuiltinBridgeObject: @@ -1836,6 +1841,13 @@ ASTMangler::getSpecialManglingContext(const ValueDecl *decl, return ASTMangler::ObjCContext; } } + + // Types apparently defined in the Builtin module are actually + // synthetic declarations for types defined in the runtime, + // and they should be mangled as C-namespace entities; see e.g. + // IRGenModule::getObjCRuntimeBaseClass. + if (decl->getModuleContext()->isBuiltinModule()) + return ASTMangler::ObjCContext; } // Importer-synthesized types should always be mangled in the @@ -2249,7 +2261,8 @@ void ASTMangler::appendAnyGenericType(const GenericTypeDecl *decl) { addSubstitution(cast(decl)); } -void ASTMangler::appendFunction(AnyFunctionType *fn, bool isFunctionMangling, +void ASTMangler::appendFunction(AnyFunctionType *fn, + FunctionManglingKind functionMangling, const ValueDecl *forDecl) { // Append parameter labels right before the signature/type. 
auto parameters = fn->getParams(); @@ -2269,8 +2282,8 @@ void ASTMangler::appendFunction(AnyFunctionType *fn, bool isFunctionMangling, appendOperator("y"); } - if (isFunctionMangling) { - appendFunctionSignature(fn, forDecl); + if (functionMangling != NoFunctionMangling) { + appendFunctionSignature(fn, forDecl, functionMangling); } else { appendFunctionType(fn, /*autoclosure*/ false, forDecl); } @@ -2281,7 +2294,7 @@ void ASTMangler::appendFunctionType(AnyFunctionType *fn, bool isAutoClosure, assert((DWARFMangling || fn->isCanonical()) && "expecting canonical types when not mangling for the debugger"); - appendFunctionSignature(fn, forDecl); + appendFunctionSignature(fn, forDecl, NoFunctionMangling); bool mangleClangType = fn->getASTContext().LangOpts.UseClangFunctionTypes && fn->hasNonDerivableClangType(); @@ -2359,10 +2372,11 @@ void ASTMangler::appendClangType(AnyFunctionType *fn) { } void ASTMangler::appendFunctionSignature(AnyFunctionType *fn, - const ValueDecl *forDecl) { + const ValueDecl *forDecl, + FunctionManglingKind functionMangling) { appendFunctionResultType(fn->getResult(), forDecl); appendFunctionInputType(fn->getParams(), forDecl); - if (fn->isAsync()) + if (fn->isAsync() || functionMangling == AsyncHandlerBodyMangling) appendOperator("Y"); if (fn->isThrowing()) appendOperator("K"); @@ -2780,14 +2794,15 @@ CanType ASTMangler::getDeclTypeForMangling( return canTy; } -void ASTMangler::appendDeclType(const ValueDecl *decl, bool isFunctionMangling) { +void ASTMangler::appendDeclType(const ValueDecl *decl, + FunctionManglingKind functionMangling) { Mod = decl->getModuleContext(); GenericSignature genericSig; GenericSignature parentGenericSig; auto type = getDeclTypeForMangling(decl, genericSig, parentGenericSig); if (AnyFunctionType *FuncTy = type->getAs()) { - appendFunction(FuncTy, isFunctionMangling, decl); + appendFunction(FuncTy, functionMangling, decl); } else { appendType(type, decl); } @@ -2795,7 +2810,7 @@ void ASTMangler::appendDeclType(const 
ValueDecl *decl, bool isFunctionMangling) // Mangle the generic signature, if any. if (genericSig && appendGenericSignature(genericSig, parentGenericSig)) { // The 'F' function mangling doesn't need a 'u' for its generic signature. - if (!isFunctionMangling) + if (functionMangling == NoFunctionMangling) appendOperator("u"); } } @@ -2870,7 +2885,7 @@ void ASTMangler::appendEntity(const ValueDecl *decl, StringRef EntityOp, appendOperator("Z"); } -void ASTMangler::appendEntity(const ValueDecl *decl) { +void ASTMangler::appendEntity(const ValueDecl *decl, bool isAsyncHandlerBody) { assert(!isa(decl)); assert(!isa(decl)); @@ -2891,7 +2906,8 @@ void ASTMangler::appendEntity(const ValueDecl *decl) { appendContextOf(decl); appendDeclName(decl); - appendDeclType(decl, /*isFunctionMangling*/ true); + appendDeclType(decl, isAsyncHandlerBody ? AsyncHandlerBodyMangling + : FunctionMangling); appendOperator("F"); if (decl->isStatic()) appendOperator("Z"); @@ -2900,9 +2916,9 @@ void ASTMangler::appendEntity(const ValueDecl *decl) { void ASTMangler::appendProtocolConformance(const ProtocolConformance *conformance) { GenericSignature contextSig; - auto topLevelContext = + auto topLevelSubcontext = conformance->getDeclContext()->getModuleScopeContext(); - Mod = topLevelContext->getParentModule(); + Mod = topLevelSubcontext->getParentModule(); auto conformingType = conformance->getType(); appendType(conformingType->getCanonicalType()); @@ -2910,7 +2926,7 @@ ASTMangler::appendProtocolConformance(const ProtocolConformance *conformance) { appendProtocolName(conformance->getProtocol()); bool needsModule = true; - if (auto *file = dyn_cast(topLevelContext)) { + if (auto *file = dyn_cast(topLevelSubcontext)) { if (file->getKind() == FileUnitKind::ClangModule || file->getKind() == FileUnitKind::DWARFModule) { if (conformance->getProtocol()->hasClangNode()) diff --git a/lib/AST/ASTPrinter.cpp b/lib/AST/ASTPrinter.cpp index 03608629511bb..5d212a5d044b8 100644 --- a/lib/AST/ASTPrinter.cpp +++ 
b/lib/AST/ASTPrinter.cpp @@ -3853,6 +3853,8 @@ class TypePrinter : public TypeVisitor { Printer << buffer; \ } ASTPRINTER_PRINT_BUILTINTYPE(BuiltinRawPointerType) + ASTPRINTER_PRINT_BUILTINTYPE(BuiltinRawUnsafeContinuationType) + ASTPRINTER_PRINT_BUILTINTYPE(BuiltinJobType) ASTPRINTER_PRINT_BUILTINTYPE(BuiltinNativeObjectType) ASTPRINTER_PRINT_BUILTINTYPE(BuiltinBridgeObjectType) ASTPRINTER_PRINT_BUILTINTYPE(BuiltinUnsafeValueBufferType) diff --git a/lib/AST/ASTVerifier.cpp b/lib/AST/ASTVerifier.cpp index 6f502b96319c1..200a485eec0e4 100644 --- a/lib/AST/ASTVerifier.cpp +++ b/lib/AST/ASTVerifier.cpp @@ -229,7 +229,7 @@ class Verifier : public ASTWalker { typedef llvm::PointerIntPair ClosureDiscriminatorKey; llvm::DenseMap ClosureDiscriminators; - DeclContext *CanonicalTopLevelContext = nullptr; + DeclContext *CanonicalTopLevelSubcontext = nullptr; Verifier(PointerUnion M, DeclContext *DC) : M(M), @@ -898,9 +898,9 @@ class Verifier : public ASTWalker { DeclContext *getCanonicalDeclContext(DeclContext *DC) { // All we really need to do is use a single TopLevelCodeDecl. if (auto topLevel = dyn_cast(DC)) { - if (!CanonicalTopLevelContext) - CanonicalTopLevelContext = topLevel; - return CanonicalTopLevelContext; + if (!CanonicalTopLevelSubcontext) + CanonicalTopLevelSubcontext = topLevel; + return CanonicalTopLevelSubcontext; } // TODO: check for uniqueness of initializer contexts? 
diff --git a/lib/AST/Attr.cpp b/lib/AST/Attr.cpp index 29877bb2cb625..3e1d453f03292 100644 --- a/lib/AST/Attr.cpp +++ b/lib/AST/Attr.cpp @@ -746,7 +746,8 @@ bool DeclAttribute::printImpl(ASTPrinter &Printer, const PrintOptions &Options, if (auto *VD = dyn_cast(D)) { if (VD->getAttachedResultBuilder() == this) { if (!isa(D) && - !(isa(D) && isa(D->getDeclContext()))) + !((isa(D) || isa(D)) && + isa(D->getDeclContext()))) return false; } } diff --git a/lib/AST/Availability.cpp b/lib/AST/Availability.cpp index e042a3dda668f..1fee0851ffcf6 100644 --- a/lib/AST/Availability.cpp +++ b/lib/AST/Availability.cpp @@ -327,6 +327,10 @@ AvailabilityContext ASTContext::getConcurrencyAvailability() { return getSwiftFutureAvailability(); } +AvailabilityContext ASTContext::getDifferentiationAvailability() { + return getSwiftFutureAvailability(); +} + AvailabilityContext ASTContext::getSwift52Availability() { auto target = LangOpts.Target; diff --git a/lib/AST/Builtins.cpp b/lib/AST/Builtins.cpp index 6125b87112d9e..9c2da9647a849 100644 --- a/lib/AST/Builtins.cpp +++ b/lib/AST/Builtins.cpp @@ -78,6 +78,10 @@ Type swift::getBuiltinType(ASTContext &Context, StringRef Name) { if (Name == "RawPointer") return Context.TheRawPointerType; + if (Name == "RawUnsafeContinuation") + return Context.TheRawUnsafeContinuationType; + if (Name == "Job") + return Context.TheJobType; if (Name == "NativeObject") return Context.TheNativeObjectType; if (Name == "BridgeObject") @@ -176,6 +180,16 @@ getBuiltinFunction(Identifier Id, ArrayRef argTypes, Type ResType) { return FD; } +namespace { + +enum class BuiltinThrowsKind : uint8_t { + None, + Throws, + Rethrows +}; + +} + /// Build a builtin function declaration. 
static FuncDecl * getBuiltinGenericFunction(Identifier Id, @@ -183,7 +197,7 @@ getBuiltinGenericFunction(Identifier Id, Type ResType, GenericParamList *GenericParams, GenericSignature Sig, - bool Rethrows = false) { + bool Async, BuiltinThrowsKind Throws) { assert(GenericParams && "Missing generic parameters"); auto &Context = ResType->getASTContext(); @@ -209,13 +223,15 @@ getBuiltinGenericFunction(Identifier Id, DeclName Name(Context, Id, paramList); auto *const func = FuncDecl::createImplicit( - Context, StaticSpellingKind::None, Name, /*NameLoc=*/SourceLoc(), - /*Async=*/false, - /*Throws=*/Rethrows, GenericParams, paramList, ResType, DC); + Context, StaticSpellingKind::None, Name, + /*NameLoc=*/SourceLoc(), + Async, + Throws != BuiltinThrowsKind::None, + GenericParams, paramList, ResType, DC); func->setAccess(AccessLevel::Public); func->setGenericSignature(Sig); - if (Rethrows) + if (Throws == BuiltinThrowsKind::Rethrows) func->getAttrs().add(new (Context) RethrowsAttr(/*ThrowsLoc*/ SourceLoc())); return func; @@ -443,7 +459,8 @@ namespace { GenericParamList *TheGenericParamList; SmallVector InterfaceParams; Type InterfaceResult; - bool Rethrows = false; + bool Async = false; + BuiltinThrowsKind Throws = BuiltinThrowsKind::None; // Accumulate params and requirements here, so that we can make the // appropriate `AbstractGenericSignatureRequest` when `build()` is called. 
@@ -488,8 +505,16 @@ namespace { addedRequirements.push_back(req); } - void setRethrows(bool rethrows = true) { - Rethrows = rethrows; + void setAsync() { + Async = true; + } + + void setThrows() { + Throws = BuiltinThrowsKind::Throws; + } + + void setRethrows() { + Throws = BuiltinThrowsKind::Rethrows; } FuncDecl *build(Identifier name) { @@ -500,7 +525,8 @@ namespace { nullptr); return getBuiltinGenericFunction(name, InterfaceParams, InterfaceResult, - TheGenericParamList, GenericSig); + TheGenericParamList, GenericSig, + Async, Throws); } // Don't use these generator classes directly; call the make{...} @@ -1383,6 +1409,25 @@ static ValueDecl *getCreateAsyncTaskFuture(ASTContext &ctx, Identifier id) { return builder.build(id); } +static ValueDecl *getAutoDiffCreateLinearMapContext(ASTContext &ctx, + Identifier id) { + return getBuiltinFunction( + id, {BuiltinIntegerType::getWordType(ctx)}, ctx.TheNativeObjectType); +} + +static ValueDecl *getAutoDiffProjectTopLevelSubcontext(ASTContext &ctx, + Identifier id) { + return getBuiltinFunction( + id, {ctx.TheNativeObjectType}, ctx.TheRawPointerType); +} + +static ValueDecl *getAutoDiffAllocateSubcontext(ASTContext &ctx, + Identifier id) { + return getBuiltinFunction( + id, {ctx.TheNativeObjectType, BuiltinIntegerType::getWordType(ctx)}, + ctx.TheRawPointerType); +} + static ValueDecl *getPoundAssert(ASTContext &Context, Identifier Id) { auto int1Type = BuiltinIntegerType::get(1, Context); auto optionalRawPointerType = BoundGenericEnumType::get( @@ -1612,6 +1657,29 @@ static ValueDecl *getPolymorphicBinaryOperation(ASTContext &ctx, return builder.build(id); } +static ValueDecl *getWithUnsafeContinuation(ASTContext &ctx, + Identifier id, + bool throws) { + BuiltinFunctionBuilder builder(ctx); + + auto contTy = ctx.TheRawUnsafeContinuationType; + SmallVector params; + params.emplace_back(contTy); + + auto voidTy = ctx.TheEmptyTupleType; + auto extInfo = FunctionType::ExtInfoBuilder().withNoEscape().build(); + auto *fnTy 
= FunctionType::get(params, voidTy, extInfo); + + builder.addParameter(makeConcrete(fnTy)); + builder.setResult(makeGenericParam()); + + builder.setAsync(); + if (throws) + builder.setThrows(); + + return builder.build(id); +} + /// An array of the overloaded builtin kinds. static const OverloadedBuiltinKind OverloadedBuiltinKinds[] = { OverloadedBuiltinKind::None, @@ -2549,6 +2617,21 @@ ValueDecl *swift::getBuiltinValueDecl(ASTContext &Context, Identifier Id) { case BuiltinValueKind::TriggerFallbackDiagnostic: return getTriggerFallbackDiagnosticOperation(Context, Id); + + case BuiltinValueKind::WithUnsafeContinuation: + return getWithUnsafeContinuation(Context, Id, /*throws=*/false); + + case BuiltinValueKind::WithUnsafeThrowingContinuation: + return getWithUnsafeContinuation(Context, Id, /*throws=*/true); + + case BuiltinValueKind::AutoDiffCreateLinearMapContext: + return getAutoDiffCreateLinearMapContext(Context, Id); + + case BuiltinValueKind::AutoDiffProjectTopLevelSubcontext: + return getAutoDiffProjectTopLevelSubcontext(Context, Id); + + case BuiltinValueKind::AutoDiffAllocateSubcontext: + return getAutoDiffAllocateSubcontext(Context, Id); } llvm_unreachable("bad builtin value!"); @@ -2600,6 +2683,12 @@ StringRef BuiltinType::getTypeName(SmallVectorImpl &result, case BuiltinTypeKind::BuiltinRawPointer: printer << MAYBE_GET_NAMESPACED_BUILTIN(BUILTIN_TYPE_NAME_RAWPOINTER); break; + case BuiltinTypeKind::BuiltinRawUnsafeContinuation: + printer << MAYBE_GET_NAMESPACED_BUILTIN(BUILTIN_TYPE_NAME_RAWUNSAFECONTINUATION); + break; + case BuiltinTypeKind::BuiltinJob: + printer << MAYBE_GET_NAMESPACED_BUILTIN(BUILTIN_TYPE_NAME_JOB); + break; case BuiltinTypeKind::BuiltinNativeObject: printer << MAYBE_GET_NAMESPACED_BUILTIN(BUILTIN_TYPE_NAME_NATIVEOBJECT); break; diff --git a/lib/AST/ClangTypeConverter.cpp b/lib/AST/ClangTypeConverter.cpp index 52e0df90de641..2abb77e5800fe 100644 --- a/lib/AST/ClangTypeConverter.cpp +++ b/lib/AST/ClangTypeConverter.cpp @@ -120,6 
+120,10 @@ const clang::Type *ClangTypeConverter::getFunctionType( ArrayRef params, Type resultTy, AnyFunctionType::Representation repr) { +#if SWIFT_BUILD_ONLY_SYNTAXPARSERLIB + return nullptr; +#endif + auto resultClangTy = convert(resultTy); if (resultClangTy.isNull()) return nullptr; @@ -163,6 +167,10 @@ const clang::Type *ClangTypeConverter::getFunctionType( ArrayRef params, Optional result, SILFunctionType::Representation repr) { +#if SWIFT_BUILD_ONLY_SYNTAXPARSERLIB + return nullptr; +#endif + // Using the interface type is sufficient as type parameters get mapped to // `id`, since ObjC lightweight generics use type erasure. (See also: SE-0057) auto resultClangTy = result.hasValue() @@ -564,8 +572,10 @@ ClangTypeConverter::visitBoundGenericType(BoundGenericType *type) { case StructKind::Invalid: return clang::QualType(); - case StructKind::UnsafeMutablePointer: case StructKind::Unmanaged: + return convert(argCanonicalTy); + + case StructKind::UnsafeMutablePointer: case StructKind::AutoreleasingUnsafeMutablePointer: { auto clangTy = convert(argCanonicalTy); if (clangTy.isNull()) diff --git a/lib/AST/Decl.cpp b/lib/AST/Decl.cpp index d949870924c33..736d6f8fc5e9d 100644 --- a/lib/AST/Decl.cpp +++ b/lib/AST/Decl.cpp @@ -2403,10 +2403,6 @@ bool swift::conflicting(const OverloadSignature& sig1, (sig2.IsVariable && !sig1.Name.getArgumentNames().empty())); } - // If one is asynchronous and the other is not, they can't conflict. - if (sig1.HasAsync != sig2.HasAsync) - return false; - // Note that we intentionally ignore the HasOpaqueReturnType bit here. // For declarations that can't be overloaded by type, we want them to be // considered conflicting independent of their type. 
@@ -2630,8 +2626,6 @@ OverloadSignature ValueDecl::getOverloadSignature() const { signature.IsTypeAlias = isa(this); signature.HasOpaqueReturnType = !signature.IsVariable && (bool)getOpaqueResultTypeDecl(); - signature.HasAsync = isa(this) && - cast(this)->hasAsync(); // Unary operators also include prefix/postfix. if (auto func = dyn_cast(this)) { @@ -4187,6 +4181,13 @@ bool ClassDecl::isActor() const { false); } +bool ClassDecl::isDefaultActor() const { + auto mutableThis = const_cast(this); + return evaluateOrDefault(getASTContext().evaluator, + IsDefaultActorRequest{mutableThis}, + false); +} + bool ClassDecl::hasMissingDesignatedInitializers() const { return evaluateOrDefault( getASTContext().evaluator, @@ -4236,6 +4237,7 @@ ClassAncestryFlagsRequest::evaluate(Evaluator &evaluator, ClassDecl *value) const { AncestryOptions result; const ClassDecl *CD = value; + const ClassDecl *PreviousCD = nullptr; auto *M = value->getParentModule(); do { @@ -4250,9 +4252,17 @@ ClassAncestryFlagsRequest::evaluate(Evaluator &evaluator, if (CD->getAttrs().hasAttribute()) result |= AncestryFlags::ObjCMembers; - if (CD->hasClangNode()) + if (CD->hasClangNode()) { result |= AncestryFlags::ClangImported; + // Inheriting from an ObjC-defined class generally forces the use + // of the ObjC object model, but certain classes that directly + // inherit from NSObject can change that. 
+ if (!PreviousCD || + !(CD->isNSObject() && PreviousCD->isNativeNSObjectSubclass())) + result |= AncestryFlags::ObjCObjectModel; + } + if (CD->hasResilientMetadata()) result |= AncestryFlags::Resilient; @@ -4262,6 +4272,7 @@ ClassAncestryFlagsRequest::evaluate(Evaluator &evaluator, if (CD->getAttrs().hasAttribute()) result |= AncestryFlags::RequiresStoredPropertyInits; + PreviousCD = CD; CD = CD->getSuperclassDecl(); } while (CD != nullptr); @@ -7896,6 +7907,46 @@ Type TypeBase::getSwiftNewtypeUnderlyingType() { return {}; } +bool ClassDecl::hasExplicitCustomActorMethods() const { + auto &ctx = getASTContext(); + for (auto member: getMembers()) { + if (member->isImplicit()) continue; + + // Methods called enqueue(partialTask:) + if (auto func = dyn_cast(member)) { + if (FuncDecl::isEnqueuePartialTaskName(ctx, func->getName())) + return true; + } + } + + return false; +} + +bool ClassDecl::isRootDefaultActor() const { + if (!isDefaultActor()) return false; + auto superclass = getSuperclassDecl(); + return (!superclass || superclass->isNSObject()); +} + +bool ClassDecl::isNativeNSObjectSubclass() const { + // Only if we inherit from NSObject. + auto superclass = getSuperclassDecl(); + if (!superclass || !superclass->isNSObject()) + return false; + + // For now, only actors (regardless of whether they're default actors). + // Eventually we should roll this out to more classes, but we have to + // do it with ABI compatibility. 
+ return isActor(); +} + +bool ClassDecl::isNSObject() const { + if (!getName().is("NSObject")) return false; + ASTContext &ctx = getASTContext(); + return (getModuleContext()->getName() == ctx.Id_Foundation || + getModuleContext()->getName() == ctx.Id_ObjectiveC); +} + Type ClassDecl::getSuperclass() const { ASTContext &ctx = getASTContext(); return evaluateOrDefault(ctx.evaluator, diff --git a/lib/AST/FineGrainedDependencies.cpp b/lib/AST/FineGrainedDependencies.cpp index 80a4a1fbc5d72..253596efe446c 100644 --- a/lib/AST/FineGrainedDependencies.cpp +++ b/lib/AST/FineGrainedDependencies.cpp @@ -339,9 +339,9 @@ void DepGraphNode::dump() const { void DepGraphNode::dump(raw_ostream &os) const { key.dump(os); if (fingerprint.hasValue()) - llvm::errs() << "fingerprint: " << fingerprint.getValue() << ""; + os << "fingerprint: " << fingerprint.getValue() << ""; else - llvm::errs() << "no fingerprint"; + os << "no fingerprint"; } void SourceFileDepGraphNode::dump() const { diff --git a/lib/AST/FrontendSourceFileDepGraphFactory.cpp b/lib/AST/FrontendSourceFileDepGraphFactory.cpp index 7b5ff910b730f..7c90b6af7074d 100644 --- a/lib/AST/FrontendSourceFileDepGraphFactory.cpp +++ b/lib/AST/FrontendSourceFileDepGraphFactory.cpp @@ -406,26 +406,19 @@ class UsedDeclEnumerator { StringRef swiftDeps; /// Cache these for efficiency - const DependencyKey sourceFileInterface; const DependencyKey sourceFileImplementation; - function_ref createDefUse; - public: - UsedDeclEnumerator( - const SourceFile *SF, const DependencyTracker &depTracker, - StringRef swiftDeps, - function_ref - createDefUse) + UsedDeclEnumerator(const SourceFile *SF, const DependencyTracker &depTracker, + StringRef swiftDeps) : SF(SF), depTracker(depTracker), swiftDeps(swiftDeps), - sourceFileInterface(DependencyKey::createKeyForWholeSourceFile( - DeclAspect::interface, swiftDeps)), sourceFileImplementation(DependencyKey::createKeyForWholeSourceFile( - DeclAspect::implementation, swiftDeps)), - 
createDefUse(createDefUse) {} + DeclAspect::implementation, swiftDeps)) {} public: - void enumerateAllUses() { + using UseEnumerator = + llvm::function_ref; + void enumerateAllUses(UseEnumerator enumerator) { auto &Ctx = SF->getASTContext(); Ctx.evaluator.enumerateReferencesInFile(SF, [&](const auto &ref) { std::string name = ref.name.userFacingName().str(); @@ -437,36 +430,37 @@ class UsedDeclEnumerator { case Kind::Tombstone: llvm_unreachable("Cannot enumerate dead reference!"); case Kind::TopLevel: - return enumerateUse("", name); + return enumerateUse("", name, enumerator); case Kind::Dynamic: - return enumerateUse("", name); + return enumerateUse("", name, enumerator); case Kind::PotentialMember: { std::string context = DependencyKey::computeContextForProvidedEntity< NodeKind::potentialMember>(nominal); - return enumerateUse(context, ""); + return enumerateUse(context, "", enumerator); } case Kind::UsedMember: { std::string context = DependencyKey::computeContextForProvidedEntity( nominal); - return enumerateUse(context, name); + return enumerateUse(context, name, enumerator); } } }); - enumerateExternalUses(); - enumerateNominalUses(); + enumerateExternalUses(enumerator); + enumerateNominalUses(enumerator); } private: template - void enumerateUse(StringRef context, StringRef name) { + void enumerateUse(StringRef context, StringRef name, + UseEnumerator createDefUse) { // Assume that what is depended-upon is the interface createDefUse( DependencyKey(kind, DeclAspect::interface, context.str(), name.str()), sourceFileImplementation); } - void enumerateNominalUses() { + void enumerateNominalUses(UseEnumerator enumerator) { auto &Ctx = SF->getASTContext(); Ctx.evaluator.enumerateReferencesInFile(SF, [&](const auto &ref) { const NominalTypeDecl *subject = ref.subject; @@ -477,26 +471,26 @@ class UsedDeclEnumerator { std::string context = DependencyKey::computeContextForProvidedEntity( subject); - enumerateUse(context, ""); + enumerateUse(context, "", enumerator); 
}); } - void enumerateExternalUses() { + void enumerateExternalUses(UseEnumerator enumerator) { for (StringRef s : depTracker.getIncrementalDependencies()) - enumerateUse("", s); + enumerateUse("", s, enumerator); for (StringRef s : depTracker.getDependencies()) - enumerateUse("", s); + enumerateUse("", s, enumerator); } }; } // end namespace void FrontendSourceFileDepGraphFactory::addAllUsedDecls() { - UsedDeclEnumerator(SF, depTracker, swiftDeps, - [&](const DependencyKey &def, const DependencyKey &use) { - addAUsedDecl(def, use); - }) - .enumerateAllUses(); + UsedDeclEnumerator(SF, depTracker, swiftDeps) + .enumerateAllUses( + [&](const DependencyKey &def, const DependencyKey &use) { + addAUsedDecl(def, use); + }); } //============================================================================== diff --git a/lib/AST/NameLookup.cpp b/lib/AST/NameLookup.cpp index 32b5e4bd9f399..9d75f1a1d9321 100644 --- a/lib/AST/NameLookup.cpp +++ b/lib/AST/NameLookup.cpp @@ -1872,6 +1872,11 @@ AnyObjectLookupRequest::evaluate(Evaluator &evaluator, const DeclContext *dc, using namespace namelookup; QualifiedLookupResult decls; +#if SWIFT_BUILD_ONLY_SYNTAXPARSERLIB + // Avoid calling `clang::ObjCMethodDecl::isDirectMethod()`. + return decls; +#endif + // Type-only lookup won't find anything on AnyObject. 
if (options & NL_OnlyTypes) return decls; diff --git a/lib/AST/Type.cpp b/lib/AST/Type.cpp index 76113f053a572..9535ab9982a15 100644 --- a/lib/AST/Type.cpp +++ b/lib/AST/Type.cpp @@ -208,6 +208,8 @@ bool CanType::isReferenceTypeImpl(CanType type, const GenericSignatureImpl *sig, case TypeKind::BuiltinIntegerLiteral: case TypeKind::BuiltinFloat: case TypeKind::BuiltinRawPointer: + case TypeKind::BuiltinRawUnsafeContinuation: + case TypeKind::BuiltinJob: case TypeKind::BuiltinUnsafeValueBuffer: case TypeKind::BuiltinVector: case TypeKind::Tuple: @@ -4928,7 +4930,7 @@ bool UnownedStorageType::isLoadable(ResilienceExpansion resilience) const { } static ReferenceCounting getClassReferenceCounting(ClassDecl *theClass) { - return (theClass->checkAncestry(AncestryFlags::ClangImported) + return (theClass->usesObjCObjectModel() ? ReferenceCounting::ObjC : ReferenceCounting::Native); } @@ -5003,6 +5005,8 @@ ReferenceCounting TypeBase::getReferenceCounting() { case TypeKind::BuiltinIntegerLiteral: case TypeKind::BuiltinFloat: case TypeKind::BuiltinRawPointer: + case TypeKind::BuiltinRawUnsafeContinuation: + case TypeKind::BuiltinJob: case TypeKind::BuiltinUnsafeValueBuffer: case TypeKind::BuiltinVector: case TypeKind::Tuple: diff --git a/lib/Basic/StringExtras.cpp b/lib/Basic/StringExtras.cpp index 3884a8d72deb6..53825cd29c1f8 100644 --- a/lib/Basic/StringExtras.cpp +++ b/lib/Basic/StringExtras.cpp @@ -427,42 +427,34 @@ StringRef swift::matchLeadingTypeName(StringRef name, // ending of the type name. auto nameWords = camel_case::getWords(name); auto typeWords = camel_case::getWords(typeName.Name); - auto nameWordIter = nameWords.begin(), - nameWordIterEnd = nameWords.end(); - auto typeWordRevIter = typeWords.rbegin(), - typeWordRevIterEnd = typeWords.rend(); + auto nameWordIter = nameWords.begin(); + auto typeWordRevIter = typeWords.rbegin(); // Find the last instance of the first word in the name within // the words in the type name. 
- typeWordRevIter = std::find_if(typeWordRevIter, typeWordRevIterEnd, - [nameWordIter](StringRef word) { - return matchNameWordToTypeWord(*nameWordIter, word); - }); + typeWordRevIter = std::find_if( + typeWordRevIter, typeWords.rend(), [nameWordIter](StringRef word) { + return matchNameWordToTypeWord(*nameWordIter, word); + }); // If we didn't find the first word in the name at all, we're // done. - if (typeWordRevIter == typeWordRevIterEnd) + if (typeWordRevIter == typeWords.rend()) return name; // Now, match from the first word up until the end of the type name. - auto typeWordIter = typeWordRevIter.base(), - typeWordIterEnd = typeWords.end(); - ++nameWordIter; - - // FIXME: Use std::mismatch once we update to C++14. - while (typeWordIter != typeWordIterEnd && - nameWordIter != nameWordIterEnd && - matchNameWordToTypeWord(*nameWordIter, *typeWordIter)) { - ++typeWordIter; - ++nameWordIter; - } + std::advance(nameWordIter, 1); + WordIterator typeMismatch = typeWords.end(), nameMismatch = nameWords.end(); + std::tie(typeMismatch, nameMismatch) = + std::mismatch(typeWordRevIter.base(), typeWords.end(), nameWordIter, + nameWords.end(), matchNameWordToTypeWord); // If we didn't reach the end of the type name, don't match. - if (typeWordIter != typeWordIterEnd) + if (typeMismatch != typeWords.end()) return name; // Chop of the beginning of the name. - return nameWordIter.getRestOfStr(); + return nameMismatch.getRestOfStr(); } StringRef StringScratchSpace::copyString(StringRef string) { @@ -1322,10 +1314,10 @@ bool swift::omitNeedlessWords(StringRef &baseName, } // If this is an asynchronous function where the completion handler is - // the second parameter, and the corresponding name has some additional - // information prior to WithCompletion(Handler), append that + // past the first parameter, the corresponding name has some additional + // information prior to the completion-handler suffix, append that + // additional text to the base name.
- if (isAsync && *completionHandlerIndex == 1 && completionHandlerName) { + if (isAsync && *completionHandlerIndex >= 1 && completionHandlerName) { if (auto extraParamText = stripWithCompletionHandlerSuffix( *completionHandlerName)) { SmallString<32> newBaseName; @@ -1355,20 +1347,6 @@ bool swift::omitNeedlessWords(StringRef &baseName, name, paramTypes[i], role, role == NameRole::BaseName ? allPropertyNames : nullptr); - // If this is an asynchronous function where the completion handler is - // past the second parameter and has additional information in the name, - // add that information to the prior argument name. - if (isAsync && completionHandlerName && *completionHandlerIndex > 1 && - *completionHandlerIndex == i + 1) { - if (auto extraParamText = stripWithCompletionHandlerSuffix( - *completionHandlerName)) { - SmallString<32> extendedName; - extendedName += newName; - appendSentenceCase(extendedName, *extraParamText); - newName = scratch.copyString(extendedName); - } - } - if (name == newName) continue; // Record this change. @@ -1392,5 +1370,17 @@ Optional swift::stripWithCompletionHandlerSuffix(StringRef name) { return name.drop_back(strlen("WithCompletion")); } + if (name.endswith("WithCompletionBlock")) { + return name.drop_back(strlen("WithCompletionBlock")); + } + + if (name.endswith("WithReplyTo")) { + return name.drop_back(strlen("WithReplyTo")); + } + + if (name.endswith("WithReply")) { + return name.drop_back(strlen("WithReply")); + } + return None; } diff --git a/lib/ClangImporter/ClangAdapter.cpp b/lib/ClangImporter/ClangAdapter.cpp index 6c38957639336..7f0dd6644cf8e 100644 --- a/lib/ClangImporter/ClangAdapter.cpp +++ b/lib/ClangImporter/ClangAdapter.cpp @@ -607,16 +607,25 @@ bool importer::hasNativeSwiftDecl(const clang::Decl *decl) { /// Translate the "nullability" notion from API notes into an optional type /// kind. 
-OptionalTypeKind importer::translateNullability(clang::NullabilityKind kind) { +OptionalTypeKind importer::translateNullability( + clang::NullabilityKind kind, bool stripNonResultOptionality) { + if (stripNonResultOptionality && + kind != clang::NullabilityKind::NullableResult) + return OptionalTypeKind::OTK_None; + switch (kind) { case clang::NullabilityKind::NonNull: return OptionalTypeKind::OTK_None; case clang::NullabilityKind::Nullable: + case clang::NullabilityKind::NullableResult: return OptionalTypeKind::OTK_Optional; case clang::NullabilityKind::Unspecified: return OptionalTypeKind::OTK_ImplicitlyUnwrappedOptional; + + default: + return OptionalTypeKind::OTK_Optional; } llvm_unreachable("Invalid NullabilityKind."); diff --git a/lib/ClangImporter/ClangAdapter.h b/lib/ClangImporter/ClangAdapter.h index 9a15bbd523efa..ed7f7897825b2 100644 --- a/lib/ClangImporter/ClangAdapter.h +++ b/lib/ClangImporter/ClangAdapter.h @@ -137,7 +137,11 @@ bool isNSString(clang::QualType); bool hasNativeSwiftDecl(const clang::Decl *decl); /// Translation API nullability from an API note into an optional kind. -OptionalTypeKind translateNullability(clang::NullabilityKind kind); +/// +/// \param stripNonResultOptionality Whether to strip optionality from +/// \c _Nullable but not \c _Nullable_result. +OptionalTypeKind translateNullability( + clang::NullabilityKind kind, bool stripNonResultOptionality = false); /// Determine whether the given method is a required initializer /// of the given class.
diff --git a/lib/ClangImporter/ClangModuleDependencyScanner.cpp b/lib/ClangImporter/ClangModuleDependencyScanner.cpp index b0919422f3f9d..fc40508394c34 100644 --- a/lib/ClangImporter/ClangModuleDependencyScanner.cpp +++ b/lib/ClangImporter/ClangModuleDependencyScanner.cpp @@ -196,7 +196,6 @@ void ClangImporter::recordModuleDependencies( std::string PCMPath; std::string ModuleMapPath; }; - auto ModuleCacheDir = swift::getModuleCachePathFromClang(getClangInstance()); for (const auto &clangModuleDep : clangDependencies.DiscoveredModules) { // If we've already cached this information, we're done. diff --git a/lib/ClangImporter/ImportDecl.cpp b/lib/ClangImporter/ImportDecl.cpp index 924dec0857d45..6a631367947dd 100644 --- a/lib/ClangImporter/ImportDecl.cpp +++ b/lib/ClangImporter/ImportDecl.cpp @@ -3326,6 +3326,14 @@ namespace { continue; } + if (auto recordDecl = dyn_cast(m)) { + // An injected class name decl will just point back to the parent + // decl, so don't import it. + if (recordDecl->isInjectedClassName()) { + continue; + } + } + auto nd = dyn_cast(m); if (!nd) { // We couldn't import the member, so we can't reference it in Swift. @@ -3569,6 +3577,19 @@ namespace { return VisitCXXRecordDecl(def); } + Decl *VisitClassTemplateDecl(const clang::ClassTemplateDecl *decl) { + // When loading a namespace's sub-decls, we won't add template + // specializations, so make sure to do that here. + for (auto spec : decl->specializations()) { + if (auto importedSpec = Impl.importDecl(spec, getVersion())) { + if (auto namespaceDecl = + dyn_cast(importedSpec->getDeclContext())) + namespaceDecl->addMember(importedSpec); + } + } + return nullptr; + } + Decl *VisitClassTemplatePartialSpecializationDecl( const clang::ClassTemplatePartialSpecializationDecl *decl) { // Note: partial template specializations are not imported. 
@@ -3591,8 +3612,10 @@ namespace { if (name.empty()) return nullptr; - switch (Impl.getEnumKind(clangEnum)) { - case EnumKind::Constants: { + auto enumKind = Impl.getEnumKind(clangEnum); + switch (enumKind) { + case EnumKind::Constants: + case EnumKind::Unknown: { // The enumeration was simply mapped to an integral type. Create a // constant with that integral type. @@ -3609,54 +3632,14 @@ namespace { isInSystemModule(dc), Bridgeability::None); if (!type) return nullptr; - // FIXME: Importing the type will recursively revisit this same - // EnumConstantDecl. Short-circuit out if we already emitted the import - // for this decl. - if (auto Known = Impl.importDeclCached(decl, getVersion())) - return Known; // Create the global constant. - auto result = Impl.createConstant(name, dc, type, - clang::APValue(decl->getInitVal()), - ConstantConvertKind::None, - /*static*/dc->isTypeContext(), decl); - Impl.ImportedDecls[{decl->getCanonicalDecl(), getVersion()}] = result; - - // If this is a compatibility stub, mark it as such. - if (correctSwiftName) - markAsVariant(result, *correctSwiftName); - - return result; - } - - case EnumKind::Unknown: { - // The enumeration was mapped to a struct containing the integral - // type. Create a constant with that struct type. - - // The context where the constant will be introduced. - auto dc = - Impl.importDeclContextOf(decl, importedName.getEffectiveContext()); - if (!dc) - return nullptr; - - // Import the enumeration type. - auto enumType = Impl.importTypeIgnoreIUO( - Impl.getClangASTContext().getTagDeclType(clangEnum), - ImportTypeKind::Value, isInSystemModule(dc), Bridgeability::None); - if (!enumType) - return nullptr; - - // FIXME: Importing the type will can recursively revisit this same - // EnumConstantDecl. Short-circuit out if we already emitted the import - // for this decl. - if (auto Known = Impl.importDeclCached(decl, getVersion())) - return Known; - - // Create the global constant. 
- auto result = Impl.createConstant(name, dc, enumType, - clang::APValue(decl->getInitVal()), - ConstantConvertKind::Construction, - /*static*/ false, decl); + bool isStatic = enumKind != EnumKind::Unknown && dc->isTypeContext(); + auto result = Impl.createConstant( + name, dc, type, clang::APValue(decl->getInitVal()), + enumKind == EnumKind::Unknown ? ConstantConvertKind::Construction + : ConstantConvertKind::None, + isStatic, decl); Impl.ImportedDecls[{decl->getCanonicalDecl(), getVersion()}] = result; // If this is a compatibility stub, mark it as such. @@ -3678,7 +3661,7 @@ namespace { return nullptr; } } - + llvm_unreachable("Invalid EnumKind."); } @@ -3738,7 +3721,9 @@ namespace { assert(((decl->getNumParams() == argNames.size() + 1) || isAccessor) && (*selfIdx < decl->getNumParams()) && "where's self?"); } else { - assert(decl->getNumParams() == argNames.size() || isAccessor); + unsigned numParamsAdjusted = + decl->getNumParams() + (decl->isVariadic() ? 1 : 0); + assert(numParamsAdjusted == argNames.size() || isAccessor); } SmallVector nonSelfParams; @@ -3888,6 +3873,10 @@ namespace { bodyParams = getNonSelfParamList(dc, decl, selfIdx, name.getArgumentNames(), allowNSUIntegerAsInt, !name, templateParams); + // If we can't import a param for some reason (ex. it's a dependent + // type), bail. + if (!bodyParams) + return nullptr; importedType = Impl.importFunctionReturnType(dc, decl, allowNSUIntegerAsInt); diff --git a/lib/ClangImporter/ImportName.cpp b/lib/ClangImporter/ImportName.cpp index 156e63e3fd5c6..005c2fa3bbe3f 100644 --- a/lib/ClangImporter/ImportName.cpp +++ b/lib/ClangImporter/ImportName.cpp @@ -539,12 +539,26 @@ determineFactoryInitializerKind(const clang::ObjCMethodDecl *method) { } namespace { +/// Describes the details of any swift_name or swift_async_name +/// attribute found via +struct AnySwiftNameAttr { + /// The name itself. + StringRef name; + + /// Whether this was a swift_async_name attribute. 
+ bool isAsync; + + friend bool operator==(AnySwiftNameAttr lhs, AnySwiftNameAttr rhs) { + return lhs.name == rhs.name && lhs.isAsync == rhs.isAsync; + } +}; + /// Aggregate struct for the common members of clang::SwiftVersionedAttr and /// clang::SwiftVersionedRemovalAttr. /// /// For a SwiftVersionedRemovalAttr, the Attr member will be null. struct VersionedSwiftNameInfo { - const clang::SwiftNameAttr *Attr; + Optional Attr; llvm::VersionTuple Version; bool IsReplacedByActive; }; @@ -594,8 +608,7 @@ checkVersionedSwiftName(VersionedSwiftNameInfo info, return VersionedSwiftNameAction::Use; } - -static const clang::SwiftNameAttr * +static Optional findSwiftNameAttr(const clang::Decl *decl, ImportNameVersion version) { #ifndef NDEBUG if (Optional def = getDefinitionForClangTypeDecl(decl)) { @@ -605,7 +618,24 @@ findSwiftNameAttr(const clang::Decl *decl, ImportNameVersion version) { #endif if (version == ImportNameVersion::raw()) - return nullptr; + return None; + + /// Decode the given Clang attribute to try to determine whether it is + /// a Swift name attribute. + auto decodeAttr = + [&](const clang::Attr *attr) -> Optional { + if (version.supportsConcurrency()) { + if (auto asyncAttr = dyn_cast(attr)) { + return AnySwiftNameAttr { asyncAttr->getName(), /*isAsync=*/true }; + } + } + + if (auto nameAttr = dyn_cast(attr)) { + return AnySwiftNameAttr { nameAttr->getName(), /*isAsync=*/false }; + } + + return None; + }; // Handle versioned API notes for Swift 3 and later. This is the common case. if (version > ImportNameVersion::swift2()) { @@ -615,15 +645,22 @@ findSwiftNameAttr(const clang::Decl *decl, ImportNameVersion version) { if (importer::isSpecialUIKitStructZeroProperty(namedDecl)) version = ImportNameVersion::swift4_2(); - const auto *activeAttr = decl->getAttr(); - const clang::SwiftNameAttr *result = activeAttr; + // Dig out the attribute that specifies the Swift name. 
+ Optional activeAttr; + if (auto asyncAttr = decl->getAttr()) + activeAttr = decodeAttr(asyncAttr); + if (!activeAttr) { + if (auto nameAttr = decl->getAttr()) + activeAttr = decodeAttr(nameAttr); + } + + Optional result = activeAttr; llvm::VersionTuple bestSoFar; for (auto *attr : decl->attrs()) { VersionedSwiftNameInfo info; if (auto *versionedAttr = dyn_cast(attr)) { - auto *added = - dyn_cast(versionedAttr->getAttrToAdd()); + auto added = decodeAttr(versionedAttr->getAttrToAdd()); if (!added) continue; @@ -634,7 +671,7 @@ findSwiftNameAttr(const clang::Decl *decl, ImportNameVersion version) { dyn_cast(attr)) { if (removeAttr->getAttrKindToRemove() != clang::attr::SwiftName) continue; - info = {nullptr, removeAttr->getVersion(), + info = {None, removeAttr->getVersion(), removeAttr->getIsReplacedByActive()}; } else { @@ -673,11 +710,11 @@ findSwiftNameAttr(const clang::Decl *decl, ImportNameVersion version) { // The remainder of this function emulates the limited form of swift_name // supported in Swift 2. auto attr = decl->getAttr(); - if (!attr) return nullptr; + if (!attr) return None; // API notes produce attributes with no source location; ignore them because // they weren't used for naming in Swift 2. - if (attr->getLocation().isInvalid()) return nullptr; + if (attr->getLocation().isInvalid()) return None; // Hardcode certain kinds of explicitly-written Swift names that were // permitted and used in Swift 2. All others are ignored, so that we are @@ -686,8 +723,8 @@ findSwiftNameAttr(const clang::Decl *decl, ImportNameVersion version) { if (auto enumerator = dyn_cast(decl)) { // Foundation's NSXMLDTDKind had an explicit swift_name attribute in // Swift 2. Honor it. 
- if (enumerator->getName() == "NSXMLDTDKind") return attr; - return nullptr; + if (enumerator->getName() == "NSXMLDTDKind") return decodeAttr(attr); + return None; } if (auto method = dyn_cast(decl)) { @@ -695,19 +732,19 @@ findSwiftNameAttr(const clang::Decl *decl, ImportNameVersion version) { if (attr->getName().startswith("init(")) { // If we have a class method, honor the annotation to turn a class // method into an initializer. - if (method->isClassMethod()) return attr; + if (method->isClassMethod()) return decodeAttr(attr); - return nullptr; + return None; } // Special case: preventing a mapping to an initializer. if (matchFactoryAsInitName(method) && determineFactoryInitializerKind(method)) - return attr; + return decodeAttr(attr); - return nullptr; + return None; } - return nullptr; + return None; } /// Determine whether the given class method should be imported as @@ -716,8 +753,8 @@ static FactoryAsInitKind getFactoryAsInit(const clang::ObjCInterfaceDecl *classDecl, const clang::ObjCMethodDecl *method, ImportNameVersion version) { - if (auto *customNameAttr = findSwiftNameAttr(method, version)) { - if (customNameAttr->getName().startswith("init(")) + if (auto customNameAttr = findSwiftNameAttr(method, version)) { + if (customNameAttr->name.startswith("init(")) return FactoryAsInitKind::AsInitializer; else return FactoryAsInitKind::AsClassMethod; @@ -1135,8 +1172,12 @@ Optional NameImporter::considerErrorImport( /// Whether the given parameter name identifies a completion handler. 
static bool isCompletionHandlerParamName(StringRef paramName) { - return paramName == "completionHandler" || paramName == "completion" || - paramName == "withCompletionHandler" || paramName == "withCompletion"; + return paramName == "completionHandler" || + paramName == "withCompletionHandler" || + paramName == "completion" || paramName == "withCompletion" || + paramName == "completionBlock" || paramName == "withCompletionBlock" || + paramName == "reply" || paramName == "withReply" || + paramName == "replyTo" || paramName == "withReplyTo"; } // Determine whether the given type is a nullable NSError type. @@ -1172,17 +1213,23 @@ NameImporter::considerAsyncImport( StringRef baseName, SmallVectorImpl ¶mNames, ArrayRef params, - bool isInitializer, bool hasCustomName, + bool isInitializer, CustomAsyncName customName, Optional errorInfo) { // If there are no unclaimed parameters, there's no . unsigned errorParamAdjust = errorInfo ? 1 : 0; if (params.size() - errorParamAdjust == 0) return None; + // When there is a custom async name, it will have removed the completion + // handler parameter already. + unsigned customAsyncNameAdjust = + customName == CustomAsyncName::SwiftAsyncName ? 1 : 0; + // If the # of parameter names doesn't line up with the # of parameters, // bail out. There are extra C parameters on the method or a custom name // was incorrect. - if (params.size() != paramNames.size() + errorParamAdjust) + if (params.size() != + paramNames.size() + errorParamAdjust + customAsyncNameAdjust) return None; // The last parameter will be the completion handler for an async function. @@ -1191,20 +1238,37 @@ NameImporter::considerAsyncImport( // Determine whether the naming indicates that this is a completion // handler. 
- if (isCompletionHandlerParamName( - paramNames[completionHandlerParamNameIndex]) || - (completionHandlerParamNameIndex > 0 && - stripWithCompletionHandlerSuffix( - paramNames[completionHandlerParamNameIndex]))) { - // The argument label itself has an appropriate name. - } else if (!hasCustomName && completionHandlerParamIndex == 0 && - stripWithCompletionHandlerSuffix(baseName)) { - // The base name implies that the first parameter is a completion handler. - } else if (isCompletionHandlerParamName( - params[completionHandlerParamIndex]->getName())) { - // The parameter has an appropriate name. - } else { + switch (customName) { + case CustomAsyncName::None: + // Check whether the first parameter is the completion handler and the + // base name has a suitable completion-handler suffix. + if (completionHandlerParamIndex == 0 && + stripWithCompletionHandlerSuffix(baseName)) + break; + + LLVM_FALLTHROUGH; + + case CustomAsyncName::SwiftName: + // Check whether the argument label itself has an appropriate name. + if (isCompletionHandlerParamName( + paramNames[completionHandlerParamNameIndex]) || + (completionHandlerParamNameIndex > 0 && + stripWithCompletionHandlerSuffix( + paramNames[completionHandlerParamNameIndex]))) { + break; + } + + // Check whether the parameter itself has a name that indicates that + // it is a completion handler. + if (isCompletionHandlerParamName( + params[completionHandlerParamIndex]->getName())) + break; + return None; + + case CustomAsyncName::SwiftAsyncName: + // Having a custom async name implies that this is a completion handler. + break; } // Used for returns once we've determined that the method cannot be @@ -1284,8 +1348,16 @@ NameImporter::considerAsyncImport( break; } - // Drop the completion handler parameter name. - paramNames.erase(paramNames.begin() + completionHandlerParamNameIndex); + // Drop the completion handler parameter name when needed. 
+ switch (customName) { + case CustomAsyncName::None: + case CustomAsyncName::SwiftName: + paramNames.erase(paramNames.begin() + completionHandlerParamNameIndex); + break; + + case CustomAsyncName::SwiftAsyncName: + break; + } return ForeignAsyncConvention::Info( completionHandlerParamIndex, completionHandlerErrorParamIndex); @@ -1449,11 +1521,11 @@ ImportedName NameImporter::importNameImpl(const clang::NamedDecl *D, } // If we have a swift_name attribute, use that. - if (auto *nameAttr = findSwiftNameAttr(D, version)) { + if (auto nameAttr = findSwiftNameAttr(D, version)) { bool skipCustomName = false; // Parse the name. - ParsedDeclName parsedName = parseDeclName(nameAttr->getName()); + ParsedDeclName parsedName = parseDeclName(nameAttr->name); if (!parsedName || parsedName.isOperator()) return result; @@ -1528,7 +1600,9 @@ ImportedName NameImporter::importNameImpl(const clang::NamedDecl *D, if (version.supportsConcurrency()) { if (auto asyncInfo = considerAsyncImport( method, parsedName.BaseName, parsedName.ArgumentLabels, - params, isInitializer, /*hasCustomName=*/true, + params, isInitializer, + nameAttr->isAsync ? CustomAsyncName::SwiftAsyncName + : CustomAsyncName::SwiftName, result.getErrorInfo())) { result.info.hasAsyncInfo = true; result.info.asyncInfo = *asyncInfo; @@ -1537,6 +1611,10 @@ ImportedName NameImporter::importNameImpl(const clang::NamedDecl *D, result.declName = formDeclName( swiftCtx, parsedName.BaseName, parsedName.ArgumentLabels, /*isFunction=*/true, isInitializer); + } else if (nameAttr->isAsync) { + // The custom name was for an async import, but we didn't in fact + // import as async for some reason. Ignore this import. 
+ return ImportedName(); } } } @@ -1812,7 +1890,7 @@ ImportedName NameImporter::importNameImpl(const clang::NamedDecl *D, result.info.accessorKind == ImportedAccessorKind::None) { if (auto asyncInfo = considerAsyncImport( objcMethod, baseName, argumentNames, params, isInitializer, - /*hasCustomName=*/false, result.getErrorInfo())) { + CustomAsyncName::None, result.getErrorInfo())) { result.info.hasAsyncInfo = true; result.info.asyncInfo = *asyncInfo; } diff --git a/lib/ClangImporter/ImportName.h b/lib/ClangImporter/ImportName.h index a35fb6760e6ba..567530f9d9188 100644 --- a/lib/ClangImporter/ImportName.h +++ b/lib/ClangImporter/ImportName.h @@ -335,6 +335,17 @@ class ImportedName { /// in "Notification", or it there would be nothing left. StringRef stripNotification(StringRef name); +/// Describes how a custom name was provided for 'async' import. +enum class CustomAsyncName { + /// No custom name was provided. + None, + /// A custom swift_name (but not swift_async_name) was provided. + SwiftName, + /// A custom swift_async_name was provided, which won't have a completion + /// handler argument label. + SwiftAsyncName, +}; + /// Class to determine the Swift name of foreign entities. Currently fairly /// stateless and borrows from the ClangImporter::Implementation, but in the /// future will be more self-contained and encapsulated. 
@@ -458,7 +469,7 @@ class NameImporter { StringRef baseName, SmallVectorImpl ¶mNames, ArrayRef params, - bool isInitializer, bool hasCustomName, + bool isInitializer, CustomAsyncName customName, Optional errorInfo); EffectiveClangContext determineEffectiveContext(const clang::NamedDecl *, diff --git a/lib/ClangImporter/ImportType.cpp b/lib/ClangImporter/ImportType.cpp index afb547ce4229f..82212344f5162 100644 --- a/lib/ClangImporter/ImportType.cpp +++ b/lib/ClangImporter/ImportType.cpp @@ -195,13 +195,18 @@ namespace { ClangImporter::Implementation &Impl; bool AllowNSUIntegerAsInt; Bridgeability Bridging; + const clang::FunctionType *CompletionHandlerType; + Optional CompletionHandlerErrorParamIndex; public: SwiftTypeConverter(ClangImporter::Implementation &impl, bool allowNSUIntegerAsInt, - Bridgeability bridging) + Bridgeability bridging, + const clang::FunctionType *completionHandlerType, + Optional completionHandlerErrorParamIndex) : Impl(impl), AllowNSUIntegerAsInt(allowNSUIntegerAsInt), - Bridging(bridging) {} + Bridging(bridging), CompletionHandlerType(completionHandlerType), + CompletionHandlerErrorParamIndex(completionHandlerErrorParamIndex) {} using TypeVisitor::Visit; ImportResult Visit(clang::QualType type) { @@ -612,8 +617,19 @@ namespace { for (auto param = type->param_type_begin(), paramEnd = type->param_type_end(); param != paramEnd; ++param) { + // Determine whether we have a result parameter of a completion + // handler that can also express a thrown error. 
+ ImportTypeKind paramImportKind = ImportTypeKind::Parameter; + unsigned paramIdx = param - type->param_type_begin(); + if (CompletionHandlerType && + Impl.getClangASTContext().hasSameType( + CompletionHandlerType, type) && + paramIdx != CompletionHandlerErrorParamIndex) { + paramImportKind = ImportTypeKind::CompletionHandlerResultParameter; + } + auto swiftParamTy = Impl.importTypeIgnoreIUO( - *param, ImportTypeKind::Parameter, AllowNSUIntegerAsInt, Bridging, + *param, paramImportKind, AllowNSUIntegerAsInt, Bridging, OTK_Optional); if (!swiftParamTy) return Type(); @@ -1191,6 +1207,7 @@ static bool canBridgeTypes(ImportTypeKind importKind) { case ImportTypeKind::Result: case ImportTypeKind::AuditedResult: case ImportTypeKind::Parameter: + case ImportTypeKind::CompletionHandlerResultParameter: case ImportTypeKind::CFRetainedOutParameter: case ImportTypeKind::CFUnretainedOutParameter: case ImportTypeKind::Property: @@ -1218,6 +1235,7 @@ static bool isCFAudited(ImportTypeKind importKind) { case ImportTypeKind::AuditedVariable: case ImportTypeKind::AuditedResult: case ImportTypeKind::Parameter: + case ImportTypeKind::CompletionHandlerResultParameter: case ImportTypeKind::CFRetainedOutParameter: case ImportTypeKind::CFUnretainedOutParameter: case ImportTypeKind::Property: @@ -1520,7 +1538,8 @@ static ImportedType adjustTypeForConcreteImport( ImportedType ClangImporter::Implementation::importType( clang::QualType type, ImportTypeKind importKind, bool allowNSUIntegerAsInt, Bridgeability bridging, OptionalTypeKind optionality, - bool resugarNSErrorPointer) { + bool resugarNSErrorPointer, + Optional completionHandlerErrorParamIndex) { if (type.isNull()) return {Type(), false}; @@ -1555,11 +1574,28 @@ ImportedType ClangImporter::Implementation::importType( // If nullability is provided as part of the type, that overrides // optionality provided externally. 
if (auto nullability = type->getNullability(clangContext)) { - optionality = translateNullability(*nullability); + bool stripNonResultOptionality = + importKind == ImportTypeKind::CompletionHandlerResultParameter; + + optionality = translateNullability(*nullability, stripNonResultOptionality); + } + + // If this is a completion handler parameter, record the function type whose + // parameters will act as the results of the completion handler. + const clang::FunctionType *completionHandlerType = nullptr; + if (completionHandlerErrorParamIndex) { + if (auto blockPtrType = type->getAs()) { + completionHandlerType = + blockPtrType->getPointeeType()->castAs(); + + type = clang::QualType(blockPtrType, 0); + } } // Perform abstract conversion, ignoring how the type is actually used. - SwiftTypeConverter converter(*this, allowNSUIntegerAsInt, bridging); + SwiftTypeConverter converter( + *this, allowNSUIntegerAsInt, bridging, + completionHandlerType, completionHandlerErrorParamIndex); auto importResult = converter.Visit(type); // Now fix up the type based on how we're concretely using it. @@ -2085,13 +2121,7 @@ static Type decomposeCompletionHandlerType( paramIdx == *info.completionHandlerErrorParamIndex()) continue; - // If there is an error parameter, remove nullability. - Type paramType = param.getPlainType(); - // TODO: Clang should gain a nullability form that overrides this. - if (info.completionHandlerErrorParamIndex()) - paramType = paramType->lookThroughAllOptionalTypes(); - - resultTypeElts.push_back(paramType); + resultTypeElts.push_back(param.getPlainType()); } switch (resultTypeElts.size()) { @@ -2266,6 +2296,7 @@ ImportedType ClangImporter::Implementation::importMethodParamsAndReturnType( } // Special case for NSDictionary's subscript. 
+ ImportTypeKind importKind = ImportTypeKind::Parameter; Type swiftParamTy; bool paramIsIUO; if (kind == SpecialMethodKind::NSDictionarySubscriptGetter && @@ -2276,12 +2307,19 @@ ImportedType ClangImporter::Implementation::importMethodParamsAndReturnType( paramIsIUO = optionalityOfParam == OTK_ImplicitlyUnwrappedOptional; } else { - ImportTypeKind importKind = ImportTypeKind::Parameter; if (param->hasAttr()) importKind = ImportTypeKind::CFRetainedOutParameter; else if (param->hasAttr()) importKind = ImportTypeKind::CFUnretainedOutParameter; + // Figure out if this is a completion handler parameter whose error + // parameter is used to indicate throwing. + Optional completionHandlerErrorParamIndex; + if (paramIsCompletionHandler) { + completionHandlerErrorParamIndex = + asyncInfo->completionHandlerErrorParamIndex(); + } + // If this is the throws error parameter, we don't need to convert any // NSError** arguments to the sugared NSErrorPointer typealias form, // because all that is done with it is retrieving the canonical @@ -2293,7 +2331,8 @@ ImportedType ClangImporter::Implementation::importMethodParamsAndReturnType( auto importedParamType = importType(paramTy, importKind, allowNSUIntegerAsIntInParam, Bridgeability::Full, optionalityOfParam, - /*resugarNSErrorPointer=*/!paramIsError); + /*resugarNSErrorPointer=*/!paramIsError, + completionHandlerErrorParamIndex); paramIsIUO = importedParamType.isImplicitlyUnwrapped(); swiftParamTy = importedParamType.getType(); } @@ -2321,7 +2360,14 @@ ImportedType ClangImporter::Implementation::importMethodParamsAndReturnType( if (Type replacedSwiftResultTy = decomposeCompletionHandlerType(swiftParamTy, *asyncInfo)) { swiftResultTy = replacedSwiftResultTy; - completionHandlerType = swiftParamTy->getCanonicalType(); + + // Import the original completion handler type without adjustments. 
+ Type origSwiftParamTy = importType( + paramTy, importKind, allowNSUIntegerAsIntInParam, + Bridgeability::Full, optionalityOfParam, + /*resugarNSErrorPointer=*/!paramIsError, None).getType(); + completionHandlerType = mapGenericArgs(origDC, dc, origSwiftParamTy) + ->getCanonicalType(); continue; } diff --git a/lib/ClangImporter/ImporterImpl.h b/lib/ClangImporter/ImporterImpl.h index 6140dd7bfab94..264353b86a303 100644 --- a/lib/ClangImporter/ImporterImpl.h +++ b/lib/ClangImporter/ImporterImpl.h @@ -142,6 +142,13 @@ enum class ImportTypeKind { /// Parameters are always considered CF-audited. Parameter, + /// Import the type of a parameter to a completion handler that can indicate + /// a thrown error. + /// + /// Special handling: + /// * _Nullable_result is treated as _Nonnull rather than _Nullable_result. + CompletionHandlerResultParameter, + /// Import the type of a parameter declared with /// \c CF_RETURNS_RETAINED. /// @@ -1036,7 +1043,8 @@ class LLVM_LIBRARY_VISIBILITY ClangImporter::Implementation importType(clang::QualType type, ImportTypeKind kind, bool allowNSUIntegerAsInt, Bridgeability topLevelBridgeability, OptionalTypeKind optional = OTK_ImplicitlyUnwrappedOptional, - bool resugarNSErrorPointer = true); + bool resugarNSErrorPointer = true, + Optional completionHandlerErrorParamIndex = None); /// Import the given Clang type into Swift. /// diff --git a/lib/ClangImporter/Serializability.cpp b/lib/ClangImporter/Serializability.cpp index 2606964d6d078..067336af04d02 100644 --- a/lib/ClangImporter/Serializability.cpp +++ b/lib/ClangImporter/Serializability.cpp @@ -53,8 +53,10 @@ class SerializationPathFinder { Impl.SwiftContext.getSwiftDeclForExportedClangDecl(decl)) return swiftDecl; - // Otherwise we have no way to find it. - return StableSerializationPath(); + // Allow serialization for non-modular headers as well, with the hope that + // we find the same header when doing unqualified lookup during + // deserialization. 
+ return findImportedPath(named); } private: diff --git a/lib/Demangling/Demangler.cpp b/lib/Demangling/Demangler.cpp index f80f9c677bba0..6e7b133fb4255 100644 --- a/lib/Demangling/Demangler.cpp +++ b/lib/Demangling/Demangler.cpp @@ -108,6 +108,7 @@ bool swift::Demangle::isFunctionAttr(Node::Kind kind) { case Node::Kind::GenericSpecializationNotReAbstracted: case Node::Kind::GenericPartialSpecialization: case Node::Kind::GenericPartialSpecializationNotReAbstracted: + case Node::Kind::GenericSpecializationInResilienceDomain: case Node::Kind::ObjCAttribute: case Node::Kind::NonObjCAttribute: case Node::Kind::DynamicAttribute: @@ -1179,6 +1180,14 @@ NodePointer Demangler::demangleBuiltinType() { Ty = createNode(Node::Kind::BuiltinTypeName, BUILTIN_TYPE_NAME_RAWPOINTER); break; + case 'j': + Ty = createNode(Node::Kind::BuiltinTypeName, + BUILTIN_TYPE_NAME_JOB); + break; + case 'c': + Ty = createNode(Node::Kind::BuiltinTypeName, + BUILTIN_TYPE_NAME_RAWUNSAFECONTINUATION); + break; case 't': Ty = createNode(Node::Kind::BuiltinTypeName, BUILTIN_TYPE_NAME_SILTOKEN); break; @@ -2309,6 +2318,9 @@ NodePointer Demangler::popProtocolConformance() { case 'G': return demangleGenericSpecialization(Node::Kind:: GenericSpecializationNotReAbstracted); + case 'B': + return demangleGenericSpecialization(Node::Kind:: + GenericSpecializationInResilienceDomain); case 's': return demangleGenericSpecialization( Node::Kind::GenericSpecializationPrespecialized); diff --git a/lib/Demangling/NodePrinter.cpp b/lib/Demangling/NodePrinter.cpp index a2410a0afbf93..b3da6cef0c555 100644 --- a/lib/Demangling/NodePrinter.cpp +++ b/lib/Demangling/NodePrinter.cpp @@ -388,6 +388,7 @@ class NodePrinter { case Node::Kind::GenericPartialSpecializationNotReAbstracted: case Node::Kind::GenericSpecialization: case Node::Kind::GenericSpecializationNotReAbstracted: + case Node::Kind::GenericSpecializationInResilienceDomain: case Node::Kind::GenericSpecializationParam: case 
Node::Kind::GenericSpecializationPrespecialized: case Node::Kind::InlinedGenericFunction: @@ -1415,6 +1416,7 @@ NodePointer NodePrinter::print(NodePointer Node, bool asPrefixContext) { "generic not-reabstracted partial specialization", "Signature = "); return nullptr; case Node::Kind::GenericSpecialization: + case Node::Kind::GenericSpecializationInResilienceDomain: printSpecializationPrefix(Node, "generic specialization"); return nullptr; case Node::Kind::GenericSpecializationPrespecialized: diff --git a/lib/Demangling/OldRemangler.cpp b/lib/Demangling/OldRemangler.cpp index 747954cf2e169..b29e8c842a690 100644 --- a/lib/Demangling/OldRemangler.cpp +++ b/lib/Demangling/OldRemangler.cpp @@ -369,6 +369,9 @@ void Remangler::mangleGenericSpecializationPrespecialized(Node *node) { void Remangler::mangleGenericSpecializationNotReAbstracted(Node *node) { unreachable("unsupported"); } +void Remangler::mangleGenericSpecializationInResilienceDomain(Node *node) { + unreachable("unsupported"); +} void Remangler::mangleInlinedGenericFunction(Node *node) { unreachable("unsupported"); diff --git a/lib/Demangling/Remangler.cpp b/lib/Demangling/Remangler.cpp index d55d8e77583e3..5fde0aab6b50a 100644 --- a/lib/Demangling/Remangler.cpp +++ b/lib/Demangling/Remangler.cpp @@ -300,6 +300,7 @@ class Remangler : public RemanglerBase { mangleChildNodesReversed(FuncType); } + void mangleGenericSpecializationNode(Node *node, const char *operatorStr); void mangleAnyNominalType(Node *node); void mangleAnyGenericType(Node *node, StringRef TypeOp); void mangleGenericArgs(Node *node, char &Separator, @@ -743,6 +744,10 @@ void Remangler::mangleBuiltinTypeName(Node *node) { Buffer << 'o'; } else if (text == BUILTIN_TYPE_NAME_RAWPOINTER) { Buffer << 'p'; + } else if (text == BUILTIN_TYPE_NAME_RAWUNSAFECONTINUATION) { + Buffer << 'c'; + } else if (text == BUILTIN_TYPE_NAME_JOB) { + Buffer << 'j'; } else if (text == BUILTIN_TYPE_NAME_SILTOKEN) { Buffer << 't'; } else if (text == 
BUILTIN_TYPE_NAME_INTLITERAL) { @@ -1294,7 +1299,8 @@ void Remangler::mangleGenericPartialSpecializationNotReAbstracted(Node *node) { mangleGenericPartialSpecialization(node); } -void Remangler::mangleGenericSpecialization(Node *node) { +void Remangler:: +mangleGenericSpecializationNode(Node *node, const char *operatorStr) { bool FirstParam = true; for (NodePointer Child : *node) { if (Child->getKind() == Node::Kind::GenericSpecializationParam) { @@ -1304,22 +1310,7 @@ void Remangler::mangleGenericSpecialization(Node *node) { } assert(!FirstParam && "generic specialization with no substitutions"); - switch (node->getKind()) { - case Node::Kind::GenericSpecialization: - Buffer << "Tg"; - break; - case Node::Kind::GenericSpecializationPrespecialized: - Buffer << "Ts"; - break; - case Node::Kind::GenericSpecializationNotReAbstracted: - Buffer << "TG"; - break; - case Node::Kind::InlinedGenericFunction: - Buffer << "Ti"; - break; - default: - unreachable("unsupported node"); - } + Buffer << operatorStr; for (NodePointer Child : *node) { if (Child->getKind() != Node::Kind::GenericSpecializationParam) @@ -1327,16 +1318,24 @@ void Remangler::mangleGenericSpecialization(Node *node) { } } +void Remangler::mangleGenericSpecialization(Node *node) { + mangleGenericSpecializationNode(node, "Tg"); +} + void Remangler::mangleGenericSpecializationPrespecialized(Node *node) { - mangleGenericSpecialization(node); + mangleGenericSpecializationNode(node, "Ts"); } void Remangler::mangleGenericSpecializationNotReAbstracted(Node *node) { - mangleGenericSpecialization(node); + mangleGenericSpecializationNode(node, "TG"); +} + +void Remangler::mangleGenericSpecializationInResilienceDomain(Node *node) { + mangleGenericSpecializationNode(node, "TB"); } void Remangler::mangleInlinedGenericFunction(Node *node) { - mangleGenericSpecialization(node); + mangleGenericSpecializationNode(node, "Ti"); } @@ -1368,6 +1367,7 @@ void Remangler::mangleGlobal(Node *node) { case 
Node::Kind::GenericSpecialization: case Node::Kind::GenericSpecializationPrespecialized: case Node::Kind::GenericSpecializationNotReAbstracted: + case Node::Kind::GenericSpecializationInResilienceDomain: case Node::Kind::InlinedGenericFunction: case Node::Kind::GenericPartialSpecialization: case Node::Kind::GenericPartialSpecializationNotReAbstracted: diff --git a/lib/Driver/Compilation.cpp b/lib/Driver/Compilation.cpp index 10c418565888f..79d7e9584ebde 100644 --- a/lib/Driver/Compilation.cpp +++ b/lib/Driver/Compilation.cpp @@ -244,7 +244,7 @@ namespace driver { std::unique_ptr TQ; /// Cumulative result of PerformJobs(), accumulated from subprocesses. - int Result = EXIT_SUCCESS; + int ResultCode = EXIT_SUCCESS; /// True if any Job crashed. bool AnyAbnormalExit = false; @@ -488,13 +488,6 @@ namespace driver { reloadAndRemarkDepsOnNormalExit(const Job *FinishedCmd, const bool cmdFailed, const bool forRanges, StringRef DependenciesFile) { - return reloadAndRemarkFineGrainedDepsOnNormalExit( - FinishedCmd, cmdFailed, forRanges, DependenciesFile); - } - - std::vector reloadAndRemarkFineGrainedDepsOnNormalExit( - const Job *FinishedCmd, const bool cmdFailed, const bool forRanges, - StringRef DependenciesFile) { const auto changedNodes = getFineGrainedDepGraph(forRanges).loadFromPath( FinishedCmd, DependenciesFile, Comp.getDiags()); const bool loadFailed = !changedNodes; @@ -726,8 +719,8 @@ namespace driver { // Store this task's ReturnCode as our Result if we haven't stored // anything yet. - if (Result == EXIT_SUCCESS) - Result = ReturnCode; + if (ResultCode == EXIT_SUCCESS) + ResultCode = ReturnCode; if (!isa(FinishedCmd->getSource()) || ReturnCode != EXIT_FAILURE) { @@ -832,7 +825,7 @@ namespace driver { } // Since the task signalled, unconditionally set result to -2. 
- Result = -2; + ResultCode = -2; AnyAbnormalExit = true; return TaskFinishedResponse::StopExecution; @@ -1543,7 +1536,7 @@ namespace driver { _3, _4, _5, _6), std::bind(&PerformJobsState::taskSignalled, this, _1, _2, _3, _4, _5, _6, _7))) { - if (Result == EXIT_SUCCESS) { + if (ResultCode == EXIT_SUCCESS) { // FIXME: Error from task queue while Result == EXIT_SUCCESS most // likely means some fork/exec or posix_spawn failed; TaskQueue saw // "an error" at some stage before even calling us with a process @@ -1553,7 +1546,7 @@ namespace driver { Comp.getDiags().diagnose(SourceLoc(), diag::error_unable_to_execute_command, ""); - Result = -2; + ResultCode = -2; AnyAbnormalExit = true; return; } @@ -1561,13 +1554,13 @@ namespace driver { // Returning without error from TaskQueue::execute should mean either an // empty TaskQueue or a failed subprocess. - assert(!(Result == 0 && TQ->hasRemainingTasks())); + assert(!(ResultCode == 0 && TQ->hasRemainingTasks())); // Task-exit callbacks from TaskQueue::execute may have unblocked jobs, // which means there might be PendingExecution jobs to enqueue here. If // there are, we need to continue trying to make progress on the // TaskQueue before we start marking deferred jobs as skipped, below. - if (!PendingExecution.empty() && Result == 0) { + if (!PendingExecution.empty() && ResultCode == 0) { formBatchJobsAndAddPendingJobsToTaskQueue(); continue; } @@ -1592,11 +1585,11 @@ namespace driver { // If we added jobs to the TaskQueue, and we are not in an error state, // we want to give the TaskQueue another run. 
- } while (Result == 0 && TQ->hasRemainingTasks()); + } while (ResultCode == 0 && TQ->hasRemainingTasks()); } void checkUnfinishedJobs() { - if (Result == 0) { + if (ResultCode == 0) { assert(BlockingCommands.empty() && "some blocking commands never finished properly"); } else { @@ -1700,10 +1693,14 @@ namespace driver { }); } - int getResult() { - if (Result == 0) - Result = Comp.getDiags().hadAnyError(); - return Result; + Compilation::Result takeResult() && { + if (ResultCode == 0) + ResultCode = Comp.getDiags().hadAnyError(); + const bool forRanges = Comp.getEnableSourceRangeDependencies(); + const bool hadAbnormalExit = hadAnyAbnormalExit(); + const auto resultCode = ResultCode; + auto &&graph = std::move(*this).takeFineGrainedDepGraph(forRanges); + return Compilation::Result{hadAbnormalExit, resultCode, std::move(graph)}; } bool hadAnyAbnormalExit() { @@ -1763,6 +1760,12 @@ namespace driver { getFineGrainedDepGraph(const bool forRanges) const { return forRanges ? FineGrainedDepGraphForRanges : FineGrainedDepGraph; } + + fine_grained_dependencies::ModuleDepGraph && + takeFineGrainedDepGraph(const bool forRanges) && { + return forRanges ? 
std::move(FineGrainedDepGraphForRanges) + : std::move(FineGrainedDepGraph); + } }; } // namespace driver } // namespace swift @@ -1943,8 +1946,8 @@ static bool writeFilelistIfNecessary(const Job *job, const ArgList &args, return ok; } -int Compilation::performJobsImpl(bool &abnormalExit, - std::unique_ptr &&TQ) { +Compilation::Result +Compilation::performJobsImpl(std::unique_ptr &&TQ) { PerformJobsState State(*this, std::move(TQ)); State.runJobs(); @@ -1953,20 +1956,23 @@ int Compilation::performJobsImpl(bool &abnormalExit, InputInfoMap InputInfo; State.populateInputInfoMap(InputInfo); checkForOutOfDateInputs(Diags, InputInfo); + + auto result = std::move(State).takeResult(); writeCompilationRecord(CompilationRecordPath, ArgsHash, BuildStartTime, InputInfo); + return result; + } else { + return std::move(State).takeResult(); } - abnormalExit = State.hadAnyAbnormalExit(); - return State.getResult(); } -int Compilation::performSingleCommand(const Job *Cmd) { +Compilation::Result Compilation::performSingleCommand(const Job *Cmd) { assert(Cmd->getInputs().empty() && "This can only be used to run a single command with no inputs"); switch (Cmd->getCondition()) { case Job::Condition::CheckDependencies: - return 0; + return Compilation::Result::code(0); case Job::Condition::RunWithoutCascading: case Job::Condition::Always: case Job::Condition::NewlyAdded: @@ -1974,7 +1980,7 @@ int Compilation::performSingleCommand(const Job *Cmd) { } if (!writeFilelistIfNecessary(Cmd, *TranslatedArgs.get(), Diags)) - return 1; + return Compilation::Result::code(1); switch (Level) { case OutputLevel::Normal: @@ -1982,7 +1988,7 @@ int Compilation::performSingleCommand(const Job *Cmd) { break; case OutputLevel::PrintJobs: Cmd->printCommandLineAndEnvironment(llvm::outs()); - return 0; + return Compilation::Result::code(0); case OutputLevel::Verbose: Cmd->printCommandLine(llvm::errs()); break; @@ -2006,11 +2012,12 @@ int Compilation::performSingleCommand(const Job *Cmd) { "expected environment 
variable to be set successfully"); // Bail out early in release builds. if (envResult != 0) { - return envResult; + return Compilation::Result::code(envResult); } } - return ExecuteInPlace(ExecPath, argv); + const auto returnCode = ExecuteInPlace(ExecPath, argv); + return Compilation::Result::code(returnCode); } static bool writeAllSourcesFile(DiagnosticEngine &diags, StringRef path, @@ -2033,10 +2040,10 @@ static bool writeAllSourcesFile(DiagnosticEngine &diags, StringRef path, return true; } -int Compilation::performJobs(std::unique_ptr &&TQ) { +Compilation::Result Compilation::performJobs(std::unique_ptr &&TQ) { if (AllSourceFilesPath) if (!writeAllSourcesFile(Diags, AllSourceFilesPath, getInputFiles())) - return EXIT_FAILURE; + return Compilation::Result::code(EXIT_FAILURE); // If we don't have to do any cleanup work, just exec the subprocess. if (Level < OutputLevel::Parseable && @@ -2051,20 +2058,19 @@ int Compilation::performJobs(std::unique_ptr &&TQ) { Diags.diagnose(SourceLoc(), diag::warning_parallel_execution_not_supported); } - bool abnormalExit; - int result = performJobsImpl(abnormalExit, std::move(TQ)); + auto result = performJobsImpl(std::move(TQ)); if (IncrementalComparator) IncrementalComparator->outputComparison(); if (!SaveTemps) { for (const auto &pathPair : TempFilePaths) { - if (!abnormalExit || pathPair.getValue() == PreserveOnSignal::No) + if (!result.hadAbnormalExit || pathPair.getValue() == PreserveOnSignal::No) (void)llvm::sys::fs::remove(pathPair.getKey()); } } if (Stats) - Stats->noteCurrentProcessExitStatus(result); + Stats->noteCurrentProcessExitStatus(result.exitCode); return result; } diff --git a/lib/Driver/FineGrainedDependencyDriverGraph.cpp b/lib/Driver/FineGrainedDependencyDriverGraph.cpp index d7cf58be0cb8e..7d1b2d9eea319 100644 --- a/lib/Driver/FineGrainedDependencyDriverGraph.cpp +++ b/lib/Driver/FineGrainedDependencyDriverGraph.cpp @@ -156,25 +156,6 @@ std::vector ModuleDepGraph::findJobsToRecompileWhenWholeJobChanges( 
return findJobsToRecompileWhenNodesChange(allNodesInJob); } -template -std::vector -ModuleDepGraph::findJobsToRecompileWhenNodesChange(const Nodes &nodes) { - std::vector foundDependents; - for (ModuleDepGraphNode *n : nodes) - findPreviouslyUntracedDependents(foundDependents, n); - return jobsContaining(foundDependents); -} - -template std::vector -ModuleDepGraph::findJobsToRecompileWhenNodesChange< - std::unordered_set>( - const std::unordered_set &); - -template std::vector -ModuleDepGraph::findJobsToRecompileWhenNodesChange< - std::vector>( - const std::vector &); - std::vector ModuleDepGraph::computeSwiftDepsFromNodes( ArrayRef nodes) const { llvm::StringSet<> swiftDepsOfNodes; diff --git a/lib/Frontend/CompilerInvocation.cpp b/lib/Frontend/CompilerInvocation.cpp index bdb1d0bc48d52..690df338bdf2b 100644 --- a/lib/Frontend/CompilerInvocation.cpp +++ b/lib/Frontend/CompilerInvocation.cpp @@ -481,6 +481,9 @@ static bool ParseLangArgs(LangOptions &Opts, ArgList &Args, = A->getOption().matches(OPT_enable_objc_attr_requires_foundation_module); } + Opts.EnableExperimentalPrespecialization |= + Args.hasArg(OPT_enable_experimental_prespecialization); + if (auto A = Args.getLastArg(OPT_enable_testable_attr_requires_testable_module, OPT_disable_testable_attr_requires_testable_module)) { Opts.EnableTestableAttrRequiresTestableModule @@ -574,6 +577,8 @@ static bool ParseLangArgs(LangOptions &Opts, ArgList &Args, Opts.EnableCrossImportRemarks = Args.hasArg(OPT_emit_cross_import_remarks); + Opts.EnableModuleLoadingRemarks = Args.hasArg(OPT_remark_loading_module); + llvm::Triple Target = Opts.Target; StringRef TargetArg; std::string TargetArgScratch; diff --git a/lib/Frontend/Frontend.cpp b/lib/Frontend/Frontend.cpp index 205683f9e0a02..15657a92d01f7 100644 --- a/lib/Frontend/Frontend.cpp +++ b/lib/Frontend/Frontend.cpp @@ -599,17 +599,23 @@ bool CompilerInstance::setUpInputs() { const auto &Inputs = Invocation.getFrontendOptions().InputsAndOutputs.getAllInputs(); + const 
bool shouldRecover = Invocation.getFrontendOptions() .InputsAndOutputs.shouldRecoverMissingInputs(); + + bool hasFailed = false; for (const InputFile &input : Inputs) { bool failed = false; - Optional bufferID = getRecordedBufferID(input, failed); - if (failed) - return true; + Optional bufferID = + getRecordedBufferID(input, shouldRecover, failed); + hasFailed |= failed; if (!bufferID.hasValue() || !input.isPrimary()) continue; recordPrimaryInputBuffer(*bufferID); } + if (hasFailed) + return true; // Set the primary file to the code-completion point if one exists. if (codeCompletionBufferID.hasValue() && @@ -621,8 +627,9 @@ bool CompilerInstance::setUpInputs() { return false; } -Optional CompilerInstance::getRecordedBufferID(const InputFile &input, - bool &failed) { +Optional +CompilerInstance::getRecordedBufferID(const InputFile &input, + const bool shouldRecover, bool &failed) { if (!input.getBuffer()) { if (Optional existingBufferID = SourceMgr.getIDForBufferIdentifier(input.getFileName())) { @@ -631,6 +638,13 @@ Optional CompilerInstance::getRecordedBufferID(const InputFile &input, } auto buffers = getInputBuffersIfPresent(input); + // Recover with a dummy buffer if requested. 
+ if (!buffers.hasValue() && shouldRecover && + input.getType() == file_types::TY_Swift && !input.isPrimary()) { + buffers = ModuleBuffers(llvm::MemoryBuffer::getMemBuffer( + "// missing file\n", input.getFileName())); + } + if (!buffers.hasValue()) { failed = true; return None; diff --git a/lib/Frontend/FrontendInputsAndOutputs.cpp b/lib/Frontend/FrontendInputsAndOutputs.cpp index c2ba20769b103..35c95be61a3a4 100644 --- a/lib/Frontend/FrontendInputsAndOutputs.cpp +++ b/lib/Frontend/FrontendInputsAndOutputs.cpp @@ -38,6 +38,7 @@ FrontendInputsAndOutputs::FrontendInputsAndOutputs( for (InputFile input : other.AllInputs) addInput(input); IsSingleThreadedWMO = other.IsSingleThreadedWMO; + ShouldRecoverMissingInputs = other.ShouldRecoverMissingInputs; } FrontendInputsAndOutputs &FrontendInputsAndOutputs:: @@ -46,6 +47,7 @@ operator=(const FrontendInputsAndOutputs &other) { for (InputFile input : other.AllInputs) addInput(input); IsSingleThreadedWMO = other.IsSingleThreadedWMO; + ShouldRecoverMissingInputs = other.ShouldRecoverMissingInputs; return *this; } diff --git a/lib/IDE/CodeCompletion.cpp b/lib/IDE/CodeCompletion.cpp index 48433efc41bd6..a724b609c33b0 100644 --- a/lib/IDE/CodeCompletion.cpp +++ b/lib/IDE/CodeCompletion.cpp @@ -1677,7 +1677,7 @@ class CodeCompletionCallbacksImpl : public CodeCompletionCallbacks { } // end anonymous namespace namespace { -static bool isTopLevelContext(const DeclContext *DC) { +static bool isTopLevelSubcontext(const DeclContext *DC) { for (; DC && DC->isLocalContext(); DC = DC->getParent()) { switch (DC->getContextKind()) { case DeclContextKind::TopLevelCodeDecl: @@ -2139,7 +2139,7 @@ class CompletionLookup final : public swift::VisibleDeclConsumer { if (CurrDeclContext && D->getModuleContext() == CurrModule) { // Treat global variables from the same source file as local when // completing at top-level. 
- if (isa(D) && isTopLevelContext(CurrDeclContext) && + if (isa(D) && isTopLevelSubcontext(CurrDeclContext) && D->getDeclContext()->getParentSourceFile() == CurrDeclContext->getParentSourceFile()) { return SemanticContextKind::Local; diff --git a/lib/IDE/Utils.cpp b/lib/IDE/Utils.cpp index f205d658f1246..05a61fe6a3eb8 100644 --- a/lib/IDE/Utils.cpp +++ b/lib/IDE/Utils.cpp @@ -215,6 +215,7 @@ static FrontendInputsAndOutputs resolveSymbolicLinksInInputs( ++primaryCount; } assert(primaryCount < 2 && "cannot handle multiple primaries"); + replacementInputsAndOutputs.addInput( InputFile(newFilename.str(), newIsPrimary, input.getBuffer())); } @@ -310,6 +311,11 @@ bool ide::initCompilerInvocation( resolveSymbolicLinksInInputs( Invocation.getFrontendOptions().InputsAndOutputs, UnresolvedPrimaryFile, FileSystem, Error); + + // SourceKit functionalities want to proceed even if there are missing inputs. + Invocation.getFrontendOptions().InputsAndOutputs + .setShouldRecoverMissingInputs(); + if (!Error.empty()) return true; diff --git a/lib/IRGen/ExtraInhabitants.cpp b/lib/IRGen/ExtraInhabitants.cpp index a97c48e6ea272..75c6d122c14e0 100644 --- a/lib/IRGen/ExtraInhabitants.cpp +++ b/lib/IRGen/ExtraInhabitants.cpp @@ -61,6 +61,16 @@ unsigned irgen::getFunctionPointerExtraInhabitantCount(IRGenModule &IGM) { return getPointerExtraInhabitantCount(IGM, 0); } +unsigned irgen::getAlignedPointerExtraInhabitantCount(IRGenModule &IGM, + Alignment align) { + // For all of the operations on current platforms, we totally ignore + // alignment because we assume we can get what we consider to be an + // adequate number of extra inhabitants from LeastValidPointerValue. + // If we have to revisit that for a future ABI, we can take advantage + // of alignment bits. 
+ return getPointerExtraInhabitantCount(IGM, 0); +} + /*****************************************************************************/ static APInt @@ -99,6 +109,14 @@ APInt irgen::getFunctionPointerFixedExtraInhabitantValue(IRGenModule &IGM, return getPointerFixedExtraInhabitantValue(IGM, bits, index, offset, 0); } +APInt irgen::getAlignedPointerExtraInhabitantValue(IRGenModule &IGM, + Alignment align, + unsigned bits, + unsigned index, + unsigned offset) { + return getPointerFixedExtraInhabitantValue(IGM, bits, index, offset, 0); +} + /*****************************************************************************/ static llvm::Value *getPointerExtraInhabitantIndex(IRGenFunction &IGF, @@ -180,6 +198,12 @@ llvm::Value *irgen::getFunctionPointerExtraInhabitantIndex(IRGenFunction &IGF, return getPointerExtraInhabitantIndex(IGF, src, 0); } +llvm::Value *irgen::getAlignedPointerExtraInhabitantIndex(IRGenFunction &IGF, + Alignment align, + Address src) { + return getPointerExtraInhabitantIndex(IGF, src, 0); +} + /*****************************************************************************/ static void storePointerExtraInhabitant(IRGenFunction &IGF, @@ -214,3 +238,10 @@ void irgen::storeFunctionPointerExtraInhabitant(IRGenFunction &IGF, Address dest) { storePointerExtraInhabitant(IGF, index, dest, 0); } + +void irgen::storeAlignedPointerExtraInhabitant(IRGenFunction &IGF, + Alignment align, + llvm::Value *index, + Address dest) { + storePointerExtraInhabitant(IGF, index, dest, 0); +} diff --git a/lib/IRGen/ExtraInhabitants.h b/lib/IRGen/ExtraInhabitants.h index 3b13d35dc48a1..10bca67108854 100644 --- a/lib/IRGen/ExtraInhabitants.h +++ b/lib/IRGen/ExtraInhabitants.h @@ -27,6 +27,7 @@ namespace swift { namespace irgen { class Address; +class Alignment; class IRGenFunction; class IRGenModule; @@ -62,6 +63,39 @@ void storeHeapObjectExtraInhabitant(IRGenFunction &IGF, /*****************************************************************************/ +/// \group Extra inhabitants 
of aligned object pointers. + +/// Return the number of extra inhabitant representations for aligned +/// object pointers. +unsigned getAlignedPointerExtraInhabitantCount(IRGenModule &IGM, + Alignment pointeeAlign); + +/// Return an indexed extra inhabitant constant for an aligned pointer. +/// +/// If the pointer appears within a larger aggregate, the 'bits' and 'offset' +/// arguments can be used to position the inhabitant within the larger integer +/// constant. +llvm::APInt getAlignedPointerExtraInhabitantValue(IRGenModule &IGM, + Alignment pointeeAlign, + unsigned bits, + unsigned index, + unsigned offset); + +/// Calculate the index of an aligned pointer extra inhabitant +/// representation stored in memory. +llvm::Value *getAlignedPointerExtraInhabitantIndex(IRGenFunction &IGF, + Alignment pointeeAlign, + Address src); + +/// Calculate an extra inhabitant representation from an index and store it to +/// memory. +void storeAlignedPointerExtraInhabitant(IRGenFunction &IGF, + Alignment pointeeAlign, + llvm::Value *index, + Address dest); + +/*****************************************************************************/ + /// \group Extra inhabitants of function pointers. 
/// Return the number of extra inhabitant representations for function pointers, diff --git a/lib/IRGen/GenBuiltin.cpp b/lib/IRGen/GenBuiltin.cpp index 58efe1a268d23..8ad1baba8efb3 100644 --- a/lib/IRGen/GenBuiltin.cpp +++ b/lib/IRGen/GenBuiltin.cpp @@ -1115,5 +1115,27 @@ if (Builtin.ID == BuiltinValueKind::id) { \ return; } + if (Builtin.ID == BuiltinValueKind::AutoDiffCreateLinearMapContext) { + auto topLevelSubcontextSize = args.claimNext(); + out.add(emitAutoDiffCreateLinearMapContext(IGF, topLevelSubcontextSize) + .getAddress()); + return; + } + + if (Builtin.ID == BuiltinValueKind::AutoDiffProjectTopLevelSubcontext) { + Address allocatorAddr(args.claimNext(), IGF.IGM.getPointerAlignment()); + out.add( + emitAutoDiffProjectTopLevelSubcontext(IGF, allocatorAddr).getAddress()); + return; + } + + if (Builtin.ID == BuiltinValueKind::AutoDiffAllocateSubcontext) { + Address allocatorAddr(args.claimNext(), IGF.IGM.getPointerAlignment()); + auto size = args.claimNext(); + out.add( + emitAutoDiffAllocateSubcontext(IGF, allocatorAddr, size).getAddress()); + return; + } + llvm_unreachable("IRGen unimplemented for this builtin!"); } diff --git a/lib/IRGen/GenCall.cpp b/lib/IRGen/GenCall.cpp index 02c10a11bf829..3f05c64365eb5 100644 --- a/lib/IRGen/GenCall.cpp +++ b/lib/IRGen/GenCall.cpp @@ -331,25 +331,60 @@ static Alignment getAsyncContextAlignment(IRGenModule &IGM) { return IGM.getPointerAlignment(); } +void IRGenFunction::setupAsync() { + llvm::Value *t = CurFn->getArg((unsigned)AsyncFunctionArgumentIndex::Task); + asyncTaskLocation = createAlloca(t->getType(), IGM.getPointerAlignment()); + Builder.CreateStore(t, asyncTaskLocation); + + llvm::Value *e = CurFn->getArg((unsigned)AsyncFunctionArgumentIndex::Executor); + asyncExecutorLocation = createAlloca(e->getType(), IGM.getPointerAlignment()); + Builder.CreateStore(e, asyncExecutorLocation); + + llvm::Value *c = CurFn->getArg((unsigned)AsyncFunctionArgumentIndex::Context); + asyncContextLocation = 
createAlloca(c->getType(), IGM.getPointerAlignment()); + Builder.CreateStore(c, asyncContextLocation); +} + llvm::Value *IRGenFunction::getAsyncTask() { assert(isAsync()); - auto *value = CurFn->getArg((unsigned)AsyncFunctionArgumentIndex::Task); - assert(value->getType() == IGM.SwiftTaskPtrTy); - return value; + return Builder.CreateLoad(asyncTaskLocation); } llvm::Value *IRGenFunction::getAsyncExecutor() { assert(isAsync()); - auto *value = CurFn->getArg((unsigned)AsyncFunctionArgumentIndex::Executor); - assert(value->getType() == IGM.SwiftExecutorPtrTy); - return value; + return Builder.CreateLoad(asyncExecutorLocation); } llvm::Value *IRGenFunction::getAsyncContext() { assert(isAsync()); - auto *value = CurFn->getArg((unsigned)AsyncFunctionArgumentIndex::Context); - assert(value->getType() == IGM.SwiftContextPtrTy); - return value; + return Builder.CreateLoad(asyncContextLocation); +} + +llvm::CallInst *IRGenFunction::emitSuspendAsyncCall(ArrayRef args) { + auto *id = + Builder.CreateIntrinsicCall(llvm::Intrinsic::coro_suspend_async, args); + + // Update the current values of task, executor and context. + + auto *rawTask = Builder.CreateExtractValue(id, + (unsigned)AsyncFunctionArgumentIndex::Task); + auto *task = Builder.CreateBitCast(rawTask, IGM.SwiftTaskPtrTy); + Builder.CreateStore(task, asyncTaskLocation); + + auto *rawExecutor = Builder.CreateExtractValue(id, + (unsigned)AsyncFunctionArgumentIndex::Executor); + auto *executor = Builder.CreateBitCast(rawExecutor, IGM.SwiftExecutorPtrTy); + Builder.CreateStore(executor, asyncExecutorLocation); + + auto *calleeContext = Builder.CreateExtractValue(id, + (unsigned)AsyncFunctionArgumentIndex::Context); + llvm::Constant *projectFn = cast(args[1])->stripPointerCasts(); + // Get the caller context from the callee context. 
+ llvm::Value *context = Builder.CreateCall(projectFn, {calleeContext}); + context = Builder.CreateBitCast(context, IGM.SwiftContextPtrTy); + Builder.CreateStore(context, asyncContextLocation); + + return id; } llvm::Type *ExplosionSchema::getScalarResultType(IRGenModule &IGM) const { @@ -1832,12 +1867,15 @@ std::pair irgen::getAsyncFunctionAndSize( FunctionPointer functionPointer, llvm::Value *thickContext, std::pair values, Size initialContextSize) { assert(values.first || values.second); + assert(functionPointer.getKind() == + FunctionPointer::KindTy::AsyncFunctionPointer); bool emitFunction = values.first; bool emitSize = values.second; // TODO: This calculation should be extracted out into standalone functions // emitted on-demand per-module to improve codesize. switch (representation) { case SILFunctionTypeRepresentation::Thick: { + assert(!functionPointer.useStaticContextSize()); // If the called function is thick, the size of the called function's // async context is not statically knowable. 
// @@ -1906,16 +1944,25 @@ std::pair irgen::getAsyncFunctionAndSize( SmallVector, 2> sizePhiValues; { // thin IGF.Builder.emitBlock(thinBlock); + auto *ptr = functionPointer.getRawPointer(); + if (auto authInfo = functionPointer.getAuthInfo()) { + ptr = emitPointerAuthAuth(IGF, ptr, authInfo); + } + auto *afpPtr = + IGF.Builder.CreateBitCast(ptr, IGF.IGM.AsyncFunctionPointerPtrTy); if (emitFunction) { - auto *uncastFnPtr = functionPointer.getPointer(IGF); + llvm::Value *addrPtr = IGF.Builder.CreateStructGEP(afpPtr, 0); + auto *uncastFnPtr = IGF.emitLoadOfRelativePointer( + Address(addrPtr, IGF.IGM.getPointerAlignment()), /*isFar*/ false, + /*expectedType*/ functionPointer.getFunctionType()->getPointerTo()); auto *fnPtr = IGF.Builder.CreateBitCast(uncastFnPtr, IGF.IGM.Int8PtrTy); + if (auto authInfo = functionPointer.getAuthInfo()) { + fnPtr = emitPointerAuthSign(IGF, fnPtr, authInfo); + } fnPhiValues.push_back({thinBlock, fnPtr}); } if (emitSize) { - auto *ptr = functionPointer.getRawPointer(); - auto *descriptorPtr = - IGF.Builder.CreateBitCast(ptr, IGF.IGM.AsyncFunctionPointerPtrTy); - auto *sizePtr = IGF.Builder.CreateStructGEP(descriptorPtr, 1); + auto *sizePtr = IGF.Builder.CreateStructGEP(afpPtr, 1); auto *size = IGF.Builder.CreateLoad(sizePtr, IGF.IGM.getPointerAlignment()); sizePhiValues.push_back({thinBlock, size}); @@ -1991,20 +2038,34 @@ std::pair irgen::getAsyncFunctionAndSize( case SILFunctionTypeRepresentation::WitnessMethod: case SILFunctionTypeRepresentation::Closure: case SILFunctionTypeRepresentation::Block: { + auto *ptr = functionPointer.getRawPointer(); + if (auto authInfo = functionPointer.getAuthInfo()) { + ptr = emitPointerAuthAuth(IGF, ptr, authInfo); + } + auto *afpPtr = + IGF.Builder.CreateBitCast(ptr, IGF.IGM.AsyncFunctionPointerPtrTy); llvm::Value *fn = nullptr; if (emitFunction) { - fn = functionPointer.getPointer(IGF); + if (functionPointer.useStaticContextSize()) { + fn = functionPointer.getRawPointer(); + } else { + llvm::Value 
*addrPtr = IGF.Builder.CreateStructGEP(afpPtr, 0); + fn = IGF.emitLoadOfRelativePointer( + Address(addrPtr, IGF.IGM.getPointerAlignment()), /*isFar*/ false, + /*expectedType*/ functionPointer.getFunctionType()->getPointerTo()); + } + if (auto authInfo = functionPointer.getAuthInfo()) { + fn = emitPointerAuthSign(IGF, fn, authInfo); + } } llvm::Value *size = nullptr; if (emitSize) { if (functionPointer.useStaticContextSize()) { size = llvm::ConstantInt::get(IGF.IGM.Int32Ty, initialContextSize.getValue()); - } else { - auto *ptr = functionPointer.getRawPointer(); - auto *descriptorPtr = - IGF.Builder.CreateBitCast(ptr, IGF.IGM.AsyncFunctionPointerPtrTy); - auto *sizePtr = IGF.Builder.CreateStructGEP(descriptorPtr, 1); + } else { + assert(!functionPointer.useStaticContextSize()); + auto *sizePtr = IGF.Builder.CreateStructGEP(afpPtr, 1); size = IGF.Builder.CreateLoad(sizePtr, IGF.IGM.getPointerAlignment()); } } @@ -2301,7 +2362,8 @@ class AsyncCallEmission final : public CallEmission { } FunctionPointer getCalleeFunctionPointer() override { return FunctionPointer( - FunctionPointer::KindTy::Function, calleeFunction, PointerAuthInfo(), + FunctionPointer::KindTy::Function, calleeFunction, + CurCallee.getFunctionPointer().getAuthInfo(), IGF.IGM.getSignature(getCallee().getSubstFunctionType())); } SILType getParameterType(unsigned index) override { @@ -2339,8 +2401,7 @@ class AsyncCallEmission final : public CallEmission { llvm::Intrinsic::coro_async_resume, {}); auto fnVal = currentResumeFn; // Sign the pointer. - // TODO: use a distinct schema. 
- if (auto schema = IGF.IGM.getOptions().PointerAuth.AsyncContextParent) { + if (auto schema = IGF.IGM.getOptions().PointerAuth.AsyncContextResume) { Address fieldAddr = fieldLayout.project(IGF, this->context, /*offsets*/ llvm::None); auto authInfo = PointerAuthInfo::emit( @@ -2390,28 +2451,12 @@ class AsyncCallEmission final : public CallEmission { } } void emitCallToUnmappedExplosion(llvm::CallInst *call, Explosion &out) override { - SILFunctionConventions fnConv(getCallee().getSubstFunctionType(), - IGF.getSILModule()); - auto resultType = - fnConv.getSILResultType(IGF.IGM.getMaximalTypeExpansionContext()); - auto &nativeSchema = - IGF.IGM.getTypeInfo(resultType).nativeReturnValueSchema(IGF.IGM); - auto expectedNativeResultType = nativeSchema.getExpandedType(IGF.IGM); - if (expectedNativeResultType->isVoidTy()) { - // If the async return is void, there is no return to move out of the - // argument buffer. - return; - } - // Gather the values. - Explosion nativeExplosion; auto layout = getAsyncContextLayout(); for (unsigned index = 0, count = layout.getDirectReturnCount(); index < count; ++index) { auto fieldLayout = layout.getDirectReturnLayout(index); - loadValue(fieldLayout, nativeExplosion); + loadValue(fieldLayout, out); } - - out = nativeSchema.mapFromNative(IGF.IGM, IGF, nativeExplosion, resultType); } Address getCalleeErrorSlot(SILType errorType) override { auto layout = getAsyncContextLayout(); @@ -2435,11 +2480,12 @@ class AsyncCallEmission final : public CallEmission { Builder.CreateBitOrPointerCast(dispatchFn, IGM.Int8PtrTy)); arguments.push_back( Builder.CreateBitOrPointerCast(fn.getRawPointer(), IGM.Int8PtrTy)); + if (auto authInfo = fn.getAuthInfo()) { + arguments.push_back(fn.getAuthInfo().getDiscriminator()); + } for (auto arg: args) arguments.push_back(arg); - auto *id = Builder.CreateIntrinsicCall(llvm::Intrinsic::coro_suspend_async, - arguments); - return id; + return IGF.emitSuspendAsyncCall(arguments); } }; @@ -3623,14 +3669,34 @@ 
llvm::Value *irgen::emitTaskCreate( auto layout = getAsyncContextLayout( IGF.IGM, taskFunctionCanSILType, taskFunctionCanSILType, subs); + CanSILFunctionType taskContinuationFunctionTy = [&]() { + ASTContext &ctx = IGF.IGM.IRGen.SIL.getASTContext(); + auto extInfo = + ASTExtInfoBuilder() + .withRepresentation(FunctionTypeRepresentation::CFunctionPointer) + .build(); + // FIXME: Use the appropriate signature for TaskContinuationFunction: + // + // using TaskContinuationFunction = + // SWIFT_CC(swift) + // void (AsyncTask *, ExecutorRef, AsyncContext *); + auto ty = FunctionType::get({}, ctx.TheEmptyTupleType, extInfo); + return IGF.IGM.getLoweredType(ty).castTo(); + }(); + // Call the function. llvm::CallInst *result; llvm::Value *theSize, *theFunction; + auto taskFunctionPointer = FunctionPointer::forExplosionValue( + IGF, taskFunction, taskFunctionCanSILType); std::tie(theFunction, theSize) = getAsyncFunctionAndSize(IGF, SILFunctionTypeRepresentation::Thick, - FunctionPointer::forExplosionValue( - IGF, taskFunction, taskFunctionCanSILType), - localContextInfo); + taskFunctionPointer, localContextInfo); + if (auto authInfo = PointerAuthInfo::forFunctionPointer( + IGF.IGM, taskContinuationFunctionTy)) { + theFunction = emitPointerAuthResign( + IGF, theFunction, taskFunctionPointer.getAuthInfo(), authInfo); + } theFunction = IGF.Builder.CreateBitOrPointerCast( theFunction, IGF.IGM.TaskContinuationFunctionPtrTy); theSize = IGF.Builder.CreateZExtOrBitCast(theSize, IGF.IGM.SizeTy); @@ -4502,18 +4568,25 @@ llvm::Value *FunctionPointer::getPointer(IRGenFunction &IGF) const { return Value; case KindTy::Value::AsyncFunctionPointer: { if (!isFunctionPointerWithoutContext) { + auto *fnPtr = Value; + if (auto authInfo = AuthInfo) { + fnPtr = emitPointerAuthAuth(IGF, fnPtr, authInfo); + } auto *descriptorPtr = - IGF.Builder.CreateBitCast(Value, IGF.IGM.AsyncFunctionPointerPtrTy); + IGF.Builder.CreateBitCast(fnPtr, IGF.IGM.AsyncFunctionPointerPtrTy); auto *addrPtr = 
IGF.Builder.CreateStructGEP(descriptorPtr, 0); - return IGF.emitLoadOfRelativePointer( + auto *result = IGF.emitLoadOfRelativePointer( Address(addrPtr, IGF.IGM.getPointerAlignment()), /*isFar*/ false, /*expectedType*/ getFunctionType()->getPointerTo()); + if (auto authInfo = AuthInfo) { + result = emitPointerAuthSign(IGF, result, authInfo); + } + return result; } else { return IGF.Builder.CreateBitOrPointerCast( Value, getFunctionType()->getPointerTo()); } } - } } @@ -4560,8 +4633,7 @@ void irgen::emitAsyncReturn(IRGenFunction &IGF, AsyncContextLayout &asyncLayout, .loadAsCopy(IGF, returnToCallerAddr, fn); llvm::Value *fnVal = fn.claimNext(); - // TODO: use distinct schema - if (auto schema = IGF.IGM.getOptions().PointerAuth.AsyncContextParent) { + if (auto schema = IGF.IGM.getOptions().PointerAuth.AsyncContextResume) { Address fieldAddr = returnToCallerLayout.project(IGF, contextAddr, /*offsets*/ llvm::None); auto authInfo = PointerAuthInfo::emit(IGF, schema, fieldAddr.getAddress(), @@ -4581,3 +4653,46 @@ void irgen::emitAsyncReturn(IRGenFunction &IGF, AsyncContextLayout &asyncLayout, auto call = IGF.Builder.CreateCall(fnPtr, Args); call->setTailCall(); } + +FunctionPointer +IRGenFunction::getFunctionPointerForResumeIntrinsic(llvm::Value *resume) { + auto *fnTy = llvm::FunctionType::get( + IGM.VoidTy, {IGM.Int8PtrTy, IGM.Int8PtrTy, IGM.Int8PtrTy}, + false /*vaargs*/); + auto signature = + Signature(fnTy, IGM.constructInitialAttributes(), IGM.SwiftCC); + auto fnPtr = FunctionPointer( + FunctionPointer::KindTy::Function, + Builder.CreateBitOrPointerCast(resume, fnTy->getPointerTo()), + PointerAuthInfo(), signature); + return fnPtr; +} + +Address irgen::emitAutoDiffCreateLinearMapContext( + IRGenFunction &IGF, llvm::Value *topLevelSubcontextSize) { + auto *call = IGF.Builder.CreateCall( + IGF.IGM.getAutoDiffCreateLinearMapContextFn(), {topLevelSubcontextSize}); + call->setDoesNotThrow(); + call->setCallingConv(IGF.IGM.SwiftCC); + return Address(call, 
IGF.IGM.getPointerAlignment()); +} + +Address irgen::emitAutoDiffProjectTopLevelSubcontext( + IRGenFunction &IGF, Address context) { + auto *call = IGF.Builder.CreateCall( + IGF.IGM.getAutoDiffProjectTopLevelSubcontextFn(), + {context.getAddress()}); + call->setDoesNotThrow(); + call->setCallingConv(IGF.IGM.SwiftCC); + return Address(call, IGF.IGM.getPointerAlignment()); +} + +Address irgen::emitAutoDiffAllocateSubcontext( + IRGenFunction &IGF, Address context, llvm::Value *size) { + auto *call = IGF.Builder.CreateCall( + IGF.IGM.getAutoDiffAllocateSubcontextFn(), + {context.getAddress(), size}); + call->setDoesNotThrow(); + call->setCallingConv(IGF.IGM.SwiftCC); + return Address(call, IGF.IGM.getPointerAlignment()); +} diff --git a/lib/IRGen/GenCall.h b/lib/IRGen/GenCall.h index f97833fc77e5e..f3bbddf5e3c19 100644 --- a/lib/IRGen/GenCall.h +++ b/lib/IRGen/GenCall.h @@ -432,6 +432,13 @@ namespace irgen { void emitAsyncReturn(IRGenFunction &IGF, AsyncContextLayout &layout, CanSILFunctionType fnType); + + Address emitAutoDiffCreateLinearMapContext( + IRGenFunction &IGF, llvm::Value *topLevelSubcontextSize); + Address emitAutoDiffProjectTopLevelSubcontext( + IRGenFunction &IGF, Address context); + Address emitAutoDiffAllocateSubcontext( + IRGenFunction &IGF, Address context, llvm::Value *size); } // end namespace irgen } // end namespace swift diff --git a/lib/IRGen/GenClass.cpp b/lib/IRGen/GenClass.cpp index a103fb0dc1d27..e90ea54fd0f3b 100644 --- a/lib/IRGen/GenClass.cpp +++ b/lib/IRGen/GenClass.cpp @@ -25,6 +25,7 @@ #include "swift/AST/Module.h" #include "swift/AST/Pattern.h" #include "swift/AST/PrettyStackTrace.h" +#include "swift/AST/SemanticAttrs.h" #include "swift/AST/TypeMemberVisitor.h" #include "swift/AST/Types.h" #include "swift/ClangImporter/ClangModule.h" @@ -219,7 +220,9 @@ namespace { return; } - if (theClass->hasSuperclass()) { + if (theClass->isNativeNSObjectSubclass()) { + // For layout purposes, we don't have ObjC ancestry. 
+ } else if (theClass->hasSuperclass()) { SILType superclassType = classType.getSuperclass(); auto superclassDecl = superclassType.getClassOrBoundGenericClass(); assert(superclassType && superclassDecl); @@ -274,6 +277,9 @@ namespace { void addDirectFieldsFromClass(ClassDecl *rootClass, SILType rootClassType, ClassDecl *theClass, SILType classType, bool superclass) { + if (theClass->isRootDefaultActor()) + addDefaultActorHeader(); + for (VarDecl *var : theClass->getStoredProperties()) { SILType type = classType.getFieldType(var, IGM.getSILModule(), TypeExpansionContext::minimal()); @@ -1158,15 +1164,15 @@ namespace { void buildMetaclassStub() { assert(FieldLayout && "can't build a metaclass from a category"); - auto specializedGenericType = getSpecializedGenericType().map( - [](auto canType) { return (Type)canType; }); + Optional specializedGenericType = getSpecializedGenericType(); // The isa is the metaclass pointer for the root class. auto rootClass = getRootClassForMetaclass(IGM, getClass()); Type rootType; if (specializedGenericType && rootClass->isGenericContext()) { rootType = - (*specializedGenericType)->getRootClass(/*useArchetypes=*/false); + (*specializedGenericType)->getRootClass( + /*useArchetypes=*/false); } else { rootType = Type(); } @@ -1179,11 +1185,11 @@ namespace { // If this class has no formal superclass, then its actual // superclass is SwiftObject, i.e. the root class. 
llvm::Constant *superPtr; - if (getClass()->hasSuperclass()) { - auto base = getClass()->getSuperclassDecl(); + if (auto base = getSuperclassDeclForMetadata(IGM, getClass())) { if (specializedGenericType && base->isGenericContext()) { superPtr = getMetaclassRefOrNull( - (*specializedGenericType)->getSuperclass(/*useArchetypes=*/false), + getSuperclassForMetadata(IGM, *specializedGenericType, + /*useArchetypes=*/false), base); } else { superPtr = getMetaclassRefOrNull(Type(), base); @@ -1206,10 +1212,10 @@ namespace { auto init = llvm::ConstantStruct::get(IGM.ObjCClassStructTy, makeArrayRef(fields)); llvm::Constant *uncastMetaclass; - if (auto theType = getSpecializedGenericType()) { + if (specializedGenericType) { uncastMetaclass = IGM.getAddrOfCanonicalSpecializedGenericMetaclassObject( - *theType, ForDefinition); + *specializedGenericType, ForDefinition); } else { uncastMetaclass = IGM.getAddrOfMetaclassObject(getClass(), ForDefinition); @@ -1443,6 +1449,32 @@ namespace { } private: + /// If we should set the forbids associated objects on instances metadata + /// flag. + /// + /// We currently do this on: + /// + /// * Actor classes. + /// * classes marked with @_semantics("objc.forbidAssociatedObjects") + /// (for testing purposes) + /// + /// TODO: Expand this as appropriate over time. + bool doesClassForbidAssociatedObjectsOnInstances() const { + auto *clsDecl = getClass(); + + // We ban this on actors without objc ancestry. + if (clsDecl->isActor() && !clsDecl->checkAncestry(AncestryFlags::ObjC)) + return true; + + // Otherwise, we only do it if our special semantics attribute is on the + // relevant class. This is for testing purposes. + if (clsDecl->hasSemanticsAttr(semantics::OBJC_FORBID_ASSOCIATED_OBJECTS)) + return true; + + // TODO: Add new cases here as appropriate over time. 
+ return false; + } + ObjCClassFlags buildFlags(ForMetaClass_t forMeta, HasUpdateCallback_t hasUpdater) { ObjCClassFlags flags = ObjCClassFlags::CompiledByARC; @@ -1463,6 +1495,11 @@ namespace { if (hasUpdater) flags |= ObjCClassFlags::HasMetadataUpdateCallback; + // If we know that our class does not support having associated objects + // placed upon instances, set the forbid associated object flag. + if (doesClassForbidAssociatedObjectsOnInstances()) + flags |= ObjCClassFlags::ForbidsAssociatedObjects; + // FIXME: set ObjCClassFlags::Hidden when appropriate return flags; } @@ -2039,6 +2076,15 @@ namespace { /// Get the name of the class or protocol to mangle into the ObjC symbol /// name. StringRef getEntityName(llvm::SmallVectorImpl &buffer) const { + if (auto prespecialization = getSpecializedGenericType()) { + buffer.clear(); + llvm::raw_svector_ostream os(buffer); + os << LinkEntity::forTypeMetadata(*prespecialization, + TypeMetadataAddress::FullMetadata) + .mangleAsString(); + return os.str(); + } + if (auto theClass = getClass()) { return theClass->getObjCRuntimeName(buffer); } @@ -2378,6 +2424,13 @@ IRGenModule::getObjCRuntimeBaseForSwiftRootClass(ClassDecl *theClass) { return getObjCRuntimeBaseClass(name, name); } +/// Lazily declare the base class for a Swift class that inherits from +/// NSObject but uses native reference-counting. 
+ClassDecl *IRGenModule::getSwiftNativeNSObjectDecl() { + Identifier name = Context.Id_SwiftNativeNSObject; + return getObjCRuntimeBaseClass(name, name); +} + ClassDecl *irgen::getRootClassForMetaclass(IRGenModule &IGM, ClassDecl *C) { while (auto superclass = C->getSuperclassDecl()) C = superclass; @@ -2404,6 +2457,33 @@ ClassDecl *irgen::getRootClassForMetaclass(IRGenModule &IGM, ClassDecl *C) { IGM.Context.Id_SwiftObject); } +ClassDecl * +irgen::getSuperclassDeclForMetadata(IRGenModule &IGM, ClassDecl *C) { + if (C->isNativeNSObjectSubclass()) + return IGM.getSwiftNativeNSObjectDecl(); + return C->getSuperclassDecl(); +} + +CanType irgen::getSuperclassForMetadata(IRGenModule &IGM, ClassDecl *C) { + if (C->isNativeNSObjectSubclass()) + return IGM.getSwiftNativeNSObjectDecl()->getDeclaredInterfaceType() + ->getCanonicalType(); + if (auto superclass = C->getSuperclass()) + return superclass->getCanonicalType(); + return CanType(); +} + +CanType irgen::getSuperclassForMetadata(IRGenModule &IGM, CanType type, + bool useArchetypes) { + auto cls = type->getClassOrBoundGenericClass(); + if (cls->isNativeNSObjectSubclass()) + return IGM.getSwiftNativeNSObjectDecl()->getDeclaredInterfaceType() + ->getCanonicalType(); + if (auto superclass = type->getSuperclass(useArchetypes)) + return superclass->getCanonicalType(); + return CanType(); +} + ClassMetadataStrategy IRGenModule::getClassMetadataStrategy(const ClassDecl *theClass) { SILType selfType = getSelfType(theClass); @@ -2483,6 +2563,13 @@ bool irgen::hasKnownSwiftMetadata(IRGenModule &IGM, CanType type) { /// Is the given class known to have Swift-compatible metadata? bool irgen::hasKnownSwiftMetadata(IRGenModule &IGM, ClassDecl *theClass) { + // Make sure that the fake declarations we make in getObjcRuntimeBaseClass + // are treated this way. 
+ if (theClass->getModuleContext()->isBuiltinModule()) { + assert(IGM.ObjCInterop); + return false; + } + // For now, the fact that a declaration was not implemented in Swift // is enough to conclusively force us into a slower path. // Eventually we might have an attribute here or something based on diff --git a/lib/IRGen/GenClass.h b/lib/IRGen/GenClass.h index 549a4a502ec3c..38f46ef028d88 100644 --- a/lib/IRGen/GenClass.h +++ b/lib/IRGen/GenClass.h @@ -85,6 +85,11 @@ namespace irgen { ClassDecl *getRootClassForMetaclass(IRGenModule &IGM, ClassDecl *theClass); + ClassDecl *getSuperclassDeclForMetadata(IRGenModule &IGM, ClassDecl *theClass); + CanType getSuperclassForMetadata(IRGenModule &IGM, ClassDecl *theClass); + CanType getSuperclassForMetadata(IRGenModule &IGM, CanType theClass, + bool useArchetypes = true); + enum class ClassMetadataStrategy { /// Does the given class have resilient ancestry, or is the class itself /// generic? diff --git a/lib/IRGen/GenDecl.cpp b/lib/IRGen/GenDecl.cpp index 1219e701de2c5..a6377b8d0780e 100644 --- a/lib/IRGen/GenDecl.cpp +++ b/lib/IRGen/GenDecl.cpp @@ -3043,11 +3043,19 @@ static llvm::GlobalVariable *createGOTEquivalent(IRGenModule &IGM, if (IGM.Triple.getObjectFormat() == llvm::Triple::COFF) { if (cast(global)->hasDLLImportStorageClass()) { + // Add the user label prefix *prior* to the introduction of the linker + // synthetic marker `__imp_`. + // Failure to do so will re-decorate the generated symbol and miss the + // user label prefix, generating e.g. `___imp_$sBoW` instead of + // `__imp__$sBoW`. + if (auto prefix = IGM.DataLayout.getGlobalPrefix()) + globalName = (llvm::Twine(prefix) + globalName).str(); + // Indicate to LLVM that the symbol should not be re-decorated. 
llvm::GlobalVariable *GV = new llvm::GlobalVariable(IGM.Module, global->getType(), /*Constant=*/true, - llvm::GlobalValue::ExternalLinkage, - nullptr, llvm::Twine("__imp_") + globalName); + llvm::GlobalValue::ExternalLinkage, nullptr, + "\01__imp_" + globalName); GV->setExternallyInitialized(true); return GV; } diff --git a/lib/IRGen/GenFunc.cpp b/lib/IRGen/GenFunc.cpp index c430c140c8eae..9d280f72cdd29 100644 --- a/lib/IRGen/GenFunc.cpp +++ b/lib/IRGen/GenFunc.cpp @@ -769,7 +769,7 @@ class PartialApplicationForwarderEmission { virtual void addArgument(llvm::Value *argValue, unsigned index) = 0; virtual SILParameterInfo getParameterInfo(unsigned index) = 0; virtual llvm::Value *getContext() = 0; - virtual llvm::Value *getDynamicFunctionPointer() = 0; + virtual llvm::Value *getDynamicFunctionPointer(PointerAuthInfo &authInfo) = 0; virtual llvm::Value *getDynamicFunctionContext() = 0; virtual void addDynamicFunctionContext(Explosion &explosion, DynamicFunctionKind kind) = 0; @@ -931,7 +931,9 @@ class SyncPartialApplicationForwarderEmission return substType->getParameters()[index]; } llvm::Value *getContext() override { return origParams.claimNext(); } - llvm::Value *getDynamicFunctionPointer() override { return args.takeLast(); } + llvm::Value *getDynamicFunctionPointer(PointerAuthInfo &authInfo) override { + return args.takeLast(); + } llvm::Value *getDynamicFunctionContext() override { return args.takeLast(); } void addDynamicFunctionContext(Explosion &explosion, DynamicFunctionKind kind) override { @@ -1127,7 +1129,7 @@ class AsyncPartialApplicationForwarderEmission llvm::Value *getContext() override { return loadValue(layout.getLocalContextLayout()); } - llvm::Value *getDynamicFunctionPointer() override { + llvm::Value *getDynamicFunctionPointer(PointerAuthInfo &authInfo) override { assert(dynamicFunction && dynamicFunction->pointer); auto *context = dynamicFunction->context; if (!context) { @@ -1135,7 +1137,6 @@ class AsyncPartialApplicationForwarderEmission } 
auto *rawFunction = subIGF.Builder.CreateBitCast( dynamicFunction->pointer, origSig.getType()->getPointerTo()); - auto authInfo = PointerAuthInfo::forFunctionPointer(IGM, origType); auto functionPointer = FunctionPointer(FunctionPointer::KindTy::AsyncFunctionPointer, rawFunction, authInfo, origSig); @@ -1332,7 +1333,8 @@ static llvm::Function *emitPartialApplicationForwarder(IRGenModule &IGM, fwd->addAttributes(llvm::AttributeList::FunctionIndex, b); IRGenFunction subIGF(IGM, fwd); - subIGF.setAsync(origType->isAsync()); + if (origType->isAsync()) + subIGF.setupAsync(); if (IGM.DebugInfo) IGM.DebugInfo->emitArtificialFunction(subIGF, fwd); @@ -1718,19 +1720,19 @@ static llvm::Function *emitPartialApplicationForwarder(IRGenModule &IGM, // Otherwise, it was the last thing we added to the layout. - // The dynamic function pointer is packed "last" into the context, - // and we pulled it out as an argument. Just pop it off. - auto fnPtr = emission->getDynamicFunctionPointer(); - - // It comes out of the context as an i8*. Cast to the function type. - fnPtr = subIGF.Builder.CreateBitCast(fnPtr, fnTy); - assert(lastCapturedFieldPtr); auto authInfo = PointerAuthInfo::emit(subIGF, IGM.getOptions().PointerAuth.PartialApplyCapture, lastCapturedFieldPtr, PointerAuthEntity::Special::PartialApplyCapture); + // The dynamic function pointer is packed "last" into the context, + // and we pulled it out as an argument. Just pop it off. + auto fnPtr = emission->getDynamicFunctionPointer(authInfo); + + // It comes out of the context as an i8*. Cast to the function type. 
+ fnPtr = subIGF.Builder.CreateBitCast(fnPtr, fnTy); + return FunctionPointer(FunctionPointer::KindTy::Function, fnPtr, authInfo, origSig); }(); @@ -2390,26 +2392,45 @@ llvm::Function *IRGenFunction::getOrCreateResumePrjFn() { auto &Builder = IGF.Builder; auto addr = Builder.CreateBitOrPointerCast(&(*it), IGF.IGM.Int8PtrPtrTy); Address callerContextAddr(addr, IGF.IGM.getPointerAlignment()); - auto callerContext = Builder.CreateLoad(callerContextAddr); + llvm::Value *callerContext = Builder.CreateLoad(callerContextAddr); + if (auto schema = IGF.IGM.getOptions().PointerAuth.AsyncContextParent) { + auto authInfo = + PointerAuthInfo::emit(IGF, schema, addr, PointerAuthEntity()); + callerContext = emitPointerAuthAuth(IGF, callerContext, authInfo); + } Builder.CreateRet(callerContext); }, false /*isNoInline*/)); } - llvm::Function * IRGenFunction::createAsyncDispatchFn(const FunctionPointer &fnPtr, ArrayRef args) { SmallVector argTys; - argTys.push_back(IGM.Int8PtrTy); // Function pointer to be called. for (auto arg : args) { auto *ty = arg->getType(); argTys.push_back(ty); } + return createAsyncDispatchFn(fnPtr, argTys); +} + +llvm::Function * +IRGenFunction::createAsyncDispatchFn(const FunctionPointer &fnPtr, + ArrayRef argTypes) { + SmallVector argTys; + argTys.push_back(IGM.Int8PtrTy); // Function pointer to be called. + auto originalAuthInfo = fnPtr.getAuthInfo(); + if (fnPtr.getAuthInfo()) { + argTys.push_back(IGM.Int64Ty); // Discriminator for the function pointer. 
+ } + for (auto ty : argTypes) { + argTys.push_back(ty); + } auto calleeFnPtrType = fnPtr.getRawPointer()->getType(); auto *dispatchFnTy = llvm::FunctionType::get(IGM.VoidTy, argTys, false /*vaargs*/); llvm::SmallString<40> name; - llvm::raw_svector_ostream(name) << "__swift_suspend_dispatch_" << args.size(); + llvm::raw_svector_ostream(name) + << "__swift_suspend_dispatch_" << argTypes.size(); llvm::Function *dispatch = llvm::Function::Create(dispatchFnTy, llvm::Function::InternalLinkage, llvm::StringRef(name), &IGM.Module); @@ -2420,16 +2441,101 @@ IRGenFunction::createAsyncDispatchFn(const FunctionPointer &fnPtr, IGM.DebugInfo->emitArtificialFunction(dispatchIGF, dispatch); auto &Builder = dispatchIGF.Builder; auto it = dispatchIGF.CurFn->arg_begin(), end = dispatchIGF.CurFn->arg_end(); - llvm::Value *ptrArg = &*(it++); + llvm::Value *fnPtrArg = &*(it++); + llvm::Value *discriminatorArg = ((bool)originalAuthInfo) ? &*(it++) : nullptr; SmallVector callArgs; for (; it != end; ++it) { callArgs.push_back(&*it); } - ptrArg = Builder.CreateBitOrPointerCast(ptrArg, calleeFnPtrType); - auto callee = FunctionPointer(fnPtr.getKind(), ptrArg, fnPtr.getAuthInfo(), + fnPtrArg = Builder.CreateBitOrPointerCast(fnPtrArg, calleeFnPtrType); + PointerAuthInfo newAuthInfo = + ((bool)originalAuthInfo) + ? PointerAuthInfo(fnPtr.getAuthInfo().getKey(), discriminatorArg) + : originalAuthInfo; + auto callee = FunctionPointer(fnPtr.getKind(), fnPtrArg, newAuthInfo, fnPtr.getSignature()); auto call = Builder.CreateCall(callee, callArgs); call->setTailCall(); Builder.CreateRetVoid(); return dispatch; } + +void IRGenFunction::emitSuspensionPoint(llvm::Value *toExecutor, + llvm::Value *asyncResume) { + // TODO: pointerauth + + // Setup the suspend point. 
+ SmallVector arguments; + arguments.push_back(asyncResume); + auto resumeProjFn = getOrCreateResumeFromSuspensionFn(); + arguments.push_back( + Builder.CreateBitOrPointerCast(resumeProjFn, IGM.Int8PtrTy)); + llvm::Function *suspendFn = createAsyncSuspendFn(); + arguments.push_back( + Builder.CreateBitOrPointerCast(suspendFn, IGM.Int8PtrTy)); + + arguments.push_back(asyncResume); + arguments.push_back( + Builder.CreateBitOrPointerCast(toExecutor, getAsyncExecutor()->getType())); + arguments.push_back(getAsyncTask()); + arguments.push_back(getAsyncExecutor()); + arguments.push_back(getAsyncContext()); + + emitSuspendAsyncCall(arguments); +} + +llvm::Function *IRGenFunction::getOrCreateResumeFromSuspensionFn() { + auto name = "__swift_async_resume_get_context"; + return cast(IGM.getOrCreateHelperFunction( + name, IGM.Int8PtrTy, {IGM.Int8PtrTy}, + [&](IRGenFunction &IGF) { + auto &Builder = IGF.Builder; + Builder.CreateRet(&*IGF.CurFn->arg_begin()); + }, + false /*isNoInline*/)); +} + +llvm::Function *IRGenFunction::createAsyncSuspendFn() { + SmallVector argTys; + argTys.push_back(IGM.Int8PtrTy); // Resume function. + argTys.push_back(getAsyncExecutor()->getType()); // Executor to hop to. 
+ argTys.push_back(getAsyncTask()->getType()); + argTys.push_back(getAsyncExecutor()->getType()); + argTys.push_back(getAsyncContext()->getType()); + auto *suspendFnTy = + llvm::FunctionType::get(IGM.VoidTy, argTys, false /*vaargs*/); + + StringRef name = "__swift_suspend_point"; + if (llvm::GlobalValue *F = IGM.Module.getNamedValue(name)) + return cast(F); + + llvm::Function *suspendFn = + llvm::Function::Create(suspendFnTy, llvm::Function::InternalLinkage, + name, &IGM.Module); + suspendFn->setCallingConv(IGM.DefaultCC); + suspendFn->setDoesNotThrow(); + IRGenFunction suspendIGF(IGM, suspendFn); + if (IGM.DebugInfo) + IGM.DebugInfo->emitArtificialFunction(suspendIGF, suspendFn); + auto &Builder = suspendIGF.Builder; + + llvm::Value *resumeFunction = suspendFn->getArg(0); + llvm::Value *targetExecutor = suspendFn->getArg(1); + llvm::Value *task = suspendFn->getArg(2); + llvm::Value *executor = suspendFn->getArg(3); + llvm::Value *context = suspendFn->getArg(4); + + Alignment ptrAlign = IGM.getPointerAlignment(); + auto *resumeAddr = Builder.CreateStructGEP(task, 4); + Builder.CreateStore(resumeFunction, Address(resumeAddr, ptrAlign)); + auto *contextAddr = Builder.CreateStructGEP(task, 5); + Builder.CreateStore(context, Address(contextAddr, ptrAlign)); + auto *suspendCall = Builder.CreateCall( + IGM.getTaskSwitchFuncFn(), + { task, executor, targetExecutor }); + suspendCall->setDoesNotThrow(); + suspendCall->setCallingConv(IGM.SwiftCC); + suspendCall->setTailCall(); + Builder.CreateRetVoid(); + return suspendFn; +} diff --git a/lib/IRGen/GenMeta.cpp b/lib/IRGen/GenMeta.cpp index 3bccddbf25d2c..9909858fbd118 100644 --- a/lib/IRGen/GenMeta.cpp +++ b/lib/IRGen/GenMeta.cpp @@ -301,12 +301,16 @@ static void buildMethodDescriptorFields(IRGenModule &IGM, void IRGenModule::emitNonoverriddenMethodDescriptor(const SILVTable *VTable, SILDeclRef declRef) { auto entity = LinkEntity::forMethodDescriptor(declRef); - auto *var = cast(getAddrOfLLVMVariable(entity, ConstantInit(), 
DebugTypeInfo())); + auto *var = cast( + getAddrOfLLVMVariable(entity, ConstantInit(), DebugTypeInfo())); if (!var->isDeclaration()) { assert(IRGen.isLazilyReemittingNominalTypeDescriptor(VTable->getClass())); return; } + var->setConstant(true); + setTrueConstGlobal(var); + ConstantInitBuilder ib(*this); ConstantStructBuilder sb(ib.beginStruct(MethodDescriptorStructTy)); @@ -1542,8 +1546,11 @@ namespace { MetadataLayout = &IGM.getClassMetadataLayout(Type); if (auto superclassDecl = getType()->getSuperclassDecl()) { - if (MetadataLayout && MetadataLayout->hasResilientSuperclass()) + if (MetadataLayout && MetadataLayout->hasResilientSuperclass()) { + assert(!getType()->isRootDefaultActor() && + "root default actor has a resilient superclass?"); ResilientSuperClassRef = IGM.getTypeEntityReference(superclassDecl); + } } addVTableEntries(getType()); @@ -1772,7 +1779,7 @@ namespace { void addLayoutInfo() { // TargetRelativeDirectPointer SuperclassType; - if (auto superclassType = getType()->getSuperclass()) { + if (auto superclassType = getSuperclassForMetadata(IGM, getType())) { GenericSignature genericSig = getType()->getGenericSignature(); B.addRelativeAddress(IGM.getTypeRef(superclassType, genericSig, MangledTypeRefRole::Metadata) @@ -2937,12 +2944,15 @@ namespace { } } - llvm::Constant *getSuperclassMetadata() { - Type type = Target->mapTypeIntoContext(Target->getSuperclass()); - auto *metadata = - tryEmitConstantHeapMetadataRef(IGM, type->getCanonicalType(), - /*allowUninit*/ false); - return metadata; + CanType getSuperclassTypeForMetadata() { + if (auto superclass = getSuperclassForMetadata(IGM, Target)) + return Target->mapTypeIntoContext(superclass)->getCanonicalType(); + return CanType(); + } + + llvm::Constant *getSuperclassMetadata(CanType superclass) { + return tryEmitConstantHeapMetadataRef(IGM, superclass, + /*allowUninit*/ false); } bool shouldAddNullSuperclass() { @@ -2966,7 +2976,8 @@ namespace { } // If this is a root class, use SwiftObject as our 
formal parent. - if (!Target->hasSuperclass()) { + CanType superclass = asImpl().getSuperclassTypeForMetadata(); + if (!superclass) { // This is only required for ObjC interoperation. if (!IGM.ObjCInterop) { B.addNullPointer(IGM.TypeMetadataPtrTy); @@ -2982,8 +2993,10 @@ namespace { return; } - auto *metadata = asImpl().getSuperclassMetadata(); - assert(metadata != nullptr); + // This should succeed because the cases where it doesn't should + // lead to shouldAddNullSuperclass returning true above. + auto metadata = asImpl().getSuperclassMetadata(superclass); + assert(metadata); B.add(metadata); } @@ -3668,11 +3681,13 @@ namespace { bool shouldAddNullSuperclass() { return false; } - llvm::Constant *getSuperclassMetadata() { - Type superclass = type->getSuperclass(/*useArchetypes=*/false); - auto *metadata = - IGM.getAddrOfTypeMetadata(superclass->getCanonicalType()); - return metadata; + CanType getSuperclassTypeForMetadata() { + return getSuperclassForMetadata(IGM, type, /*useArchetypes=*/false); + } + + llvm::Constant *getSuperclassMetadata(CanType superclass) { + // We know that this is safe (???) + return IGM.getAddrOfTypeMetadata(superclass); } uint64_t getClassDataPointerHasSwiftMetadataBits() { @@ -4844,7 +4859,7 @@ namespace { // Emit a reference to the superclass. auto superclass = IGF.emitAbstractTypeMetadataRef( - Target->getSuperclass()->getCanonicalType()); + getSuperclassForMetadata(IGM, Target)); // Dig out the address of the superclass field and store. 
auto &layout = IGF.IGM.getForeignMetadataLayout(Target); diff --git a/lib/IRGen/GenOpaque.cpp b/lib/IRGen/GenOpaque.cpp index f7456162291e5..a38e301a68f3a 100644 --- a/lib/IRGen/GenOpaque.cpp +++ b/lib/IRGen/GenOpaque.cpp @@ -484,7 +484,7 @@ IRGenFunction::emitValueWitnessFunctionRef(SILType type, auto vwtable = emitValueWitnessTableRef(type, &metadataSlot); auto witness = emitLoadOfValueWitnessFunction(*this, vwtable, index); - setScopedLocalTypeDataForLayout(type, key, witness.getPointer(*this)); + setScopedLocalTypeDataForLayout(type, key, witness.getRawPointer()); if (auto &authInfo = witness.getAuthInfo()) { setScopedLocalTypeDataForLayout(type, LocalTypeDataKind::forValueWitnessDiscriminator(index), diff --git a/lib/IRGen/GenPointerAuth.cpp b/lib/IRGen/GenPointerAuth.cpp index 6729655f268a0..2f00456e037ef 100644 --- a/lib/IRGen/GenPointerAuth.cpp +++ b/lib/IRGen/GenPointerAuth.cpp @@ -71,9 +71,7 @@ llvm::Value *irgen::emitPointerAuthStrip(IRGenFunction &IGF, FunctionPointer irgen::emitPointerAuthResign(IRGenFunction &IGF, const FunctionPointer &fn, const PointerAuthInfo &newAuthInfo) { - // TODO: Handle resigning AsyncFunctionPointers. - assert(fn.getKind().value == FunctionPointer::KindTy::Value::Function); - llvm::Value *fnPtr = emitPointerAuthResign(IGF, fn.getPointer(IGF), + llvm::Value *fnPtr = emitPointerAuthResign(IGF, fn.getRawPointer(), fn.getAuthInfo(), newAuthInfo); return FunctionPointer(fn.getKind(), fnPtr, newAuthInfo, fn.getSignature()); } diff --git a/lib/IRGen/GenProto.cpp b/lib/IRGen/GenProto.cpp index 36c0be215a24d..81a48078e756f 100644 --- a/lib/IRGen/GenProto.cpp +++ b/lib/IRGen/GenProto.cpp @@ -2748,8 +2748,16 @@ void NecessaryBindings::save(IRGenFunction &IGF, Address buffer) const { void NecessaryBindings::addTypeMetadata(CanType type) { assert(!isa(type)); + // If the bindings are for an async function, we will always need the type + // metadata. 
The opportunities to reconstruct it available in the context of + // partial apply forwarders are not available here. + if (forAsyncFunction()) { + addRequirement({type, nullptr}); + return; + } + // Bindings are only necessary at all if the type is dependent. - if (!type->hasArchetype() && !forAsyncFunction()) + if (!type->hasArchetype()) return; // Break down structural types so that we don't eagerly pass metadata diff --git a/lib/IRGen/GenReflection.cpp b/lib/IRGen/GenReflection.cpp index bc3ab970407b2..c6dd84df019db 100644 --- a/lib/IRGen/GenReflection.cpp +++ b/lib/IRGen/GenReflection.cpp @@ -779,7 +779,8 @@ class FieldTypeMetadataBuilder : public ReflectionMetadataBuilder { auto *CD = dyn_cast(NTD); auto *PD = dyn_cast(NTD); if (CD && CD->getSuperclass()) { - addTypeRef(CD->getSuperclass(), CD->getGenericSignature()); + addTypeRef(CD->getSuperclass(), + CD->getGenericSignature()); } else if (PD && PD->getDeclaredInterfaceType()->getSuperclass()) { addTypeRef(PD->getDeclaredInterfaceType()->getSuperclass(), PD->getGenericSignature()); diff --git a/lib/IRGen/GenThunk.cpp b/lib/IRGen/GenThunk.cpp index 3a7febb574658..9e529b1cf73d0 100644 --- a/lib/IRGen/GenThunk.cpp +++ b/lib/IRGen/GenThunk.cpp @@ -144,7 +144,8 @@ void IRGenModule::emitDispatchThunk(SILDeclRef declRef) { } IRGenFunction IGF(*this, f); - IGF.setAsync(declRef.getAbstractFunctionDecl()->hasAsync()); + if (declRef.getAbstractFunctionDecl()->hasAsync()) + IGF.setupAsync(); // Look up the method. auto fn = lookupMethod(IGF, declRef); diff --git a/lib/IRGen/GenType.cpp b/lib/IRGen/GenType.cpp index 4ea4745806455..d9d060c2d66c5 100644 --- a/lib/IRGen/GenType.cpp +++ b/lib/IRGen/GenType.cpp @@ -1005,7 +1005,60 @@ namespace { : PODSingleScalarTypeInfo(storage, size, std::move(spareBits), align) {} }; - /// A TypeInfo implementation for bare non-null pointers (like `void *`). + /// A TypeInfo implementation for pointers that are: + /// - valid (i.e. 
non-null, and generally >= LeastValidPointerValue), + /// - aligned (i.e. have zero low bits up to some bit), and + /// - trivial (i.e. not reference-counted or otherwise managed). + /// + /// These properties make it suitable for unmanaged pointers with special + /// uses in the ABI. + class AlignedRawPointerTypeInfo final : + public PODSingleScalarTypeInfo { + Alignment PointeeAlign; + public: + AlignedRawPointerTypeInfo(llvm::Type *storage, + Size size, SpareBitVector &&spareBits, + Alignment align, Alignment pointeeAlign) + : PODSingleScalarTypeInfo(storage, size, std::move(spareBits), align), + PointeeAlign(pointeeAlign) {} + + bool mayHaveExtraInhabitants(IRGenModule &IGM) const override { + return true; + } + + unsigned getFixedExtraInhabitantCount(IRGenModule &IGM) const override { + return getAlignedPointerExtraInhabitantCount(IGM, PointeeAlign); + } + + APInt getFixedExtraInhabitantValue(IRGenModule &IGM, unsigned bits, + unsigned index) const override { + return getAlignedPointerExtraInhabitantValue(IGM, PointeeAlign, + bits, index, 0); + } + + llvm::Value *getExtraInhabitantIndex(IRGenFunction &IGF, + Address src, + SILType T, + bool isOutlined) const override { + return getAlignedPointerExtraInhabitantIndex(IGF, PointeeAlign, src); + } + + void storeExtraInhabitant(IRGenFunction &IGF, llvm::Value *index, + Address dest, SILType T, + bool isOutlined) const override { + storeAlignedPointerExtraInhabitant(IGF, PointeeAlign, index, dest); + } + }; + + /// A TypeInfo implementation for Builtin.RawPointer. We intentionally + /// do not make any assumptions about values of this type except that + /// they are not the special "null" extra inhabitant; as a result, an + /// Optional can reliably carry an arbitrary + /// bit-pattern of its size without fear of corruption. Since the + /// primary uses of Builtin.RawPointer are the unsafe pointer APIs, + /// that is exactly what we want. 
It does mean that Builtin.RawPointer + /// is usually a suboptimal type for representing known-valid pointers. class RawPointerTypeInfo final : public PODSingleScalarTypeInfo { public: @@ -1207,20 +1260,23 @@ TypeConverter::createPrimitive(llvm::Type *type, Size size, Alignment align) { align); } -/// Constructs a type info which performs simple loads and stores of -/// the given IR type, given that it's a pointer to an aligned pointer -/// type. -const LoadableTypeInfo * -TypeConverter::createPrimitiveForAlignedPointer(llvm::PointerType *type, - Size size, - Alignment align, - Alignment pointerAlignment) { +static SpareBitVector getSpareBitsForAlignedPointer(IRGenModule &IGM, + Alignment pointeeAlign) { + // FIXME: this is little-endian SpareBitVector spareBits = IGM.TargetInfo.PointerSpareBits; - for (unsigned bit = 0; Alignment(1ull << bit) != pointerAlignment; ++bit) { + for (unsigned bit = 0; Alignment(1ull << bit) != pointeeAlign; ++bit) { spareBits.setBit(bit); } + return spareBits; +} - return new PrimitiveTypeInfo(type, size, std::move(spareBits), align); +static LoadableTypeInfo *createAlignedPointerTypeInfo(IRGenModule &IGM, + llvm::Type *ty, + Alignment pointeeAlign) { + return new AlignedRawPointerTypeInfo(ty, IGM.getPointerSize(), + getSpareBitsForAlignedPointer(IGM, pointeeAlign), + IGM.getPointerAlignment(), + pointeeAlign); } /// Constructs a fixed-size type info which asserts if you try to copy @@ -1424,11 +1480,20 @@ const TypeInfo &IRGenModule::getWitnessTablePtrTypeInfo() { const LoadableTypeInfo &TypeConverter::getWitnessTablePtrTypeInfo() { if (WitnessTablePtrTI) return *WitnessTablePtrTI; - WitnessTablePtrTI = - createPrimitiveForAlignedPointer(IGM.WitnessTablePtrTy, - IGM.getPointerSize(), - IGM.getPointerAlignment(), - IGM.getWitnessTableAlignment()); + + auto spareBits = + getSpareBitsForAlignedPointer(IGM, IGM.getWitnessTableAlignment()); + + // This is sub-optimal because it doesn't consider that there are + // also potential extra 
inhabitants in witness table pointers, but + // it's what we're currently doing, so we might be stuck. + // TODO: it's likely that this never matters in the current ABI, + // so we can just switch to using AlignedRawPointerTypeInfo; but + // we need to check that first. + WitnessTablePtrTI = new PrimitiveTypeInfo(IGM.WitnessTablePtrTy, + IGM.getPointerSize(), + std::move(spareBits), + IGM.getPointerAlignment()); WitnessTablePtrTI->NextConverted = FirstType; FirstType = WitnessTablePtrTI; return *WitnessTablePtrTI; @@ -1436,6 +1501,7 @@ const LoadableTypeInfo &TypeConverter::getWitnessTablePtrTypeInfo() { const SpareBitVector &IRGenModule::getWitnessTablePtrSpareBits() const { // Witness tables are pointers and have pointer spare bits. + // FIXME: this is not what we use in getWitnessTablePtrTypeInfo() return TargetInfo.PointerSpareBits; } @@ -1563,6 +1629,48 @@ const LoadableTypeInfo &TypeConverter::getRawPointerTypeInfo() { return *RawPointerTI; } +const LoadableTypeInfo &IRGenModule::getRawUnsafeContinuationTypeInfo() { + return Types.getRawUnsafeContinuationTypeInfo(); +} + +const LoadableTypeInfo &TypeConverter::getRawUnsafeContinuationTypeInfo() { + if (RawUnsafeContinuationTI) return *RawUnsafeContinuationTI; + + // A Builtin.RawUnsafeContinuation is an AsyncTask*, which is a heap + // object aligned to 2*alignof(void*). Incomplete tasks are + // self-owning, which is to say that pointers to them can be held + // reliably without retaining or releasing until the task starts + // running again. + // + // TODO: It is possible to retain and release task pointers, which means + // they can be used directly as Swift function contexts. Preserve this + // information to optimize closure-creation (partial apply). 
+ auto ty = IGM.Int8PtrTy; + auto pointeeAlign = Alignment(2 * IGM.getPointerAlignment().getValue()); + RawUnsafeContinuationTI = + createAlignedPointerTypeInfo(IGM, ty, pointeeAlign); + RawUnsafeContinuationTI->NextConverted = FirstType; + FirstType = RawUnsafeContinuationTI; + return *RawUnsafeContinuationTI; +} + +const LoadableTypeInfo &TypeConverter::getJobTypeInfo() { + if (JobTI) return *JobTI; + + // A Builtin.Job is a Job*, which is an arbitrary pointer aligned to + // 2*alignof(void*). Jobs are self-owning, which is to say that + // they're valid until they are scheduled, and then they're responsible + // for destroying themselves. (Jobs are often interior pointers into + // an AsyncTask*, but that's not guaranteed.) + auto ty = llvm::StructType::create(IGM.getLLVMContext(), "swift.job") + ->getPointerTo(); + auto pointeeAlign = Alignment(2 * IGM.getPointerAlignment().getValue()); + JobTI = createAlignedPointerTypeInfo(IGM, ty, pointeeAlign); + JobTI->NextConverted = FirstType; + FirstType = JobTI; + return *JobTI; +} + const LoadableTypeInfo &TypeConverter::getEmptyTypeInfo() { if (EmptyTI) return *EmptyTI; EmptyTI = new EmptyTypeInfo(IGM.Int8Ty); @@ -1967,6 +2075,10 @@ const TypeInfo *TypeConverter::convertType(CanType ty) { getFixedBufferAlignment(IGM)); case TypeKind::BuiltinRawPointer: return &getRawPointerTypeInfo(); + case TypeKind::BuiltinRawUnsafeContinuation: + return &getRawUnsafeContinuationTypeInfo(); + case TypeKind::BuiltinJob: + return &getJobTypeInfo(); case TypeKind::BuiltinIntegerLiteral: return &getIntegerLiteralTypeInfo(); case TypeKind::BuiltinFloat: diff --git a/lib/IRGen/GenType.h b/lib/IRGen/GenType.h index b62e2cfdf7fc5..7d9afa5f5dc15 100644 --- a/lib/IRGen/GenType.h +++ b/lib/IRGen/GenType.h @@ -103,6 +103,8 @@ class TypeConverter { const LoadableTypeInfo *UnknownObjectTI = nullptr; const LoadableTypeInfo *BridgeObjectTI = nullptr; const LoadableTypeInfo *RawPointerTI = nullptr; + const LoadableTypeInfo *RawUnsafeContinuationTI 
= nullptr; + const LoadableTypeInfo *JobTI = nullptr; const LoadableTypeInfo *WitnessTablePtrTI = nullptr; const TypeInfo *TypeMetadataPtrTI = nullptr; const TypeInfo *SwiftContextPtrTI = nullptr; @@ -133,9 +135,6 @@ class TypeConverter { const LoadableTypeInfo *createPrimitive(llvm::Type *T, Size size, Alignment align); - const LoadableTypeInfo *createPrimitiveForAlignedPointer(llvm::PointerType *T, - Size size, Alignment align, - Alignment pointerAlignment); const FixedTypeInfo *createImmovable(llvm::Type *T, Size size, Alignment align); @@ -183,6 +182,8 @@ class TypeConverter { const LoadableTypeInfo &getUnknownObjectTypeInfo(); const LoadableTypeInfo &getBridgeObjectTypeInfo(); const LoadableTypeInfo &getRawPointerTypeInfo(); + const LoadableTypeInfo &getRawUnsafeContinuationTypeInfo(); + const LoadableTypeInfo &getJobTypeInfo(); const TypeInfo &getTypeMetadataPtrTypeInfo(); const TypeInfo &getSwiftContextPtrTypeInfo(); const TypeInfo &getTaskContinuationFunctionPtrTypeInfo(); diff --git a/lib/IRGen/IRGen.cpp b/lib/IRGen/IRGen.cpp index a7ce119002138..543da66d02761 100644 --- a/lib/IRGen/IRGen.cpp +++ b/lib/IRGen/IRGen.cpp @@ -693,8 +693,12 @@ static void setPointerAuthOptions(PointerAuthOptions &opts, SpecialPointerAuthDiscriminators::ResilientClassStubInitCallback); opts.AsyncContextParent = - PointerAuthSchema(codeKey, /*address*/ true, Discrimination::Constant, + PointerAuthSchema(dataKey, /*address*/ true, Discrimination::Constant, SpecialPointerAuthDiscriminators::AsyncContextParent); + + opts.AsyncContextResume = + PointerAuthSchema(codeKey, /*address*/ true, Discrimination::Constant, + SpecialPointerAuthDiscriminators::AsyncContextResume); } std::unique_ptr diff --git a/lib/IRGen/IRGenDebugInfo.cpp b/lib/IRGen/IRGenDebugInfo.cpp index d462676af3db3..ba81c3eed6113 100644 --- a/lib/IRGen/IRGenDebugInfo.cpp +++ b/lib/IRGen/IRGenDebugInfo.cpp @@ -1326,6 +1326,20 @@ class IRGenDebugInfoImpl : public IRGenDebugInfo { MangledName); } + case 
TypeKind::BuiltinRawUnsafeContinuation: { + unsigned PtrSize = CI.getTargetInfo().getPointerWidth(0); + return DBuilder.createPointerType(nullptr, PtrSize, 0, + /* DWARFAddressSpace */ None, + MangledName); + } + + case TypeKind::BuiltinJob: { + unsigned PtrSize = CI.getTargetInfo().getPointerWidth(0); + return DBuilder.createPointerType(nullptr, PtrSize, 0, + /* DWARFAddressSpace */ None, + MangledName); + } + case TypeKind::DynamicSelf: { // Self. We don't have a way to represent instancetype in DWARF, // so we emit the static type instead. This is similar to what we diff --git a/lib/IRGen/IRGenFunction.cpp b/lib/IRGen/IRGenFunction.cpp index 3b8ecee29dd46..a68e14f09e1fa 100644 --- a/lib/IRGen/IRGenFunction.cpp +++ b/lib/IRGen/IRGenFunction.cpp @@ -23,6 +23,7 @@ #include "llvm/Support/CommandLine.h" #include "llvm/Support/raw_ostream.h" +#include "Callee.h" #include "Explosion.h" #include "IRGenDebugInfo.h" #include "IRGenFunction.h" @@ -511,3 +512,225 @@ llvm::Value *IRGenFunction::alignUpToMaximumAlignment(llvm::Type *sizeTy, llvm:: auto *invertedMask = Builder.CreateNot(alignMask); return Builder.CreateAnd(Builder.CreateAdd(val, alignMask), invertedMask); } + +/// Returns the current task \p currTask as a Builtin.RawUnsafeContinuation at +1. +static llvm::Value *unsafeContinuationFromTask(IRGenFunction &IGF, + llvm::Value *currTask) { + auto &IGM = IGF.IGM; + auto &Builder = IGF.Builder; + + auto &rawPointerTI = IGM.getRawUnsafeContinuationTypeInfo(); + return Builder.CreateBitOrPointerCast(currTask, rawPointerTI.getStorageType()); +} + +void IRGenFunction::emitGetAsyncContinuation(SILType resumeTy, + StackAddress resultAddr, + Explosion &out) { + // Create the continuation. 
+ // void current_sil_function(AsyncTask *currTask, Executor *currExecutor, + // AsyncContext *currCtxt) { + // + // A continuation is the current AsyncTask 'currTask' with: + // currTask->ResumeTask = @llvm.coro.async.resume(); + // currTask->ResumeContext = &continuation_context; + // + // Where: + // + // struct { + // AsyncContext *resumeCtxt; + // void *awaitSynchronization; + // SwiftError *errResult; + // Result *result; + // ExecutorRef *resumeExecutor; + // } continuation_context; // local variable of current_sil_function + // + // continuation_context.resumeCtxt = currCtxt; + // continuation_context.errResult = nullptr; + // continuation_context.result = ... // local alloca. + // continuation_context.resumeExecutor = .. // current executor + + auto currTask = getAsyncTask(); + auto unsafeContinuation = unsafeContinuationFromTask(*this, currTask); + + // Create and setup the continuation context. + // continuation_context.resumeCtxt = currCtxt; + // continuation_context.errResult = nullptr; + // continuation_context.result = ... 
// local alloca T + auto pointerAlignment = IGM.getPointerAlignment(); + auto continuationContext = + createAlloca(IGM.AsyncContinuationContextTy, pointerAlignment); + AsyncCoroutineCurrentContinuationContext = continuationContext.getAddress(); + // TODO: add lifetime with matching lifetime in await_async_continuation + auto contResumeAddr = + Builder.CreateStructGEP(continuationContext.getAddress(), 0); + Builder.CreateStore(getAsyncContext(), + Address(contResumeAddr, pointerAlignment)); + auto contErrResultAddr = + Builder.CreateStructGEP(continuationContext.getAddress(), 2); + Builder.CreateStore( + llvm::Constant::getNullValue( + contErrResultAddr->getType()->getPointerElementType()), + Address(contErrResultAddr, pointerAlignment)); + auto contResultAddr = + Builder.CreateStructGEP(continuationContext.getAddress(), 3); + if (!resultAddr.getAddress().isValid()) { + auto &resumeTI = getTypeInfo(resumeTy); + auto resultAddr = + resumeTI.allocateStack(*this, resumeTy, "async.continuation.result"); + Builder.CreateStore(Builder.CreateBitOrPointerCast( + resultAddr.getAddress().getAddress(), + contResultAddr->getType()->getPointerElementType()), + Address(contResultAddr, pointerAlignment)); + } else { + Builder.CreateStore(Builder.CreateBitOrPointerCast( + resultAddr.getAddress().getAddress(), + contResultAddr->getType()->getPointerElementType()), + Address(contResultAddr, pointerAlignment)); + } + // continuation_context.resumeExecutor = // current executor + auto contExecutorRefAddr = + Builder.CreateStructGEP(continuationContext.getAddress(), 4); + Builder.CreateStore( + Builder.CreateBitOrPointerCast( + getAsyncExecutor(), + contExecutorRefAddr->getType()->getPointerElementType()), + Address(contExecutorRefAddr, pointerAlignment)); + + // Fill the current task (i.e the continuation) with the continuation + // information. 
+ // currTask->ResumeTask = @llvm.coro.async.resume(); + assert(currTask->getType() == IGM.SwiftTaskPtrTy); + auto currTaskResumeTaskAddr = Builder.CreateStructGEP(currTask, 4); + auto coroResume = + Builder.CreateIntrinsicCall(llvm::Intrinsic::coro_async_resume, {}); + + assert(AsyncCoroutineCurrentResume == nullptr && + "Don't support nested get_async_continuation"); + AsyncCoroutineCurrentResume = coroResume; + Builder.CreateStore( + Builder.CreateBitOrPointerCast(coroResume, IGM.FunctionPtrTy), + Address(currTaskResumeTaskAddr, pointerAlignment)); + // currTask->ResumeContext = &continuation_context; + auto currTaskResumeCtxtAddr = Builder.CreateStructGEP(currTask, 5); + Builder.CreateStore( + Builder.CreateBitOrPointerCast(continuationContext.getAddress(), + IGM.SwiftContextPtrTy), + Address(currTaskResumeCtxtAddr, pointerAlignment)); + + // Publish all the writes. + // continuation_context.awaitSynchronization =(atomic release) nullptr; + auto contAwaitSyncAddr = + Builder.CreateStructGEP(continuationContext.getAddress(), 1); + auto null = llvm::ConstantInt::get( + contAwaitSyncAddr->getType()->getPointerElementType(), 0); + auto atomicStore = + Builder.CreateStore(null, Address(contAwaitSyncAddr, pointerAlignment)); + atomicStore->setAtomic(llvm::AtomicOrdering::Release, + llvm::SyncScope::System); + out.add(unsafeContinuation); +} + +void IRGenFunction::emitAwaitAsyncContinuation( + SILType resumeTy, bool isIndirectResult, + Explosion &outDirectResult, llvm::BasicBlock *&normalBB, + llvm::PHINode *&optionalErrorResult, llvm::BasicBlock *&optionalErrorBB) { + assert(AsyncCoroutineCurrentContinuationContext && "no active continuation"); + auto pointerAlignment = IGM.getPointerAlignment(); + + // First check whether the await reached this point first. Meaning we still + // have to wait for the continuation result. If the await reaches first we + // abort the control flow here (resuming the continuation will execute the + // remaining control flow). 
+ auto contAwaitSyncAddr = + Builder.CreateStructGEP(AsyncCoroutineCurrentContinuationContext, 1); + auto null = llvm::ConstantInt::get( + contAwaitSyncAddr->getType()->getPointerElementType(), 0); + auto one = llvm::ConstantInt::get( + contAwaitSyncAddr->getType()->getPointerElementType(), 1); + auto results = Builder.CreateAtomicCmpXchg( + contAwaitSyncAddr, null, one, + llvm::AtomicOrdering::Release /*success ordering*/, + llvm::AtomicOrdering::Acquire /* failure ordering */, + llvm::SyncScope::System); + auto firstAtAwait = Builder.CreateExtractValue(results, 1); + auto contBB = createBasicBlock("await.async.maybe.resume"); + auto abortBB = createBasicBlock("await.async.abort"); + Builder.CreateCondBr(firstAtAwait, abortBB, contBB); + Builder.emitBlock(abortBB); + { + // We are first to the sync point. Abort. The continuation's result is not + // available yet. + emitCoroutineOrAsyncExit(); + } + + auto contBB2 = createBasicBlock("await.async.resume"); + Builder.emitBlock(contBB); + { + // Setup the suspend point. + SmallVector arguments; + arguments.push_back(AsyncCoroutineCurrentResume); + auto resumeProjFn = getOrCreateResumePrjFn(); + arguments.push_back( + Builder.CreateBitOrPointerCast(resumeProjFn, IGM.Int8PtrTy)); + // The dispatch function just calls the resume point. 
+ auto resumeFnPtr = + getFunctionPointerForResumeIntrinsic(AsyncCoroutineCurrentResume); + arguments.push_back(Builder.CreateBitOrPointerCast( + createAsyncDispatchFn(resumeFnPtr, + {IGM.Int8PtrTy, IGM.Int8PtrTy, IGM.Int8PtrTy}), + IGM.Int8PtrTy)); + arguments.push_back(AsyncCoroutineCurrentResume); + arguments.push_back( + Builder.CreateBitOrPointerCast(getAsyncTask(), IGM.Int8PtrTy)); + arguments.push_back( + Builder.CreateBitOrPointerCast(getAsyncExecutor(), IGM.Int8PtrTy)); + arguments.push_back(Builder.CreateBitOrPointerCast( + AsyncCoroutineCurrentContinuationContext, IGM.Int8PtrTy)); + emitSuspendAsyncCall(arguments); + + auto results = Builder.CreateAtomicCmpXchg( + contAwaitSyncAddr, null, one, + llvm::AtomicOrdering::Release /*success ordering*/, + llvm::AtomicOrdering::Acquire /* failure ordering */, + llvm::SyncScope::System); + // Again, are we first at the wait (can only reach that state after + // continuation.resume/abort is called)? If so abort to wait for the end of + // the await point to be reached. 
+ auto firstAtAwait = Builder.CreateExtractValue(results, 1); + Builder.CreateCondBr(firstAtAwait, abortBB, contBB2); + } + + Builder.emitBlock(contBB2); + auto contBB3 = createBasicBlock("await.async.normal"); + if (optionalErrorBB) { + auto contErrResultAddr = Address( + Builder.CreateStructGEP(AsyncCoroutineCurrentContinuationContext, 2), + pointerAlignment); + auto errorRes = Builder.CreateLoad(contErrResultAddr); + auto nullError = llvm::Constant::getNullValue(errorRes->getType()); + auto hasError = Builder.CreateICmpNE(errorRes, nullError); + optionalErrorResult->addIncoming(errorRes, Builder.GetInsertBlock()); + Builder.CreateCondBr(hasError, optionalErrorBB, contBB3); + } else { + Builder.CreateBr(contBB3); + } + + Builder.emitBlock(contBB3); + if (!isIndirectResult) { + auto contResultAddrAddr = + Builder.CreateStructGEP(AsyncCoroutineCurrentContinuationContext, 3); + auto resultAddrVal = + Builder.CreateLoad(Address(contResultAddrAddr, pointerAlignment)); + // Take the result. 
+ auto &resumeTI = cast(getTypeInfo(resumeTy)); + auto resultStorageTy = resumeTI.getStorageType(); + auto resultAddr = + Address(Builder.CreateBitOrPointerCast(resultAddrVal, + resultStorageTy->getPointerTo()), + resumeTI.getFixedAlignment()); + resumeTI.loadAsTake(*this, resultAddr, outDirectResult); + } + Builder.CreateBr(normalBB); + AsyncCoroutineCurrentResume = nullptr; + AsyncCoroutineCurrentContinuationContext = nullptr; +} diff --git a/lib/IRGen/IRGenFunction.h b/lib/IRGen/IRGenFunction.h index 5f96c684379bc..e4a938618fa21 100644 --- a/lib/IRGen/IRGenFunction.h +++ b/lib/IRGen/IRGenFunction.h @@ -132,9 +132,31 @@ class IRGenFunction { llvm::Value *getAsyncExecutor(); llvm::Value *getAsyncContext(); + llvm::CallInst *emitSuspendAsyncCall(ArrayRef args); + llvm::Function *getOrCreateResumePrjFn(); llvm::Function *createAsyncDispatchFn(const FunctionPointer &fnPtr, ArrayRef args); + llvm::Function *createAsyncDispatchFn(const FunctionPointer &fnPtr, + ArrayRef argTypes); + + void emitGetAsyncContinuation(SILType resumeTy, + StackAddress optionalResultAddr, + Explosion &out); + + void emitAwaitAsyncContinuation(SILType resumeTy, + bool isIndirectResult, + Explosion &outDirectResult, + llvm::BasicBlock *&normalBB, + llvm::PHINode *&optionalErrorPhi, + llvm::BasicBlock *&optionalErrorBB); + + FunctionPointer + getFunctionPointerForResumeIntrinsic(llvm::Value *resumeIntrinsic); + + void emitSuspensionPoint(llvm::Value *toExecutor, llvm::Value *asyncResume); + llvm::Function *getOrCreateResumeFromSuspensionFn(); + llvm::Function *createAsyncSuspendFn(); private: void emitPrologue(); @@ -145,7 +167,18 @@ class IRGenFunction { llvm::Value *CalleeErrorResultSlot = nullptr; llvm::Value *CallerErrorResultSlot = nullptr; llvm::Value *CoroutineHandle = nullptr; - bool IsAsync = false; + llvm::Value *AsyncCoroutineCurrentResume = nullptr; + llvm::Value *AsyncCoroutineCurrentContinuationContext = nullptr; + + Address asyncTaskLocation; + Address asyncExecutorLocation; + 
Address asyncContextLocation; + + /// The unique block that calls @llvm.coro.end. + llvm::BasicBlock *CoroutineExitBlock = nullptr; + +public: + void emitCoroutineOrAsyncExit(); //--- Helper methods ----------------------------------------------------------- public: @@ -159,8 +192,8 @@ class IRGenFunction { return getEffectiveOptimizationMode() == OptimizationMode::ForSize; } - bool isAsync() const { return IsAsync; } - void setAsync(bool async = true) { IsAsync = async; } + void setupAsync(); + bool isAsync() const { return asyncTaskLocation.isValid(); } Address createAlloca(llvm::Type *ty, Alignment align, const llvm::Twine &name = ""); diff --git a/lib/IRGen/IRGenMangler.cpp b/lib/IRGen/IRGenMangler.cpp index 720a85ecc9887..98e59d382cd51 100644 --- a/lib/IRGen/IRGenMangler.cpp +++ b/lib/IRGen/IRGenMangler.cpp @@ -11,6 +11,7 @@ //===----------------------------------------------------------------------===// #include "IRGenMangler.h" +#include "GenClass.h" #include "swift/AST/ExistentialLayout.h" #include "swift/AST/IRGenOptions.h" #include "swift/AST/ProtocolAssociations.h" @@ -91,7 +92,7 @@ IRGenMangler::withSymbolicReferences(IRGenModule &IGM, CanSymbolicReferenceLocally(CanSymbolicReference); AllowSymbolicReferences = true; - CanSymbolicReference = [](SymbolicReferent s) -> bool { + CanSymbolicReference = [&](SymbolicReferent s) -> bool { if (auto type = s.dyn_cast()) { // The short-substitution types in the standard library have compact // manglings already, and the runtime ought to have a lookup table for @@ -113,10 +114,16 @@ IRGenMangler::withSymbolicReferences(IRGenModule &IGM, // TODO: We could assign a symbolic reference discriminator to refer // to objc class refs. if (auto clas = dyn_cast(type)) { - if (clas->hasClangNode() - && clas->getForeignClassKind() != ClassDecl::ForeignKind::CFType) { - return false; - } + // Swift-defined classes can be symbolically referenced. 
+ if (hasKnownSwiftMetadata(IGM, const_cast(clas))) + return true; + + // Foreign class types can be symbolically referenced. + if (clas->getForeignClassKind() == ClassDecl::ForeignKind::CFType) + return true; + + // Otherwise no. + return false; } return true; diff --git a/lib/IRGen/IRGenModule.cpp b/lib/IRGen/IRGenModule.cpp index 989ada945c2f6..3735bf13af130 100644 --- a/lib/IRGen/IRGenModule.cpp +++ b/lib/IRGen/IRGenModule.cpp @@ -601,7 +601,18 @@ IRGenModule::IRGenModule(IRGenerator &irgen, AsyncFunctionPointerTy = createStructType(*this, "swift.async_func_pointer", {RelativeAddressTy, Int32Ty}, true); SwiftContextTy = createStructType(*this, "swift.context", {}); - SwiftTaskTy = createStructType(*this, "swift.task", {}); + auto *ContextPtrTy = llvm::PointerType::getUnqual(SwiftContextTy); + + // This must match the definition of class AsyncTask in swift/ABI/Task.h. + SwiftTaskTy = createStructType(*this, "swift.task", { + RefCountedStructTy, // object header + Int8PtrTy, Int8PtrTy, // Job.SchedulerPrivate + SizeTy, // Job.Flags + FunctionPtrTy, // Job.RunJob/Job.ResumeTask + ContextPtrTy, // Task.ResumeContext + IntPtrTy // Task.Status + }); + SwiftExecutorTy = createStructType(*this, "swift.executor", {}); AsyncFunctionPointerPtrTy = AsyncFunctionPointerTy->getPointerTo(DefaultAS); SwiftContextPtrTy = SwiftContextTy->getPointerTo(DefaultAS); @@ -620,6 +631,11 @@ IRGenModule::IRGenModule(IRGenerator &irgen, *this, "swift.async_task_and_context", { SwiftTaskPtrTy, SwiftContextPtrTy }); + AsyncContinuationContextTy = createStructType( + *this, "swift.async_continuation_context", + {SwiftContextPtrTy, SizeTy, ErrorPtrTy, OpaquePtrTy, SwiftExecutorPtrTy}); + AsyncContinuationContextPtrTy = AsyncContinuationContextTy->getPointerTo(); + DifferentiabilityWitnessTy = createStructType( *this, "swift.differentiability_witness", {Int8PtrTy, Int8PtrTy}); } @@ -731,6 +747,14 @@ namespace RuntimeConstants { } return RuntimeAvailability::AlwaysAvailable; } + + 
RuntimeAvailability DifferentiationAvailability(ASTContext &context) { + auto featureAvailability = context.getDifferentiationAvailability(); + if (!isDeploymentAvailabilityContainedIn(context, featureAvailability)) { + return RuntimeAvailability::ConditionallyAvailable; + } + return RuntimeAvailability::AlwaysAvailable; + } } // namespace RuntimeConstants // We don't use enough attributes to justify generalizing the diff --git a/lib/IRGen/IRGenModule.h b/lib/IRGen/IRGenModule.h index b756b6ec6ccc0..ebfb29f1f0525 100644 --- a/lib/IRGen/IRGenModule.h +++ b/lib/IRGen/IRGenModule.h @@ -735,6 +735,8 @@ class IRGenModule { llvm::FunctionType *TaskContinuationFunctionTy; llvm::PointerType *TaskContinuationFunctionPtrTy; llvm::StructType *AsyncTaskAndContextTy; + llvm::StructType *AsyncContinuationContextTy; + llvm::PointerType *AsyncContinuationContextPtrTy; llvm::StructType *DifferentiabilityWitnessTy; // { i8*, i8* } llvm::GlobalVariable *TheTrivialPropertyDescriptor = nullptr; @@ -907,6 +909,7 @@ class IRGenModule { const LoadableTypeInfo &getUnknownObjectTypeInfo(); const LoadableTypeInfo &getBridgeObjectTypeInfo(); const LoadableTypeInfo &getRawPointerTypeInfo(); + const LoadableTypeInfo &getRawUnsafeContinuationTypeInfo(); llvm::Type *getStorageTypeForUnlowered(Type T); llvm::Type *getStorageTypeForLowered(CanType T); llvm::Type *getStorageType(SILType T); @@ -1281,6 +1284,7 @@ class IRGenModule { llvm::InlineAsm *getObjCRetainAutoreleasedReturnValueMarker(); ClassDecl *getObjCRuntimeBaseForSwiftRootClass(ClassDecl *theClass); ClassDecl *getObjCRuntimeBaseClass(Identifier name, Identifier objcName); + ClassDecl *getSwiftNativeNSObjectDecl(); llvm::Module *getModule() const; llvm::AttributeList getAllocAttrs(); diff --git a/lib/IRGen/IRGenSIL.cpp b/lib/IRGen/IRGenSIL.cpp index 90858a76fbcd0..6a42fb90948f7 100644 --- a/lib/IRGen/IRGenSIL.cpp +++ b/lib/IRGen/IRGenSIL.cpp @@ -415,9 +415,6 @@ class IRGenSILFunction : // value which would be returned directly cannot fit 
into registers. Address IndirectReturn; - /// The unique block that calls @llvm.coro.end. - llvm::BasicBlock *CoroutineExitBlock = nullptr; - // A cached dominance analysis. std::unique_ptr Dominance; @@ -758,7 +755,7 @@ class IRGenSILFunction : if (IGM.IRGen.Opts.DisableDebuggerShadowCopies || IGM.IRGen.Opts.shouldOptimize() || IsAnonymous || isa(Storage) || isa(Storage) || - Storage->getType() == IGM.RefCountedPtrTy || !needsShadowCopy(Storage)) + !needsShadowCopy(Storage)) return Storage; // Emit a shadow copy. @@ -1090,22 +1087,11 @@ class IRGenSILFunction : void visitCheckedCastValueBranchInst(CheckedCastValueBranchInst *i); void visitCheckedCastAddrBranchInst(CheckedCastAddrBranchInst *i); - void visitGetAsyncContinuationInst(GetAsyncContinuationInst *i) { - //TODO(async) - llvm_unreachable("not implemented"); - } - void visitGetAsyncContinuationAddrInst(GetAsyncContinuationAddrInst *i) { - //TODO(async) - llvm_unreachable("not implemented"); - } - void visitAwaitAsyncContinuationInst(AwaitAsyncContinuationInst *i) { - //TODO(async) - llvm_unreachable("not implemented"); - } + void visitGetAsyncContinuationInst(GetAsyncContinuationInst *i); + void visitGetAsyncContinuationAddrInst(GetAsyncContinuationAddrInst *i); + void visitAwaitAsyncContinuationInst(AwaitAsyncContinuationInst *i); - void visitHopToExecutorInst(HopToExecutorInst *i) { - //TODO(async) - } + void visitHopToExecutorInst(HopToExecutorInst *i); void visitKeyPathInst(KeyPathInst *I); @@ -1516,7 +1502,8 @@ IRGenSILFunction::IRGenSILFunction(IRGenModule &IGM, SILFunction *f) IGM.createReplaceableProlog(*this, f); } - setAsync(f->getLoweredFunctionType()->isAsync()); + if (f->getLoweredFunctionType()->isAsync()) + setupAsync(); } IRGenSILFunction::~IRGenSILFunction() { @@ -3170,28 +3157,28 @@ void IRGenSILFunction::visitUnreachableInst(swift::UnreachableInst *i) { Builder.CreateUnreachable(); } -static void emitCoroutineOrAsyncExit(IRGenSILFunction &IGF) { +void 
IRGenFunction::emitCoroutineOrAsyncExit() { // The LLVM coroutine representation demands that there be a // unique call to llvm.coro.end. // If the coroutine exit block already exists, just branch to it. - if (auto coroEndBB = IGF.CoroutineExitBlock) { - IGF.Builder.CreateBr(coroEndBB); + if (auto coroEndBB = CoroutineExitBlock) { + Builder.CreateBr(coroEndBB); return; } // Otherwise, create it and branch to it. - auto coroEndBB = IGF.createBasicBlock("coro.end"); - IGF.CoroutineExitBlock = coroEndBB; - IGF.Builder.CreateBr(coroEndBB); + auto coroEndBB = createBasicBlock("coro.end"); + CoroutineExitBlock = coroEndBB; + Builder.CreateBr(coroEndBB); // Emit the block. - IGF.Builder.emitBlock(coroEndBB); - auto handle = IGF.getCoroutineHandle(); - IGF.Builder.CreateIntrinsicCall(llvm::Intrinsic::coro_end, - {handle, - /*is unwind*/ IGF.Builder.getFalse()}); - IGF.Builder.CreateUnreachable(); + Builder.emitBlock(coroEndBB); + auto handle = getCoroutineHandle(); + Builder.CreateIntrinsicCall(llvm::Intrinsic::coro_end, + {handle, + /*is unwind*/ Builder.getFalse()}); + Builder.CreateUnreachable(); } static void emitReturnInst(IRGenSILFunction &IGF, @@ -3202,7 +3189,7 @@ static void emitReturnInst(IRGenSILFunction &IGF, if (IGF.isCoroutine() && !IGF.isAsync()) { assert(result.empty() && "coroutines do not currently support non-void returns"); - emitCoroutineOrAsyncExit(IGF); + IGF.emitCoroutineOrAsyncExit(); return; } @@ -3234,7 +3221,7 @@ static void emitReturnInst(IRGenSILFunction &IGF, .initialize(IGF, result, fieldAddr, /*isOutlined*/ false); } emitAsyncReturn(IGF, layout, fnType); - emitCoroutineOrAsyncExit(IGF); + IGF.emitCoroutineOrAsyncExit(); } else { auto funcLang = IGF.CurSILFn->getLoweredFunctionType()->getLanguage(); auto swiftCCReturn = funcLang == SILFunctionLanguage::Swift; @@ -3277,7 +3264,7 @@ void IRGenSILFunction::visitThrowInst(swift::ThrowInst *i) { if (isAsync()) { auto layout = getAsyncContextLayout(*this); emitAsyncReturn(*this, layout, 
i->getFunction()->getLoweredFunctionType()); - emitCoroutineOrAsyncExit(*this); + emitCoroutineOrAsyncExit(); return; } @@ -3294,7 +3281,7 @@ void IRGenSILFunction::visitThrowInst(swift::ThrowInst *i) { void IRGenSILFunction::visitUnwindInst(swift::UnwindInst *i) { // Just call coro.end; there's no need to distinguish 'unwind' // and 'return' at the LLVM level. - emitCoroutineOrAsyncExit(*this); + emitCoroutineOrAsyncExit(); } void IRGenSILFunction::visitYieldInst(swift::YieldInst *i) { @@ -5726,6 +5713,13 @@ void IRGenSILFunction::visitCheckedCastAddrBranchInst( getLoweredBB(i->getFailureBB()).bb); } +void IRGenSILFunction::visitHopToExecutorInst(HopToExecutorInst *i) { + llvm::Value *resumeFn = Builder.CreateIntrinsicCall( + llvm::Intrinsic::coro_async_resume, {}); + + emitSuspensionPoint(getLoweredSingletonExplosion(i->getOperand()), resumeFn); +} + void IRGenSILFunction::visitKeyPathInst(swift::KeyPathInst *I) { auto pattern = IGM.getAddrOfKeyPathPattern(I->getPattern(), I->getLoc()); // Build up the argument vector to instantiate the pattern here. 
@@ -6380,3 +6374,45 @@ void IRGenModule::emitSILStaticInitializers() { IRGlobal->setInitializer(emitConstantTuple(*this, TI)); } } + +void IRGenSILFunction::visitGetAsyncContinuationInst( + GetAsyncContinuationInst *i) { + Explosion out; + emitGetAsyncContinuation(i->getLoweredResumeType(), StackAddress(), out); + setLoweredExplosion(i, out); +} + +void IRGenSILFunction::visitGetAsyncContinuationAddrInst( + GetAsyncContinuationAddrInst *i) { + auto resultAddr = getLoweredStackAddress(i->getOperand()); + Explosion out; + emitGetAsyncContinuation(i->getLoweredResumeType(), resultAddr, out); + setLoweredExplosion(i, out); +} + +void IRGenSILFunction::visitAwaitAsyncContinuationInst( + AwaitAsyncContinuationInst *i) { + Explosion resumeResult; + + bool isIndirect = i->getResumeBB()->args_empty(); + SILType resumeTy; + if (!isIndirect) + resumeTy = (*i->getResumeBB()->args_begin())->getType(); + + auto &normalDest = getLoweredBB(i->getResumeBB()); + auto *normalDestBB = normalDest.bb; + + bool hasError = i->getErrorBB() != nullptr; + auto *errorDestBB = hasError ? getLoweredBB(i->getErrorBB()).bb : nullptr; + auto *errorPhi = hasError ? 
getLoweredBB(i->getErrorBB()).phis[0] : nullptr; + assert(!hasError || getLoweredBB(i->getErrorBB()).phis.size() == 1 && + "error basic block should only expect one value"); + + emitAwaitAsyncContinuation(resumeTy, isIndirect, resumeResult, + normalDestBB, errorPhi, errorDestBB); + if (!isIndirect) { + unsigned firstIndex = 0; + addIncomingExplosionToPHINodes(*this, normalDest, firstIndex, resumeResult); + assert(firstIndex == normalDest.phis.size()); + } +} diff --git a/lib/IRGen/MetadataRequest.cpp b/lib/IRGen/MetadataRequest.cpp index 8da780517d5df..b81ca9632ea32 100644 --- a/lib/IRGen/MetadataRequest.cpp +++ b/lib/IRGen/MetadataRequest.cpp @@ -1297,6 +1297,18 @@ namespace { return emitDirectMetadataRef(type); } + MetadataResponse + visitBuiltinRawUnsafeContinuationType(CanBuiltinRawUnsafeContinuationType type, + DynamicMetadataRequest request) { + return emitDirectMetadataRef(type); + } + + MetadataResponse + visitBuiltinJobType(CanBuiltinJobType type, + DynamicMetadataRequest request) { + return emitDirectMetadataRef(type); + } + MetadataResponse visitBuiltinFloatType(CanBuiltinFloatType type, DynamicMetadataRequest request) { diff --git a/lib/IRGen/StructLayout.cpp b/lib/IRGen/StructLayout.cpp index 77701453230dd..0e8330554b2b3 100644 --- a/lib/IRGen/StructLayout.cpp +++ b/lib/IRGen/StructLayout.cpp @@ -19,6 +19,7 @@ #include "llvm/IR/DerivedTypes.h" #include "swift/AST/ASTContext.h" #include "swift/AST/DiagnosticsIRGen.h" +#include "swift/ABI/MetadataValues.h" #include "BitPatternBuilder.h" #include "FixedTypeInfo.h" @@ -199,6 +200,27 @@ void StructLayoutBuilder::addNSObjectHeader() { headerSize = CurSize; } +void StructLayoutBuilder::addDefaultActorHeader() { + assert(StructFields.size() == 1 && + StructFields[0] == IGM.RefCountedStructTy && + "adding default actor header at wrong offset"); + + // These must match the DefaultActor class in Actor.h. 
+ auto size = NumWords_DefaultActor * IGM.getPointerSize(); + auto align = Alignment(Alignment_DefaultActor); + auto ty = llvm::ArrayType::get(IGM.Int8PtrTy, NumWords_DefaultActor); + + // Note that we align the *entire structure* to the new alignment, + // not the storage we're adding. Otherwise we would potentially + // get internal padding. + assert(CurSize.isMultipleOf(IGM.getPointerSize())); + assert(align >= CurAlignment); + CurSize += size; + CurAlignment = align; + StructFields.push_back(ty); + headerSize = CurSize; +} + bool StructLayoutBuilder::addFields(llvm::MutableArrayRef elts, LayoutStrategy strategy) { // Track whether we've added any storage to our layout. diff --git a/lib/IRGen/StructLayout.h b/lib/IRGen/StructLayout.h index 541c2cd268988..7d5ab54b16d89 100644 --- a/lib/IRGen/StructLayout.h +++ b/lib/IRGen/StructLayout.h @@ -295,6 +295,9 @@ class StructLayoutBuilder { /// Add the NSObject object header to the layout. This must be the first /// thing added to the layout. void addNSObjectHeader(); + /// Add the default-actor header to the layout. This must be the second + /// thing added to the layout, following the Swift heap header. + void addDefaultActorHeader(); /// Add a number of fields to the layout. The field layouts need /// only have the TypeInfo set; the rest will be filled out. 
diff --git a/lib/Parse/ParseDecl.cpp b/lib/Parse/ParseDecl.cpp index 159b2bae102ee..caa4174c2a610 100644 --- a/lib/Parse/ParseDecl.cpp +++ b/lib/Parse/ParseDecl.cpp @@ -644,6 +644,14 @@ bool Parser::parseSpecializeAttributeArguments( if (ParamLabel == "exported") { Exported = isTrue; } + if (Exported == true) { + const LangOptions &LangOpts = Context.LangOpts; + if (!LangOpts.EnableExperimentalPrespecialization) { + diagnose(Tok.getLoc(), + diag::attr_specialize_unsupported_exported_true, + ParamLabel); + } + } } if (ParamLabel == "kind") { SourceLoc paramValueLoc; @@ -930,9 +938,7 @@ bool Parser::parseDifferentiabilityParametersClause( return true; } Identifier paramName; - if (parseIdentifier(paramName, paramLoc, - diag::diff_params_clause_expected_parameter)) - return true; + paramLoc = consumeIdentifier(paramName, /*diagnoseDollarPrefix=*/false); parameters.push_back( ParsedAutoDiffParameter::getNamedParameter(paramLoc, paramName)); break; @@ -1966,8 +1972,8 @@ bool Parser::parseNewDeclAttribute(DeclAttributes &Attributes, SourceLoc AtLoc, } Identifier name; - consumeIdentifier(&name); - + consumeIdentifier(name, /*diagnoseDollarPrefix=*/false); + auto range = SourceRange(Loc, Tok.getRange().getStart()); if (!consumeIf(tok::r_paren)) { @@ -2545,7 +2551,7 @@ bool Parser::parseNewDeclAttribute(DeclAttributes &Attributes, SourceLoc AtLoc, } Identifier name; - consumeIdentifier(&name, /*allowDollarIdentifier=*/true); + consumeIdentifier(name, /*diagnoseDollarPrefix=*/false); auto range = SourceRange(Loc, Tok.getRange().getStart()); @@ -4418,6 +4424,7 @@ ParserResult Parser::parseDeclImport(ParseDeclOptions Flags, } importPath.push_back(Identifier(), Tok.getLoc()); if (parseAnyIdentifier(importPath.back().Item, + /*diagnoseDollarPrefix=*/false, diag::expected_identifier_in_decl, "import")) return nullptr; HasNext = consumeIf(tok::period); @@ -4543,7 +4550,7 @@ parseIdentifierDeclName(Parser &P, Identifier &Result, SourceLoc &Loc, StringRef DeclKindName, 
llvm::function_ref canRecover) { if (P.Tok.is(tok::identifier)) { - Loc = P.consumeIdentifier(&Result); + Loc = P.consumeIdentifier(Result, /*diagnoseDollarPrefix=*/true); // We parsed an identifier for the declaration. If we see another // identifier, it might've been a single identifier that got broken by a @@ -5457,7 +5464,7 @@ static ParameterList *parseOptionalAccessorArgument(SourceLoc SpecifierLoc, EndLoc = StartLoc; } else { // We have a name. - NameLoc = P.consumeIdentifier(&Name); + NameLoc = P.consumeIdentifier(Name, /*diagnoseDollarPrefix=*/true); auto DiagID = Kind == AccessorKind::Set ? diag::expected_rparen_set_name : @@ -7737,7 +7744,7 @@ Parser::parseDeclOperatorImpl(SourceLoc OperatorLoc, Identifier Name, SyntaxKind::IdentifierList); Identifier name; - auto loc = consumeIdentifier(&name); + auto loc = consumeIdentifier(name, /*diagnoseDollarPrefix=*/false); identifiers.emplace_back(name, loc); while (Tok.is(tok::comma)) { @@ -7745,7 +7752,7 @@ Parser::parseDeclOperatorImpl(SourceLoc OperatorLoc, Identifier Name, if (Tok.is(tok::identifier)) { Identifier name; - auto loc = consumeIdentifier(&name); + auto loc = consumeIdentifier(name, /*diagnoseDollarPrefix=*/false); identifiers.emplace_back(name, loc); } else { if (Tok.isNot(tok::eof)) { @@ -7762,7 +7769,7 @@ Parser::parseDeclOperatorImpl(SourceLoc OperatorLoc, Identifier Name, SyntaxKind::IdentifierList); Identifier name; - auto nameLoc = consumeIdentifier(&name); + auto nameLoc = consumeIdentifier(name, /*diagnoseDollarPrefix=*/false); identifiers.emplace_back(name, nameLoc); if (isPrefix || isPostfix) { @@ -7833,7 +7840,8 @@ Parser::parseDeclPrecedenceGroup(ParseDeclOptions flags, Identifier name; SourceLoc nameLoc; - if (parseIdentifier(name, nameLoc, diag::expected_precedencegroup_name)) { + if (parseIdentifier(name, nameLoc, /*diagnoseDollarPrefix=*/true, + diag::expected_precedencegroup_name)) { // If the identifier is missing or a keyword or something, try to // skip the entire body. 
if (!Tok.isAtStartOfLine() && Tok.isNot(tok::eof) && @@ -8039,7 +8047,8 @@ Parser::parseDeclPrecedenceGroup(ParseDeclOptions flags, return abortBody(); } Identifier name; - SourceLoc nameLoc = consumeIdentifier(&name); + SourceLoc nameLoc = consumeIdentifier(name, + /*diagnoseDollarPrefix=*/false); relations.push_back({nameLoc, name, nullptr}); if (skipUnspacedCodeCompleteToken()) diff --git a/lib/Parse/ParseExpr.cpp b/lib/Parse/ParseExpr.cpp index 784c27b561384..6530aad40034f 100644 --- a/lib/Parse/ParseExpr.cpp +++ b/lib/Parse/ParseExpr.cpp @@ -436,6 +436,13 @@ ParserResult Parser::parseExprSequenceElement(Diag<> message, : parseExprUnary(message, isExprBasic); if (hadTry && !sub.hasCodeCompletion() && !sub.isNull()) { + // "await" must precede "try". + if (auto await = dyn_cast(sub.get())) { + diagnose(await->getLoc(), diag::try_before_await) + .fixItRemove(await->getLoc()) + .fixItInsert(tryLoc, "await "); + } + ElementContext.setCreateSyntax(SyntaxKind::TryExpr); switch (trySuffix ? trySuffix->getKind() : tok::NUM_TOKENS) { case tok::exclaim_postfix: @@ -1500,7 +1507,7 @@ ParserResult Parser::parseExprPrimary(Diag<> ID, bool isExprBasic) { peekToken().isNot(tok::period, tok::period_prefix, tok::l_paren)) { DeferringContextRAII Deferring(*SyntaxContext); Identifier name; - SourceLoc loc = consumeIdentifier(&name, /*allowDollarIdentifier=*/true); + SourceLoc loc = consumeIdentifier(name, /*diagnoseDollarPrefix=*/false); auto introducer = (InVarOrLetPattern != IVOLP_InVar ? 
VarDecl::Introducer::Let : VarDecl::Introducer::Var); @@ -2114,8 +2121,7 @@ DeclNameRef Parser::parseDeclNameRef(DeclNameLoc &loc, SourceLoc baseNameLoc; if (Tok.isAny(tok::identifier, tok::kw_Self, tok::kw_self)) { Identifier baseNameId; - baseNameLoc = consumeIdentifier( - &baseNameId, /*allowDollarIdentifier=*/true); + baseNameLoc = consumeIdentifier(baseNameId, /*diagnoseDollarPrefix=*/false); baseName = baseNameId; } else if (flags.contains(DeclNameFlag::AllowOperators) && Tok.isAnyOperator()) { @@ -2490,7 +2496,7 @@ parseClosureSignatureIfPresent(SourceRange &bracketRange, } else { // Otherwise, the name is a new declaration. - consumeIdentifier(&name); + consumeIdentifier(name, /*diagnoseDollarPrefix=*/true); equalLoc = consumeToken(tok::equal); auto ExprResult = parseExpr(diag::expected_init_capture_specifier); @@ -2569,7 +2575,7 @@ parseClosureSignatureIfPresent(SourceRange &bracketRange, Identifier name; SourceLoc nameLoc; if (Tok.is(tok::identifier)) { - nameLoc = consumeIdentifier(&name); + nameLoc = consumeIdentifier(name, /*diagnoseDollarPrefix=*/true); } else { nameLoc = consumeToken(tok::kw__); } @@ -3233,7 +3239,7 @@ ParserResult Parser::parseExprPoundUnknown(SourceLoc LSquareLoc) { PoundLoc.getAdvancedLoc(1) == Tok.getLoc()); Identifier Name; - SourceLoc NameLoc = consumeIdentifier(&Name); + SourceLoc NameLoc = consumeIdentifier(Name, /*diagnoseDollarPrefix=*/false); // Parse arguments if exist. SourceLoc LParenLoc, RParenLoc; @@ -3670,6 +3676,7 @@ Parser::parsePlatformVersionConstraintSpec() { } if (parseIdentifier(PlatformIdentifier, PlatformLoc, + /*diagnoseDollarPrefix=*/false, diag::avail_query_expected_platform_name)) { return nullptr; } diff --git a/lib/Parse/ParseGeneric.cpp b/lib/Parse/ParseGeneric.cpp index c0095cc656ab7..64ba485f837db 100644 --- a/lib/Parse/ParseGeneric.cpp +++ b/lib/Parse/ParseGeneric.cpp @@ -71,7 +71,7 @@ Parser::parseGenericParametersBeforeWhere(SourceLoc LAngleLoc, // Parse the name of the parameter. 
Identifier Name; SourceLoc NameLoc; - if (parseIdentifier(Name, NameLoc, + if (parseIdentifier(Name, NameLoc, /*diagnoseDollarPrefix=*/true, diag::expected_generics_parameter_name)) { Result.setIsParseError(); break; @@ -307,7 +307,8 @@ ParserStatus Parser::parseGenericWhereClause( ->isKnownLayout()) { // Parse a layout constraint. Identifier LayoutName; - auto LayoutLoc = consumeIdentifier(&LayoutName); + auto LayoutLoc = consumeIdentifier(LayoutName, + /*diagnoseDollarPrefix=*/false); auto LayoutInfo = parseLayoutConstraint(LayoutName); if (!LayoutInfo->isKnownLayout()) { // There was a bug in the layout constraint. diff --git a/lib/Parse/ParsePattern.cpp b/lib/Parse/ParsePattern.cpp index 012c33bfdfac2..dc1e11cdde5f4 100644 --- a/lib/Parse/ParsePattern.cpp +++ b/lib/Parse/ParsePattern.cpp @@ -975,7 +975,7 @@ ParserResult Parser::parsePattern() { case tok::identifier: { PatternCtx.setCreateSyntax(SyntaxKind::IdentifierPattern); Identifier name; - SourceLoc loc = consumeIdentifier(&name); + SourceLoc loc = consumeIdentifier(name, /*diagnoseDollarPrefix=*/true); if (Tok.isIdentifierOrUnderscore() && !Tok.isContextualDeclKeyword()) diagnoseConsecutiveIDs(name.str(), loc, introducer == VarDecl::Introducer::Let @@ -1054,7 +1054,7 @@ Parser::parsePatternTupleElement() { // If the tuple element has a label, parse it. 
if (Tok.is(tok::identifier) && peekToken().is(tok::colon)) { - LabelLoc = consumeIdentifier(&Label); + LabelLoc = consumeIdentifier(Label, /*diagnoseDollarPrefix=*/true); consumeToken(tok::colon); } diff --git a/lib/Parse/ParseStmt.cpp b/lib/Parse/ParseStmt.cpp index bcd4464ae7a45..b0b16fdab34b7 100644 --- a/lib/Parse/ParseStmt.cpp +++ b/lib/Parse/ParseStmt.cpp @@ -192,7 +192,7 @@ static bool isAtStartOfSwitchCase(Parser &parser, backtrack.emplace(parser); parser.consumeToken(tok::at_sign); - parser.consumeIdentifier(); + parser.consumeToken(tok::identifier); if (parser.Tok.is(tok::l_paren)) parser.skipSingle(); } @@ -536,7 +536,8 @@ ParserResult Parser::parseStmt() { // If this is a label on a loop/switch statement, consume it and pass it into // parsing logic below. if (Tok.is(tok::identifier) && peekToken().is(tok::colon)) { - LabelInfo.Loc = consumeIdentifier(&LabelInfo.Name); + LabelInfo.Loc = consumeIdentifier(LabelInfo.Name, + /*diagnoseDollarPrefix=*/true); consumeToken(tok::colon); } @@ -687,7 +688,7 @@ static ParserStatus parseOptionalControlTransferTarget(Parser &P, if (!P.Tok.isAtStartOfLine()) { if (P.Tok.is(tok::identifier) && !P.isStartOfStmt() && !P.isStartOfSwiftDecl()) { - TargetLoc = P.consumeIdentifier(&Target); + TargetLoc = P.consumeIdentifier(Target, /*diagnoseDollarPrefix=*/false); return makeParserSuccess(); } else if (P.Tok.is(tok::code_complete)) { if (P.CodeCompletion) @@ -2486,7 +2487,7 @@ ParserResult Parser::parseStmtCase(bool IsActive) { diagnose(UnknownAttrLoc, diag::previous_attribute, false); consumeToken(tok::at_sign); } - consumeIdentifier(); + consumeToken(tok::identifier); SyntaxParsingContext Args(SyntaxContext, SyntaxKind::TokenList); if (Tok.is(tok::l_paren)) { @@ -2494,9 +2495,11 @@ ParserResult Parser::parseStmtCase(bool IsActive) { skipSingle(); } } else { + assert(peekToken().is(tok::identifier) && "isAtStartOfSwitchCase() lied"); + consumeToken(tok::at_sign); diagnose(Tok, diag::unknown_attribute, Tok.getText()); - 
consumeIdentifier(); + consumeToken(tok::identifier); SyntaxParsingContext Args(SyntaxContext, SyntaxKind::TokenList); if (Tok.is(tok::l_paren)) diff --git a/lib/Parse/Parser.cpp b/lib/Parse/Parser.cpp index a072c64df0bb5..3bd38012f858c 100644 --- a/lib/Parse/Parser.cpp +++ b/lib/Parse/Parser.cpp @@ -903,12 +903,12 @@ bool Parser::StructureMarkerRAII::pushStructureMarker( //===----------------------------------------------------------------------===// bool Parser::parseIdentifier(Identifier &Result, SourceLoc &Loc, - const Diagnostic &D) { + const Diagnostic &D, bool diagnoseDollarPrefix) { switch (Tok.getKind()) { case tok::kw_self: case tok::kw_Self: case tok::identifier: - Loc = consumeIdentifier(&Result); + Loc = consumeIdentifier(Result, diagnoseDollarPrefix); return false; default: checkForInputIncomplete(); @@ -930,9 +930,10 @@ bool Parser::parseSpecificIdentifier(StringRef expected, SourceLoc &loc, /// parseAnyIdentifier - Consume an identifier or operator if present and return /// its name in Result. Otherwise, emit an error and return true. 
bool Parser::parseAnyIdentifier(Identifier &Result, SourceLoc &Loc, - const Diagnostic &D) { + const Diagnostic &D, + bool diagnoseDollarPrefix) { if (Tok.is(tok::identifier)) { - Loc = consumeIdentifier(&Result); + Loc = consumeIdentifier(Result, diagnoseDollarPrefix); return false; } diff --git a/lib/SIL/IR/OperandOwnership.cpp b/lib/SIL/IR/OperandOwnership.cpp index d541489df2495..eda62d97dbfe1 100644 --- a/lib/SIL/IR/OperandOwnership.cpp +++ b/lib/SIL/IR/OperandOwnership.cpp @@ -148,7 +148,6 @@ INTERIOR_POINTER_PROJECTION(RefTailAddr) UseLifetimeConstraint::USE_LIFETIME_CONSTRAINT}; \ } CONSTANT_OWNERSHIP_INST(Guaranteed, NonLifetimeEnding, OpenExistentialValue) -CONSTANT_OWNERSHIP_INST(Guaranteed, NonLifetimeEnding, OpenExistentialBoxValue) CONSTANT_OWNERSHIP_INST(Guaranteed, NonLifetimeEnding, OpenExistentialBox) CONSTANT_OWNERSHIP_INST(Guaranteed, NonLifetimeEnding, HopToExecutor) CONSTANT_OWNERSHIP_INST(Owned, LifetimeEnding, AutoreleaseValue) @@ -289,6 +288,7 @@ ACCEPTS_ANY_OWNERSHIP_INST(ConvertEscapeToNoEscape) #define FORWARD_ANY_OWNERSHIP_INST(INST) \ OwnershipConstraint OwnershipConstraintClassifier::visit##INST##Inst( \ INST##Inst *i) { \ + assert(isa(i)); \ auto kind = i->getOwnershipKind(); \ auto lifetimeConstraint = kind.getForwardingLifetimeConstraint(); \ return {kind, lifetimeConstraint}; \ @@ -314,27 +314,22 @@ FORWARD_ANY_OWNERSHIP_INST(DestructureTuple) #undef FORWARD_ANY_OWNERSHIP_INST // An instruction that forwards a constant ownership or trivial ownership. 
-#define FORWARD_CONSTANT_OR_NONE_OWNERSHIP_INST(OWNERSHIP, \ - USE_LIFETIME_CONSTRAINT, INST) \ +#define FORWARD_CONSTANT_OR_NONE_OWNERSHIP_INST(OWNERSHIP, INST) \ OwnershipConstraint OwnershipConstraintClassifier::visit##INST##Inst( \ INST##Inst *i) { \ assert(i->getNumOperands() && "Expected to have non-zero operands"); \ - assert(isGuaranteedForwardingValueKind(SILNodeKind(i->getKind())) && \ - "Expected an ownership forwarding inst"); \ - return {OwnershipKind::OWNERSHIP, \ - UseLifetimeConstraint::USE_LIFETIME_CONSTRAINT}; \ + assert(isa(i)); \ + ValueOwnershipKind kind = OwnershipKind::OWNERSHIP; \ + return {kind, kind.getForwardingLifetimeConstraint()}; \ } -FORWARD_CONSTANT_OR_NONE_OWNERSHIP_INST(Guaranteed, NonLifetimeEnding, - TupleExtract) -FORWARD_CONSTANT_OR_NONE_OWNERSHIP_INST(Guaranteed, NonLifetimeEnding, - StructExtract) -FORWARD_CONSTANT_OR_NONE_OWNERSHIP_INST(Guaranteed, NonLifetimeEnding, +FORWARD_CONSTANT_OR_NONE_OWNERSHIP_INST(Guaranteed, TupleExtract) +FORWARD_CONSTANT_OR_NONE_OWNERSHIP_INST(Guaranteed, StructExtract) +FORWARD_CONSTANT_OR_NONE_OWNERSHIP_INST(Guaranteed, DifferentiableFunctionExtract) -FORWARD_CONSTANT_OR_NONE_OWNERSHIP_INST(Guaranteed, NonLifetimeEnding, - LinearFunctionExtract) -FORWARD_CONSTANT_OR_NONE_OWNERSHIP_INST(Owned, LifetimeEnding, - MarkUninitialized) -#undef CONSTANT_OR_NONE_OWNERSHIP_INST +FORWARD_CONSTANT_OR_NONE_OWNERSHIP_INST(Guaranteed, LinearFunctionExtract) +FORWARD_CONSTANT_OR_NONE_OWNERSHIP_INST(Guaranteed, OpenExistentialBoxValue) +FORWARD_CONSTANT_OR_NONE_OWNERSHIP_INST(Owned, MarkUninitialized) +#undef FORWARD_CONSTANT_OR_NONE_OWNERSHIP_INST OwnershipConstraint OwnershipConstraintClassifier::visitDeallocPartialRefInst( DeallocPartialRefInst *i) { @@ -880,6 +875,9 @@ CONSTANT_OWNERSHIP_BUILTIN(Owned, LifetimeEnding, UnsafeGuaranteed) CONSTANT_OWNERSHIP_BUILTIN(Guaranteed, NonLifetimeEnding, CancelAsyncTask) CONSTANT_OWNERSHIP_BUILTIN(Guaranteed, NonLifetimeEnding, CreateAsyncTask) 
CONSTANT_OWNERSHIP_BUILTIN(Guaranteed, NonLifetimeEnding, CreateAsyncTaskFuture) +CONSTANT_OWNERSHIP_BUILTIN(None, NonLifetimeEnding, AutoDiffCreateLinearMapContext) +CONSTANT_OWNERSHIP_BUILTIN(Guaranteed, NonLifetimeEnding, AutoDiffAllocateSubcontext) +CONSTANT_OWNERSHIP_BUILTIN(Guaranteed, NonLifetimeEnding, AutoDiffProjectTopLevelSubcontext) #undef CONSTANT_OWNERSHIP_BUILTIN @@ -922,10 +920,17 @@ Optional Operand::getOwnershipConstraint() const { // We do not ever call this function when an instruction isn't in a block. assert(getUser()->getParent() && "Can not lookup ownership constraint unless inserted into block"); - if (auto *block = getUser()->getParent()) - if (auto *func = block->getParent()) - if (!func->hasOwnership()) - return {{OwnershipKind::Any, UseLifetimeConstraint::NonLifetimeEnding}}; + if (auto *block = getUser()->getParent()) { + auto *func = block->getParent(); + if (!func) { + // If we don't have a function, then we must have a SILGlobalVariable. In + // that case, we act as if we aren't in ownership. 
+ return {{OwnershipKind::Any, UseLifetimeConstraint::NonLifetimeEnding}}; + } + + if (!func->hasOwnership()) + return {{OwnershipKind::Any, UseLifetimeConstraint::NonLifetimeEnding}}; + } OwnershipConstraintClassifier classifier(getUser()->getModule(), *this); return classifier.visit(const_cast(getUser())); diff --git a/lib/SIL/IR/SILBuilder.cpp b/lib/SIL/IR/SILBuilder.cpp index ff246686d696d..7733df3206f96 100644 --- a/lib/SIL/IR/SILBuilder.cpp +++ b/lib/SIL/IR/SILBuilder.cpp @@ -637,15 +637,18 @@ DebugValueAddrInst *SILBuilder::createDebugValueAddr(SILLocation Loc, void SILBuilder::emitScopedBorrowOperation(SILLocation loc, SILValue original, function_ref &&fun) { - if (original->getType().isAddress()) { - original = createLoadBorrow(loc, original); + SILValue value = original; + if (value->getType().isAddress()) { + value = createLoadBorrow(loc, value); } else { - original = createBeginBorrow(loc, original); + value = emitBeginBorrowOperation(loc, value); } - fun(original); + fun(value); - createEndBorrow(loc, original); + // If we actually inserted a borrowing operation... insert the end_borrow. + if (value != original) + createEndBorrow(loc, value); } CheckedCastBranchInst *SILBuilder::createCheckedCastBranch( diff --git a/lib/SIL/IR/SILDeclRef.cpp b/lib/SIL/IR/SILDeclRef.cpp index 93f9944bb1ba1..6c08df27688ba 100644 --- a/lib/SIL/IR/SILDeclRef.cpp +++ b/lib/SIL/IR/SILDeclRef.cpp @@ -764,6 +764,9 @@ std::string SILDeclRef::mangle(ManglingKind MKind) const { case SILDeclRef::ManglingKind::DynamicThunk: SKind = ASTMangler::SymbolKind::DynamicThunk; break; + case SILDeclRef::ManglingKind::AsyncHandlerBody: + SKind = ASTMangler::SymbolKind::AsyncHandlerBody; + break; } switch (kind) { @@ -1227,3 +1230,13 @@ bool SILDeclRef::isDynamicallyReplaceable() const { // enabled. 
return decl->shouldUseNativeMethodReplacement(); } + +bool SILDeclRef::hasAsync() const { + if (hasDecl()) { + if (auto afd = dyn_cast(getDecl())) { + return afd->hasAsync(); + } + return false; + } + return getAbstractClosureExpr()->isBodyAsync(); +} diff --git a/lib/SIL/IR/SILInstruction.cpp b/lib/SIL/IR/SILInstruction.cpp index 540c49c086f5c..165648db7b449 100644 --- a/lib/SIL/IR/SILInstruction.cpp +++ b/lib/SIL/IR/SILInstruction.cpp @@ -1105,6 +1105,11 @@ bool SILInstruction::mayRelease() const { default: llvm_unreachable("Unhandled releasing instruction!"); + case SILInstructionKind::GetAsyncContinuationInst: + case SILInstructionKind::GetAsyncContinuationAddrInst: + case SILInstructionKind::AwaitAsyncContinuationInst: + return false; + case SILInstructionKind::ApplyInst: case SILInstructionKind::TryApplyInst: case SILInstructionKind::BeginApplyInst: @@ -1306,6 +1311,12 @@ bool SILInstruction::isTriviallyDuplicatable() const { if (isa(this)) return false; + // Can't duplicate get/await_async_continuation. + if (isa(this) || + isa(this) || + isa(this)) + return false; + // If you add more cases here, you should also update SILLoop:canDuplicate. return true; diff --git a/lib/SIL/IR/SILInstructions.cpp b/lib/SIL/IR/SILInstructions.cpp index b5497a3ad31b2..8ad71be58ad0d 100644 --- a/lib/SIL/IR/SILInstructions.cpp +++ b/lib/SIL/IR/SILInstructions.cpp @@ -724,7 +724,8 @@ DifferentiableFunctionExtractInst::DifferentiableFunctionExtractInst( : UnaryInstructionBase(debugLoc, function, extracteeType ? 
*extracteeType - : getExtracteeType(function, extractee, module)), + : getExtracteeType(function, extractee, module), + function.getOwnershipKind()), Extractee(extractee), HasExplicitExtracteeType(extracteeType.hasValue()) { #ifndef NDEBUG if (extracteeType.hasValue()) { @@ -763,7 +764,8 @@ LinearFunctionExtractInst::LinearFunctionExtractInst( SILModule &module, SILDebugLocation debugLoc, LinearDifferentiableFunctionTypeComponent extractee, SILValue function) : UnaryInstructionBase(debugLoc, function, - getExtracteeType(function, extractee, module)), + getExtracteeType(function, extractee, module), + function.getOwnershipKind()), extractee(extractee) {} SILType DifferentiabilityWitnessFunctionInst::getDifferentiabilityWitnessType( @@ -1363,12 +1365,6 @@ VarDecl *swift::getIndexedField(NominalTypeDecl *decl, unsigned index) { return nullptr; } -unsigned FieldIndexCacheBase::cacheFieldIndex() { - unsigned index = ::getFieldIndex(getParentDecl(), getField()); - SILInstruction::Bits.FieldIndexCacheBase.FieldIndex = index; - return index; -} - // FIXME: this should be cached during cacheFieldIndex(). 
bool StructExtractInst::isTrivialFieldOfOneRCIDStruct() const { auto *F = getFunction(); @@ -2108,13 +2104,13 @@ OpenExistentialBoxInst::OpenExistentialBoxInst( OpenExistentialBoxValueInst::OpenExistentialBoxValueInst( SILDebugLocation DebugLoc, SILValue operand, SILType ty) - : UnaryInstructionBase(DebugLoc, operand, ty) { -} + : UnaryInstructionBase(DebugLoc, operand, ty, operand.getOwnershipKind()) {} -OpenExistentialValueInst::OpenExistentialValueInst(SILDebugLocation DebugLoc, - SILValue Operand, - SILType SelfTy) - : UnaryInstructionBase(DebugLoc, Operand, SelfTy) {} +OpenExistentialValueInst::OpenExistentialValueInst(SILDebugLocation debugLoc, + SILValue operand, + SILType selfTy) + : UnaryInstructionBase(debugLoc, operand, selfTy, + operand.getOwnershipKind()) {} BeginCOWMutationInst::BeginCOWMutationInst(SILDebugLocation loc, SILValue operand, @@ -2895,11 +2891,6 @@ DestructureTupleInst *DestructureTupleInst::create(const SILFunction &F, DestructureTupleInst(M, Loc, Operand, Types, OwnershipKinds); } -CanType GetAsyncContinuationInstBase::getFormalResumeType() const { - // The resume type is the type argument to the continuation type. - return getType().castTo().getGenericArgs()[0]; -} - SILType GetAsyncContinuationInstBase::getLoweredResumeType() const { // The lowered resume type is the maximally-abstracted lowering of the // formal resume type. 
@@ -2909,12 +2900,6 @@ SILType GetAsyncContinuationInstBase::getLoweredResumeType() const { return M.Types.getLoweredType(AbstractionPattern::getOpaque(), formalType, c); } -bool GetAsyncContinuationInstBase::throws() const { - // The continuation throws if it's an UnsafeThrowingContinuation - return getType().castTo()->getDecl() - == getFunction()->getASTContext().getUnsafeThrowingContinuationDecl(); -} - ReturnInst::ReturnInst(SILFunction &func, SILDebugLocation debugLoc, SILValue returnValue) : UnaryInstructionBase(debugLoc, returnValue), diff --git a/lib/SIL/IR/SILPrinter.cpp b/lib/SIL/IR/SILPrinter.cpp index 58b0b0347b674..4ad950ecfd21e 100644 --- a/lib/SIL/IR/SILPrinter.cpp +++ b/lib/SIL/IR/SILPrinter.cpp @@ -2077,13 +2077,13 @@ class SILPrinter : public SILInstructionVisitor { void visitGetAsyncContinuationInst(GetAsyncContinuationInst *GI) { if (GI->throws()) *this << "[throws] "; - *this << '$' << GI->getFormalResumeType(); + *this << GI->getFormalResumeType(); } void visitGetAsyncContinuationAddrInst(GetAsyncContinuationAddrInst *GI) { if (GI->throws()) *this << "[throws] "; - *this << '$' << GI->getFormalResumeType() + *this << GI->getFormalResumeType() << ", " << getIDAndType(GI->getOperand()); } diff --git a/lib/SIL/IR/SILValue.cpp b/lib/SIL/IR/SILValue.cpp index a06aec238464b..c5741dfd86280 100644 --- a/lib/SIL/IR/SILValue.cpp +++ b/lib/SIL/IR/SILValue.cpp @@ -216,8 +216,6 @@ ValueOwnershipKind::ValueOwnershipKind(const SILFunction &F, SILType Type, case SILArgumentConvention::Direct_Guaranteed: value = OwnershipKind::Guaranteed; return; - case SILArgumentConvention::Direct_Deallocating: - llvm_unreachable("Not handled"); } } diff --git a/lib/SIL/IR/TypeLowering.cpp b/lib/SIL/IR/TypeLowering.cpp index 137418c1a70bf..ff6b658dca325 100644 --- a/lib/SIL/IR/TypeLowering.cpp +++ b/lib/SIL/IR/TypeLowering.cpp @@ -243,6 +243,8 @@ namespace { IMPL(BuiltinIntegerLiteral, Trivial) IMPL(BuiltinFloat, Trivial) IMPL(BuiltinRawPointer, Trivial) + 
IMPL(BuiltinRawUnsafeContinuation, Trivial) + IMPL(BuiltinJob, Trivial) IMPL(BuiltinNativeObject, Reference) IMPL(BuiltinBridgeObject, Reference) IMPL(BuiltinVector, Trivial) @@ -1076,7 +1078,7 @@ namespace { return; } - B.emitReleaseValueAndFold(loc, aggValue); + B.createReleaseValue(loc, aggValue, B.getDefaultAtomicity()); } void @@ -1254,7 +1256,7 @@ namespace { B.createDestroyValue(loc, value); return; } - B.emitReleaseValueAndFold(loc, value); + B.createReleaseValue(loc, value, B.getDefaultAtomicity()); } void emitLoweredDestroyValue(SILBuilder &B, SILLocation loc, SILValue value, @@ -1415,7 +1417,7 @@ namespace { B.createDestroyValue(loc, value); return; } - B.emitStrongReleaseAndFold(loc, value); + B.createStrongRelease(loc, value, B.getDefaultAtomicity()); } }; @@ -1501,13 +1503,13 @@ namespace { void emitDestroyAddress(SILBuilder &B, SILLocation loc, SILValue addr) const override { if (!isTrivial()) - B.emitDestroyAddrAndFold(loc, addr); + B.createDestroyAddr(loc, addr); } void emitDestroyRValue(SILBuilder &B, SILLocation loc, SILValue value) const override { if (!isTrivial()) - B.emitDestroyAddrAndFold(loc, value); + B.createDestroyAddr(loc, value); } SILValue emitCopyValue(SILBuilder &B, SILLocation loc, @@ -3014,6 +3016,10 @@ TypeConverter::checkForABIDifferences(SILModule &M, // trivial. if (auto fnTy1 = type1.getAs()) { if (auto fnTy2 = type2.getAs()) { + // Async/synchronous conversions always need a thunk. + if (fnTy1->isAsync() != fnTy2->isAsync()) + return ABIDifference::NeedsThunk; + // @convention(block) is a single retainable pointer so optionality // change is allowed. 
if (optionalityChange) diff --git a/lib/SIL/IR/ValueOwnership.cpp b/lib/SIL/IR/ValueOwnership.cpp index 1aaa9541a2d66..59b8bd324aba1 100644 --- a/lib/SIL/IR/ValueOwnership.cpp +++ b/lib/SIL/IR/ValueOwnership.cpp @@ -545,6 +545,9 @@ CONSTANT_OWNERSHIP_BUILTIN(None, GetCurrentAsyncTask) CONSTANT_OWNERSHIP_BUILTIN(None, CancelAsyncTask) CONSTANT_OWNERSHIP_BUILTIN(Owned, CreateAsyncTask) CONSTANT_OWNERSHIP_BUILTIN(Owned, CreateAsyncTaskFuture) +CONSTANT_OWNERSHIP_BUILTIN(Owned, AutoDiffCreateLinearMapContext) +CONSTANT_OWNERSHIP_BUILTIN(None, AutoDiffProjectTopLevelSubcontext) +CONSTANT_OWNERSHIP_BUILTIN(None, AutoDiffAllocateSubcontext) #undef CONSTANT_OWNERSHIP_BUILTIN @@ -591,10 +594,19 @@ ValueOwnershipKind SILValue::getOwnershipKind() const { // // We assume that any time we are in SILBuilder and call this without having a // value in a block yet, ossa is enabled. - if (auto *block = Value->getParentBlock()) - if (auto *f = block->getParent()) - if (!f->hasOwnership()) - return OwnershipKind::None; + if (auto *block = Value->getParentBlock()) { + auto *f = block->getParent(); + // If our block isn't in a function, then it must be in a global + // variable. We don't verify ownership there so just return + // OwnershipKind::None. + if (!f) + return OwnershipKind::None; + + // Now that we know that we do have a block/function, check if we have + // ownership. 
+ if (!f->hasOwnership()) + return OwnershipKind::None; + } ValueOwnershipKindClassifier Classifier; auto result = Classifier.visit(const_cast(Value)); diff --git a/lib/SIL/Parser/ParseSIL.cpp b/lib/SIL/Parser/ParseSIL.cpp index 36573f822eb8c..304bfc585748c 100644 --- a/lib/SIL/Parser/ParseSIL.cpp +++ b/lib/SIL/Parser/ParseSIL.cpp @@ -3528,8 +3528,8 @@ bool SILParser::parseSpecificSILInstruction(SILBuilder &B, SourceLoc KindLoc = P.Tok.getLoc(); if (P.consumeIf(tok::kw_var)) KindId = P.Context.getIdentifier("var"); - else if (P.parseIdentifier(KindId, KindLoc, diag::expected_tok_in_sil_instr, - "kind")) + else if (P.parseIdentifier(KindId, KindLoc, /*diagnoseDollarPrefix=*/false, + diag::expected_tok_in_sil_instr, "kind")) return true; if (P.parseToken(tok::r_square, diag::expected_tok_in_sil_instr, "]")) @@ -5288,8 +5288,8 @@ bool SILParser::parseSpecificSILInstruction(SILBuilder &B, throws = true; } - SILType resumeTy; - if (parseSILType(resumeTy)) { + CanType resumeTy; + if (parseASTType(resumeTy)) { return true; } @@ -5304,21 +5304,11 @@ bool SILParser::parseSpecificSILInstruction(SILBuilder &B, if (parseSILDebugLocation(InstLoc, B)) return true; - auto &M = B.getModule(); - NominalTypeDecl *continuationDecl = throws - ? 
M.getASTContext().getUnsafeThrowingContinuationDecl() - : M.getASTContext().getUnsafeContinuationDecl(); - - auto continuationTy = BoundGenericType::get(continuationDecl, Type(), - resumeTy.getASTType()); - auto continuationSILTy - = SILType::getPrimitiveObjectType(continuationTy->getCanonicalType()); - if (Opcode == SILInstructionKind::GetAsyncContinuationAddrInst) { ResultVal = B.createGetAsyncContinuationAddr(InstLoc, resumeBuffer, - continuationSILTy); + resumeTy, throws); } else { - ResultVal = B.createGetAsyncContinuation(InstLoc, continuationSILTy); + ResultVal = B.createGetAsyncContinuation(InstLoc, resumeTy, throws); } break; } @@ -5783,7 +5773,8 @@ bool SILParserState::parseDeclSIL(Parser &P) { &isWithoutActuallyEscapingThunk, &Semantics, &SpecAttrs, &ClangDecl, &MRK, FunctionState, M) || P.parseToken(tok::at_sign, diag::expected_sil_function_name) || - P.parseIdentifier(FnName, FnNameLoc, diag::expected_sil_function_name) || + P.parseIdentifier(FnName, FnNameLoc, /*diagnoseDollarPrefix=*/false, + diag::expected_sil_function_name) || P.parseToken(tok::colon, diag::expected_sil_type)) return true; { @@ -6009,7 +6000,8 @@ bool SILParserState::parseSILGlobal(Parser &P) { &isLet, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, State, M) || P.parseToken(tok::at_sign, diag::expected_sil_value_name) || - P.parseIdentifier(GlobalName, NameLoc, diag::expected_sil_value_name) || + P.parseIdentifier(GlobalName, NameLoc, /*diagnoseDollarPrefix=*/false, + diag::expected_sil_value_name) || P.parseToken(tok::colon, diag::expected_sil_type)) return true; @@ -6095,6 +6087,7 @@ bool SILParserState::parseSILProperty(Parser &P) { if (!P.consumeIf(tok::r_paren)) { KeyPathPatternComponent parsedComponent; if (P.parseIdentifier(ComponentKind, ComponentLoc, + /*diagnoseDollarPrefix=*/false, diag::expected_tok_in_sil_instr, "component kind") || SP.parseKeyPathPatternComponent(parsedComponent, OperandTypes, ComponentLoc, ComponentKind, InstLoc, @@ -6316,23 +6309,19 
@@ static CanType parseAssociatedTypePath(Parser &P, SILParser &SP, static ProtocolConformanceRef parseRootProtocolConformance(Parser &P, SILParser &SP, Type ConformingTy, ProtocolDecl *&proto) { - Identifier ModuleKeyword, ModuleName; + const StringRef ModuleKeyword = "module"; + Identifier ModuleName; SourceLoc Loc, KeywordLoc; proto = parseProtocolDecl(P, SP); if (!proto) return ProtocolConformanceRef(); - if (P.parseIdentifier(ModuleKeyword, KeywordLoc, - diag::expected_tok_in_sil_instr, "module") || - SP.parseSILIdentifier(ModuleName, Loc, - diag::expected_sil_value_name)) + if (P.parseSpecificIdentifier( + ModuleKeyword, KeywordLoc, + Diagnostic(diag::expected_tok_in_sil_instr, ModuleKeyword)) || + SP.parseSILIdentifier(ModuleName, Loc, diag::expected_sil_value_name)) return ProtocolConformanceRef(); - if (ModuleKeyword.str() != "module") { - P.diagnose(KeywordLoc, diag::expected_tok_in_sil_instr, "module"); - return ProtocolConformanceRef(); - } - // Calling lookupConformance on a BoundGenericType will return a specialized // conformance. We use UnboundGenericType to find the normal conformance. 
Type lookupTy = ConformingTy; @@ -6460,9 +6449,10 @@ static bool parseSILWitnessTableEntry( Identifier EntryKeyword; SourceLoc KeywordLoc; if (P.parseIdentifier(EntryKeyword, KeywordLoc, - diag::expected_tok_in_sil_instr, - "method, associated_type, associated_type_protocol, base_protocol" - ", no_default")) + /*diagnoseDollarPrefix=*/false, + diag::expected_tok_in_sil_instr, + "method, associated_type, associated_type_protocol" + ", base_protocol, no_default")) return true; if (EntryKeyword.str() == "no_default") { @@ -6894,11 +6884,8 @@ llvm::Optional SILParser::parseSILCoverageExpr( auto LHS = parseSILCoverageExpr(Builder); if (!LHS) return None; - Identifier Operator; - SourceLoc Loc; - if (P.parseAnyIdentifier(Operator, Loc, - diag::sil_coverage_invalid_operator)) - return None; + const Identifier Operator = P.Context.getIdentifier(P.Tok.getText()); + const SourceLoc Loc = P.consumeToken(); if (Operator.str() != "+" && Operator.str() != "-") { P.diagnose(Loc, diag::sil_coverage_invalid_operator); return None; diff --git a/lib/SIL/Utils/BasicBlockUtils.cpp b/lib/SIL/Utils/BasicBlockUtils.cpp index 6421131f323c7..5bc97ac515c34 100644 --- a/lib/SIL/Utils/BasicBlockUtils.cpp +++ b/lib/SIL/Utils/BasicBlockUtils.cpp @@ -11,6 +11,7 @@ //===----------------------------------------------------------------------===// #include "swift/SIL/BasicBlockUtils.h" +#include "swift/Basic/STLExtras.h" #include "swift/SIL/Dominance.h" #include "swift/SIL/LoopInfo.h" #include "swift/SIL/SILArgument.h" @@ -18,6 +19,7 @@ #include "swift/SIL/SILBuilder.h" #include "swift/SIL/SILFunction.h" #include "swift/SIL/TerminatorUtils.h" +#include "llvm/ADT/STLExtras.h" using namespace swift; @@ -378,3 +380,113 @@ void DeadEndBlocks::compute() { ReachableBlocks.insert(Pred); } } + +//===----------------------------------------------------------------------===// +// Post Dominance Set Completion Utilities +//===----------------------------------------------------------------------===// + +void 
JointPostDominanceSetComputer::findJointPostDominatingSet( + SILBasicBlock *dominatingBlock, ArrayRef dominatedBlockSet, + function_ref inputBlocksFoundDuringWalk, + function_ref foundJointPostDomSetCompletionBlocks, + function_ref inputBlocksInJointPostDomSet) { + // If our reachable block set is empty, assert. This is most likely programmer + // error. + assert(dominatedBlockSet.size() != 0); + + // If we have a reachable block set with a single block and that block is + // dominatingBlock, then we return success since a block post-doms its self so + // it is already complete. + // + // NOTE: We do not consider this a visiteed + if (dominatedBlockSet.size() == 1) { + if (dominatingBlock == dominatedBlockSet[0]) { + if (inputBlocksInJointPostDomSet) + inputBlocksInJointPostDomSet(dominatingBlock); + return; + } + } + + // At the top of where we for sure are going to use state... make sure we + // always clean up any resources that we use! + SWIFT_DEFER { clear(); }; + + // Otherwise, we need to compute our joint post dominating set. We do this by + // performing a backwards walk up the CFG tracking back liveness until we find + // our dominating block. As we walk up, we keep track of any successor blocks + // that we need to visit before the walk completes lest we leak. After we + // finish the walk, these leaking blocks are a valid (albeit not unique) + // completion of the post dom set. + for (auto *block : dominatedBlockSet) { + // Skip dead end blocks. + if (deadEndBlocks.isDeadEnd(block)) + continue; + + // We require dominatedBlockSet to be a set and thus assert if we hit it to + // flag user error to our caller. + bool succeededInserting = visitedBlocks.insert(block).second; + (void)succeededInserting; + assert(succeededInserting && + "Repeat Elt: dominatedBlockSet should be a set?!"); + initialBlocks.insert(block); + worklist.push_back(block); + } + + // Then until we run out of blocks... 
+ while (!worklist.empty()) { + auto *block = worklist.pop_back_val(); + + // First remove block from blocksThatLeakIfNeverVisited if it is there since + // we know that it isn't leaking since we are visiting it now. + blocksThatLeakIfNeverVisited.remove(block); + + // Then if our block is not one of our initial blocks, add the block's + // successors to blocksThatLeakIfNeverVisited. + if (!initialBlocks.count(block)) { + for (auto *succBlock : block->getSuccessorBlocks()) { + if (visitedBlocks.count(succBlock)) + continue; + if (deadEndBlocks.isDeadEnd(succBlock)) + continue; + blocksThatLeakIfNeverVisited.insert(succBlock); + } + } + + // If we are the dominating block, we are done. + if (dominatingBlock == block) + continue; + + // Otherwise for each predecessor that we have, first check if it was one of + // our initial blocks (signaling a loop) and then add it to the worklist if + // we haven't visited it already. + for (auto *predBlock : block->getPredecessorBlocks()) { + if (initialBlocks.count(predBlock)) { + reachableInputBlocks.push_back(predBlock); + } + if (visitedBlocks.insert(predBlock).second) + worklist.push_back(predBlock); + } + } + + // After our worklist has emptied, any blocks left in + // blocksThatLeakIfNeverVisited are "leaking blocks". + for (auto *leakingBlock : blocksThatLeakIfNeverVisited) + foundJointPostDomSetCompletionBlocks(leakingBlock); + + // Then unique our list of reachable input blocks and pass them to our + // callback. + sortUnique(reachableInputBlocks); + for (auto *block : reachableInputBlocks) + inputBlocksFoundDuringWalk(block); + + // Then if were asked to find the subset of our input blocks that are in the + // joint-postdominance set, compute that. + if (!inputBlocksInJointPostDomSet) + return; + + // Pass back the reachable input blocks that were not reachable from other + // input blocks to. 
+ for (auto *block : dominatedBlockSet) + if (lower_bound(reachableInputBlocks, block) == reachableInputBlocks.end()) + inputBlocksInJointPostDomSet(block); +} diff --git a/lib/SIL/Utils/GenericSpecializationMangler.cpp b/lib/SIL/Utils/GenericSpecializationMangler.cpp index 529e36b09ff6a..beae0d6481b22 100644 --- a/lib/SIL/Utils/GenericSpecializationMangler.cpp +++ b/lib/SIL/Utils/GenericSpecializationMangler.cpp @@ -74,32 +74,53 @@ std::string SpecializationMangler::finalize() { // Generic Specialization //===----------------------------------------------------------------------===// -std::string GenericSpecializationMangler::mangle(GenericSignature Sig) { - beginMangling(); - - if (!Sig) { - assert(Function && - "Need a SIL function if no generic signature is provided"); - SILFunctionType *FTy = Function->getLoweredFunctionType(); - Sig = FTy->getInvocationGenericSignature(); - } - +void GenericSpecializationMangler:: +appendSubstitutions(GenericSignature sig, SubstitutionMap subs) { bool First = true; - Sig->forEachParam([&](GenericTypeParamType *ParamType, bool Canonical) { + sig->forEachParam([&](GenericTypeParamType *ParamType, bool Canonical) { if (Canonical) { - appendType(Type(ParamType).subst(SubMap)->getCanonicalType()); + appendType(Type(ParamType).subst(subs)->getCanonicalType()); appendListSeparator(First); } }); assert(!First && "no generic substitutions"); +} - if (isInlined) - appendSpecializationOperator("Ti"); - else - appendSpecializationOperator( - isPrespecializaton ? "Ts" : (isReAbstracted ? 
"Tg" : "TG")); +std::string GenericSpecializationMangler:: +manglePrespecialized(GenericSignature sig, SubstitutionMap subs) { + beginMangling(); + appendSubstitutions(sig, subs); + appendSpecializationOperator("Ts"); + return finalize(); +} + +std::string GenericSpecializationMangler:: +mangleNotReabstracted(SubstitutionMap subs) { + beginMangling(); + appendSubstitutions(getGenericSignature(), subs); + appendSpecializationOperator("TG"); return finalize(); } + +std::string GenericSpecializationMangler:: +mangleReabstracted(SubstitutionMap subs, bool alternativeMangling) { + beginMangling(); + appendSubstitutions(getGenericSignature(), subs); + + // See ReabstractionInfo::hasConvertedResilientParams for why and when to use + // the alternative mangling. + appendSpecializationOperator(alternativeMangling ? "TB" : "Tg"); + return finalize(); +} + +std::string GenericSpecializationMangler:: +mangleForDebugInfo(GenericSignature sig, SubstitutionMap subs, bool forInlining) { + beginMangling(); + appendSubstitutions(sig, subs); + appendSpecializationOperator(forInlining ? "Ti" : "TG"); + return finalize(); +} + static SubstitutionMap getSubstitutionMapForPrespecialization(GenericSignature genericSig, @@ -132,6 +153,6 @@ std::string GenericSpecializationMangler::manglePrespecialization( GenericSignature specializedSig) { auto subs = getSubstitutionMapForPrespecialization(genericSig, specializedSig); - GenericSpecializationMangler mangler(unspecializedName, subs); - return mangler.mangle(genericSig); + GenericSpecializationMangler mangler(unspecializedName); + return mangler.manglePrespecialized(genericSig, subs); } diff --git a/lib/SIL/Utils/LoopInfo.cpp b/lib/SIL/Utils/LoopInfo.cpp index 72defd29eb7bc..38214b6e7a5d8 100644 --- a/lib/SIL/Utils/LoopInfo.cpp +++ b/lib/SIL/Utils/LoopInfo.cpp @@ -111,6 +111,11 @@ bool SILLoop::canDuplicate(SILInstruction *I) const { if (isa(I)) return false; + // Can't duplicate get/await_async_continuation. 
+ if (isa(I) || + isa(I) || isa(I)) + return false; + // Some special cases above that aren't considered isTriviallyDuplicatable // return true early. assert(I->isTriviallyDuplicatable() && diff --git a/lib/SIL/Utils/MemAccessUtils.cpp b/lib/SIL/Utils/MemAccessUtils.cpp index dd3f8a14f812f..84d47c80aa822 100644 --- a/lib/SIL/Utils/MemAccessUtils.cpp +++ b/lib/SIL/Utils/MemAccessUtils.cpp @@ -1805,6 +1805,8 @@ static void visitBuiltinAddress(BuiltinInst *builtin, case BuiltinValueKind::CancelAsyncTask: case BuiltinValueKind::CreateAsyncTask: case BuiltinValueKind::CreateAsyncTaskFuture: + case BuiltinValueKind::AutoDiffCreateLinearMapContext: + case BuiltinValueKind::AutoDiffAllocateSubcontext: return; // General memory access to a pointer in first operand position. @@ -1980,6 +1982,10 @@ void swift::visitAccessedAddress(SILInstruction *I, case SILInstructionKind::UnconditionalCheckedCastAddrInst: case SILInstructionKind::UnconditionalCheckedCastValueInst: case SILInstructionKind::ValueMetatypeInst: + // TODO: Is this correct? + case SILInstructionKind::GetAsyncContinuationInst: + case SILInstructionKind::GetAsyncContinuationAddrInst: + case SILInstructionKind::AwaitAsyncContinuationInst: return; } } diff --git a/lib/SIL/Utils/OwnershipUtils.cpp b/lib/SIL/Utils/OwnershipUtils.cpp index 5c699ae7888da..ed38037a6a0ac 100644 --- a/lib/SIL/Utils/OwnershipUtils.cpp +++ b/lib/SIL/Utils/OwnershipUtils.cpp @@ -26,7 +26,7 @@ bool swift::isValueAddressOrTrivial(SILValue v) { } // These operations forward both owned and guaranteed ownership. 
-bool swift::isOwnershipForwardingValueKind(SILNodeKind kind) { +static bool isOwnershipForwardingValueKind(SILNodeKind kind) { switch (kind) { case SILNodeKind::TupleInst: case SILNodeKind::StructInst: @@ -42,7 +42,6 @@ bool swift::isOwnershipForwardingValueKind(SILNodeKind kind) { case SILNodeKind::BridgeObjectToRefInst: case SILNodeKind::UnconditionalCheckedCastInst: case SILNodeKind::UncheckedEnumDataInst: - case SILNodeKind::MarkUninitializedInst: case SILNodeKind::SelectEnumInst: case SILNodeKind::SwitchEnumInst: case SILNodeKind::CheckedCastBranchInst: @@ -58,7 +57,7 @@ bool swift::isOwnershipForwardingValueKind(SILNodeKind kind) { // These operations forward guaranteed ownership, but don't necessarily forward // owned values. -bool swift::isGuaranteedForwardingValueKind(SILNodeKind kind) { +static bool isGuaranteedForwardingValueKind(SILNodeKind kind) { switch (kind) { case SILNodeKind::TupleExtractInst: case SILNodeKind::StructExtractInst: @@ -72,13 +71,24 @@ bool swift::isGuaranteedForwardingValueKind(SILNodeKind kind) { } } -bool swift::isOwnedForwardingValueKind(SILNodeKind kind) { - return isOwnershipForwardingValueKind(kind); +static bool isOwnedForwardingValueKind(SILNodeKind kind) { + switch (kind) { + case SILNodeKind::MarkUninitializedInst: + return true; + default: + return isOwnershipForwardingValueKind(kind); + } } bool swift::isOwnedForwardingUse(Operand *op) { - auto kind = op->getUser()->getKind(); - return isOwnershipForwardingValueKind(SILNodeKind(kind)); + auto *user = op->getUser(); + auto kind = user->getKind(); + bool result = isOwnershipForwardingValueKind(SILNodeKind(kind)); + if (result) { + assert(!isa(user)); + assert(isa(user)); + } + return result; } bool swift::isOwnedForwardingValue(SILValue value) { @@ -86,10 +96,17 @@ bool swift::isOwnedForwardingValue(SILValue value) { // terminator, we are fine. 
if (auto *arg = dyn_cast(value)) if (auto *predTerm = arg->getSingleTerminator()) - if (predTerm->isTransformationTerminator()) + if (predTerm->isTransformationTerminator()) { + assert(isa(predTerm)); return true; - return isOwnedForwardingValueKind( - value->getKindOfRepresentativeSILNodeInObject()); + } + auto *node = value->getRepresentativeSILNodeInObject(); + bool result = isOwnedForwardingValueKind(node->getKind()); + if (result) { + assert(!isa(node)); + assert(isa(node)); + } + return result; } bool swift::isGuaranteedForwardingValue(SILValue value) { @@ -97,21 +114,39 @@ bool swift::isGuaranteedForwardingValue(SILValue value) { // guaranteed. if (auto *arg = dyn_cast(value)) if (auto *ti = arg->getSingleTerminator()) - if (ti->isTransformationTerminator()) + if (ti->isTransformationTerminator()) { + assert(isa(ti)); return true; + } - return isGuaranteedForwardingValueKind( - value->getKindOfRepresentativeSILNodeInObject()); + auto *node = value->getRepresentativeSILNodeInObject(); + bool result = isGuaranteedForwardingValueKind(node->getKind()); + if (result) { + assert(!isa(node)); + assert(isa(node)); + } + return result; } bool swift::isGuaranteedForwardingUse(Operand *use) { - auto kind = SILNodeKind(use->getUser()->getKind()); - return isGuaranteedForwardingValueKind(kind); + auto *user = use->getUser(); + auto kind = SILNodeKind(user->getKind()); + bool result = isGuaranteedForwardingValueKind(kind); + if (result) { + assert(!isa(user)); + assert(isa(user)); + } + return result; } bool swift::isOwnershipForwardingUse(Operand *use) { - auto kind = SILNodeKind(use->getUser()->getKind()); - return isOwnershipForwardingValueKind(kind); + auto *user = use->getUser(); + auto kind = SILNodeKind(user->getKind()); + bool result = isOwnershipForwardingValueKind(kind); + if (result) { + assert(isa(user)); + } + return result; } //===----------------------------------------------------------------------===// @@ -161,32 +196,42 @@ llvm::raw_ostream 
&swift::operator<<(llvm::raw_ostream &os, return os; } -void BorrowingOperand::visitLocalEndScopeInstructions( - function_ref func) const { +bool BorrowingOperand::visitLocalEndScopeUses( + function_ref func) const { switch (kind) { case BorrowingOperandKind::BeginBorrow: for (auto *use : cast(op->getUser())->getUses()) { if (use->isLifetimeEnding()) { - func(use); + if (!func(use)) + return false; } } - return; + return true; case BorrowingOperandKind::BeginApply: { auto *user = cast(op->getUser()); for (auto *use : user->getTokenResult()->getUses()) { - func(use); + if (!func(use)) + return false; } - return; + return true; } // These are instantaneous borrow scopes so there aren't any special end // scope instructions. case BorrowingOperandKind::Apply: case BorrowingOperandKind::TryApply: case BorrowingOperandKind::Yield: - return; - case BorrowingOperandKind::Branch: - return; + return true; + case BorrowingOperandKind::Branch: { + auto *br = cast(op->getUser()); + for (auto *use : br->getArgForOperand(op)->getUses()) + if (use->isLifetimeEnding()) + if (!func(use)) + return false; + return true; + } } + + llvm_unreachable("Covered switch isn't covered"); } void BorrowingOperand::visitBorrowIntroducingUserResults( @@ -267,7 +312,10 @@ void BorrowingOperand::visitUserResultConsumingUses( void BorrowingOperand::getImplicitUses( SmallVectorImpl &foundUses, std::function *errorFunction) const { - visitLocalEndScopeInstructions([&](Operand *op) { foundUses.push_back(op); }); + visitLocalEndScopeUses([&](Operand *op) { + foundUses.push_back(op); + return true; + }); } //===----------------------------------------------------------------------===// @@ -831,23 +879,29 @@ swift::getSingleOwnedValueIntroducer(SILValue inputValue) { //===----------------------------------------------------------------------===// Optional ForwardingOperand::get(Operand *use) { - auto *user = use->getUser(); + if (use->isTypeDependent()) + return None; - if (isa(user)) { - 
assert(isGuaranteedForwardingUse(use)); + if (isa(use->getUser())) { return {use}; } - assert(!isGuaranteedForwardingUse(use)); + return None; } ValueOwnershipKind ForwardingOperand::getOwnershipKind() const { - return getUser()->getOwnershipKind(); + return (*this)->getOwnershipKind(); } void ForwardingOperand::setOwnershipKind(ValueOwnershipKind newKind) const { auto *user = use->getUser(); - if (auto *ofsvi = dyn_cast(user)) + // NOTE: This if chain is meant to be a covered switch, so make sure to return + // in each if itself since we have an unreachable at the bottom to ensure if a + // new subclass of OwnershipForwardingInst is added + if (auto *ofsvi = dyn_cast(user)) + if (!ofsvi->getType().isTrivial(*ofsvi->getFunction())) + return ofsvi->setOwnershipKind(newKind); + if (auto *ofsvi = dyn_cast(user)) if (!ofsvi->getType().isTrivial(*ofsvi->getFunction())) return ofsvi->setOwnershipKind(newKind); if (auto *ofci = dyn_cast(user)) @@ -856,7 +910,6 @@ void ForwardingOperand::setOwnershipKind(ValueOwnershipKind newKind) const { if (auto *ofseib = dyn_cast(user)) if (!ofseib->getType().isTrivial(*ofseib->getFunction())) return ofseib->setOwnershipKind(newKind); - if (auto *ofmvi = dyn_cast(user)) { assert(ofmvi->getNumOperands() == 1); if (!ofmvi->getOperand(0)->getType().isTrivial(*ofmvi->getFunction())) { @@ -911,9 +964,13 @@ void ForwardingOperand::replaceOwnershipKind(ValueOwnershipKind oldKind, ValueOwnershipKind newKind) const { auto *user = use->getUser(); - if (auto *ofsvi = dyn_cast(user)) - if (ofsvi->getOwnershipKind() == oldKind) - return ofsvi->setOwnershipKind(newKind); + if (auto *fInst = dyn_cast(user)) + if (fInst->getOwnershipKind() == oldKind) + return fInst->setOwnershipKind(newKind); + + if (auto *fInst = dyn_cast(user)) + if (fInst->getOwnershipKind() == oldKind) + return fInst->setOwnershipKind(newKind); if (auto *ofci = dyn_cast(user)) if (ofci->getOwnershipKind() == oldKind) @@ -966,5 +1023,48 @@ void 
ForwardingOperand::replaceOwnershipKind(ValueOwnershipKind oldKind, } return; } - llvm_unreachable("Out of sync with ForwardingOperand::get?!"); + + llvm_unreachable("Missing Case! Out of sync with ForwardingOperand::get?!"); +} + +SILValue ForwardingOperand::getSingleForwardedValue() const { + assert(isGuaranteedForwardingUse(use)); + if (auto *svi = dyn_cast(use->getUser())) + return svi; + return SILValue(); +} + +bool ForwardingOperand::visitForwardedValues( + function_ref visitor) { + auto *user = use->getUser(); + + assert(isGuaranteedForwardingUse(use)); + + // See if we have a single value instruction... if we do that is always the + // transitive result. + if (auto *svi = dyn_cast(user)) { + return visitor(svi); + } + + if (auto *mvri = dyn_cast(user)) { + return llvm::all_of(mvri->getResults(), [&](SILValue value) { + if (value.getOwnershipKind() == OwnershipKind::None) + return true; + return visitor(value); + }); + } + + // This is an instruction like switch_enum and checked_cast_br that are + // "transforming terminators"... We know that this means that we should at + // most have a single phi argument. + auto *ti = cast(user); + return llvm::all_of(ti->getSuccessorBlocks(), [&](SILBasicBlock *succBlock) { + // If we do not have any arguments, then continue. 
+ if (succBlock->args_empty()) + return true; + + auto args = succBlock->getSILPhiArguments(); + assert(args.size() == 1 && "Transforming terminator with multiple args?!"); + return visitor(args[0]); + }); } diff --git a/lib/SIL/Verifier/MemoryLifetime.cpp b/lib/SIL/Verifier/MemoryLifetime.cpp index 25c31c55bfc97..4ad45f48dbb6f 100644 --- a/lib/SIL/Verifier/MemoryLifetime.cpp +++ b/lib/SIL/Verifier/MemoryLifetime.cpp @@ -781,7 +781,6 @@ void MemoryLifetimeVerifier::setFuncOperandBits(BlockState &state, Operand &op, case SILArgumentConvention::Indirect_InoutAliasable: case SILArgumentConvention::Direct_Owned: case SILArgumentConvention::Direct_Unowned: - case SILArgumentConvention::Direct_Deallocating: case SILArgumentConvention::Direct_Guaranteed: break; } @@ -968,7 +967,6 @@ void MemoryLifetimeVerifier::checkFuncArgument(Bits &bits, Operand &argumentOp, break; case SILArgumentConvention::Direct_Owned: case SILArgumentConvention::Direct_Unowned: - case SILArgumentConvention::Direct_Deallocating: case SILArgumentConvention::Direct_Guaranteed: break; } diff --git a/lib/SIL/Verifier/ReborrowVerifier.cpp b/lib/SIL/Verifier/ReborrowVerifier.cpp index 179831e56d25b..6024de614bb21 100644 --- a/lib/SIL/Verifier/ReborrowVerifier.cpp +++ b/lib/SIL/Verifier/ReborrowVerifier.cpp @@ -42,8 +42,9 @@ void ReborrowVerifier::verifyReborrows(BorrowingOperand initialScopedOperand, SILValue value) { SmallVector, 4> worklist; // Initialize the worklist with borrow lifetime ending uses - initialScopedOperand.visitLocalEndScopeInstructions([&](Operand *op) { + initialScopedOperand.visitLocalEndScopeUses([&](Operand *op) { worklist.emplace_back(op, value); + return true; }); while (!worklist.empty()) { diff --git a/lib/SIL/Verifier/SILVerifier.cpp b/lib/SIL/Verifier/SILVerifier.cpp index 82ccd3b8bd701..a93f107ef27a6 100644 --- a/lib/SIL/Verifier/SILVerifier.cpp +++ b/lib/SIL/Verifier/SILVerifier.cpp @@ -30,6 +30,7 @@ #include "swift/SIL/DynamicCasts.h" #include "swift/SIL/MemAccessUtils.h" 
#include "swift/SIL/MemoryLifetime.h" +#include "swift/SIL/OwnershipUtils.h" #include "swift/SIL/PostOrder.h" #include "swift/SIL/PrettyStackTrace.h" #include "swift/SIL/SILDebugScope.h" @@ -461,7 +462,6 @@ struct ImmutableAddressUseVerifier { case SILArgumentConvention::Direct_Unowned: case SILArgumentConvention::Direct_Guaranteed: case SILArgumentConvention::Direct_Owned: - case SILArgumentConvention::Direct_Deallocating: assert(conv.isIndirectConvention() && "Expect an indirect convention"); return true; // return something "conservative". } @@ -493,7 +493,7 @@ struct ImmutableAddressUseVerifier { return false; } - bool isCastToNonConsuming(UncheckedAddrCastInst *i) { + bool isAddrCastToNonConsuming(SingleValueInstruction *i) { // Check if any of our uses are consuming. If none of them are consuming, we // are good to go. return llvm::none_of(i->getUses(), [&](Operand *use) -> bool { @@ -588,8 +588,9 @@ struct ImmutableAddressUseVerifier { break; case SILInstructionKind::DestroyAddrInst: return true; + case SILInstructionKind::UpcastInst: case SILInstructionKind::UncheckedAddrCastInst: { - if (isCastToNonConsuming(cast(inst))) { + if (isAddrCastToNonConsuming(cast(inst))) { break; } return true; @@ -1141,8 +1142,29 @@ class SILVerifier : public SILVerifierBase { } } - // TODO: There should be a use of an opened archetype inside the instruction for - // each opened archetype operand of the instruction. + if (isa(I)) { + checkOwnershipForwardingInst(I); + } + } + + /// We are given an instruction \p fInst that forwards ownership from \p + /// operand to one of \p fInst's results, make sure that if we have a + /// forwarding instruction that can only accept owned or guaranteed ownership + /// that we are following that invariant. 
+ void checkOwnershipForwardingInst(SILInstruction *i) { + if (auto *o = dyn_cast(i)) { + ValueOwnershipKind kind = OwnershipKind::Owned; + require(kind.isCompatibleWith(o->getOwnershipKind()), + "OwnedFirstArgForwardingSingleValueInst's ownership kind must be " + "compatible with owned"); + } + + if (auto *o = dyn_cast(i)) { + ValueOwnershipKind kind = OwnershipKind::Guaranteed; + require(kind.isCompatibleWith(o->getOwnershipKind()), + "GuaranteedFirstArgForwardingSingleValueInst's ownership kind " + "must be compatible with guaranteed"); + } } void checkInstructionsSILLocation(SILInstruction *I) { @@ -4892,12 +4914,7 @@ class SILVerifier : public SILVerifierBase { void checkGetAsyncContinuationInstBase(GetAsyncContinuationInstBase *GACI) { auto resultTy = GACI->getType(); - auto &C = resultTy.getASTContext(); - auto resultBGT = resultTy.getAs(); - require(resultBGT, "Instruction type must be a continuation type"); - auto resultDecl = resultBGT->getDecl(); - require(resultDecl == C.getUnsafeContinuationDecl() - || resultDecl == C.getUnsafeThrowingContinuationDecl(), + require(resultTy.is(), "Instruction type must be a continuation type"); } @@ -5450,6 +5467,16 @@ class SILVerifier : public SILVerifierBase { CanSILFunctionType FTy = F->getLoweredFunctionType(); verifySILFunctionType(FTy); + // Don't verify functions that were skipped. We are likely to see them in + // FunctionBodySkipping::NonInlinableWithoutTypes mode. 
+ auto Ctx = F->getDeclContext(); + if (Ctx) { + if (auto AFD = dyn_cast(Ctx)) { + if (AFD->isBodySkipped()) + return; + } + } + if (F->isExternalDeclaration()) { if (F->hasForeignBody()) return; diff --git a/lib/SILGen/ResultPlan.cpp b/lib/SILGen/ResultPlan.cpp index f249c9261d3f6..235f10ca5290f 100644 --- a/lib/SILGen/ResultPlan.cpp +++ b/lib/SILGen/ResultPlan.cpp @@ -480,23 +480,32 @@ class ForeignAsyncInitializationPlan final : public ResultPlan { emitForeignAsyncCompletionHandler(SILGenFunction &SGF, SILLocation loc) override { // Get the current continuation for the task. - auto continuationDecl = calleeTypeInfo.foreign.async->completionHandlerErrorParamIndex() + bool throws = calleeTypeInfo.foreign.async + ->completionHandlerErrorParamIndex().hasValue(); + + continuation = SGF.B.createGetAsyncContinuationAddr(loc, resumeBuf, + calleeTypeInfo.substResultType, throws); + + // Wrap the Builtin.RawUnsafeContinuation in an + // Unsafe[Throwing]Continuation. + auto continuationDecl = throws ? SGF.getASTContext().getUnsafeThrowingContinuationDecl() : SGF.getASTContext().getUnsafeContinuationDecl(); auto continuationTy = BoundGenericType::get(continuationDecl, Type(), calleeTypeInfo.substResultType) ->getCanonicalType(); - - - continuation = SGF.B.createGetAsyncContinuationAddr(loc, resumeBuf, - SILType::getPrimitiveObjectType(continuationTy)); - + auto wrappedContinuation = + SGF.B.createStruct(loc, + SILType::getPrimitiveObjectType(continuationTy), + {continuation}); + // Stash it in a buffer for a block object. 
- auto blockStorageTy = SILType::getPrimitiveAddressType(SILBlockStorageType::get(continuationTy)); + auto blockStorageTy = SILType::getPrimitiveAddressType( + SILBlockStorageType::get(continuationTy)); auto blockStorage = SGF.emitTemporaryAllocation(loc, blockStorageTy); auto continuationAddr = SGF.B.createProjectBlockStorage(loc, blockStorage); - SGF.B.createStore(loc, continuation, continuationAddr, + SGF.B.createStore(loc, wrappedContinuation, continuationAddr, StoreOwnershipQualifier::Trivial); // Get the block invocation function for the given completion block type. diff --git a/lib/SILGen/SILGen.cpp b/lib/SILGen/SILGen.cpp index f0943373f754e..03135d87ef95e 100644 --- a/lib/SILGen/SILGen.cpp +++ b/lib/SILGen/SILGen.cpp @@ -327,10 +327,9 @@ SILGenModule::getConformanceToBridgedStoredNSError(SILLocation loc, Type type) { return SwiftModule->lookupConformance(type, proto); } -static FuncDecl * -lookUpResumeContinuationIntrinsic(ASTContext &C, - Optional &cache, - StringRef name) { +static FuncDecl *lookupConcurrencyIntrinsic(ASTContext &C, + Optional &cache, + StringRef name) { if (cache) return *cache; @@ -355,23 +354,55 @@ lookUpResumeContinuationIntrinsic(ASTContext &C, return func; } +FuncDecl * +SILGenModule::getRunChildTask() { + return lookupConcurrencyIntrinsic(getASTContext(), + RunChildTask, + "_runChildTask"); +} + +FuncDecl * +SILGenModule::getTaskFutureGet() { + return lookupConcurrencyIntrinsic(getASTContext(), + TaskFutureGet, + "_taskFutureGet"); +} + +FuncDecl * +SILGenModule::getTaskFutureGetThrowing() { + return lookupConcurrencyIntrinsic(getASTContext(), + TaskFutureGetThrowing, + "_taskFutureGetThrowing"); +} + FuncDecl * SILGenModule::getResumeUnsafeContinuation() { - return lookUpResumeContinuationIntrinsic(getASTContext(), - ResumeUnsafeContinuation, - "_resumeUnsafeContinuation"); + return lookupConcurrencyIntrinsic(getASTContext(), + ResumeUnsafeContinuation, + "_resumeUnsafeContinuation"); } FuncDecl * 
SILGenModule::getResumeUnsafeThrowingContinuation() { - return lookUpResumeContinuationIntrinsic(getASTContext(), - ResumeUnsafeThrowingContinuation, - "_resumeUnsafeThrowingContinuation"); + return lookupConcurrencyIntrinsic(getASTContext(), + ResumeUnsafeThrowingContinuation, + "_resumeUnsafeThrowingContinuation"); } FuncDecl * SILGenModule::getResumeUnsafeThrowingContinuationWithError() { - return lookUpResumeContinuationIntrinsic(getASTContext(), - ResumeUnsafeThrowingContinuationWithError, - "_resumeUnsafeThrowingContinuationWithError"); + return lookupConcurrencyIntrinsic(getASTContext(), + ResumeUnsafeThrowingContinuationWithError, + "_resumeUnsafeThrowingContinuationWithError"); +} +FuncDecl * +SILGenModule::getRunTaskForBridgedAsyncMethod() { + return lookupConcurrencyIntrinsic(getASTContext(), + RunTaskForBridgedAsyncMethod, + "_runTaskForBridgedAsyncMethod"); +} +FuncDecl * +SILGenModule::getRunAsyncHandler() { + return lookupConcurrencyIntrinsic(getASTContext(), RunAsyncHandler, + "_runAsyncHandler"); } ProtocolConformance *SILGenModule::getNSErrorConformanceToError() { @@ -751,7 +782,16 @@ void SILGenModule::emitFunctionDefinition(SILDeclRef constant, SILFunction *f) { PrettyStackTraceSILFunction X("silgen emitNativeToForeignThunk", f); f->setBare(IsBare); f->setThunk(IsThunk); + // If the native function is async, then the foreign entry point is not, + // so it needs to spawn a detached task in which to run the native + // implementation, so the actual thunk logic needs to go into a closure + // implementation function. 
+ if (constant.hasAsync()) { + f = SILGenFunction(*this, *f, dc).emitNativeAsyncToForeignThunk(constant); + } + SILGenFunction(*this, *f, dc).emitNativeToForeignThunk(constant); + postEmitFunction(constant, f); return; } @@ -2097,6 +2137,20 @@ static void transferSpecializeAttributeTargets(SILGenModule &SGM, SILModule &M, } void SILGenModule::visitImportDecl(ImportDecl *import) { + // Importing `@_specializet(targetFunction: otherFunc)` only supported in + // experimental pre-specialization mode. + if (!getASTContext().LangOpts.EnableExperimentalPrespecialization) + return; + + // TODO: this horrible full AST deserializing walk should be replaced by a + // 'single place' to lookup those declarations in the module + // E.g + // prespecializations { + // extension Array { + // @_specialize(exported: true, targetFunction: other(_:), T == Int) + // func prespecialzie_other() {} + // } + // } auto *module = import->getModule(); if (module->isNonSwiftModule()) return; diff --git a/lib/SILGen/SILGen.h b/lib/SILGen/SILGen.h index 20f02bf40babb..6ec39aaa1741c 100644 --- a/lib/SILGen/SILGen.h +++ b/lib/SILGen/SILGen.h @@ -119,9 +119,15 @@ class LLVM_LIBRARY_VISIBILITY SILGenModule : public ASTVisitor { Optional NSErrorConformanceToError; + Optional RunChildTask; + Optional TaskFutureGet; + Optional TaskFutureGetThrowing; + + Optional RunTaskForBridgedAsyncMethod; Optional ResumeUnsafeContinuation; Optional ResumeUnsafeThrowingContinuation; Optional ResumeUnsafeThrowingContinuationWithError; + Optional RunAsyncHandler; public: SILGenModule(SILModule &M, ModuleDecl *SM); @@ -317,7 +323,7 @@ class LLVM_LIBRARY_VISIBILITY SILGenModule : public ASTVisitor { /// Emits a thunk from a Swift function to the native Swift convention. 
void emitNativeToForeignThunk(SILDeclRef thunk); - + void preEmitFunction(SILDeclRef constant, SILFunction *F, SILLocation L); void postEmitFunction(SILDeclRef constant, SILFunction *F); @@ -473,12 +479,25 @@ class LLVM_LIBRARY_VISIBILITY SILGenModule : public ASTVisitor { /// Retrieve the conformance of NSError to the Error protocol. ProtocolConformance *getNSErrorConformanceToError(); + /// Retrieve the _Concurrency._runChildTask intrinsic. + FuncDecl *getRunChildTask(); + + /// Retrieve the _Concurrency._taskFutureGet intrinsic. + FuncDecl *getTaskFutureGet(); + + /// Retrieve the _Concurrency._taskFutureGetThrowing intrinsic. + FuncDecl *getTaskFutureGetThrowing(); + /// Retrieve the _Concurrency._resumeUnsafeContinuation intrinsic. FuncDecl *getResumeUnsafeContinuation(); /// Retrieve the _Concurrency._resumeUnsafeThrowingContinuation intrinsic. FuncDecl *getResumeUnsafeThrowingContinuation(); /// Retrieve the _Concurrency._resumeUnsafeThrowingContinuationWithError intrinsic. FuncDecl *getResumeUnsafeThrowingContinuationWithError(); + /// Retrieve the _Concurrency._runAsyncHandler intrinsic. + FuncDecl *getRunAsyncHandler(); + /// Retrieve the _Concurrency._runTaskForBridgedAsyncMethod intrinsic. + FuncDecl *getRunTaskForBridgedAsyncMethod(); SILFunction *getKeyPathProjectionCoroutine(bool isReadAccess, KeyPathTypeKind typeKind); diff --git a/lib/SILGen/SILGenApply.cpp b/lib/SILGen/SILGenApply.cpp index 147fd3ae7d7f3..12477f287b1a8 100644 --- a/lib/SILGen/SILGenApply.cpp +++ b/lib/SILGen/SILGenApply.cpp @@ -546,6 +546,10 @@ class Callee { return cast(Constant.getDecl()); } + ValueDecl *getDecl() { + return Constant.getDecl(); + } + CalleeTypeInfo createCalleeTypeInfo(SILGenFunction &SGF, Optional constant, SILType formalFnType) const & { @@ -3646,6 +3650,7 @@ class CallEmission { Callee callee; FormalEvaluationScope initialWritebackScope; unsigned expectedSiteCount; + bool implicitlyAsync; public: /// Create an emission for a call of the given callee. 
@@ -3653,7 +3658,8 @@ class CallEmission { FormalEvaluationScope &&writebackScope) : SGF(SGF), callee(std::move(callee)), initialWritebackScope(std::move(writebackScope)), - expectedSiteCount(callee.getParameterListCount()) {} + expectedSiteCount(callee.getParameterListCount()), + implicitlyAsync(false) {} /// A factory method for decomposing the apply expr \p e into a call /// emission. @@ -3688,6 +3694,11 @@ class CallEmission { return (callee.kind == Callee::Kind::EnumElement); } + /// Sets a flag that indicates whether this call be treated as being + /// implicitly async, i.e., it requires a hop_to_executor prior to + /// invoking the sync callee, etc. + void setImplicitlyAsync(bool flag) { implicitlyAsync = flag; } + CleanupHandle applyCoroutine(SmallVectorImpl &yields); RValue apply(SGFContext C = SGFContext()) { @@ -3912,11 +3923,15 @@ RValue CallEmission::applyNormalCall(SGFContext C) { auto mv = callee.getFnValue(SGF, borrowedSelf); + Optional calleeDeclInfo; + if (implicitlyAsync) + calleeDeclInfo = callee.getDecl(); + // Emit the uncurried call. 
return SGF.emitApply( std::move(resultPlan), std::move(argScope), uncurriedLoc.getValue(), mv, callee.getSubstitutions(), uncurriedArgs, calleeTypeInfo, options, - uncurriedContext); + uncurriedContext, calleeDeclInfo); } static void emitPseudoFunctionArguments(SILGenFunction &SGF, @@ -4224,6 +4239,8 @@ CallEmission CallEmission::forApplyExpr(SILGenFunction &SGF, ApplyExpr *e) { emission.addCallSite(apply.callSite, std::move(preparedArgs), apply.callSite->throws()); + + emission.setImplicitlyAsync(apply.callSite->implicitlyAsync()); } return emission; @@ -4266,6 +4283,31 @@ bool SILGenModule::isNonMutatingSelfIndirect(SILDeclRef methodRef) { return self.isFormalIndirect(); } +Optional SILGenFunction::EmitLoadActorExecutorForCallee( + SILGenFunction *SGF, + ValueDecl *calleeVD, + ArrayRef args) { + if (auto *funcDecl = dyn_cast_or_null(calleeVD)) { + auto actorIso = getActorIsolation(funcDecl); + switch (actorIso.getKind()) { + case ActorIsolation::Unspecified: + case ActorIsolation::Independent: + case ActorIsolation::IndependentUnsafe: + break; + + case ActorIsolation::ActorInstance: { + assert(args.size() > 0 && "no self argument for actor-instance call?"); + auto calleeSelf = args.back(); + return calleeSelf.borrow(*SGF, SGF->F.getLocation()).getValue(); + } + + case ActorIsolation::GlobalActor: + return SGF->emitLoadGlobalActorExecutor(actorIso.getGlobalActor()); + } + } + return None; +} + //===----------------------------------------------------------------------===// // Top Level Entrypoints //===----------------------------------------------------------------------===// @@ -4279,7 +4321,8 @@ RValue SILGenFunction::emitApply(ResultPlanPtr &&resultPlan, ManagedValue fn, SubstitutionMap subs, ArrayRef args, const CalleeTypeInfo &calleeTypeInfo, - ApplyOptions options, SGFContext evalContext) { + ApplyOptions options, SGFContext evalContext, + Optional implicitlyAsyncApply) { auto substFnType = calleeTypeInfo.substFnType; auto substResultType = 
calleeTypeInfo.substResultType; @@ -4365,15 +4408,31 @@ RValue SILGenFunction::emitApply(ResultPlanPtr &&resultPlan, subs.getGenericSignature().getCanonicalSignature()); } - auto rawDirectResult = [&] { + // The presence of `implicitlyAsyncApply` indicates that the callee is a + // synchronous function isolated to an actor other than our own. + // Such functions require the caller to hop to the callee's executor + // prior to invoking the callee. + if (implicitlyAsyncApply.hasValue()) { + assert(F.isAsync() && "cannot hop_to_executor in a non-async func!"); + + auto calleeVD = implicitlyAsyncApply.getValue(); + auto maybeExecutor = EmitLoadActorExecutorForCallee(this, calleeVD, args); + + assert(maybeExecutor.hasValue()); + B.createHopToExecutor(loc, maybeExecutor.getValue()); + } + + SILValue rawDirectResult; + { SmallVector rawDirectResults; emitRawApply(*this, loc, fn, subs, args, substFnType, options, indirectResultAddrs, rawDirectResults); assert(rawDirectResults.size() == 1); - return rawDirectResults[0]; - }(); + rawDirectResult = rawDirectResults[0]; + } - if (substFnType->isAsync()) + // hop back to the current executor + if (substFnType->isAsync() || implicitlyAsyncApply.hasValue()) emitHopToCurrentExecutor(loc); // Pop the argument scope. 
@@ -4482,7 +4541,7 @@ RValue SILGenFunction::emitMonomorphicApply( *this, calleeTypeInfo, loc, evalContext); ArgumentScope argScope(*this, loc); return emitApply(std::move(resultPlan), std::move(argScope), loc, fn, {}, - args, calleeTypeInfo, options, evalContext); + args, calleeTypeInfo, options, evalContext, None); } /// Emit either an 'apply' or a 'try_apply', with the error branch of @@ -4768,7 +4827,7 @@ SILGenFunction::emitApplyOfLibraryIntrinsic(SILLocation loc, ResultPlanBuilder::computeResultPlan(*this, calleeTypeInfo, loc, ctx); ArgumentScope argScope(*this, loc); return emitApply(std::move(resultPlan), std::move(argScope), loc, mv, subMap, - finalArgs, calleeTypeInfo, ApplyOptions::None, ctx); + finalArgs, calleeTypeInfo, ApplyOptions::None, ctx, None); } StringRef SILGenFunction::getMagicFunctionString() { @@ -5671,6 +5730,72 @@ SILGenFunction::emitCoroutineAccessor(SILLocation loc, SILDeclRef accessor, return endApplyHandle; } +ManagedValue SILGenFunction::emitRunChildTask( + SILLocation loc, Type functionType, ManagedValue taskFunction) { + auto runChildTaskFn = SGM.getRunChildTask(); + + Type resultType = functionType->castTo()->getResult(); + Type replacementTypes[] = {resultType}; + auto subs = SubstitutionMap::get(runChildTaskFn->getGenericSignature(), + replacementTypes, + ArrayRef{}); + + CanType origParamType = runChildTaskFn->getParameters()->get(0) + ->getInterfaceType()->getCanonicalType(); + CanType substParamType = origParamType.subst(subs)->getCanonicalType(); + + // Ensure that the closure has the appropriate type. 
+ AbstractionPattern origParam( + runChildTaskFn->getGenericSignature().getCanonicalSignature(), + origParamType); + taskFunction = emitSubstToOrigValue( + loc, taskFunction, origParam, substParamType); + + return emitApplyOfLibraryIntrinsic( + loc, runChildTaskFn, subs, {taskFunction}, SGFContext() + ).getScalarValue(); +} + +ManagedValue SILGenFunction::emitCancelAsyncTask( + SILLocation loc, SILValue task) { + ASTContext &ctx = getASTContext(); + auto apply = B.createBuiltin( + loc, + ctx.getIdentifier(getBuiltinName(BuiltinValueKind::CancelAsyncTask)), + getLoweredType(ctx.TheEmptyTupleType), SubstitutionMap(), + { task }); + return ManagedValue::forUnmanaged(apply); +} + +void SILGenFunction::completeAsyncLetChildTask( + PatternBindingDecl *patternBinding, unsigned index) { + SILValue childTask; + bool isThrowing; + std::tie(childTask, isThrowing)= AsyncLetChildTasks[{patternBinding, index}]; + + Type childResultType = patternBinding->getPattern(index)->getType(); + + auto taskFutureGetFn = isThrowing + ? SGM.getTaskFutureGetThrowing() + : SGM.getTaskFutureGet(); + + // Get the result from the future. + Type replacementTypes[] = {childResultType}; + auto subs = SubstitutionMap::get(taskFutureGetFn->getGenericSignature(), + replacementTypes, + ArrayRef{}); + RValue childResult = emitApplyOfLibraryIntrinsic( + SILLocation(patternBinding), taskFutureGetFn, subs, + { ManagedValue::forBorrowedObjectRValue(childTask) }, + SGFContext()); + + // Write the child result into the pattern variables. + emitAssignToPatternVars( + SILLocation(patternBinding), patternBinding->getPattern(index), + std::move(childResult)); +} + + // Create a partial application of a dynamic method, applying bridging thunks // if necessary. 
static ManagedValue emitDynamicPartialApply(SILGenFunction &SGF, diff --git a/lib/SILGen/SILGenBridging.cpp b/lib/SILGen/SILGenBridging.cpp index 77fdb8be50b92..ac21adb1a27b2 100644 --- a/lib/SILGen/SILGenBridging.cpp +++ b/lib/SILGen/SILGenBridging.cpp @@ -15,6 +15,7 @@ #include "RValue.h" #include "ResultPlan.h" #include "SILGenFunction.h" +#include "SILGenFunctionBuilder.h" #include "Scope.h" #include "swift/AST/DiagnosticsSIL.h" #include "swift/AST/ExistentialLayout.h" @@ -243,7 +244,7 @@ emitBridgeObjectiveCToNative(SILGenFunction &SGF, SGF.emitApply(std::move(resultPlan), std::move(argScope), loc, ManagedValue::forUnmanaged(witnessRef), subs, {objcValue, ManagedValue::forUnmanaged(metatypeValue)}, - calleeTypeInfo, ApplyOptions::None, context); + calleeTypeInfo, ApplyOptions::None, context, None); return std::move(result).getAsSingleValue(SGF, loc); } @@ -1298,7 +1299,9 @@ static SILFunctionType *emitObjCThunkArguments(SILGenFunction &SGF, SILDeclRef thunk, SmallVectorImpl &args, SILValue &foreignErrorSlot, + SILValue &foreignAsyncSlot, Optional &foreignError, + Optional &foreignAsync, CanType &nativeFormalResultTy, CanType &bridgedFormalResultTy) { SILDeclRef native = thunk.asForeign(false); @@ -1321,13 +1324,12 @@ static SILFunctionType *emitObjCThunkArguments(SILGenFunction &SGF, SmallVector bridgedArgs; bridgedArgs.reserve(objcFnTy->getParameters().size()); - SILFunction *orig = SGF.SGM.getFunction(native, NotForDefinition); - - // Find the foreign error convention if we have one. - if (orig->getLoweredFunctionType()->hasErrorResult()) { - auto func = cast(thunk.getDecl()); - foreignError = func->getForeignErrorConvention(); - assert(foreignError && "couldn't find foreign error convention!"); + // Find the foreign error and async conventions if we have one. 
+ if (thunk.hasDecl()) { + if (auto func = dyn_cast(thunk.getDecl())) { + foreignError = func->getForeignErrorConvention(); + foreignAsync = func->getForeignAsyncConvention(); + } } // We don't know what to do with indirect results from the Objective-C side. @@ -1346,18 +1348,24 @@ static SILFunctionType *emitObjCThunkArguments(SILGenFunction &SGF, assert(nativeInputs.size() == bridgedFormalTypes.size()); assert(nativeInputs.size() == nativeFormalTypes.size()); assert(inputs.size() == - nativeInputs.size() + unsigned(foreignError.hasValue())); + nativeInputs.size() + unsigned(foreignError.hasValue()) + + unsigned(foreignAsync.hasValue())); for (unsigned i = 0, e = inputs.size(); i < e; ++i) { SILType argTy = SGF.getSILType(inputs[i], objcFnTy); SILValue arg = SGF.F.begin()->createFunctionArgument(argTy); - // If this parameter is the foreign error slot, pull it out. + // If this parameter is the foreign error or completion slot, pull it out. // It does not correspond to a native argument. if (foreignError && i == foreignError->getErrorParameterIndex()) { foreignErrorSlot = arg; continue; } + if (foreignAsync && i == foreignAsync->completionHandlerParamIndex()) { + foreignAsyncSlot = arg; + continue; + } + // If the argument is a block, copy it. 
if (argTy.isBlockPointerCompatible()) { auto copy = SGF.B.createCopyBlock(loc, arg); @@ -1377,8 +1385,10 @@ static SILFunctionType *emitObjCThunkArguments(SILGenFunction &SGF, bridgedArgs.push_back(managedArg); } - assert(bridgedArgs.size() + unsigned(foreignError.hasValue()) - == objcFnTy->getParameters().size() && + assert(bridgedArgs.size() + + unsigned(foreignError.hasValue()) + + unsigned(foreignAsync.hasValue()) + == objcFnTy->getParameters().size() && "objc inputs don't match number of arguments?!"); assert(bridgedArgs.size() == swiftFnTy->getParameters().size() && "swift inputs don't match number of arguments?!"); @@ -1386,13 +1396,6 @@ static SILFunctionType *emitObjCThunkArguments(SILGenFunction &SGF, "didn't find foreign error slot"); // Bridge the input types. - - // FIXME: We really want alloc_stacks to outlive this scope, because - // bridging id-to-Any requires allocating an Any which gets passed to - // the native entry point. - - // Scope scope(gen.Cleanups, CleanupLocation::get(loc)); - assert(bridgedArgs.size() == nativeInputs.size()); for (unsigned i = 0, size = bridgedArgs.size(); i < size; ++i) { // Consider the bridged values to be "call results" since they're coming @@ -1428,6 +1431,86 @@ static SILFunctionType *emitObjCThunkArguments(SILGenFunction &SGF, return objcFnTy; } +SILFunction *SILGenFunction::emitNativeAsyncToForeignThunk(SILDeclRef thunk) { + assert(thunk.isForeign); + assert(thunk.hasAsync()); + SILDeclRef native = thunk.asForeign(false); + + // Use the same generic environment as the native entry point. + F.setGenericEnvironment(SGM.Types.getConstantGenericEnvironment(native)); + + // Collect the arguments and make copies of them we can absorb into the + // closure. 
+ auto subs = F.getForwardingSubstitutionMap(); + SmallVector closureArgs; + auto objcInfo = + SGM.Types.getConstantInfo(getTypeExpansionContext(), thunk); + auto objcFnTy = objcInfo.SILFnType->substGenericArgs( + SGM.M, subs, getTypeExpansionContext()); + auto loc = thunk.getAsRegularLocation(); + loc.markAutoGenerated(); + + Scope scope(*this, loc); + + for (auto input : objcFnTy->getParameters()) { + SILType argTy = getSILType(input, objcFnTy); + SILValue arg = F.begin()->createFunctionArgument(argTy); + + if (!input.isConsumed()) { + arg = emitObjCUnconsumedArgument(*this, loc, arg); + } + auto managedArg = emitManagedRValueWithCleanup(arg); + closureArgs.push_back(managedArg.forward(*this)); + } + + // Create the closure implementation function. It has the same signature, + // but is just swiftcc and async. + auto closureExtInfo = objcFnTy->getExtInfo().intoBuilder() + .withRepresentation(SILFunctionTypeRepresentation::Thin) + .withAsync() + .build(); + auto closureTy = objcFnTy->getWithExtInfo(closureExtInfo); + + SmallString<64> closureName(F.getName().begin(), F.getName().end()); + // Trim off the thunk suffix and mangle this like a closure nested inside the + // thunk (which it sorta is) + char thunkSuffix[2] = {closureName.pop_back_val(), + closureName.pop_back_val()}; + assert(thunkSuffix[1] == 'T' + && thunkSuffix[0] == 'o' + && "not an objc thunk?"); + closureName += "yyYcfU_"; // closure with type () async -> () + closureName.push_back(thunkSuffix[1]); + closureName.push_back(thunkSuffix[0]); + + SILGenFunctionBuilder fb(SGM); + auto closure = fb.getOrCreateSharedFunction(loc, closureName, + closureTy, + IsBare, + IsNotTransparent, + F.isSerialized(), + ProfileCounter(), + IsThunk, + IsNotDynamic); + + auto closureRef = B.createFunctionRef(loc, closure); + + auto closureVal = B.createPartialApply(loc, closureRef, subs, + closureArgs, + ParameterConvention::Direct_Guaranteed); + auto closureMV = emitManagedRValueWithCleanup(closureVal); + // Pass the 
closure on to the intrinsic to spawn it on a task. + auto spawnTask = SGM.getRunTaskForBridgedAsyncMethod(); + emitApplyOfLibraryIntrinsic(loc, spawnTask, {}, closureMV, SGFContext()); + + scope.pop(); + + // Return void to the immediate caller. + B.createReturn(loc, SILUndef::get(SGM.Types.getEmptyTupleType(), F)); + + return closure; +} + void SILGenFunction::emitNativeToForeignThunk(SILDeclRef thunk) { assert(thunk.isForeign); SILDeclRef native = thunk.asForeign(false); @@ -1513,10 +1596,12 @@ void SILGenFunction::emitNativeToForeignThunk(SILDeclRef thunk) { // Bridge the arguments. Optional foreignError; - SILValue foreignErrorSlot; + Optional foreignAsync; + SILValue foreignErrorSlot, foreignAsyncSlot; CanType nativeFormalResultType, bridgedFormalResultType; auto objcFnTy = emitObjCThunkArguments(*this, loc, thunk, args, - foreignErrorSlot, foreignError, + foreignErrorSlot, foreignAsyncSlot, + foreignError, foreignAsync, nativeFormalResultType, bridgedFormalResultType); @@ -1548,7 +1633,75 @@ void SILGenFunction::emitNativeToForeignThunk(SILDeclRef thunk) { SILValue nativeFn = emitGlobalFunctionRef(loc, native, nativeInfo); SILValue result; - assert(foreignError.hasValue() == substTy->hasErrorResult()); + + // Helper function to pass a native async function's result as arguments to + // the ObjC completion handler block. 
+ auto passResultToCompletionHandler = [&](SILValue result) -> SILValue { + Scope completionArgScope(*this, loc); + + SmallVector completionHandlerArgs; + auto completionTy = foreignAsyncSlot->getType().castTo(); + + auto asyncResult = emitManagedRValueWithCleanup(result); + + auto pushArg = [&](ManagedValue arg, + CanType nativeFormalTy, + SILParameterInfo param) { + auto bridgedTy = param.getInterfaceType(); + auto bridgedArg = emitNativeToBridgedValue(loc, + arg, nativeFormalTy, + bridgedTy, + SILType::getPrimitiveObjectType(bridgedTy)); + completionHandlerArgs.push_back(bridgedArg.borrow(*this, loc).getValue()); + }; + + auto errorParamIndex = foreignAsync->completionHandlerErrorParamIndex(); + auto pushErrorPlaceholder = [&]{ + auto errorArgTy = completionTy->getParameters()[*errorParamIndex] + .getSILStorageInterfaceType(); + + // Error type must be optional. We pass nil for a successful return + auto none = B.createOptionalNone(loc, errorArgTy); + completionHandlerArgs.push_back(none); + }; + + unsigned numResults + = completionTy->getParameters().size() - errorParamIndex.hasValue(); + + if (numResults == 1) { + if (errorParamIndex && *errorParamIndex == 0) { + pushErrorPlaceholder(); + } + pushArg(asyncResult, + nativeFormalResultType, + completionTy->getParameters()[completionHandlerArgs.size()]); + + if (errorParamIndex && *errorParamIndex == 1) { + pushErrorPlaceholder(); + } + } else { + // A tuple return maps to multiple completion handler parameters. 
+ auto formalTuple = cast(nativeFormalResultType); + + for (unsigned paramI : indices(completionTy->getParameters())) { + if (errorParamIndex && paramI == *errorParamIndex) { + pushErrorPlaceholder(); + continue; + } + auto elementI = paramI - (errorParamIndex && paramI > *errorParamIndex); + auto param = completionTy->getParameters()[paramI]; + auto formalTy = formalTuple.getElementType(elementI); + auto argPiece = B.createTupleExtract(loc, asyncResult, elementI); + pushArg(argPiece, formalTy, param); + } + } + // Pass the bridged results on to the completion handler. + B.createApply(loc, foreignAsyncSlot, {}, completionHandlerArgs); + + // The immediate function result is an empty tuple. + return SILUndef::get(SGM.Types.getEmptyTupleType(), F); + }; + if (!substTy->hasErrorResult()) { // Create the apply. result = B.createApply(loc, nativeFn, subs, args); @@ -1563,8 +1716,14 @@ void SILGenFunction::emitNativeToForeignThunk(SILDeclRef thunk) { argScope.pop(); // Now bridge the return value. - result = emitBridgeReturnValue(*this, loc, result, nativeFormalResultType, - bridgedFormalResultType, objcResultTy); + // If this is an async method, we forward the results of the async call to + // the completion handler. + if (foreignAsync) { + result = passResultToCompletionHandler(result); + } else { + result = emitBridgeReturnValue(*this, loc, result, nativeFormalResultType, + bridgedFormalResultType, objcResultTy); + } } else { SILBasicBlock *contBB = createBasicBlock(); SILBasicBlock *errorBB = createBasicBlock(); @@ -1581,17 +1740,24 @@ void SILGenFunction::emitNativeToForeignThunk(SILDeclRef thunk) { assert(substTy->getNumResults() == 1); nativeResult = args[0]; } - - // In this branch, the eventual return value is mostly created - // by bridging the native return value, but we may need to - // adjust it slightly. 
- SILValue bridgedResult = - emitBridgeReturnValueForForeignError(loc, nativeResult, - nativeFormalResultType, - bridgedFormalResultType, - objcResultTy, - foreignErrorSlot, *foreignError); - B.createBranch(loc, contBB, bridgedResult); + + if (foreignAsync) { + // If the function is async, pass the results as the success argument(s) + // to the completion handler, with a nil error. + passResultToCompletionHandler(nativeResult); + B.createBranch(loc, contBB); + } else { + // In this branch, the eventual return value is mostly created + // by bridging the native return value, but we may need to + // adjust it slightly. + SILValue bridgedResult = + emitBridgeReturnValueForForeignError(loc, nativeResult, + nativeFormalResultType, + bridgedFormalResultType, + objcResultTy, + foreignErrorSlot, *foreignError); + B.createBranch(loc, contBB, bridgedResult); + } } // Emit the error destination. @@ -1601,17 +1767,69 @@ void SILGenFunction::emitNativeToForeignThunk(SILDeclRef thunk) { substConv.getSILErrorType(getTypeExpansionContext()), OwnershipKind::Owned); - // In this branch, the eventual return value is mostly invented. - // Store the native error in the appropriate location and return. - SILValue bridgedResult = - emitBridgeErrorForForeignError(loc, nativeError, objcResultTy, - foreignErrorSlot, *foreignError); - B.createBranch(loc, contBB, bridgedResult); + if (foreignAsync) { + // If the function is async, pass the bridged error along to the + // completion handler, with dummy values for the other argument(s). 
+ Scope completionArgScope(*this, loc); + + SmallVector completionHandlerArgs; + auto completionTy = foreignAsyncSlot->getType().castTo(); + auto errorParamIndex = *foreignAsync->completionHandlerErrorParamIndex(); + auto completionErrorTy = completionTy->getParameters()[errorParamIndex] + .getInterfaceType(); + auto bridgedError = emitNativeToBridgedError(loc, + emitManagedRValueWithCleanup(nativeError), + nativeError->getType().getASTType(), + completionErrorTy); + + // Fill in placeholder arguments, and put the bridged error in its + // rightful place. + for (unsigned i : indices(completionTy->getParameters())) { + if (i == errorParamIndex) { + completionHandlerArgs.push_back(bridgedError.borrow(*this, loc).getValue()); + continue; + } + + // For non-error arguments, pass a placeholder. + // If the argument type is non-trivial, it must be Optional, and + // we pass nil. + auto param = completionTy->getParameters()[i]; + auto paramTy = param.getSILStorageInterfaceType(); + if (paramTy.isTrivial(F)) { + // If it's trivial, the value passed doesn't matter. + completionHandlerArgs.push_back(SILUndef::get(paramTy, F.getModule())); + } else { + // If it's not trivial, it must be a nullable class type. Pass + // nil. + auto none = B.createOptionalNone(loc, paramTy); + completionHandlerArgs.push_back(none); + } + } + // Pass the bridged results on to the completion handler. + B.createApply(loc, foreignAsyncSlot, {}, completionHandlerArgs); + completionArgScope.pop(); + + B.createBranch(loc, contBB); + } else { + // In this branch, the eventual return value is mostly invented. + // Store the native error in the appropriate location and return. + SILValue bridgedResult = + emitBridgeErrorForForeignError(loc, nativeError, objcResultTy, + foreignErrorSlot, *foreignError); + B.createBranch(loc, contBB, bridgedResult); + } } // Emit the join block. 
B.emitBlock(contBB); - result = contBB->createPhiArgument(objcResultTy, OwnershipKind::Owned); + + if (foreignAsync) { + // After invoking the completion handler, our immediate return value is + // void. + result = SILUndef::get(SGM.Types.getEmptyTupleType(), F); + } else { + result = contBB->createPhiArgument(objcResultTy, OwnershipKind::Owned); + } // Leave the scope now. argScope.pop(); @@ -1875,7 +2093,7 @@ void SILGenFunction::emitForeignToNativeThunk(SILDeclRef thunk) { ManagedValue resultMV = emitApply(std::move(resultPlan), std::move(argScope), fd, ManagedValue::forUnmanaged(fn), subs, args, - calleeTypeInfo, ApplyOptions::None, context) + calleeTypeInfo, ApplyOptions::None, context, None) .getAsSingleValue(*this, fd); if (indirectResult) { diff --git a/lib/SILGen/SILGenBuilder.cpp b/lib/SILGen/SILGenBuilder.cpp index 5c9690cf8f2dc..72fa28a2bd254 100644 --- a/lib/SILGen/SILGenBuilder.cpp +++ b/lib/SILGen/SILGenBuilder.cpp @@ -447,7 +447,6 @@ static ManagedValue createInputFunctionArgument(SILGenBuilder &B, SILType type, return ManagedValue::forLValue(arg); case SILArgumentConvention::Indirect_In_Constant: llvm_unreachable("Convention not produced by SILGen"); - case SILArgumentConvention::Direct_Deallocating: case SILArgumentConvention::Indirect_Out: llvm_unreachable("unsupported convention for API"); } diff --git a/lib/SILGen/SILGenBuiltin.cpp b/lib/SILGen/SILGenBuiltin.cpp index ce45359885199..8cbd539e2a5f6 100644 --- a/lib/SILGen/SILGenBuiltin.cpp +++ b/lib/SILGen/SILGenBuiltin.cpp @@ -1409,14 +1409,7 @@ static ManagedValue emitBuiltinGetCurrentAsyncTask( static ManagedValue emitBuiltinCancelAsyncTask( SILGenFunction &SGF, SILLocation loc, SubstitutionMap subs, ArrayRef args, SGFContext C) { - ASTContext &ctx = SGF.getASTContext(); - auto argument = args[0].borrow(SGF, loc).forward(SGF); - auto apply = SGF.B.createBuiltin( - loc, - ctx.getIdentifier(getBuiltinName(BuiltinValueKind::CancelAsyncTask)), - SGF.getLoweredType(ctx.TheEmptyTupleType), 
SubstitutionMap(), - { argument }); - return ManagedValue::forUnmanaged(apply); + return SGF.emitCancelAsyncTask(loc, args[0].borrow(SGF, loc).forward(SGF)); } // Emit SIL for the named builtin: createAsyncTask. @@ -1446,7 +1439,7 @@ static ManagedValue emitBuiltinCreateAsyncTaskFuture( // Form the metatype of the result type. CanType futureResultType = Type( - MetatypeType::get(GenericTypeParamType::get(0, 0, SGF.getASTContext()))) + MetatypeType::get(GenericTypeParamType::get(0, 0, SGF.getASTContext()), MetatypeRepresentation::Thick)) .subst(subs)->getCanonicalType(); CanType anyTypeType = ExistentialMetatypeType::get( ProtocolCompositionType::get(ctx, { }, false))->getCanonicalType(); @@ -1469,6 +1462,127 @@ static ManagedValue emitBuiltinCreateAsyncTaskFuture( return SGF.emitManagedRValueWithCleanup(apply); } +// Shared implementation of withUnsafeContinuation and +// withUnsafe[Throwing]Continuation. +static ManagedValue emitBuiltinWithUnsafeContinuation( + SILGenFunction &SGF, SILLocation loc, SubstitutionMap subs, + ArrayRef args, SGFContext C, bool throws) { + // Allocate space to receive the resume value when the continuation is + // resumed. + auto substResultType = subs.getReplacementTypes()[0]->getCanonicalType(); + auto opaqueResumeType = SGF.getLoweredType(AbstractionPattern::getOpaque(), + substResultType); + auto resumeBuf = SGF.emitTemporaryAllocation(loc, opaqueResumeType); + + // Capture the current continuation. + auto continuation = SGF.B.createGetAsyncContinuationAddr(loc, resumeBuf, + substResultType, + throws); + + // Get the callee value. + auto substFnType = args[0].getType().castTo(); + SILValue fnValue = (substFnType->isCalleeConsumed() + ? args[0].forward(SGF) + : args[0].getValue()); + + // Call the provided function value. + SGF.B.createApply(loc, fnValue, {}, {continuation}); + + // Await the continuation. 
+ SILBasicBlock *resumeBlock = SGF.createBasicBlock(); + SILBasicBlock *errorBlock = nullptr; + + if (throws) + errorBlock = SGF.createBasicBlock(FunctionSection::Postmatter); + + SGF.B.createAwaitAsyncContinuation(loc, continuation, resumeBlock, errorBlock); + + // Propagate an error if we have one. + if (throws) { + SGF.B.emitBlock(errorBlock); + + Scope errorScope(SGF, loc); + + auto errorTy = SGF.getASTContext().getErrorDecl()->getDeclaredType() + ->getCanonicalType(); + auto errorVal + = SGF.B.createOwnedPhiArgument(SILType::getPrimitiveObjectType(errorTy)); + + SGF.emitThrow(loc, errorVal, true); + } + + SGF.B.emitBlock(resumeBlock); + + // The incoming value is the maximally-abstracted result type of the + // continuation. Move it out of the resume buffer and reabstract it if + // necessary. + auto resumeResult = SGF.emitLoad(loc, resumeBuf, + AbstractionPattern::getOpaque(), + substResultType, + SGF.getTypeLowering(substResultType), + SGFContext(), IsTake); + + return resumeResult; +} + +// Emit SIL for the named builtin: withUnsafeContinuation +static ManagedValue emitBuiltinWithUnsafeContinuation( + SILGenFunction &SGF, SILLocation loc, SubstitutionMap subs, + ArrayRef args, SGFContext C) { + return emitBuiltinWithUnsafeContinuation(SGF, loc, subs, args, C, + /*throws=*/false); +} + +// Emit SIL for the named builtin: withUnsafeThrowingContinuation +static ManagedValue emitBuiltinWithUnsafeThrowingContinuation( + SILGenFunction &SGF, SILLocation loc, SubstitutionMap subs, + ArrayRef args, SGFContext C) { + return emitBuiltinWithUnsafeContinuation(SGF, loc, subs, args, C, + /*throws=*/true); +} + +static ManagedValue emitBuiltinAutoDiffCreateLinearMapContext( + SILGenFunction &SGF, SILLocation loc, SubstitutionMap subs, + ArrayRef args, SGFContext C) { + ASTContext &ctx = SGF.getASTContext(); + auto *builtinApply = SGF.B.createBuiltin( + loc, + ctx.getIdentifier( + getBuiltinName(BuiltinValueKind::AutoDiffCreateLinearMapContext)), + 
SILType::getNativeObjectType(ctx), + subs, + /*args*/ {args[0].getValue()}); + return SGF.emitManagedRValueWithCleanup(builtinApply); +} + +static ManagedValue emitBuiltinAutoDiffProjectTopLevelSubcontext( + SILGenFunction &SGF, SILLocation loc, SubstitutionMap subs, + ArrayRef args, SGFContext C) { + ASTContext &ctx = SGF.getASTContext(); + auto *builtinApply = SGF.B.createBuiltin( + loc, + ctx.getIdentifier( + getBuiltinName(BuiltinValueKind::AutoDiffProjectTopLevelSubcontext)), + SILType::getRawPointerType(ctx), + subs, + /*args*/ {args[0].borrow(SGF, loc).getValue()}); + return ManagedValue::forUnmanaged(builtinApply); +} + +static ManagedValue emitBuiltinAutoDiffAllocateSubcontext( + SILGenFunction &SGF, SILLocation loc, SubstitutionMap subs, + ArrayRef args, SGFContext C) { + ASTContext &ctx = SGF.getASTContext(); + auto *builtinApply = SGF.B.createBuiltin( + loc, + ctx.getIdentifier( + getBuiltinName(BuiltinValueKind::AutoDiffAllocateSubcontext)), + SILType::getRawPointerType(ctx), + subs, + /*args*/ {args[0].borrow(SGF, loc).getValue(), args[1].getValue()}); + return ManagedValue::forUnmanaged(builtinApply); +} + Optional SpecializedEmitter::forDecl(SILGenModule &SGM, SILDeclRef function) { // Only consider standalone declarations in the Builtin module. diff --git a/lib/SILGen/SILGenConstructor.cpp b/lib/SILGen/SILGenConstructor.cpp index 7e3209502fc5b..631fd1930213a 100644 --- a/lib/SILGen/SILGenConstructor.cpp +++ b/lib/SILGen/SILGenConstructor.cpp @@ -562,7 +562,7 @@ void SILGenFunction::emitEnumConstructor(EnumElementDecl *element) { bool Lowering::usesObjCAllocator(ClassDecl *theClass) { // If the root class was implemented in Objective-C, use Objective-C's // allocation methods because they may have been overridden. 
- return theClass->checkAncestry(AncestryFlags::ClangImported); + return theClass->usesObjCObjectModel(); } void SILGenFunction::emitClassConstructorAllocator(ConstructorDecl *ctor) { diff --git a/lib/SILGen/SILGenDecl.cpp b/lib/SILGen/SILGenDecl.cpp index 63560b2cb77e0..afd0d9a1d500b 100644 --- a/lib/SILGen/SILGenDecl.cpp +++ b/lib/SILGen/SILGenDecl.cpp @@ -428,8 +428,8 @@ class LetValueInitialization : public Initialization { auto &lowering = SGF.getTypeLowering(vd->getType()); // Decide whether we need a temporary stack buffer to evaluate this 'let'. - // There are three cases we need to handle here: parameters, initialized (or - // bound) decls, and uninitialized ones. + // There are four cases we need to handle here: parameters, initialized (or + // bound) decls, uninitialized ones, and async let declarations. bool needsTemporaryBuffer; bool isUninitialized = false; @@ -440,6 +440,13 @@ class LetValueInitialization : public Initialization { // buffer. DI will make sure it is only assigned to once. needsTemporaryBuffer = true; isUninitialized = true; + } else if (vd->isAsyncLet()) { + // If this is an async let, treat it like a let-value without an + // initializer. The initializer runs concurrently in a child task, + // and value will be initialized at the point the variable in the + // async let is used. + needsTemporaryBuffer = true; + isUninitialized = true; } else { // If this is a let with an initializer or bound value, we only need a // buffer if the type is address only. @@ -1130,12 +1137,53 @@ SILGenFunction::emitInitializationForVarDecl(VarDecl *vd, bool forceImmutable) { void SILGenFunction::emitPatternBinding(PatternBindingDecl *PBD, unsigned idx) { + + auto initialization = emitPatternBindingInitialization(PBD->getPattern(idx), JumpDest::invalid()); - // If an initial value expression was specified by the decl, emit it into - // the initialization. Otherwise, mark it uninitialized for DI to resolve. 
- if (auto *Init = PBD->getExecutableInit(idx)) { + // If this is an async let, create a child task to compute the initializer + // value. + if (PBD->isAsyncLet()) { + // Look through the implicit await (if present), try (if present), and + // call to reach the autoclosure that computes the value. + auto *init = PBD->getExecutableInit(idx); + if (auto awaitExpr = dyn_cast(init)) + init = awaitExpr->getSubExpr(); + if (auto tryExpr = dyn_cast(init)) + init = tryExpr->getSubExpr(); + init = cast(init)->getFn(); + assert(isa(init) && + "Could not find async let autoclosure"); + bool isThrowing = init->getType()->castTo()->isThrowing(); + + // Emit the closure for the child task. + SILValue childTask; + { + FullExpr Scope(Cleanups, CleanupLocation(init)); + SILLocation loc(PBD); + childTask = emitRunChildTask( + loc, + init->getType(), + emitRValue(init).getScalarValue() + ).forward(*this); + } + + // Destroy the task at the end of the scope. + enterDestroyCleanup(childTask); + + // Push a cleanup that will cancel the child task at the end of the scope. + enterCancelAsyncTaskCleanup(childTask); + + // Save the child task so we can await it as needed. + AsyncLetChildTasks[{PBD, idx}] = { childTask, isThrowing }; + + // Mark as uninitialized; actual initialization will occur when the + // variables are referenced. + initialization->finishUninitialized(*this); + } else if (auto *Init = PBD->getExecutableInit(idx)) { + // If an initial value expression was specified by the decl, emit it into + // the initialization. FullExpr Scope(Cleanups, CleanupLocation(Init)); auto *var = PBD->getSingleVar(); @@ -1155,6 +1203,7 @@ void SILGenFunction::emitPatternBinding(PatternBindingDecl *PBD, emitExprInto(Init, initialization.get(), SILLocation(PBD)); } else { + // Otherwise, mark it uninitialized for DI to resolve. 
initialization->finishUninitialized(*this); } } @@ -1413,6 +1462,35 @@ CleanupHandle SILGenFunction::enterDeinitExistentialCleanup( return Cleanups.getTopCleanup(); } +namespace { + /// A cleanup that cancels an asynchronous task. + class CancelAsyncTaskCleanup: public Cleanup { + SILValue task; + public: + CancelAsyncTaskCleanup(SILValue task) : task(task) { } + + void emit(SILGenFunction &SGF, CleanupLocation l, + ForUnwind_t forUnwind) override { + SILValue borrowedTask = SGF.B.createBeginBorrow(l, task); + SGF.emitCancelAsyncTask(l, borrowedTask); + SGF.B.createEndBorrow(l, borrowedTask); + } + + void dump(SILGenFunction &) const override { +#ifndef NDEBUG + llvm::errs() << "CancelAsyncTaskCleanup\n" + << "Task:" << task << "\n"; +#endif + } + }; +} // end anonymous namespace + +CleanupHandle SILGenFunction::enterCancelAsyncTaskCleanup(SILValue task) { + Cleanups.pushCleanupInState( + CleanupState::Active, task); + return Cleanups.getTopCleanup(); +} + /// Create a LocalVariableInitialization for the uninitialized var. 
InitializationPtr SILGenFunction::emitLocalVariableWithCleanup( VarDecl *vd, Optional kind, unsigned ArgNo) { diff --git a/lib/SILGen/SILGenDestructor.cpp b/lib/SILGen/SILGenDestructor.cpp index aecd9c83fdb5d..3a44058dd49c3 100644 --- a/lib/SILGen/SILGenDestructor.cpp +++ b/lib/SILGen/SILGenDestructor.cpp @@ -52,8 +52,9 @@ void SILGenFunction::emitDestroyingDestructor(DestructorDecl *dd) { SILValue resultSelfValue; SILType objectPtrTy = SILType::getNativeObjectType(F.getASTContext()); SILType classTy = selfValue->getType(); - if (cd->hasSuperclass()) { - Type superclassTy = dd->mapTypeIntoContext(cd->getSuperclass()); + if (cd->hasSuperclass() && !cd->isNativeNSObjectSubclass()) { + Type superclassTy = + dd->mapTypeIntoContext(cd->getSuperclass()); ClassDecl *superclass = superclassTy->getClassOrBoundGenericClass(); auto superclassDtorDecl = superclass->getDestructor(); SILDeclRef dtorConstant = diff --git a/lib/SILGen/SILGenExpr.cpp b/lib/SILGen/SILGenExpr.cpp index 50af4b0bc0acd..951290469df0e 100644 --- a/lib/SILGen/SILGenExpr.cpp +++ b/lib/SILGen/SILGenExpr.cpp @@ -2271,7 +2271,7 @@ SILGenFunction::emitApplyOfDefaultArgGenerator(SILLocation loc, captures); return emitApply(std::move(resultPtr), std::move(argScope), loc, fnRef, - subs, captures, calleeTypeInfo, ApplyOptions::None, C); + subs, captures, calleeTypeInfo, ApplyOptions::None, C, None); } RValue SILGenFunction::emitApplyOfStoredPropertyInitializer( @@ -2294,7 +2294,7 @@ RValue SILGenFunction::emitApplyOfStoredPropertyInitializer( ResultPlanBuilder::computeResultPlan(*this, calleeTypeInfo, loc, C); ArgumentScope argScope(*this, loc); return emitApply(std::move(resultPlan), std::move(argScope), loc, fnRef, - subs, {}, calleeTypeInfo, ApplyOptions::None, C); + subs, {}, calleeTypeInfo, ApplyOptions::None, C, None); } RValue RValueEmitter::visitDestructureTupleExpr(DestructureTupleExpr *E, @@ -3232,7 +3232,7 @@ getOrCreateKeyPathEqualsAndHash(SILGenModule &SGM, loc, 
ManagedValue::forUnmanaged(equalsWitness), equatableSub, {lhsArg, rhsArg, metatyValue}, - equalsInfo, ApplyOptions::None, SGFContext()) + equalsInfo, ApplyOptions::None, SGFContext(), None) .getUnmanagedSingleValue(subSGF, loc); } @@ -4478,7 +4478,7 @@ namespace { /// Top-level entrypoint. void emit(CanType destType, RValue &&src) { - visitTupleType(cast(destType), std::move(src)); + visit(destType, std::move(src)); assert(DestLVQueue.empty() && "didn't consume all l-values!"); } @@ -4588,6 +4588,87 @@ RValue RValueEmitter::visitAssignExpr(AssignExpr *E, SGFContext C) { return SGF.emitEmptyTupleRValue(E, C); } +namespace { + /// A visitor for creating a flattened list of LValues from a + /// pattern. + class PatternLValueEmitter + : public PatternVisitor { + + SILGenFunction &SGF; + + SGFAccessKind TheAccessKind; + + /// A flattened list of l-values. + SmallVectorImpl> &Results; + + public: + PatternLValueEmitter(SILGenFunction &SGF, SGFAccessKind accessKind, + SmallVectorImpl> &results) + : SGF(SGF), TheAccessKind(accessKind), Results(results) {} + +#define USE_SUBPATTERN(Kind) \ + Type visit##Kind##Pattern(Kind##Pattern *pattern) { \ + return visit(pattern->getSubPattern()); \ + } + + USE_SUBPATTERN(Paren) + USE_SUBPATTERN(Typed) + USE_SUBPATTERN(Binding) +#undef USE_SUBPATTERN + +#define PATTERN(Kind, Parent) +#define REFUTABLE_PATTERN(Kind, Parent) \ + Type visit##Kind##Pattern(Kind##Pattern *pattern) { \ + llvm_unreachable("No refutable patterns here"); \ + } +#include "swift/AST/PatternNodes.def" + + Type visitTuplePattern(TuplePattern *pattern) { + SmallVector tupleElts; + for (auto &element : pattern->getElements()) { + Type elementType = visit(element.getPattern()); + tupleElts.push_back( + TupleTypeElt(elementType, element.getLabel())); + } + + return TupleType::get(tupleElts, SGF.getASTContext()); + } + + Type visitNamedPattern(NamedPattern *pattern) { + Type type = LValueType::get(pattern->getDecl()->getType()); + auto declRef = new 
(SGF.getASTContext()) DeclRefExpr( + pattern->getDecl(), DeclNameLoc(), /*Implicit=*/true, + AccessSemantics::Ordinary, type); + + Results.push_back(SGF.emitLValue(declRef, TheAccessKind)); + + return type; + } + + Type visitAnyPattern(AnyPattern *pattern) { + // Discard the value at this position. + Results.push_back(None); + + return LValueType::get(pattern->getType()); + } + }; +} + +void SILGenFunction::emitAssignToPatternVars( + SILLocation loc, Pattern *destPattern, RValue &&src) { + FormalEvaluationScope writeback(*this); + + // Produce a flattened queue of LValues. + SmallVector, 4> destLVs; + CanType destType = PatternLValueEmitter( + *this, SGFAccessKind::Write, destLVs).visit(destPattern) + ->getCanonicalType(); + + // Recurse on the type of the destination, pulling LValues as + // needed from the queue we built up before. + TupleLValueAssigner(*this, loc, destLVs).emit(destType, std::move(src)); +} + void SILGenFunction::emitBindOptionalAddress(SILLocation loc, ManagedValue optAddress, unsigned depth) { diff --git a/lib/SILGen/SILGenFunction.cpp b/lib/SILGen/SILGenFunction.cpp index c9fa9e4b6b27e..08c5c1ff16c9e 100644 --- a/lib/SILGen/SILGenFunction.cpp +++ b/lib/SILGen/SILGenFunction.cpp @@ -517,11 +517,11 @@ void SILGenFunction::emitFunction(FuncDecl *fd) { fd->getResultInterfaceType(), fd->hasThrows(), fd->getThrowsLoc()); prepareEpilog(true, fd->hasThrows(), CleanupLocation(fd)); - if (fd->isAsyncHandler()) { - // Async handlers are need to have their bodies emitted into a - // detached task. - // FIXME: Actually implement these properly. - B.createBuiltinTrap(fd->getTypecheckedBody()); + if (fd->isAsyncHandler() && + // If F.isAsync() we are emitting the asyncHandler body and not the + // original asyncHandler. 
+ !F.isAsync()) { + emitAsyncHandler(fd); } else { emitStmt(fd->getTypecheckedBody()); } @@ -531,6 +531,58 @@ void SILGenFunction::emitFunction(FuncDecl *fd) { mergeCleanupBlocks(); } +/// An asyncHandler function is split into two functions: +/// 1. The asyncHandler body function: it contains the body of the function, but +/// is emitted as an async function. +/// 2. The original function: it just contains +/// _runAsyncHandler(operation: asyncHandlerBodyFunction) +void SILGenFunction::emitAsyncHandler(FuncDecl *fd) { + + // 1. step: create the asyncHandler body function + // + auto origFnTy = F.getLoweredFunctionType(); + assert(!F.isAsync() && "an asyncHandler function cannot be async"); + + // The body function type is the same as the original type, just with "async". + auto bodyFnTy = origFnTy->getWithExtInfo(origFnTy->getExtInfo().withAsync()); + + SILDeclRef constant(fd, SILDeclRef::Kind::Func); + std::string name = constant.mangle(SILDeclRef::ManglingKind::AsyncHandlerBody); + SILLocation loc = F.getLocation(); + SILGenFunctionBuilder builder(*this); + + SILFunction *bodyFn = builder.createFunction( + SILLinkage::Hidden, name, bodyFnTy, F.getGenericEnvironment(), + loc, F.isBare(), F.isTransparent(), + F.isSerialized(), IsNotDynamic, ProfileCounter(), IsNotThunk, + F.getClassSubclassScope(), F.getInlineStrategy(), F.getEffectsKind()); + bodyFn->setDebugScope(new (getModule()) SILDebugScope(loc, bodyFn)); + + SILGenFunction(SGM, *bodyFn, fd).emitFunction(fd); + + // 2. step: emit the original asyncHandler function + // + Scope scope(*this, loc); + + // %bodyFnRef = partial_apply %bodyFn(%originalArg0, %originalArg1, ...) 
+ // + SmallVector managedArgs; + for (SILValue arg : F.getArguments()) { + ManagedValue argVal = ManagedValue(arg, CleanupHandle::invalid()); + managedArgs.push_back(argVal.copy(*this, loc)); + } + auto *bodyFnRef = B.createFunctionRef(loc, bodyFn); + ManagedValue bodyFnValue = + B.createPartialApply(loc, bodyFnRef, F.getForwardingSubstitutionMap(), + managedArgs, ParameterConvention::Direct_Guaranteed); + + // apply %_runAsyncHandler(%bodyFnValue) + // + FuncDecl *asyncHandlerDecl = SGM.getRunAsyncHandler(); + emitApplyOfLibraryIntrinsic(loc, asyncHandlerDecl, SubstitutionMap(), + { bodyFnValue }, SGFContext()); +} + void SILGenFunction::emitClosure(AbstractClosureExpr *ace) { MagicFunctionName = SILGenModule::getMagicFunctionName(ace); @@ -866,8 +918,9 @@ void SILGenFunction::emitGeneratorFunction(SILDeclRef function, Expr *value, auto wrappedInfo = var->getPropertyWrapperBackingPropertyInfo(); auto param = params->get(0); auto *placeholder = wrappedInfo.wrappedValuePlaceholder; - opaqueValue.emplace(*this, placeholder->getOpaqueValuePlaceholder(), - maybeEmitValueOfLocalVarDecl(param)); + opaqueValue.emplace( + *this, placeholder->getOpaqueValuePlaceholder(), + maybeEmitValueOfLocalVarDecl(param, AccessKind::Read)); assert(value == wrappedInfo.initializeFromOriginal); } diff --git a/lib/SILGen/SILGenFunction.h b/lib/SILGen/SILGenFunction.h index ff33fc90ce49e..d15adf2843c72 100644 --- a/lib/SILGen/SILGenFunction.h +++ b/lib/SILGen/SILGenFunction.h @@ -397,7 +397,14 @@ class LLVM_LIBRARY_VISIBILITY SILGenFunction /// emitted. The map is queried to produce the lvalue for a DeclRefExpr to /// a local variable. llvm::DenseMap VarLocs; - + + /// Mapping from each async let clause to the child task that will produce + /// the initializer value for that clause and a Boolean value indicating + /// whether the task can throw. 
+ llvm::SmallDenseMap, + std::pair > + AsyncLetChildTasks; + /// When rebinding 'self' during an initializer delegation, we have to be /// careful to preserve the object at 1 retain count during the delegation /// because of assumptions in framework code. This enum tracks the state of @@ -616,6 +623,8 @@ class LLVM_LIBRARY_VISIBILITY SILGenFunction /// Generates code for a FuncDecl. void emitFunction(FuncDecl *fd); + /// Generate code for @asyncHandler functions. + void emitAsyncHandler(FuncDecl *fd); /// Emits code for a ClosureExpr. void emitClosure(AbstractClosureExpr *ce); /// Generates code for a class destroying destructor. This @@ -671,9 +680,15 @@ class LLVM_LIBRARY_VISIBILITY SILGenFunction /// Generates a thunk from a foreign function to the native Swift convention. void emitForeignToNativeThunk(SILDeclRef thunk); - /// Generates a thunk from a native function to the conventions. + /// Generates a thunk from a native function to foreign conventions. void emitNativeToForeignThunk(SILDeclRef thunk); - + /// Generates a stub that launches a detached task for running the NativeToForeignThunk of an + /// async native method. + /// + /// Returns the SILFunction created for the closure implementation function that is enqueued on the + /// new task. + SILFunction *emitNativeAsyncToForeignThunk(SILDeclRef thunk); + /// Generate a nullary function that returns the given value. /// If \p emitProfilerIncrement is set, emit a profiler increment for /// \p value. @@ -825,6 +840,27 @@ class LLVM_LIBRARY_VISIBILITY SILGenFunction void mergeCleanupBlocks(); + //===--------------------------------------------------------------------===// + // Concurrency + //===--------------------------------------------------------------------===// + + /// Generates code into the given SGF that obtains the callee function's + /// executor, if the function is actor-isolated. + /// @returns a SILValue representing the executor, if an executor exists. 
+ static Optional EmitLoadActorExecutorForCallee( + SILGenFunction *SGF, + ValueDecl *calleeVD, + ArrayRef args); + + /// Generates code to obtain the executor given the actor's decl. + /// @returns a SILValue representing the executor. + SILValue emitLoadActorExecutor(VarDecl *actorDecl); + + /// Generates the code to obtain the executor for the shared instance + /// of the \p globalActor based on the type. + /// @returns a SILValue representing the executor. + SILValue emitLoadGlobalActorExecutor(Type globalActor); + //===--------------------------------------------------------------------===// // Memory management //===--------------------------------------------------------------------===// @@ -843,10 +879,6 @@ class LLVM_LIBRARY_VISIBILITY SILGenFunction Type resultType, DeclContext *DC, bool throws, SourceLoc throwsLoc); - /// Initializes 'actor' with the loaded shared instance of the \p globalActor - /// type. - void loadGlobalActor(Type globalActor); - /// Create SILArguments in the entry block that bind a single value /// of the given parameter suitably for being forwarded. void bindParameterForForwarding(ParamDecl *param, @@ -1257,7 +1289,8 @@ class LLVM_LIBRARY_VISIBILITY SILGenFunction SGFAccessKind accessKind); // FIXME: demote this to private state. - ManagedValue maybeEmitValueOfLocalVarDecl(VarDecl *var); + ManagedValue maybeEmitValueOfLocalVarDecl( + VarDecl *var, AccessKind accessKind); /// Produce an RValue for a reference to the specified declaration, /// with the given type and in response to the specified expression. 
Try to @@ -1321,6 +1354,14 @@ class LLVM_LIBRARY_VISIBILITY SILGenFunction ArgumentSource &&value, bool isOnSelfParameter); + ManagedValue emitRunChildTask( + SILLocation loc, Type functionType, ManagedValue taskFunction); + + ManagedValue emitCancelAsyncTask(SILLocation loc, SILValue task); + + void completeAsyncLetChildTask( + PatternBindingDecl *patternBinding, unsigned index); + bool maybeEmitMaterializeForSetThunk(ProtocolConformanceRef conformance, SILLinkage linkage, Type selfInterfaceType, Type selfType, @@ -1466,6 +1507,12 @@ class LLVM_LIBRARY_VISIBILITY SILGenFunction LValue &&src, LValue &&dest); void emitCopyLValueInto(SILLocation loc, LValue &&src, Initialization *dest); + + /// Emit an assignment to the variables in the destination pattern, given + /// an rvalue source that has the same type as the pattern. + void emitAssignToPatternVars( + SILLocation loc, Pattern *destPattern, RValue &&src); + ManagedValue emitAddressOfLValue(SILLocation loc, LValue &&src, TSanKind tsanKind = TSanKind::None); ManagedValue emitBorrowedLValue(SILLocation loc, LValue &&src, @@ -1520,7 +1567,8 @@ class LLVM_LIBRARY_VISIBILITY SILGenFunction SILLocation loc, ManagedValue fn, SubstitutionMap subs, ArrayRef args, const CalleeTypeInfo &calleeTypeInfo, ApplyOptions options, - SGFContext evalContext); + SGFContext evalContext, + Optional implicitlyAsyncApply); RValue emitApplyOfDefaultArgGenerator(SILLocation loc, ConcreteDeclRef defaultArgsOwner, @@ -2026,6 +2074,9 @@ class LLVM_LIBRARY_VISIBILITY SILGenFunction CanType concreteFormalType, ExistentialRepresentation repr); + /// Enter a cleanup to cancel the given task. + CleanupHandle enterCancelAsyncTaskCleanup(SILValue task); + /// Evaluate an Expr as an lvalue. 
LValue emitLValue(Expr *E, SGFAccessKind accessKind, LValueOptions options = LValueOptions()); diff --git a/lib/SILGen/SILGenLValue.cpp b/lib/SILGen/SILGenLValue.cpp index fb2e6f0952347..8d7421aa6cd68 100644 --- a/lib/SILGen/SILGenLValue.cpp +++ b/lib/SILGen/SILGenLValue.cpp @@ -1446,7 +1446,8 @@ namespace { // Get the address of the storage property. ManagedValue proj; if (!BaseFormalType) { - proj = SGF.maybeEmitValueOfLocalVarDecl(backingVar); + proj = SGF.maybeEmitValueOfLocalVarDecl( + backingVar, AccessKind::Write); } else if (BaseFormalType->mayHaveSuperclass()) { RefElementComponent REC(backingVar, LValueOptions(), varStorageType, typeData); @@ -2666,6 +2667,24 @@ static LValue emitLValueForNonMemberVarDecl(SILGenFunction &SGF, return lv; } +/// Map a SILGen access kind back to an AST access kind. +static AccessKind mapAccessKind(SGFAccessKind accessKind) { + switch (accessKind) { + case SGFAccessKind::IgnoredRead: + case SGFAccessKind::BorrowedAddressRead: + case SGFAccessKind::BorrowedObjectRead: + case SGFAccessKind::OwnedAddressRead: + case SGFAccessKind::OwnedObjectRead: + return AccessKind::Read; + + case SGFAccessKind::Write: + return AccessKind::Write; + + case SGFAccessKind::ReadWrite: + return AccessKind::ReadWrite; + } +} + void LValue::addNonMemberVarComponent(SILGenFunction &SGF, SILLocation loc, VarDecl *var, SubstitutionMap subs, @@ -2730,7 +2749,8 @@ void LValue::addNonMemberVarComponent(SILGenFunction &SGF, SILLocation loc, // address. // Check for a local (possibly captured) variable. - auto address = SGF.maybeEmitValueOfLocalVarDecl(Storage); + auto astAccessKind = mapAccessKind(this->AccessKind); + auto address = SGF.maybeEmitValueOfLocalVarDecl(Storage, astAccessKind); // The only other case that should get here is a global variable. 
if (!address) { @@ -2765,10 +2785,20 @@ void LValue::addNonMemberVarComponent(SILGenFunction &SGF, SILLocation loc, } ManagedValue -SILGenFunction::maybeEmitValueOfLocalVarDecl(VarDecl *var) { +SILGenFunction::maybeEmitValueOfLocalVarDecl( + VarDecl *var, AccessKind accessKind) { // For local decls, use the address we allocated or the value if we have it. auto It = VarLocs.find(var); if (It != VarLocs.end()) { + // If the variable is part of an async let, ensure that the child task + // has completed first. + if (var->isAsyncLet() && accessKind != AccessKind::Write) { + auto patternBinding = var->getParentPatternBinding(); + unsigned index = patternBinding->getPatternEntryIndexForVarDecl(var); + completeAsyncLetChildTask(patternBinding, index); + } + + // If this has an address, return it. By-value let's have no address. SILValue ptr = It->second.value; if (ptr->getType().isAddress()) @@ -2791,7 +2821,8 @@ SILGenFunction::emitAddressOfLocalVarDecl(SILLocation loc, VarDecl *var, SGFAccessKind accessKind) { assert(var->getDeclContext()->isLocalContext()); assert(var->getImplInfo().isSimpleStored()); - auto address = maybeEmitValueOfLocalVarDecl(var); + AccessKind astAccessKind = mapAccessKind(accessKind); + auto address = maybeEmitValueOfLocalVarDecl(var, astAccessKind); assert(address); assert(address.isLValue()); return address; @@ -2806,7 +2837,7 @@ RValue SILGenFunction::emitRValueForNonMemberVarDecl(SILLocation loc, FormalEvaluationScope scope(*this); auto *var = cast(declRef.getDecl()); - auto localValue = maybeEmitValueOfLocalVarDecl(var); + auto localValue = maybeEmitValueOfLocalVarDecl(var, AccessKind::Read); // If this VarDecl is represented as an address, emit it as an lvalue, then // perform a load to get the rvalue. 
diff --git a/lib/SILGen/SILGenPoly.cpp b/lib/SILGen/SILGenPoly.cpp index 570297f95beaa..2273061c6e51e 100644 --- a/lib/SILGen/SILGenPoly.cpp +++ b/lib/SILGen/SILGenPoly.cpp @@ -4485,6 +4485,8 @@ SILGenFunction::emitVTableThunk(SILDeclRef base, // Concurrency //===----------------------------------------------------------------------===// +/// If the current function is associated with an actor, then this +/// function emits a hop_to_executor to that actor's executor at loc. void SILGenFunction::emitHopToCurrentExecutor(SILLocation loc) { if (actor) B.createHopToExecutor(loc, actor); diff --git a/lib/SILGen/SILGenProlog.cpp b/lib/SILGen/SILGenProlog.cpp index 69ff8502e8f88..3def9debc36ce 100644 --- a/lib/SILGen/SILGenProlog.cpp +++ b/lib/SILGen/SILGenProlog.cpp @@ -456,7 +456,6 @@ void SILGenFunction::emitProlog(CaptureInfo captureInfo, // Initialize 'actor' if the function is an actor-isolated function or // closure. - if (auto *funcDecl = dyn_cast_or_null(FunctionDC->getAsDecl())) { auto actorIsolation = getActorIsolation(funcDecl); @@ -465,14 +464,16 @@ void SILGenFunction::emitProlog(CaptureInfo captureInfo, case ActorIsolation::Independent: case ActorIsolation::IndependentUnsafe: break; + case ActorIsolation::ActorInstance: { assert(selfParam && "no self parameter for ActorInstance isolation"); ManagedValue selfArg = ManagedValue::forUnmanaged(F.getSelfArgument()); actor = selfArg.borrow(*this, F.getLocation()).getValue(); break; } + case ActorIsolation::GlobalActor: - loadGlobalActor(actorIsolation.getGlobalActor()); + actor = emitLoadGlobalActorExecutor(actorIsolation.getGlobalActor()); break; } } else if (auto *closureExpr = dyn_cast(FunctionDC)) { @@ -480,23 +481,28 @@ void SILGenFunction::emitProlog(CaptureInfo captureInfo, switch (actorIsolation.getKind()) { case ClosureActorIsolation::Independent: break; - case ClosureActorIsolation::ActorInstance: { - VarDecl *actorDecl = actorIsolation.getActorInstance(); - RValue actorInstanceRV = 
emitRValueForDecl(F.getLocation(), - actorDecl, actorDecl->getType(), AccessSemantics::Ordinary); - ManagedValue actorInstance = std::move(actorInstanceRV).getScalarValue(); - actor = actorInstance.borrow(*this, F.getLocation()).getValue(); + + case ClosureActorIsolation::ActorInstance: + actor = emitLoadActorExecutor(actorIsolation.getActorInstance()); break; - } + case ClosureActorIsolation::GlobalActor: - loadGlobalActor(actorIsolation.getGlobalActor()); + actor = emitLoadGlobalActorExecutor(actorIsolation.getGlobalActor()); break; } } + emitHopToCurrentExecutor(F.getLocation()); } -void SILGenFunction::loadGlobalActor(Type globalActor) { +SILValue SILGenFunction::emitLoadActorExecutor(VarDecl *actorDecl) { + RValue actorInstanceRV = emitRValueForDecl(F.getLocation(), + actorDecl, actorDecl->getType(), AccessSemantics::Ordinary); + ManagedValue actorInstance = std::move(actorInstanceRV).getScalarValue(); + return actorInstance.borrow(*this, F.getLocation()).getValue(); +} + +SILValue SILGenFunction::emitLoadGlobalActorExecutor(Type globalActor) { assert(F.isAsync()); CanType actorType = CanType(globalActor); NominalTypeDecl *nominal = actorType->getNominalOrBoundGenericNominal(); @@ -517,7 +523,7 @@ void SILGenFunction::loadGlobalActor(Type globalActor) { actorType, /*isSuper*/ false, sharedInstanceDecl, PreparedArguments(), subs, AccessSemantics::Ordinary, instanceType, SGFContext()); ManagedValue actorInstance = std::move(actorInstanceRV).getScalarValue(); - actor = actorInstance.borrow(*this, loc).getValue(); + return actorInstance.borrow(*this, loc).getValue(); } static void emitIndirectResultParameters(SILGenFunction &SGF, Type resultType, diff --git a/lib/SILGen/SILGenThunk.cpp b/lib/SILGen/SILGenThunk.cpp index dbd2ba3674cdc..1691387a6c8a9 100644 --- a/lib/SILGen/SILGenThunk.cpp +++ b/lib/SILGen/SILGenThunk.cpp @@ -33,6 +33,8 @@ #include "swift/SIL/SILArgument.h" #include "swift/SIL/TypeLowering.h" +#include "clang/AST/ASTContext.h" + using namespace 
swift; using namespace Lowering; @@ -139,6 +141,46 @@ SILGenFunction::emitGlobalFunctionRef(SILLocation loc, SILDeclRef constant, return B.createFunctionRefFor(loc, f); } +static const clang::Type *prependParameterType( + ASTContext &ctx, + const clang::Type *oldBlockPtrTy, + const clang::Type *newParameterTy) { + if (!oldBlockPtrTy) + return nullptr; + + SmallVector newParamTypes; + newParamTypes.push_back(clang::QualType(newParameterTy, 0)); + clang::QualType returnType; + clang::FunctionProtoType::ExtProtoInfo newExtProtoInfo{}; + using ExtParameterInfo = clang::FunctionProtoType::ExtParameterInfo; + SmallVector newExtParamInfos; + + auto blockPtrTy = cast(oldBlockPtrTy); + auto blockPointeeTy = blockPtrTy->getPointeeType().getTypePtr(); + if (auto fnNoProtoTy = dyn_cast(blockPointeeTy)) { + returnType = fnNoProtoTy->getReturnType(); + newExtProtoInfo.ExtInfo = fnNoProtoTy->getExtInfo(); + } else { + auto fnProtoTy = cast(blockPointeeTy); + llvm::copy(fnProtoTy->getParamTypes(), std::back_inserter(newParamTypes)); + returnType = fnProtoTy->getReturnType(); + newExtProtoInfo = fnProtoTy->getExtProtoInfo(); + auto extParamInfos = fnProtoTy->getExtParameterInfosOrNull(); + if (extParamInfos) { + auto oldExtParamInfos = + ArrayRef(extParamInfos, fnProtoTy->getNumParams()); + newExtParamInfos.push_back(clang::FunctionProtoType::ExtParameterInfo()); + llvm::copy(oldExtParamInfos, std::back_inserter(newExtParamInfos)); + newExtProtoInfo.ExtParameterInfos = newExtParamInfos.data(); + } + } + + auto &clangCtx = ctx.getClangModuleLoader()->getClangASTContext(); + auto newFnTy = + clangCtx.getFunctionType(returnType, newParamTypes, newExtProtoInfo); + return clangCtx.getPointerType(newFnTy).getTypePtr(); +} + SILFunction * SILGenModule::getOrCreateForeignAsyncCompletionHandlerImplFunction( CanSILFunctionType blockType, @@ -159,10 +201,17 @@ SILGenModule::getOrCreateForeignAsyncCompletionHandlerImplFunction( std::copy(blockType->getParameters().begin(), 
blockType->getParameters().end(), std::back_inserter(implArgs)); + + auto newClangTy = prependParameterType( + getASTContext(), + blockType->getClangTypeInfo().getType(), + getASTContext().getClangTypeForIRGen(blockStorageTy)); auto implTy = SILFunctionType::get(GenericSignature(), - blockType->getExtInfo() - .withRepresentation(SILFunctionTypeRepresentation::CFunctionPointer), + blockType->getExtInfo().intoBuilder() + .withRepresentation(SILFunctionTypeRepresentation::CFunctionPointer) + .withClangFunctionType(newClangTy) + .build(), SILCoroutineKind::None, ParameterConvention::Direct_Unowned, implArgs, {}, blockType->getResults(), diff --git a/lib/SILOptimizer/Analysis/EscapeAnalysis.cpp b/lib/SILOptimizer/Analysis/EscapeAnalysis.cpp index e0e995c4e071e..3a2e165d7f781 100644 --- a/lib/SILOptimizer/Analysis/EscapeAnalysis.cpp +++ b/lib/SILOptimizer/Analysis/EscapeAnalysis.cpp @@ -450,6 +450,9 @@ EscapeAnalysis::ConnectionGraph::getNode(SILValue V) { if (Node) { CGNode *targetNode = Node->getMergeTarget(); targetNode->mergeFlags(false /*isInterior*/, hasReferenceOnly); + // Update the node in Values2Nodes, so that next time we don't need to find + // the final merge target. + Node = targetNode; return targetNode; } if (isa(ptrBase)) { @@ -768,7 +771,8 @@ void EscapeAnalysis::ConnectionGraph::mergeAllScheduledNodes() { if (From->mappedValue) { // values previously mapped to 'From' but not transferred to 'To's // mappedValue must remain mapped to 'From'. Lookups on those values will - // find 'To' via the mergeTarget. Dropping a value's mapping is illegal + // find 'To' via the mergeTarget and will remap those values to 'To' + // on-the-fly for efficiency. Dropping a value's mapping is illegal // because it could cause a node to be recreated without the edges that // have already been discovered. 
if (!To->mappedValue) { diff --git a/lib/SILOptimizer/Analysis/SimplifyInstruction.cpp b/lib/SILOptimizer/Analysis/SimplifyInstruction.cpp index 0a1d5083ce5e6..cf6cef3eab45a 100644 --- a/lib/SILOptimizer/Analysis/SimplifyInstruction.cpp +++ b/lib/SILOptimizer/Analysis/SimplifyInstruction.cpp @@ -20,12 +20,15 @@ //===----------------------------------------------------------------------===// #define DEBUG_TYPE "sil-simplify" + #include "swift/SILOptimizer/Analysis/SimplifyInstruction.h" +#include "swift/SIL/BasicBlockUtils.h" #include "swift/SIL/InstructionUtils.h" #include "swift/SIL/PatternMatch.h" #include "swift/SIL/SILVisitor.h" #include "swift/SILOptimizer/Analysis/ValueTracking.h" #include "swift/SILOptimizer/Utils/InstOptUtils.h" +#include "swift/SILOptimizer/Utils/OwnershipOptUtils.h" using namespace swift; using namespace swift::PatternMatch; @@ -298,6 +301,13 @@ SILValue InstSimplifier::visitAddressToPointerInst(AddressToPointerInst *ATPI) { SILValue InstSimplifier::visitPointerToAddressInst(PointerToAddressInst *PTAI) { // (pointer_to_address strict (address_to_pointer x)) -> x + // + // NOTE: We can not perform this optimization in OSSA without dealing with + // interior pointers since we may be escaping an interior pointer address from + // a borrow scope. + if (PTAI->getFunction()->hasOwnership()) + return SILValue(); + // If this address is not strict, then it cannot be replaced by an address // that may be strict. if (auto *ATPI = dyn_cast(PTAI->getOperand())) @@ -332,6 +342,9 @@ visitUnconditionalCheckedCastInst(UnconditionalCheckedCastInst *UCCI) { /// If the only use of a cast is a destroy, just destroy the cast operand. 
static SILValue simplifyDeadCast(SingleValueInstruction *Cast) { + if (!Cast->hasUsesOfAnyResult()) + return SILValue(); + for (Operand *op : Cast->getUses()) { switch (op->getUser()->getKind()) { case SILInstructionKind::DestroyValueInst: @@ -718,54 +731,67 @@ case BuiltinValueKind::id: return SILValue(); } -/// Try to simplify the specified instruction, performing local -/// analysis of the operands of the instruction, without looking at its uses -/// (e.g. constant folding). If a simpler result can be found, it is -/// returned, otherwise a null SILValue is returned. -/// -SILValue swift::simplifyInstruction(SILInstruction *I) { - return InstSimplifier().visit(I); -} - -/// Replace an instruction with a simplified result, including any debug uses, -/// and erase the instruction. If the instruction initiates a scope, do not -/// replace the end of its scope; it will be deleted along with its parent. -/// -/// This is a simple transform based on the above analysis. -/// -/// Return an iterator to the next (nondeleted) instruction. -SILBasicBlock::iterator swift::replaceAllSimplifiedUsesAndErase( - SILInstruction *I, SILValue result, - std::function eraseHandler) { +//===----------------------------------------------------------------------===// +// Top Level Entrypoints +//===----------------------------------------------------------------------===// - auto *SVI = cast(I); - assert(SVI != result && "Cannot RAUW a value with itself"); - SILBasicBlock::iterator nextii = std::next(I->getIterator()); +static SILBasicBlock::iterator +replaceAllUsesAndEraseInner(SingleValueInstruction *svi, SILValue newValue, + std::function eraseNotify) { + assert(svi != newValue && "Cannot RAUW a value with itself"); + SILBasicBlock::iterator nextii = std::next(svi->getIterator()); // Only SingleValueInstructions are currently simplified. 
- while (!SVI->use_empty()) { - Operand *use = *SVI->use_begin(); + while (!svi->use_empty()) { + Operand *use = *svi->use_begin(); SILInstruction *user = use->getUser(); // Erase the end of scope marker. if (isEndOfScopeMarker(user)) { if (&*nextii == user) ++nextii; - if (eraseHandler) - eraseHandler(user); + if (eraseNotify) + eraseNotify(user); else user->eraseFromParent(); continue; } - use->set(result); + use->set(newValue); } - if (eraseHandler) - eraseHandler(I); + if (eraseNotify) + eraseNotify(svi); else - I->eraseFromParent(); + svi->eraseFromParent(); return nextii; } +/// Replace an instruction with a simplified result, including any debug uses, +/// and erase the instruction. If the instruction initiates a scope, do not +/// replace the end of its scope; it will be deleted along with its parent. +/// +/// This is a simple transform based on the above analysis. +/// +/// We assume that when ownership is enabled that the IR is in valid OSSA form +/// before this is called. It will perform fixups as necessary to preserve OSSA. +/// +/// Return an iterator to the next (nondeleted) instruction. +SILBasicBlock::iterator swift::replaceAllSimplifiedUsesAndErase( + SILInstruction *i, SILValue result, + std::function eraseNotify, + std::function newInstNotify, + DeadEndBlocks *deadEndBlocks) { + auto *svi = cast(i); + assert(svi != result && "Cannot RAUW a value with itself"); + + if (svi->getFunction()->hasOwnership()) { + JointPostDominanceSetComputer computer(*deadEndBlocks); + OwnershipFixupContext ctx{eraseNotify, newInstNotify, *deadEndBlocks, + computer}; + return ctx.replaceAllUsesAndEraseFixingOwnership(svi, result); + } + return replaceAllUsesAndEraseInner(svi, result, eraseNotify); +} + /// Simplify invocations of builtin operations that may overflow. /// All such operations return a tuple (result, overflow_flag). 
/// This function try to simplify such operations, but returns only a @@ -777,3 +803,27 @@ SILBasicBlock::iterator swift::replaceAllSimplifiedUsesAndErase( SILValue swift::simplifyOverflowBuiltinInstruction(BuiltinInst *BI) { return InstSimplifier().simplifyOverflowBuiltin(BI); } + +/// Try to simplify the specified instruction, performing local +/// analysis of the operands of the instruction, without looking at its uses +/// (e.g. constant folding). If a simpler result can be found, it is +/// returned, otherwise a null SILValue is returned. +/// +/// NOTE: We assume that the insertion point associated with the SILValue must +/// dominate \p i. +SILValue swift::simplifyInstruction(SILInstruction *i) { + SILValue result = InstSimplifier().visit(i); + if (!result) + return SILValue(); + + // If we have a result, we know that we must have a single value instruction + // by assumption since we have not implemented support in the rest of inst + // simplify for non-single value instructions. We put the cast here so that + // this code is not updated at this point in time. 
+ auto *svi = cast(i); + if (svi->getFunction()->hasOwnership()) + if (!OwnershipFixupContext::canFixUpOwnershipForRAUW(svi, result)) + return SILValue(); + + return result; +} diff --git a/lib/SILOptimizer/Differentiation/Common.cpp b/lib/SILOptimizer/Differentiation/Common.cpp index 20cdd11a781fe..5dcd5f77edd35 100644 --- a/lib/SILOptimizer/Differentiation/Common.cpp +++ b/lib/SILOptimizer/Differentiation/Common.cpp @@ -329,14 +329,16 @@ VarDecl *getTangentStoredProperty(ADContext &context, VarDecl *originalField, } VarDecl *getTangentStoredProperty(ADContext &context, - FieldIndexCacheBase *projectionInst, + SingleValueInstruction *projectionInst, CanType baseType, DifferentiationInvoker invoker) { assert(isa(projectionInst) || isa(projectionInst) || isa(projectionInst)); + Projection proj(projectionInst); auto loc = getValidLocation(projectionInst); - return getTangentStoredProperty(context, projectionInst->getField(), baseType, + auto *field = proj.getVarDecl(projectionInst->getOperand(0)->getType()); + return getTangentStoredProperty(context, field, baseType, loc, invoker); } @@ -399,6 +401,35 @@ void emitZeroIntoBuffer(SILBuilder &builder, CanType type, builder.emitDestroyValueOperation(loc, getter); } +SILValue emitMemoryLayoutSize( + SILBuilder &builder, SILLocation loc, CanType type) { + auto &ctx = builder.getASTContext(); + auto id = ctx.getIdentifier(getBuiltinName(BuiltinValueKind::Sizeof)); + auto *builtin = cast(getBuiltinValueDecl(ctx, id)); + auto metatypeTy = SILType::getPrimitiveObjectType( + CanMetatypeType::get(type, MetatypeRepresentation::Thin)); + auto metatypeVal = builder.createMetatype(loc, metatypeTy); + return builder.createBuiltin( + loc, id, SILType::getBuiltinWordType(ctx), + SubstitutionMap::get( + builtin->getGenericSignature(), ArrayRef{type}, {}), + {metatypeVal}); +} + +SILValue emitProjectTopLevelSubcontext( + SILBuilder &builder, SILLocation loc, SILValue context, + SILType subcontextType) { + 
assert(context.getOwnershipKind() == OwnershipKind::Guaranteed); + auto &ctx = builder.getASTContext(); + auto id = ctx.getIdentifier( + getBuiltinName(BuiltinValueKind::AutoDiffProjectTopLevelSubcontext)); + assert(context->getType() == SILType::getNativeObjectType(ctx)); + auto *subcontextAddr = builder.createBuiltin( + loc, id, SILType::getRawPointerType(ctx), SubstitutionMap(), {context}); + return builder.createPointerToAddress( + loc, subcontextAddr, subcontextType.getAddressType(), /*isStrict*/ true); +} + //===----------------------------------------------------------------------===// // Utilities for looking up derivatives of functions //===----------------------------------------------------------------------===// diff --git a/lib/SILOptimizer/Differentiation/JVPCloner.cpp b/lib/SILOptimizer/Differentiation/JVPCloner.cpp index b6ee6518ba75a..74b73d47d92b9 100644 --- a/lib/SILOptimizer/Differentiation/JVPCloner.cpp +++ b/lib/SILOptimizer/Differentiation/JVPCloner.cpp @@ -26,11 +26,16 @@ #include "swift/SILOptimizer/Differentiation/PullbackCloner.h" #include "swift/SILOptimizer/Differentiation/Thunk.h" +#include "swift/SIL/LoopInfo.h" #include "swift/SIL/TypeSubstCloner.h" +#include "swift/SILOptimizer/Analysis/LoopAnalysis.h" #include "swift/SILOptimizer/PassManager/PrettyStackTrace.h" #include "swift/SILOptimizer/Utils/SILOptFunctionBuilder.h" #include "llvm/ADT/DenseMap.h" +using namespace swift; +using namespace autodiff; + namespace swift { namespace autodiff { @@ -57,6 +62,9 @@ class JVPCloner::Implementation final /// Info from activity analysis on the original function. const DifferentiableActivityInfo &activityInfo; + /// The loop info. + SILLoopInfo *loopInfo; + /// The differential info. LinearMapInfo differentialInfo; @@ -380,6 +388,8 @@ class JVPCloner::Implementation final /// Run JVP generation. Returns true on error. 
bool run(); + SILFunction &getJVP() const { return *jvp; } + void postProcess(SILInstruction *orig, SILInstruction *cloned) { if (errorOccurred) return; @@ -509,11 +519,13 @@ class JVPCloner::Implementation final return; } } - auto borrowedDiffFunc = builder.emitBeginBorrowOperation(loc, origCallee); - jvpValue = builder.createDifferentiableFunctionExtract( - loc, NormalDifferentiableFunctionTypeComponent::JVP, - borrowedDiffFunc); - jvpValue = builder.emitCopyValueOperation(loc, jvpValue); + builder.emitScopedBorrowOperation( + loc, origCallee, [&](SILValue borrowedDiffFunc) { + jvpValue = builder.createDifferentiableFunctionExtract( + loc, NormalDifferentiableFunctionTypeComponent::JVP, + borrowedDiffFunc); + jvpValue = builder.emitCopyValueOperation(loc, jvpValue); + }); } // If JVP has not yet been found, emit an `differentiable_function` @@ -604,11 +616,13 @@ class JVPCloner::Implementation final // Record the `differentiable_function` instruction. context.getDifferentiableFunctionInstWorklist().push_back(diffFuncInst); - auto borrowedADFunc = builder.emitBeginBorrowOperation(loc, diffFuncInst); - auto extractedJVP = builder.createDifferentiableFunctionExtract( - loc, NormalDifferentiableFunctionTypeComponent::JVP, borrowedADFunc); - jvpValue = builder.emitCopyValueOperation(loc, extractedJVP); - builder.emitEndBorrowOperation(loc, borrowedADFunc); + builder.emitScopedBorrowOperation( + loc, diffFuncInst, [&](SILValue borrowedADFunc) { + auto extractedJVP = builder.createDifferentiableFunctionExtract( + loc, NormalDifferentiableFunctionTypeComponent::JVP, + borrowedADFunc); + jvpValue = builder.emitCopyValueOperation(loc, extractedJVP); + }); builder.emitDestroyValueOperation(loc, diffFuncInst); } @@ -1038,7 +1052,7 @@ class JVPCloner::Implementation final auto structType = remapSILTypeInDifferential(sei->getOperand()->getType()).getASTType(); auto *tanField = - getTangentStoredProperty(context, sei, structType, invoker); + getTangentStoredProperty(context, 
sei, structType, invoker); if (!tanField) { errorOccurred = true; return; @@ -1069,7 +1083,7 @@ class JVPCloner::Implementation final auto structType = remapSILTypeInDifferential(seai->getOperand()->getType()).getASTType(); auto *tanField = - getTangentStoredProperty(context, seai, structType, invoker); + getTangentStoredProperty(context, seai, structType, invoker); if (!tanField) { errorOccurred = true; return; @@ -1403,8 +1417,11 @@ JVPCloner::Implementation::Implementation(ADContext &context, invoker(invoker), activityInfo(getActivityInfo(context, original, witness->getSILAutoDiffIndices(), jvp)), + loopInfo(context.getPassManager().getAnalysis() + ->get(original)), differentialInfo(context, AutoDiffLinearMapKind::Differential, original, - jvp, witness->getSILAutoDiffIndices(), activityInfo), + jvp, witness->getSILAutoDiffIndices(), activityInfo, + loopInfo), differentialBuilder(SILBuilder( *createEmptyDifferential(context, witness, &differentialInfo))), diffLocalAllocBuilder(getDifferential()) { @@ -1727,7 +1744,16 @@ bool JVPCloner::Implementation::run() { return errorOccurred; } -bool JVPCloner::run() { return impl.run(); } - } // end namespace autodiff } // end namespace swift + +bool JVPCloner::run() { + bool foundError = impl.run(); +#ifndef NDEBUG + if (!foundError) + getJVP().verify(); +#endif + return foundError; +} + +SILFunction &JVPCloner::getJVP() const { return impl.getJVP(); } diff --git a/lib/SILOptimizer/Differentiation/LinearMapInfo.cpp b/lib/SILOptimizer/Differentiation/LinearMapInfo.cpp index 6de6cee60781e..a7c48fc386da1 100644 --- a/lib/SILOptimizer/Differentiation/LinearMapInfo.cpp +++ b/lib/SILOptimizer/Differentiation/LinearMapInfo.cpp @@ -23,7 +23,6 @@ #include "swift/AST/ParameterList.h" #include "swift/AST/SourceFile.h" #include "swift/SIL/LoopInfo.h" -#include "swift/SILOptimizer/Analysis/LoopAnalysis.h" namespace swift { namespace autodiff { @@ -56,9 +55,10 @@ static GenericParamList *cloneGenericParameters(ASTContext &ctx, 
LinearMapInfo::LinearMapInfo(ADContext &context, AutoDiffLinearMapKind kind, SILFunction *original, SILFunction *derivative, SILAutoDiffIndices indices, - const DifferentiableActivityInfo &activityInfo) + const DifferentiableActivityInfo &activityInfo, + SILLoopInfo *loopInfo) : kind(kind), original(original), derivative(derivative), - activityInfo(activityInfo), indices(indices), + activityInfo(activityInfo), loopInfo(loopInfo), indices(indices), synthesizedFile(context.getOrCreateSynthesizedFile(original)), typeConverter(context.getTypeConverter()) { generateDifferentiationDataStructures(context, derivative); @@ -146,21 +146,30 @@ LinearMapInfo::createBranchingTraceDecl(SILBasicBlock *originalBB, file.addTopLevelDecl(branchingTraceDecl); // Add basic block enum cases. for (auto *predBB : originalBB->getPredecessorBlocks()) { - auto bbId = "bb" + std::to_string(predBB->getDebugID()); - auto *linearMapStruct = getLinearMapStruct(predBB); - assert(linearMapStruct); - auto linearMapStructTy = - linearMapStruct->getDeclaredInterfaceType()->getCanonicalType(); // Create dummy declaration representing enum case parameter. auto *decl = new (astCtx) ParamDecl(loc, loc, Identifier(), loc, Identifier(), moduleDecl); decl->setSpecifier(ParamDecl::Specifier::Default); - if (linearMapStructTy->hasArchetype()) - decl->setInterfaceType(linearMapStructTy->mapTypeOutOfContext()); - else - decl->setInterfaceType(linearMapStructTy); + // If predecessor block is in a loop, its linear map struct will be + // indirectly referenced in memory owned by the context object. The payload + // is just a raw pointer. + if (loopInfo->getLoopFor(predBB)) { + blocksInLoop.insert(predBB); + decl->setInterfaceType(astCtx.TheRawPointerType); + } + // Otherwise the payload is the linear map struct. 
+ else { + auto *linearMapStruct = getLinearMapStruct(predBB); + assert(linearMapStruct); + auto linearMapStructTy = + linearMapStruct->getDeclaredInterfaceType()->getCanonicalType(); + decl->setInterfaceType( + linearMapStructTy->hasArchetype() + ? linearMapStructTy->mapTypeOutOfContext() : linearMapStructTy); + } // Create enum element and enum case declarations. auto *paramList = ParameterList::create(astCtx, {decl}); + auto bbId = "bb" + std::to_string(predBB->getDebugID()); auto *enumEltDecl = new (astCtx) EnumElementDecl( /*IdentifierLoc*/ loc, DeclName(astCtx.getIdentifier(bbId)), paramList, loc, /*RawValueExpr*/ nullptr, branchingTraceDecl); @@ -173,10 +182,6 @@ LinearMapInfo::createBranchingTraceDecl(SILBasicBlock *originalBB, // Record enum element declaration. branchingTraceEnumCases.insert({{predBB, originalBB}, enumEltDecl}); } - // If original block is in a loop, mark branching trace enum as indirect. - if (loopInfo->getLoopFor(originalBB)) - branchingTraceDecl->getAttrs().add(new (astCtx) - IndirectAttr(/*Implicit*/ true)); return branchingTraceDecl; } @@ -359,9 +364,6 @@ void LinearMapInfo::addLinearMapToStruct(ADContext &context, ApplyInst *ai) { void LinearMapInfo::generateDifferentiationDataStructures( ADContext &context, SILFunction *derivativeFn) { auto &astCtx = original->getASTContext(); - auto *loopAnalysis = context.getPassManager().getAnalysis(); - auto *loopInfo = loopAnalysis->get(original); - // Get the derivative function generic signature. CanGenericSignature derivativeFnGenSig = nullptr; if (auto *derivativeFnGenEnv = derivativeFn->getGenericEnvironment()) diff --git a/lib/SILOptimizer/Differentiation/PullbackCloner.cpp b/lib/SILOptimizer/Differentiation/PullbackCloner.cpp index 2fcd7ea574f1a..63bfd4fcfca1b 100644 --- a/lib/SILOptimizer/Differentiation/PullbackCloner.cpp +++ b/lib/SILOptimizer/Differentiation/PullbackCloner.cpp @@ -86,9 +86,6 @@ class PullbackCloner::Implementation final /// adjoint buffers. 
llvm::DenseMap, SILValue> bufferMap; - /// Mapping from pullback basic blocks to pullback struct arguments. - llvm::DenseMap pullbackStructArguments; - /// Mapping from pullback struct field declarations to pullback struct /// elements destructured from the linear map basic block argument. In the /// beginning of each pullback basic block, the block's pullback struct is @@ -130,6 +127,9 @@ class PullbackCloner::Implementation final /// The seed arguments of the pullback function. SmallVector seeds; + /// The `AutoDiffLinearMapContext` object, if any. + SILValue contextValue = nullptr; + llvm::BumpPtrAllocator allocator; bool errorOccurred = false; @@ -138,7 +138,6 @@ class PullbackCloner::Implementation final SILModule &getModule() const { return getContext().getModule(); } ASTContext &getASTContext() const { return getPullback().getASTContext(); } SILFunction &getOriginal() const { return vjpCloner.getOriginal(); } - SILFunction &getPullback() const { return vjpCloner.getPullback(); } SILDifferentiabilityWitness *getWitness() const { return vjpCloner.getWitness(); } @@ -782,6 +781,10 @@ class PullbackCloner::Implementation final /// parameters. void emitZeroDerivativesForNonvariedResult(SILValue origNonvariedResult); + /// Public helper so that our users can get the underlying newly created + /// function. 
+ SILFunction &getPullback() const { return vjpCloner.getPullback(); } + using TrampolineBlockSet = SmallPtrSet; /// Determines the pullback successor block for a given original block and one @@ -1740,7 +1743,14 @@ PullbackCloner::~PullbackCloner() { delete &impl; } // Entry point //--------------------------------------------------------------------------// -bool PullbackCloner::run() { return impl.run(); } +bool PullbackCloner::run() { + bool foundError = impl.run(); +#ifndef NDEBUG + if (!foundError) + impl.getPullback().verify(); +#endif + return foundError; +} bool PullbackCloner::Implementation::run() { PrettyStackTraceSILFunction trace("generating pullback for", &getOriginal()); @@ -1822,7 +1832,9 @@ bool PullbackCloner::Implementation::run() { } } // Diagnose unsupported stored property projections. - if (auto *inst = dyn_cast(v)) { + if (isa(v) || isa(v) || + isa(v)) { + auto *inst = cast(v); assert(inst->getNumOperands() == 1); auto baseType = remapType(inst->getOperand(0)->getType()).getASTType(); if (!getTangentStoredProperty(getContext(), inst, baseType, @@ -1895,11 +1907,28 @@ bool PullbackCloner::Implementation::run() { if (origBB == origExit) { assert(pullbackBB->isEntry()); createEntryArguments(&pullback); - auto *mainPullbackStruct = pullbackBB->getArguments().back(); - assert(mainPullbackStruct->getType() == pbStructLoweredType); - pullbackStructArguments[origBB] = mainPullbackStruct; - // Destructure the pullback struct to get the elements. builder.setInsertionPoint(pullbackBB); + // Obtain the context object, if any, and the top-level subcontext, i.e. + // the main pullback struct. + SILValue mainPullbackStruct; + if (getPullbackInfo().hasLoops()) { + // The last argument is the context object (`Builtin.NativeObject`). + contextValue = pullbackBB->getArguments().back(); + assert(contextValue->getType() == + SILType::getNativeObjectType(getASTContext())); + // Load the pullback struct. 
+ auto subcontextAddr = emitProjectTopLevelSubcontext( + builder, pbLoc, contextValue, pbStructLoweredType); + mainPullbackStruct = builder.createLoad( + pbLoc, subcontextAddr, + pbStructLoweredType.isTrivial(getPullback()) ? + LoadOwnershipQualifier::Trivial : LoadOwnershipQualifier::Take); + } else { + // Obtain and destructure pullback struct elements. + mainPullbackStruct = pullbackBB->getArguments().back(); + assert(mainPullbackStruct->getType() == pbStructLoweredType); + } + auto *dsi = builder.createDestructureStruct(pbLoc, mainPullbackStruct); initializePullbackStructElements(origBB, dsi->getResults()); continue; @@ -1938,7 +1967,6 @@ bool PullbackCloner::Implementation::run() { // Add a pullback struct argument. auto *pbStructArg = pullbackBB->createPhiArgument(pbStructLoweredType, OwnershipKind::Owned); - pullbackStructArguments[origBB] = pbStructArg; // Destructure the pullback struct to get the elements. builder.setInsertionPoint(pullbackBB); auto *dsi = builder.createDestructureStruct(pbLoc, pbStructArg); @@ -1969,7 +1997,7 @@ bool PullbackCloner::Implementation::run() { auto *pullbackEntry = pullback.getEntryBlock(); // The pullback function has type: - // `(seed0, seed1, ..., exit_pb_struct) -> (d_arg0, ..., d_argn)`. + // `(seed0, seed1, ..., exit_pb_struct|context_obj) -> (d_arg0, ..., d_argn)`. auto pbParamArgs = pullback.getArgumentsWithoutIndirectResults(); assert(getIndices().results->getNumIndices() == pbParamArgs.size() - 1 && pbParamArgs.size() >= 2); @@ -2328,17 +2356,22 @@ SILBasicBlock *PullbackCloner::Implementation::buildPullbackSuccessor( } // Propagate pullback struct argument. 
SILBuilder pullbackTrampolineBBBuilder(pullbackTrampolineBB); - auto *predPBStructVal = pullbackTrampolineBB->getArguments().front(); - auto boxType = dyn_cast(predPBStructVal->getType().getASTType()); - if (!boxType) { - trampolineArguments.push_back(predPBStructVal); + auto *pullbackTrampolineBBArg = pullbackTrampolineBB->getArguments().front(); + if (vjpCloner.getLoopInfo()->getLoopFor(origPredBB)) { + assert(pullbackTrampolineBBArg->getType() == + SILType::getRawPointerType(getASTContext())); + auto pbStructType = + remapType(getPullbackInfo().getLinearMapStructLoweredType(origPredBB)); + auto predPbStructAddr = pullbackTrampolineBBBuilder.createPointerToAddress( + loc, pullbackTrampolineBBArg, pbStructType.getAddressType(), + /*isStrict*/ true); + auto predPbStructVal = pullbackTrampolineBBBuilder.createLoad( + loc, predPbStructAddr, + pbStructType.isTrivial(getPullback()) ? + LoadOwnershipQualifier::Trivial : LoadOwnershipQualifier::Take); + trampolineArguments.push_back(predPbStructVal); } else { - auto *projectBox = pullbackTrampolineBBBuilder.createProjectBox( - loc, predPBStructVal, /*index*/ 0); - auto loaded = pullbackTrampolineBBBuilder.emitLoadValueOperation( - loc, projectBox, LoadOwnershipQualifier::Copy); - pullbackTrampolineBBBuilder.emitDestroyValueOperation(loc, predPBStructVal); - trampolineArguments.push_back(loaded); + trampolineArguments.push_back(pullbackTrampolineBBArg); } // Branch from pullback trampoline block to pullback block. pullbackTrampolineBBBuilder.createBranch(loc, pullbackBB, @@ -2535,7 +2568,7 @@ bool PullbackCloner::Implementation::runForSemanticMemberGetter() { // Get getter argument and result values. 
// Getter type: $(Self) -> Result - // Pullback type: $(Result', PB_Struct) -> Self' + // Pullback type: $(Result', PB_Struct|Context) -> Self' assert(original.getLoweredFunctionType()->getNumParameters() == 1); assert(pullback.getLoweredFunctionType()->getNumParameters() == 2); assert(pullback.getLoweredFunctionType()->getNumResults() == 1); @@ -2547,8 +2580,10 @@ bool PullbackCloner::Implementation::runForSemanticMemberGetter() { "Getter should have one semantic result"); auto origResult = origFormalResults[*getIndices().results->begin()]; - auto tangentVectorSILTy = pullback.getConventions().getSingleSILResultType( - TypeExpansionContext::minimal()); + auto tangentVectorSILTy = pullback.getConventions().getResults().front() + .getSILStorageType(getModule(), + pullback.getLoweredFunctionType(), + TypeExpansionContext::minimal()); auto tangentVectorTy = tangentVectorSILTy.getASTType(); auto *tangentVectorDecl = tangentVectorTy->getStructOrBoundGenericStruct(); diff --git a/lib/SILOptimizer/Differentiation/VJPCloner.cpp b/lib/SILOptimizer/Differentiation/VJPCloner.cpp index 09d26e3b99baa..e84f799e50622 100644 --- a/lib/SILOptimizer/Differentiation/VJPCloner.cpp +++ b/lib/SILOptimizer/Differentiation/VJPCloner.cpp @@ -27,6 +27,7 @@ #include "swift/SIL/TerminatorUtils.h" #include "swift/SIL/TypeSubstCloner.h" +#include "swift/SILOptimizer/Analysis/LoopAnalysis.h" #include "swift/SILOptimizer/PassManager/PrettyStackTrace.h" #include "swift/SILOptimizer/Utils/CFGOptUtils.h" #include "swift/SILOptimizer/Utils/SILOptFunctionBuilder.h" @@ -64,6 +65,9 @@ class VJPCloner::Implementation final /// Info from activity analysis on the original function. const DifferentiableActivityInfo &activityInfo; + /// The loop info. + SILLoopInfo *loopInfo; + /// The linear map info. LinearMapInfo pullbackInfo; @@ -71,6 +75,16 @@ class VJPCloner::Implementation final /// predecessor enum argument). SmallPtrSet remappedBasicBlocks; + /// The `AutoDiffLinearMapContext` object. 
If null, no explicit context is
+ /// needed (no loops).
+ SILValue pullbackContextValue;
+ /// The unique, borrowed context object. This is valid until the exit block.
+ SILValue borrowedPullbackContextValue;
+
+ /// The generic signature of the `Builtin.autoDiffAllocateSubcontext(_:_:)`
+ /// declaration. It is used for creating a builtin call.
+ GenericSignature builtinAutoDiffAllocateSubcontextGenericSignature;
+
 bool errorOccurred = false;
 
 /// Mapping from original blocks to pullback values. Used to build pullback
@@ -93,6 +107,31 @@ class VJPCloner::Implementation final
 /// Run VJP generation. Returns true on error.
 bool run();
 
+ /// Initializes a context object if needed.
+ void emitLinearMapContextInitializationIfNeeded() {
+ if (!pullbackInfo.hasLoops())
+ return;
+ // Get linear map struct size.
+ auto *returnBB = &*original->findReturnBB();
+ auto pullbackStructType =
+ remapType(pullbackInfo.getLinearMapStructLoweredType(returnBB));
+ Builder.setInsertionPoint(vjp->getEntryBlock());
+ auto topLevelSubcontextSize = emitMemoryLayoutSize(
+ Builder, original->getLocation(), pullbackStructType.getASTType());
+ // Create a context.
+ pullbackContextValue = Builder.createBuiltin(
+ original->getLocation(),
+ getASTContext().getIdentifier(
+ getBuiltinName(BuiltinValueKind::AutoDiffCreateLinearMapContext)),
+ SILType::getNativeObjectType(getASTContext()),
+ SubstitutionMap(), {topLevelSubcontextSize});
+ borrowedPullbackContextValue = Builder.createBeginBorrow(
+ original->getLocation(), pullbackContextValue);
+ LLVM_DEBUG(getADDebugStream()
+ << "Context object initialized because there are loops\n"
+ << *vjp->getEntryBlock() << '\n');
+ }
+
 /// Get the lowered SIL type of the given AST type. 
SILType getLoweredType(Type type) { auto vjpGenSig = vjp->getLoweredFunctionType()->getSubstGenericSignature(); @@ -101,11 +140,17 @@ class VJPCloner::Implementation final return vjp->getLoweredType(pattern, type); } - /// Get the lowered SIL type of the given nominal type declaration. - SILType getNominalDeclLoweredType(NominalTypeDecl *nominal) { - auto nominalType = - getOpASTType(nominal->getDeclaredInterfaceType()->getCanonicalType()); - return getLoweredType(nominalType); + GenericSignature getBuiltinAutoDiffAllocateSubcontextDecl() { + if (builtinAutoDiffAllocateSubcontextGenericSignature) + return builtinAutoDiffAllocateSubcontextGenericSignature; + auto &ctx = getASTContext(); + auto *decl = cast(getBuiltinValueDecl( + ctx, ctx.getIdentifier( + getBuiltinName(BuiltinValueKind::AutoDiffAllocateSubcontext)))); + builtinAutoDiffAllocateSubcontextGenericSignature = + decl->getGenericSignature(); + assert(builtinAutoDiffAllocateSubcontextGenericSignature); + return builtinAutoDiffAllocateSubcontextGenericSignature; } // Creates a trampoline block for given original terminator instruction, the @@ -173,8 +218,6 @@ class VJPCloner::Implementation final void visitReturnInst(ReturnInst *ri) { auto loc = ri->getOperand().getLoc(); - auto &builder = getBuilder(); - // Build pullback struct value for original block. auto *origExit = ri->getParent(); auto *pbStructVal = buildPullbackValueStructValue(ri); @@ -183,17 +226,35 @@ class VJPCloner::Implementation final auto *origRetInst = cast(origExit->getTerminator()); auto origResult = getOpValue(origRetInst->getOperand()); SmallVector origResults; - extractAllElements(origResult, builder, origResults); + extractAllElements(origResult, Builder, origResults); // Get and partially apply the pullback. auto vjpGenericEnv = vjp->getGenericEnvironment(); auto vjpSubstMap = vjpGenericEnv ? 
vjpGenericEnv->getForwardingSubstitutionMap() : vjp->getForwardingSubstitutionMap(); - auto *pullbackRef = builder.createFunctionRef(loc, pullback); - auto *pullbackPartialApply = - builder.createPartialApply(loc, pullbackRef, vjpSubstMap, {pbStructVal}, - ParameterConvention::Direct_Guaranteed); + auto *pullbackRef = Builder.createFunctionRef(loc, pullback); + + // Prepare partial application arguments. + SILValue partialApplyArg; + if (borrowedPullbackContextValue) { + // Initialize the top-level subcontext buffer with the top-level pullback + // struct. + auto addr = emitProjectTopLevelSubcontext( + Builder, loc, borrowedPullbackContextValue, pbStructVal->getType()); + Builder.createStore( + loc, pbStructVal, addr, + pbStructVal->getType().isTrivial(*pullback) ? + StoreOwnershipQualifier::Trivial : StoreOwnershipQualifier::Init); + partialApplyArg = pullbackContextValue; + Builder.createEndBorrow(loc, borrowedPullbackContextValue); + } else { + partialApplyArg = pbStructVal; + } + + auto *pullbackPartialApply = Builder.createPartialApply( + loc, pullbackRef, vjpSubstMap, {partialApplyArg}, + ParameterConvention::Direct_Guaranteed); auto pullbackType = vjp->getLoweredFunctionType() ->getResults() .back() @@ -213,7 +274,7 @@ class VJPCloner::Implementation final } else if (pullbackSubstType->isABICompatibleWith(pullbackFnType, *vjp) .isCompatible()) { pullbackValue = - builder.createConvertFunction(loc, pullbackPartialApply, pullbackType, + Builder.createConvertFunction(loc, pullbackPartialApply, pullbackType, /*withoutActuallyEscaping*/ false); } else { llvm::report_fatal_error("Pullback value type is not ABI-compatible " @@ -224,8 +285,8 @@ class VJPCloner::Implementation final SmallVector directResults; directResults.append(origResults.begin(), origResults.end()); directResults.push_back(pullbackValue); - builder.createReturn(ri->getLoc(), - joinElements(directResults, builder, loc)); + Builder.createReturn(ri->getLoc(), + joinElements(directResults, Builder, 
loc)); } void visitBranchInst(BranchInst *bi) { @@ -427,18 +488,22 @@ class VJPCloner::Implementation final return; } } - auto origFnType = origCallee->getType().castTo(); - auto origFnUnsubstType = origFnType->getUnsubstitutedType(getModule()); - if (origFnType != origFnUnsubstType) { - origCallee = builder.createConvertFunction( - loc, origCallee, SILType::getPrimitiveObjectType(origFnUnsubstType), - /*withoutActuallyEscaping*/ false); - } - auto borrowedDiffFunc = builder.emitBeginBorrowOperation(loc, origCallee); - vjpValue = builder.createDifferentiableFunctionExtract( - loc, NormalDifferentiableFunctionTypeComponent::VJP, - borrowedDiffFunc); - vjpValue = builder.emitCopyValueOperation(loc, vjpValue); + builder.emitScopedBorrowOperation( + loc, origCallee, [&](SILValue borrowedDiffFunc) { + auto origFnType = origCallee->getType().castTo(); + auto origFnUnsubstType = + origFnType->getUnsubstitutedType(getModule()); + if (origFnType != origFnUnsubstType) { + borrowedDiffFunc = builder.createConvertFunction( + loc, borrowedDiffFunc, + SILType::getPrimitiveObjectType(origFnUnsubstType), + /*withoutActuallyEscaping*/ false); + } + vjpValue = builder.createDifferentiableFunctionExtract( + loc, NormalDifferentiableFunctionTypeComponent::VJP, + borrowedDiffFunc); + vjpValue = builder.emitCopyValueOperation(loc, vjpValue); + }); auto vjpFnType = vjpValue->getType().castTo(); auto vjpFnUnsubstType = vjpFnType->getUnsubstitutedType(getModule()); if (vjpFnType != vjpFnUnsubstType) { @@ -540,11 +605,14 @@ class VJPCloner::Implementation final // Record the `differentiable_function` instruction. 
context.getDifferentiableFunctionInstWorklist().push_back(diffFuncInst); - auto borrowedADFunc = builder.emitBeginBorrowOperation(loc, diffFuncInst); - auto extractedVJP = getBuilder().createDifferentiableFunctionExtract( - loc, NormalDifferentiableFunctionTypeComponent::VJP, borrowedADFunc); - vjpValue = builder.emitCopyValueOperation(loc, extractedVJP); - builder.emitEndBorrowOperation(loc, borrowedADFunc); + builder.emitScopedBorrowOperation( + loc, diffFuncInst, [&](SILValue borrowedADFunc) { + auto extractedVJP = + getBuilder().createDifferentiableFunctionExtract( + loc, NormalDifferentiableFunctionTypeComponent::VJP, + borrowedADFunc); + vjpValue = builder.emitCopyValueOperation(loc, extractedVJP); + }); builder.emitDestroyValueOperation(loc, diffFuncInst); } @@ -700,8 +768,10 @@ VJPCloner::Implementation::Implementation(VJPCloner &cloner, ADContext &context, vjp(vjp), invoker(invoker), activityInfo(getActivityInfoHelper( context, original, witness->getSILAutoDiffIndices(), vjp)), + loopInfo(context.getPassManager().getAnalysis() + ->get(original)), pullbackInfo(context, AutoDiffLinearMapKind::Pullback, original, vjp, - witness->getSILAutoDiffIndices(), activityInfo) { + witness->getSILAutoDiffIndices(), activityInfo, loopInfo) { // Create empty pullback function. pullback = createEmptyPullback(); context.recordGeneratedFunction(pullback); @@ -728,6 +798,7 @@ const SILAutoDiffIndices VJPCloner::getIndices() const { } DifferentiationInvoker VJPCloner::getInvoker() const { return impl.invoker; } LinearMapInfo &VJPCloner::getPullbackInfo() const { return impl.pullbackInfo; } +SILLoopInfo *VJPCloner::getLoopInfo() const { return impl.loopInfo; } const DifferentiableActivityInfo &VJPCloner::getActivityInfo() const { return impl.activityInfo; } @@ -864,13 +935,21 @@ SILFunction *VJPCloner::Implementation::createEmptyPullback() { pbParams.push_back(inoutParamTanParam); } - // Accept a pullback struct in the pullback parameter list. 
This is the - // returned pullback's closure context. - auto *origExit = &*original->findReturnBB(); - auto *pbStruct = pullbackInfo.getLinearMapStruct(origExit); - auto pbStructType = - pbStruct->getDeclaredInterfaceType()->getCanonicalType(witnessCanGenSig); - pbParams.push_back({pbStructType, ParameterConvention::Direct_Owned}); + if (pullbackInfo.hasLoops()) { + // Accept an `AutoDiffLinearMapContext` heap object if there are loops. + pbParams.push_back({ + getASTContext().TheNativeObjectType, + ParameterConvention::Direct_Guaranteed + }); + } else { + // Accept a pullback struct in the pullback parameter list. This is the + // returned pullback's closure context. + auto *origExit = &*original->findReturnBB(); + auto *pbStruct = pullbackInfo.getLinearMapStruct(origExit); + auto pbStructType = + pbStruct->getDeclaredInterfaceType()->getCanonicalType(witnessCanGenSig); + pbParams.push_back({pbStructType, ParameterConvention::Direct_Owned}); + } // Add pullback results for the requested wrt parameters. 
for (auto i : indices.parameters->getIndices()) { @@ -946,8 +1025,8 @@ VJPCloner::Implementation::buildPullbackValueStructValue(TermInst *termInst) { auto loc = RegularLocation::getAutoGeneratedLocation(); auto origBB = termInst->getParent(); auto *vjpBB = BBMap[origBB]; - auto *pbStruct = pullbackInfo.getLinearMapStruct(origBB); - auto structLoweredTy = getNominalDeclLoweredType(pbStruct); + auto structLoweredTy = + remapType(pullbackInfo.getLinearMapStructLoweredType(origBB)); auto bbPullbackValues = pullbackValues[origBB]; if (!origBB->isEntry()) { auto *predEnumArg = vjpBB->getArguments().back(); @@ -961,25 +1040,36 @@ EnumInst *VJPCloner::Implementation::buildPredecessorEnumValue( SILBuilder &builder, SILBasicBlock *predBB, SILBasicBlock *succBB, SILValue pbStructVal) { auto loc = RegularLocation::getAutoGeneratedLocation(); - auto *succEnum = pullbackInfo.getBranchingTraceDecl(succBB); - auto enumLoweredTy = getNominalDeclLoweredType(succEnum); + auto enumLoweredTy = + remapType(pullbackInfo.getBranchingTraceEnumLoweredType(succBB)); auto *enumEltDecl = pullbackInfo.lookUpBranchingTraceEnumElement(predBB, succBB); auto enumEltType = getOpType(enumLoweredTy.getEnumElementType( enumEltDecl, getModule(), TypeExpansionContext::minimal())); - // If the enum element type does not have a box type (i.e. the enum case is - // not indirect), then directly create an enum. - auto boxType = dyn_cast(enumEltType.getASTType()); - if (!boxType) - return builder.createEnum(loc, pbStructVal, enumEltDecl, enumLoweredTy); - // Otherwise, box the pullback struct value and create an enum. 
- auto *newBox = builder.createAllocBox(loc, boxType); - builder.emitScopedBorrowOperation(loc, newBox, [&](SILValue borrowedBox) { - auto *projectBox = builder.createProjectBox(loc, newBox, /*index*/ 0); - builder.emitStoreValueOperation(loc, pbStructVal, projectBox, - StoreOwnershipQualifier::Init); - }); - return builder.createEnum(loc, newBox, enumEltDecl, enumLoweredTy); + // If the predecessor block is in a loop, its predecessor enum payload is a + // `Builtin.RawPointer`. + if (loopInfo->getLoopFor(predBB)) { + auto rawPtrType = SILType::getRawPointerType(getASTContext()); + assert(enumEltType == rawPtrType); + auto pbStructType = pbStructVal->getType(); + SILValue pbStructSize = + emitMemoryLayoutSize(Builder, loc, pbStructType.getASTType()); + auto rawBufferValue = builder.createBuiltin( + loc, + getASTContext().getIdentifier( + getBuiltinName(BuiltinValueKind::AutoDiffAllocateSubcontext)), + rawPtrType, SubstitutionMap(), + {borrowedPullbackContextValue, pbStructSize}); + auto typedBufferValue = builder.createPointerToAddress( + loc, rawBufferValue, pbStructType.getAddressType(), + /*isStrict*/ true); + builder.createStore( + loc, pbStructVal, typedBufferValue, + pbStructType.isTrivial(*pullback) ? + StoreOwnershipQualifier::Trivial : StoreOwnershipQualifier::Init); + return builder.createEnum(loc, rawBufferValue, enumEltDecl, enumLoweredTy); + } + return builder.createEnum(loc, pbStructVal, enumEltDecl, enumLoweredTy); } bool VJPCloner::Implementation::run() { @@ -991,6 +1081,8 @@ bool VJPCloner::Implementation::run() { auto *entry = vjp->createBasicBlock(); createEntryArguments(vjp); + emitLinearMapContextInitializationIfNeeded(); + // Clone. 
SmallVector entryArgs(entry->getArguments().begin(), entry->getArguments().end()); @@ -1019,7 +1111,14 @@ bool VJPCloner::Implementation::run() { return errorOccurred; } -bool VJPCloner::run() { return impl.run(); } +bool VJPCloner::run() { + bool foundError = impl.run(); +#ifndef NDEBUG + if (!foundError) + getVJP().verify(); +#endif + return foundError; +} } // end namespace autodiff } // end namespace swift diff --git a/lib/SILOptimizer/IPO/UsePrespecialized.cpp b/lib/SILOptimizer/IPO/UsePrespecialized.cpp index b2f7bfde4e40f..bdf9d6210c821 100644 --- a/lib/SILOptimizer/IPO/UsePrespecialized.cpp +++ b/lib/SILOptimizer/IPO/UsePrespecialized.cpp @@ -105,9 +105,9 @@ bool UsePrespecialized::replaceByPrespecialized(SILFunction &F) { // Create a name of the specialization. All external pre-specializations // are serialized without bodies. Thus use IsNotSerialized here. Mangle::GenericSpecializationMangler NewGenericMangler(ReferencedF, - Subs, IsNotSerialized, - /*isReAbstracted*/ true); - std::string ClonedName = NewGenericMangler.mangle(); + IsNotSerialized); + std::string ClonedName = NewGenericMangler.mangleReabstracted(Subs, + ReInfo.needAlternativeMangling()); SILFunction *NewF = nullptr; // If we already have this specialization, reuse it. 
diff --git a/lib/SILOptimizer/Mandatory/MandatoryCombine.cpp b/lib/SILOptimizer/Mandatory/MandatoryCombine.cpp index 5d552c25a14a1..52615de975f8b 100644 --- a/lib/SILOptimizer/Mandatory/MandatoryCombine.cpp +++ b/lib/SILOptimizer/Mandatory/MandatoryCombine.cpp @@ -25,12 +25,15 @@ //===----------------------------------------------------------------------===// #define DEBUG_TYPE "sil-mandatory-combiner" + #include "swift/Basic/LLVM.h" #include "swift/Basic/STLExtras.h" +#include "swift/SIL/BasicBlockUtils.h" #include "swift/SIL/SILInstructionWorklist.h" #include "swift/SIL/SILVisitor.h" #include "swift/SILOptimizer/PassManager/Passes.h" #include "swift/SILOptimizer/PassManager/Transforms.h" +#include "swift/SILOptimizer/Utils/CanonicalizeInstruction.h" #include "swift/SILOptimizer/Utils/InstOptUtils.h" #include "swift/SILOptimizer/Utils/StackNesting.h" #include "llvm/ADT/STLExtras.h" @@ -53,6 +56,55 @@ static bool areAllValuesTrivial(Values values, SILFunction &function) { }); } +//===----------------------------------------------------------------------===// +// CanonicalizeInstruction subclass for use in Mandatory Combiner. +//===----------------------------------------------------------------------===// + +namespace { + +class MandatoryCombineCanonicalize final : CanonicalizeInstruction { +public: + using Worklist = SmallSILInstructionWorklist<256>; + +private: + Worklist &worklist; + bool changed = false; + +public: + MandatoryCombineCanonicalize(Worklist &worklist, DeadEndBlocks &deadEndBlocks) + : CanonicalizeInstruction(DEBUG_TYPE, deadEndBlocks), worklist(worklist) { + } + + void notifyNewInstruction(SILInstruction *inst) override { + worklist.add(inst); + worklist.addUsersOfAllResultsToWorklist(inst); + changed = true; + } + + // Just delete the given 'inst' and record its operands. The callback isn't + // allowed to mutate any other instructions. 
+ void killInstruction(SILInstruction *inst) override { + worklist.eraseSingleInstFromFunction(*inst, + /*AddOperandsToWorklist*/ true); + changed = true; + } + + void notifyHasNewUsers(SILValue value) override { + if (worklist.size() < 10000) { + worklist.addUsersToWorklist(value); + } + changed = true; + } + + bool tryCanonicalize(SILInstruction *inst) { + changed = false; + canonicalize(inst); + return changed; + } +}; + +} // anonymous namespace + //===----------------------------------------------------------------------===// // MandatoryCombiner Interface //===----------------------------------------------------------------------===// @@ -80,9 +132,11 @@ class MandatoryCombiner final InstModCallbacks instModCallbacks; SmallVectorImpl &createdInstructions; SmallVector instructionsPendingDeletion; + DeadEndBlocks &deadEndBlocks; public: - MandatoryCombiner(SmallVectorImpl &createdInstructions) + MandatoryCombiner(SmallVectorImpl &createdInstructions, + DeadEndBlocks &deadEndBlocks) : worklist("MC"), madeChange(false), iteration(0), instModCallbacks( [&](SILInstruction *instruction) { @@ -93,7 +147,8 @@ class MandatoryCombiner final [this](SILValue oldValue, SILValue newValue) { worklist.replaceValueUsesWith(oldValue, newValue); }), - createdInstructions(createdInstructions){}; + createdInstructions(createdInstructions), + deadEndBlocks(deadEndBlocks){}; void addReachableCodeToWorklist(SILFunction &function); @@ -137,6 +192,13 @@ class MandatoryCombiner final // MandatoryCombiner Non-Visitor Utility Methods //===----------------------------------------------------------------------===// +static llvm::cl::opt EnableCanonicalizationAndTrivialDCE( + "sil-mandatory-combine-enable-canon-and-simple-dce", llvm::cl::Hidden, + llvm::cl::init(false), + llvm::cl::desc("An option for compiler developers that cause the Mandatory " + "Combiner to be more aggressive at eliminating trivially " + "dead code and canonicalizing SIL")); + void 
MandatoryCombiner::addReachableCodeToWorklist(SILFunction &function) { SmallVector blockWorklist; SmallPtrSet blockAlreadyAddedToWorklist; @@ -148,6 +210,8 @@ void MandatoryCombiner::addReachableCodeToWorklist(SILFunction &function) { blockAlreadyAddedToWorklist.insert(firstBlock); } + bool compilingWithOptimization = function.getEffectiveOptimizationMode() != + OptimizationMode::NoOptimization; while (!blockWorklist.empty()) { auto *block = blockWorklist.pop_back_val(); @@ -156,6 +220,12 @@ void MandatoryCombiner::addReachableCodeToWorklist(SILFunction &function) { ++iterator; if (isInstructionTriviallyDead(instruction)) { + if (EnableCanonicalizationAndTrivialDCE) { + if (compilingWithOptimization) { + instruction->replaceAllUsesOfAllResultsWithUndef(); + instruction->eraseFromParent(); + } + } continue; } @@ -177,6 +247,10 @@ bool MandatoryCombiner::doOneIteration(SILFunction &function, madeChange = false; addReachableCodeToWorklist(function); + MandatoryCombineCanonicalize mcCanonicialize(worklist, deadEndBlocks); + + bool compilingWithOptimization = function.getEffectiveOptimizationMode() != + OptimizationMode::NoOptimization; while (!worklist.isEmpty()) { auto *instruction = worklist.pop_back_val(); @@ -184,6 +258,21 @@ bool MandatoryCombiner::doOneIteration(SILFunction &function, continue; } + if (EnableCanonicalizationAndTrivialDCE) { + if (compilingWithOptimization) { + if (isInstructionTriviallyDead(instruction)) { + worklist.eraseInstFromFunction(*instruction); + madeChange = true; + continue; + } + } + + if (mcCanonicialize.tryCanonicalize(instruction)) { + madeChange = true; + continue; + } + } + #ifndef NDEBUG std::string instructionDescription; #endif @@ -306,7 +395,8 @@ class MandatoryCombine final : public SILFunctionTransform { return; } - MandatoryCombiner combiner(createdInstructions); + DeadEndBlocks deadEndBlocks(function); + MandatoryCombiner combiner(createdInstructions, deadEndBlocks); bool madeChange = combiner.runOnFunction(*function); if 
(madeChange) { diff --git a/lib/SILOptimizer/Mandatory/OwnershipModelEliminator.cpp b/lib/SILOptimizer/Mandatory/OwnershipModelEliminator.cpp index a21342b4a1bc6..9521833d28342 100644 --- a/lib/SILOptimizer/Mandatory/OwnershipModelEliminator.cpp +++ b/lib/SILOptimizer/Mandatory/OwnershipModelEliminator.cpp @@ -23,6 +23,8 @@ //===----------------------------------------------------------------------===// #define DEBUG_TYPE "sil-ownership-model-eliminator" + +#include "swift/Basic/BlotSetVector.h" #include "swift/SIL/Projection.h" #include "swift/SIL/SILBuilder.h" #include "swift/SIL/SILFunction.h" @@ -45,153 +47,223 @@ DumpBefore("sil-dump-before-ome-to-path", llvm::cl::Hidden); namespace { +/// A high level SILInstruction visitor that lowers Ownership SSA from SIL. +/// +/// NOTE: Erasing instructions must always be done by the method +/// eraseInstruction /and/ any instructions that are created in one visit must +/// not be deleted in the same visit since after each visit, we empty the +/// tracking list into the instructionsToSimplify array. We do this in order to +/// ensure that when we use inst-simplify on these instructions, we have +/// consistent non-ossa vs ossa code rather than an intermediate state. struct OwnershipModelEliminatorVisitor : SILInstructionVisitor { - SILBuilder &B; - SILOpenedArchetypesTracker OpenedArchetypesTracker; + SmallVector trackingList; + SmallBlotSetVector instructionsToSimplify; + + /// Points at either a user passed in SILBuilderContext or points at + /// builderCtxStorage. + SILBuilderContext builderCtx; + + SILOpenedArchetypesTracker openedArchetypesTracker; - OwnershipModelEliminatorVisitor(SILBuilder &B) - : B(B), OpenedArchetypesTracker(&B.getFunction()) { - B.setOpenedArchetypesTracker(&OpenedArchetypesTracker); + /// Construct an OME visitor for eliminating ownership from \p fn. 
+ OwnershipModelEliminatorVisitor(SILFunction &fn) + : trackingList(), instructionsToSimplify(), + builderCtx(fn.getModule(), &trackingList), + openedArchetypesTracker(&fn) { + builderCtx.setOpenedArchetypesTracker(&openedArchetypesTracker); } - void beforeVisit(SILInstruction *I) { - B.setInsertionPoint(I); - B.setCurrentDebugScope(I->getDebugScope()); + /// A "syntactic" high level function that combines our insertPt with a + /// builder ctx. + /// + /// Since this is syntactic and we assume that our caller is passing in a + /// lambda that if we inline will be eliminated, we mark this function always + /// inline. + template + ResultTy LLVM_ATTRIBUTE_ALWAYS_INLINE + withBuilder(SILInstruction *insertPt, + llvm::function_ref visitor) { + SILBuilderWithScope builder(insertPt, builderCtx); + return visitor(builder, insertPt->getLoc()); + } + + void drainTrackingList() { + // Called before we visit a new instruction and before we ever erase an + // instruction. This ensures that we can post-process instructions that need + // simplification in a purely non-ossa world instead of an indeterminate + // state mid elimination. + while (!trackingList.empty()) { + instructionsToSimplify.insert(trackingList.pop_back_val()); + } } - bool visitSILInstruction(SILInstruction *I) { return false; } - bool visitLoadInst(LoadInst *LI); - bool visitStoreInst(StoreInst *SI); - bool visitStoreBorrowInst(StoreBorrowInst *SI); - bool visitCopyValueInst(CopyValueInst *CVI); - bool visitDestroyValueInst(DestroyValueInst *DVI); - bool visitLoadBorrowInst(LoadBorrowInst *LBI); - bool visitBeginBorrowInst(BeginBorrowInst *BBI) { - BBI->replaceAllUsesWith(BBI->getOperand()); - BBI->eraseFromParent(); + void beforeVisit(SILInstruction *instToVisit) { + // Add any elements to the tracking list that we currently have in the + // tracking list that we haven't added yet. 
+ drainTrackingList(); + } + + void eraseInstruction(SILInstruction *i) { + // Before we erase anything, drain the tracking list. + drainTrackingList(); + + // Make sure to blot our instruction. + instructionsToSimplify.erase(i); + i->eraseFromParent(); + } + + void eraseInstructionAndRAUW(SingleValueInstruction *i, SILValue newValue) { + // Make sure to blot our instruction. + i->replaceAllUsesWith(newValue); + eraseInstruction(i); + } + + bool visitSILInstruction(SILInstruction *) { return false; } + bool visitLoadInst(LoadInst *li); + bool visitStoreInst(StoreInst *si); + bool visitStoreBorrowInst(StoreBorrowInst *si); + bool visitCopyValueInst(CopyValueInst *cvi); + bool visitDestroyValueInst(DestroyValueInst *dvi); + bool visitLoadBorrowInst(LoadBorrowInst *lbi); + bool visitBeginBorrowInst(BeginBorrowInst *bbi) { + eraseInstructionAndRAUW(bbi, bbi->getOperand()); return true; } - bool visitEndBorrowInst(EndBorrowInst *EBI) { - EBI->eraseFromParent(); + bool visitEndBorrowInst(EndBorrowInst *ebi) { + eraseInstruction(ebi); return true; } - bool visitEndLifetimeInst(EndLifetimeInst *ELI) { - ELI->eraseFromParent(); + bool visitEndLifetimeInst(EndLifetimeInst *eli) { + eraseInstruction(eli); return true; } bool visitUncheckedOwnershipConversionInst( - UncheckedOwnershipConversionInst *UOCI) { - UOCI->replaceAllUsesWith(UOCI->getOperand()); - UOCI->eraseFromParent(); + UncheckedOwnershipConversionInst *uoci) { + eraseInstructionAndRAUW(uoci, uoci->getOperand()); return true; } - bool visitUnmanagedRetainValueInst(UnmanagedRetainValueInst *URVI); - bool visitUnmanagedReleaseValueInst(UnmanagedReleaseValueInst *URVI); - bool visitUnmanagedAutoreleaseValueInst(UnmanagedAutoreleaseValueInst *UAVI); - bool visitCheckedCastBranchInst(CheckedCastBranchInst *CBI); - bool visitSwitchEnumInst(SwitchEnumInst *SWI); - bool visitDestructureStructInst(DestructureStructInst *DSI); - bool visitDestructureTupleInst(DestructureTupleInst *DTI); + bool 
visitUnmanagedRetainValueInst(UnmanagedRetainValueInst *urvi); + bool visitUnmanagedReleaseValueInst(UnmanagedReleaseValueInst *urvi); + bool visitUnmanagedAutoreleaseValueInst(UnmanagedAutoreleaseValueInst *uavi); + bool visitCheckedCastBranchInst(CheckedCastBranchInst *cbi); + bool visitSwitchEnumInst(SwitchEnumInst *swi); + bool visitDestructureStructInst(DestructureStructInst *dsi); + bool visitDestructureTupleInst(DestructureTupleInst *dti); // We lower this to unchecked_bitwise_cast losing our assumption of layout // compatibility. - bool visitUncheckedValueCastInst(UncheckedValueCastInst *UVCI) { - auto *NewVal = B.createUncheckedBitwiseCast( - UVCI->getLoc(), UVCI->getOperand(), UVCI->getType()); - UVCI->replaceAllUsesWith(NewVal); - UVCI->eraseFromParent(); - return true; + bool visitUncheckedValueCastInst(UncheckedValueCastInst *uvci) { + return withBuilder(uvci, [&](SILBuilder &b, SILLocation loc) { + auto *newVal = b.createUncheckedBitwiseCast(loc, uvci->getOperand(), + uvci->getType()); + eraseInstructionAndRAUW(uvci, newVal); + return true; + }); } + + void splitDestructure(SILInstruction *destructure, + SILValue destructureOperand); }; } // end anonymous namespace -bool OwnershipModelEliminatorVisitor::visitLoadInst(LoadInst *LI) { - auto Qualifier = LI->getOwnershipQualifier(); +bool OwnershipModelEliminatorVisitor::visitLoadInst(LoadInst *li) { + auto qualifier = li->getOwnershipQualifier(); // If the qualifier is unqualified, there is nothing further to do // here. Just return. 
- if (Qualifier == LoadOwnershipQualifier::Unqualified) + if (qualifier == LoadOwnershipQualifier::Unqualified) return false; - SILValue Result = B.emitLoadValueOperation(LI->getLoc(), LI->getOperand(), - LI->getOwnershipQualifier()); + auto result = withBuilder(li, [&](SILBuilder &b, SILLocation loc) { + return b.emitLoadValueOperation(loc, li->getOperand(), + li->getOwnershipQualifier()); + }); // Then remove the qualified load and use the unqualified load as the def of // all of LI's uses. - LI->replaceAllUsesWith(Result); - LI->eraseFromParent(); + eraseInstructionAndRAUW(li, result); return true; } -bool OwnershipModelEliminatorVisitor::visitStoreInst(StoreInst *SI) { - auto Qualifier = SI->getOwnershipQualifier(); +bool OwnershipModelEliminatorVisitor::visitStoreInst(StoreInst *si) { + auto qualifier = si->getOwnershipQualifier(); // If the qualifier is unqualified, there is nothing further to do // here. Just return. - if (Qualifier == StoreOwnershipQualifier::Unqualified) + if (qualifier == StoreOwnershipQualifier::Unqualified) return false; - B.emitStoreValueOperation(SI->getLoc(), SI->getSrc(), SI->getDest(), - SI->getOwnershipQualifier()); + withBuilder(si, [&](SILBuilder &b, SILLocation loc) { + b.emitStoreValueOperation(loc, si->getSrc(), si->getDest(), + si->getOwnershipQualifier()); + }); // Then remove the qualified store. - SI->eraseFromParent(); + eraseInstruction(si); return true; } bool OwnershipModelEliminatorVisitor::visitStoreBorrowInst( - StoreBorrowInst *SI) { - B.emitStoreValueOperation(SI->getLoc(), SI->getSrc(), SI->getDest(), - StoreOwnershipQualifier::Init); + StoreBorrowInst *si) { + withBuilder(si, [&](SILBuilder &b, SILLocation loc) { + b.emitStoreValueOperation(loc, si->getSrc(), si->getDest(), + StoreOwnershipQualifier::Unqualified); + }); // Then remove the qualified store. 
- SI->eraseFromParent(); + eraseInstruction(si); return true; } -bool -OwnershipModelEliminatorVisitor::visitLoadBorrowInst(LoadBorrowInst *LBI) { +bool OwnershipModelEliminatorVisitor::visitLoadBorrowInst(LoadBorrowInst *lbi) { // Break down the load borrow into an unqualified load. - auto *UnqualifiedLoad = B.createLoad(LBI->getLoc(), LBI->getOperand(), - LoadOwnershipQualifier::Unqualified); + auto newLoad = + withBuilder(lbi, [&](SILBuilder &b, SILLocation loc) { + return b.createLoad(loc, lbi->getOperand(), + LoadOwnershipQualifier::Unqualified); + }); // Then remove the qualified load and use the unqualified load as the def of // all of LI's uses. - LBI->replaceAllUsesWith(UnqualifiedLoad); - LBI->eraseFromParent(); + eraseInstructionAndRAUW(lbi, newLoad); return true; } -bool OwnershipModelEliminatorVisitor::visitCopyValueInst(CopyValueInst *CVI) { +bool OwnershipModelEliminatorVisitor::visitCopyValueInst(CopyValueInst *cvi) { // A copy_value of an address-only type cannot be replaced. - if (CVI->getType().isAddressOnly(B.getFunction())) + if (cvi->getType().isAddressOnly(*cvi->getFunction())) return false; // Now that we have set the unqualified ownership flag, destroy value // operation will delegate to the appropriate strong_release, etc. - B.emitCopyValueOperation(CVI->getLoc(), CVI->getOperand()); - CVI->replaceAllUsesWith(CVI->getOperand()); - CVI->eraseFromParent(); + withBuilder(cvi, [&](SILBuilder &b, SILLocation loc) { + b.emitCopyValueOperation(loc, cvi->getOperand()); + }); + eraseInstructionAndRAUW(cvi, cvi->getOperand()); return true; } bool OwnershipModelEliminatorVisitor::visitUnmanagedRetainValueInst( - UnmanagedRetainValueInst *URVI) { + UnmanagedRetainValueInst *urvi) { // Now that we have set the unqualified ownership flag, destroy value // operation will delegate to the appropriate strong_release, etc. 
- B.emitCopyValueOperation(URVI->getLoc(), URVI->getOperand()); - URVI->eraseFromParent(); + withBuilder(urvi, [&](SILBuilder &b, SILLocation loc) { + b.emitCopyValueOperation(loc, urvi->getOperand()); + }); + eraseInstruction(urvi); return true; } bool OwnershipModelEliminatorVisitor::visitUnmanagedReleaseValueInst( - UnmanagedReleaseValueInst *URVI) { + UnmanagedReleaseValueInst *urvi) { // Now that we have set the unqualified ownership flag, destroy value // operation will delegate to the appropriate strong_release, etc. - B.emitDestroyValueOperation(URVI->getLoc(), URVI->getOperand()); - URVI->eraseFromParent(); + withBuilder(urvi, [&](SILBuilder &b, SILLocation loc) { + b.emitDestroyValueOperation(loc, urvi->getOperand()); + }); + eraseInstruction(urvi); return true; } @@ -199,26 +271,30 @@ bool OwnershipModelEliminatorVisitor::visitUnmanagedAutoreleaseValueInst( UnmanagedAutoreleaseValueInst *UAVI) { // Now that we have set the unqualified ownership flag, destroy value // operation will delegate to the appropriate strong_release, etc. - B.createAutoreleaseValue(UAVI->getLoc(), UAVI->getOperand(), - UAVI->getAtomicity()); - UAVI->eraseFromParent(); + withBuilder(UAVI, [&](SILBuilder &b, SILLocation loc) { + b.createAutoreleaseValue(loc, UAVI->getOperand(), UAVI->getAtomicity()); + }); + eraseInstruction(UAVI); return true; } -bool OwnershipModelEliminatorVisitor::visitDestroyValueInst(DestroyValueInst *DVI) { +bool OwnershipModelEliminatorVisitor::visitDestroyValueInst( + DestroyValueInst *dvi) { // A destroy_value of an address-only type cannot be replaced. - if (DVI->getOperand()->getType().isAddressOnly(B.getFunction())) + if (dvi->getOperand()->getType().isAddressOnly(*dvi->getFunction())) return false; // Now that we have set the unqualified ownership flag, destroy value // operation will delegate to the appropriate strong_release, etc. 
- B.emitDestroyValueOperation(DVI->getLoc(), DVI->getOperand()); - DVI->eraseFromParent(); + withBuilder(dvi, [&](SILBuilder &b, SILLocation loc) { + b.emitDestroyValueOperation(loc, dvi->getOperand()); + }); + eraseInstruction(dvi); return true; } bool OwnershipModelEliminatorVisitor::visitCheckedCastBranchInst( - CheckedCastBranchInst *CBI) { + CheckedCastBranchInst *cbi) { // In ownership qualified SIL, checked_cast_br must pass its argument to the // fail case so we can clean it up. In non-ownership qualified SIL, we expect // no argument from the checked_cast_br in the default case. The way that we @@ -227,16 +303,16 @@ bool OwnershipModelEliminatorVisitor::visitCheckedCastBranchInst( // 1. We replace all uses of the argument to the false block with a use of the // checked cast branch's operand. // 2. We delete the argument from the false block. - SILBasicBlock *FailureBlock = CBI->getFailureBB(); - if (FailureBlock->getNumArguments() == 0) + SILBasicBlock *failureBlock = cbi->getFailureBB(); + if (failureBlock->getNumArguments() == 0) return false; - FailureBlock->getArgument(0)->replaceAllUsesWith(CBI->getOperand()); - FailureBlock->eraseArgument(0); + failureBlock->getArgument(0)->replaceAllUsesWith(cbi->getOperand()); + failureBlock->eraseArgument(0); return true; } bool OwnershipModelEliminatorVisitor::visitSwitchEnumInst( - SwitchEnumInst *SWEI) { + SwitchEnumInst *swei) { // In ownership qualified SIL, switch_enum must pass its argument to the fail // case so we can clean it up. In non-ownership qualified SIL, we expect no // argument from the switch_enum in the default case. The way that we handle @@ -245,70 +321,69 @@ bool OwnershipModelEliminatorVisitor::visitSwitchEnumInst( // 1. We replace all uses of the argument to the false block with a use of the // checked cast branch's operand. // 2. We delete the argument from the false block. 
- if (!SWEI->hasDefault()) + if (!swei->hasDefault()) return false; - SILBasicBlock *DefaultBlock = SWEI->getDefaultBB(); - if (DefaultBlock->getNumArguments() == 0) + SILBasicBlock *defaultBlock = swei->getDefaultBB(); + if (defaultBlock->getNumArguments() == 0) return false; - DefaultBlock->getArgument(0)->replaceAllUsesWith(SWEI->getOperand()); - DefaultBlock->eraseArgument(0); + defaultBlock->getArgument(0)->replaceAllUsesWith(swei->getOperand()); + defaultBlock->eraseArgument(0); return true; } -static void splitDestructure(SILBuilder &B, SILInstruction *I, SILValue Op) { - assert((isa(I) || isa(I)) && +void OwnershipModelEliminatorVisitor::splitDestructure( + SILInstruction *destructureInst, SILValue destructureOperand) { + assert((isa(destructureInst) || + isa(destructureInst)) && "Only destructure operations can be passed to splitDestructure"); // First before we destructure anything, see if we can simplify any of our // instruction operands. - SILModule &M = I->getModule(); - SILLocation Loc = I->getLoc(); - SILType OpType = Op->getType(); + SILModule &M = destructureInst->getModule(); + SILType opType = destructureOperand->getType(); - llvm::SmallVector Projections; - Projection::getFirstLevelProjections(OpType, M, B.getTypeExpansionContext(), - Projections); - assert(Projections.size() == I->getNumResults()); + llvm::SmallVector projections; + Projection::getFirstLevelProjections( + opType, M, TypeExpansionContext(*destructureInst->getFunction()), + projections); + assert(projections.size() == destructureInst->getNumResults()); - auto Results = I->getResults(); - for (unsigned Index : indices(Results)) { - SILValue Result = Results[Index]; + auto destructureResults = destructureInst->getResults(); + for (unsigned index : indices(destructureResults)) { + SILValue result = destructureResults[index]; // If our result doesnt have any uses, do not emit instructions, just skip // it. 
- if (Result->use_empty()) + if (result->use_empty()) continue; // Otherwise, create a projection. - const auto &Proj = Projections[Index]; - SingleValueInstruction *ProjInst = - Proj.createObjectProjection(B, Loc, Op).get(); - - // If we can simplify, do so. - if (SILValue NewV = simplifyInstruction(ProjInst)) { - Result->replaceAllUsesWith(NewV); - ProjInst->eraseFromParent(); - continue; - } - - Result->replaceAllUsesWith(ProjInst); + const auto &proj = projections[index]; + auto *projInst = withBuilder( + destructureInst, [&](SILBuilder &b, SILLocation loc) { + return proj.createObjectProjection(b, loc, destructureOperand).get(); + }); + + // First RAUW Result with ProjInst. This ensures that we have a complete IR + // before we perform any simplifications. + result->replaceAllUsesWith(projInst); } // Now that all of its uses have been eliminated, erase the destructure. - I->eraseFromParent(); + eraseInstruction(destructureInst); } bool OwnershipModelEliminatorVisitor::visitDestructureStructInst( - DestructureStructInst *DSI) { - splitDestructure(B, DSI, DSI->getOperand()); + DestructureStructInst *dsi) { + splitDestructure(dsi, dsi->getOperand()); return true; } bool OwnershipModelEliminatorVisitor::visitDestructureTupleInst( - DestructureTupleInst *DTI) { - splitDestructure(B, DTI, DTI->getOperand()); + DestructureTupleInst *dti) { + splitDestructure(dti, dti->getOperand()); return true; } @@ -316,87 +391,111 @@ bool OwnershipModelEliminatorVisitor::visitDestructureTupleInst( // Top Level Entry Point //===----------------------------------------------------------------------===// -static bool stripOwnership(SILFunction &F) { +static bool stripOwnership(SILFunction &func) { // If F is an external declaration, do not process it. - if (F.isExternalDeclaration()) + if (func.isExternalDeclaration()) return false; // Set F to have unqualified ownership. 
- F.setOwnershipEliminated(); + func.setOwnershipEliminated(); - bool MadeChange = false; - SILBuilder B(F); - OwnershipModelEliminatorVisitor Visitor(B); + bool madeChange = false; + SmallVector createdInsts; + OwnershipModelEliminatorVisitor visitor(func); - for (auto &BB : F) { + for (auto &block : func) { // Change all arguments to have OwnershipKind::None. - for (auto *Arg : BB.getArguments()) { - Arg->setOwnershipKind(OwnershipKind::None); + for (auto *arg : block.getArguments()) { + arg->setOwnershipKind(OwnershipKind::None); } - for (auto II = BB.begin(), IE = BB.end(); II != IE;) { + for (auto ii = block.begin(), ie = block.end(); ii != ie;) { // Since we are going to be potentially removing instructions, we need // to make sure to increment our iterator before we perform any // visits. - SILInstruction *I = &*II; - ++II; + SILInstruction *inst = &*ii; + ++ii; - MadeChange |= Visitor.visit(I); + madeChange |= visitor.visit(inst); } } - return MadeChange; + + // Once we have finished processing all instructions, we should be + // consistently in non-ossa form meaning that it is now safe for us to invoke + // utilities that assume that they are in a consistent ossa or non-ossa form + // such as inst simplify. Now go through any instructions and simplify using + // inst simplify! + // + // DISCUSSION: We want our utilities to be able to assume if f.hasOwnership() + // is false then the utility is allowed to assume the function the utility is + // invoked within is in non-ossa form structurally (e.x.: non-ossa does not + // have arguments on the default result of checked_cast_br). 
+ while (!visitor.instructionsToSimplify.empty()) { + auto value = visitor.instructionsToSimplify.pop_back_val(); + if (!value.hasValue()) + continue; + if (SILValue newValue = simplifyInstruction(*value)) { + replaceAllSimplifiedUsesAndErase(*value, newValue, + [&](SILInstruction *instToErase) { + visitor.eraseInstruction(instToErase); + }); + madeChange = true; + } + } + + return madeChange; } static void prepareNonTransparentSILFunctionForOptimization(ModuleDecl *, - SILFunction *F) { - if (!F->hasOwnership() || F->isTransparent()) + SILFunction *f) { + if (!f->hasOwnership() || f->isTransparent()) return; LLVM_DEBUG(llvm::dbgs() << "After deserialization, stripping ownership in:" - << F->getName() << "\n"); + << f->getName() << "\n"); - stripOwnership(*F); + stripOwnership(*f); } -static void prepareSILFunctionForOptimization(ModuleDecl *, SILFunction *F) { - if (!F->hasOwnership()) +static void prepareSILFunctionForOptimization(ModuleDecl *, SILFunction *f) { + if (!f->hasOwnership()) return; LLVM_DEBUG(llvm::dbgs() << "After deserialization, stripping ownership in:" - << F->getName() << "\n"); + << f->getName() << "\n"); - stripOwnership(*F); + stripOwnership(*f); } namespace { struct OwnershipModelEliminator : SILFunctionTransform { - bool SkipTransparent; - bool SkipStdlibModule; + bool skipTransparent; + bool skipStdlibModule; - OwnershipModelEliminator(bool SkipTransparent, bool SkipStdlibModule) - : SkipTransparent(SkipTransparent), SkipStdlibModule(SkipStdlibModule) {} + OwnershipModelEliminator(bool skipTransparent, bool skipStdlibModule) + : skipTransparent(skipTransparent), skipStdlibModule(skipStdlibModule) {} void run() override { if (DumpBefore.size()) { getFunction()->dump(DumpBefore.c_str()); } - auto *F = getFunction(); - auto &Mod = getFunction()->getModule(); + auto *f = getFunction(); + auto &mod = getFunction()->getModule(); // If we are supposed to skip the stdlib module and we are in the stdlib // module bail. 
- if (SkipStdlibModule && Mod.isStdlibModule()) { + if (skipStdlibModule && mod.isStdlibModule()) { return; } - if (!F->hasOwnership()) + if (!f->hasOwnership()) return; // If we were asked to not strip ownership from transparent functions in // /our/ module, return. - if (SkipTransparent && F->isTransparent()) + if (skipTransparent && f->isTransparent()) return; // Verify here to make sure ownership is correct before we strip. @@ -417,10 +516,10 @@ struct OwnershipModelEliminator : SILFunctionTransform { "Found verification error when verifying before lowering " "ownership. Please re-run with -sil-verify-all to identify the " "actual pass that introduced the verification error."); - F->verify(); + f->verify(); } - if (stripOwnership(*F)) { + if (stripOwnership(*f)) { auto InvalidKind = SILAnalysis::InvalidationKind::BranchesAndInstructions; invalidateAnalysis(InvalidKind); } @@ -431,18 +530,18 @@ struct OwnershipModelEliminator : SILFunctionTransform { using NotificationHandlerTy = FunctionBodyDeserializationNotificationHandler; std::unique_ptr ptr; - if (SkipTransparent) { - if (!Mod.hasRegisteredDeserializationNotificationHandlerForNonTransparentFuncOME()) { + if (skipTransparent) { + if (!mod.hasRegisteredDeserializationNotificationHandlerForNonTransparentFuncOME()) { ptr.reset(new NotificationHandlerTy( prepareNonTransparentSILFunctionForOptimization)); - Mod.registerDeserializationNotificationHandler(std::move(ptr)); - Mod.setRegisteredDeserializationNotificationHandlerForNonTransparentFuncOME(); + mod.registerDeserializationNotificationHandler(std::move(ptr)); + mod.setRegisteredDeserializationNotificationHandlerForNonTransparentFuncOME(); } } else { - if (!Mod.hasRegisteredDeserializationNotificationHandlerForAllFuncOME()) { + if (!mod.hasRegisteredDeserializationNotificationHandlerForAllFuncOME()) { ptr.reset(new NotificationHandlerTy(prepareSILFunctionForOptimization)); - Mod.registerDeserializationNotificationHandler(std::move(ptr)); - 
Mod.setRegisteredDeserializationNotificationHandlerForAllFuncOME(); + mod.registerDeserializationNotificationHandler(std::move(ptr)); + mod.setRegisteredDeserializationNotificationHandlerForAllFuncOME(); } } } diff --git a/lib/SILOptimizer/Mandatory/RawSILInstLowering.cpp b/lib/SILOptimizer/Mandatory/RawSILInstLowering.cpp index cbcbb6c5f5c0a..12c2e83ee226e 100644 --- a/lib/SILOptimizer/Mandatory/RawSILInstLowering.cpp +++ b/lib/SILOptimizer/Mandatory/RawSILInstLowering.cpp @@ -148,7 +148,6 @@ static void getAssignByWrapperArgsRecursively(SmallVectorImpl &args, case SILArgumentConvention::Indirect_Inout: case SILArgumentConvention::Indirect_InoutAliasable: case SILArgumentConvention::Indirect_Out: - case SILArgumentConvention::Direct_Deallocating: llvm_unreachable("wrong convention for setter/initializer src argument"); } args.push_back(src); diff --git a/lib/SILOptimizer/Mandatory/SILGenCleanup.cpp b/lib/SILOptimizer/Mandatory/SILGenCleanup.cpp index e13226fd4d6c5..b2259fd17d802 100644 --- a/lib/SILOptimizer/Mandatory/SILGenCleanup.cpp +++ b/lib/SILOptimizer/Mandatory/SILGenCleanup.cpp @@ -16,6 +16,7 @@ #define DEBUG_TYPE "silgen-cleanup" +#include "swift/SIL/BasicBlockUtils.h" #include "swift/SIL/SILInstruction.h" #include "swift/SILOptimizer/PassManager/Transforms.h" #include "swift/SILOptimizer/Utils/CanonicalizeInstruction.h" @@ -28,7 +29,8 @@ struct SILGenCanonicalize final : CanonicalizeInstruction { bool changed = false; llvm::SmallPtrSet deadOperands; - SILGenCanonicalize() : CanonicalizeInstruction(DEBUG_TYPE) {} + SILGenCanonicalize(DeadEndBlocks &deadEndBlocks) + : CanonicalizeInstruction(DEBUG_TYPE, deadEndBlocks) {} void notifyNewInstruction(SILInstruction *) override { changed = true; } @@ -86,7 +88,8 @@ void SILGenCleanup::run() { LLVM_DEBUG(llvm::dbgs() << "\nRunning SILGenCleanup on " << function.getName() << "\n"); - SILGenCanonicalize sgCanonicalize; + DeadEndBlocks deadEndBlocks(&function); + SILGenCanonicalize sgCanonicalize(deadEndBlocks); // 
Iterate over all blocks even if they aren't reachable. No phi-less // dataflow cycles should have been created yet, and these transformations diff --git a/lib/SILOptimizer/SILCombiner/SILCombine.cpp b/lib/SILOptimizer/SILCombiner/SILCombine.cpp index e2042b4435b21..5c2b12d2322d9 100644 --- a/lib/SILOptimizer/SILCombiner/SILCombine.cpp +++ b/lib/SILOptimizer/SILCombiner/SILCombine.cpp @@ -112,9 +112,10 @@ class SILCombineCanonicalize final : CanonicalizeInstruction { bool changed = false; public: - SILCombineCanonicalize( - SmallSILInstructionWorklist<256> &Worklist) - : CanonicalizeInstruction(DEBUG_TYPE), Worklist(Worklist) {} + SILCombineCanonicalize(SmallSILInstructionWorklist<256> &Worklist, + DeadEndBlocks &deadEndBlocks) + : CanonicalizeInstruction(DEBUG_TYPE, deadEndBlocks), Worklist(Worklist) { + } void notifyNewInstruction(SILInstruction *inst) override { Worklist.add(inst); @@ -137,7 +138,7 @@ class SILCombineCanonicalize final : CanonicalizeInstruction { changed = true; } - bool tryCanonicalize(SILInstruction *inst) { + bool tryCanonicalize(SILInstruction *inst, DeadEndBlocks &deadEndBlocks) { changed = false; canonicalize(inst); return changed; @@ -153,7 +154,7 @@ bool SILCombiner::doOneIteration(SILFunction &F, unsigned Iteration) { // Add reachable instructions to our worklist. addReachableCodeToWorklist(&*F.begin()); - SILCombineCanonicalize scCanonicalize(Worklist); + SILCombineCanonicalize scCanonicalize(Worklist, deadEndBlocks); // Process until we run out of items in our worklist. while (!Worklist.isEmpty()) { @@ -177,7 +178,7 @@ bool SILCombiner::doOneIteration(SILFunction &F, unsigned Iteration) { } // Canonicalize the instruction. 
- if (scCanonicalize.tryCanonicalize(I)) { + if (scCanonicalize.tryCanonicalize(I, deadEndBlocks)) { MadeChange = true; continue; } diff --git a/lib/SILOptimizer/SILCombiner/SILCombiner.h b/lib/SILOptimizer/SILCombiner/SILCombiner.h index 4dd78cc66237d..4896ff08582a1 100644 --- a/lib/SILOptimizer/SILCombiner/SILCombiner.h +++ b/lib/SILOptimizer/SILCombiner/SILCombiner.h @@ -21,6 +21,7 @@ #ifndef SWIFT_SILOPTIMIZER_PASSMANAGER_SILCOMBINER_H #define SWIFT_SILOPTIMIZER_PASSMANAGER_SILCOMBINER_H +#include "swift/SIL/BasicBlockUtils.h" #include "swift/SIL/SILBuilder.h" #include "swift/SIL/SILInstruction.h" #include "swift/SIL/SILInstructionWorklist.h" @@ -59,6 +60,8 @@ class SILCombiner : /// Worklist containing all of the instructions primed for simplification. SmallSILInstructionWorklist<256> Worklist; + DeadEndBlocks deadEndBlocks; + /// Variable to track if the SILCombiner made any changes. bool MadeChange; @@ -83,19 +86,21 @@ class SILCombiner : AliasAnalysis *AA, DominanceAnalysis *DA, ProtocolConformanceAnalysis *PCA, ClassHierarchyAnalysis *CHA, bool removeCondFails) - : AA(AA), DA(DA), PCA(PCA), CHA(CHA), Worklist("SC"), MadeChange(false), + : AA(AA), DA(DA), PCA(PCA), CHA(CHA), Worklist("SC"), + deadEndBlocks(&B.getFunction()), MadeChange(false), RemoveCondFails(removeCondFails), Iteration(0), Builder(B), - CastOpt(FuncBuilder, nullptr /*SILBuilderContext*/, - /* ReplaceValueUsesAction */ - [&](SILValue Original, SILValue Replacement) { - replaceValueUsesWith(Original, Replacement); - }, - /* ReplaceInstUsesAction */ - [&](SingleValueInstruction *I, ValueBase *V) { - replaceInstUsesWith(*I, V); - }, - /* EraseAction */ - [&](SILInstruction *I) { eraseInstFromFunction(*I); }) {} + CastOpt( + FuncBuilder, nullptr /*SILBuilderContext*/, + /* ReplaceValueUsesAction */ + [&](SILValue Original, SILValue Replacement) { + replaceValueUsesWith(Original, Replacement); + }, + /* ReplaceInstUsesAction */ + [&](SingleValueInstruction *I, ValueBase *V) { + 
replaceInstUsesWith(*I, V); + }, + /* EraseAction */ + [&](SILInstruction *I) { eraseInstFromFunction(*I); }) {} bool runOnFunction(SILFunction &F); diff --git a/lib/SILOptimizer/SemanticARC/BorrowScopeOpts.cpp b/lib/SILOptimizer/SemanticARC/BorrowScopeOpts.cpp index 617751128e4ca..83ded3439f9fb 100644 --- a/lib/SILOptimizer/SemanticARC/BorrowScopeOpts.cpp +++ b/lib/SILOptimizer/SemanticARC/BorrowScopeOpts.cpp @@ -18,12 +18,17 @@ /// //===----------------------------------------------------------------------===// +#include "Context.h" #include "SemanticARCOptVisitor.h" using namespace swift; using namespace swift::semanticarc; bool SemanticARCOptVisitor::visitBeginBorrowInst(BeginBorrowInst *bbi) { + // Quickly check if we are supposed to perform this transformation. + if (!ctx.shouldPerform(ARCTransformKind::RedundantBorrowScopeElimPeephole)) + return false; + auto kind = bbi->getOperand().getOwnershipKind(); SmallVector endBorrows; for (auto *op : bbi->getUses()) { diff --git a/lib/SILOptimizer/SemanticARC/CMakeLists.txt b/lib/SILOptimizer/SemanticARC/CMakeLists.txt index 8f0310f1db49f..98c028162a060 100644 --- a/lib/SILOptimizer/SemanticARC/CMakeLists.txt +++ b/lib/SILOptimizer/SemanticARC/CMakeLists.txt @@ -7,4 +7,5 @@ target_sources(swiftSILOptimizer PRIVATE OwnedToGuaranteedPhiOpt.cpp Context.cpp SemanticARCOptVisitor.cpp + OwnershipConversionElimination.cpp ) diff --git a/lib/SILOptimizer/SemanticARC/Context.h b/lib/SILOptimizer/SemanticARC/Context.h index 0afd24175f2a9..bf0d74d3336b8 100644 --- a/lib/SILOptimizer/SemanticARC/Context.h +++ b/lib/SILOptimizer/SemanticARC/Context.h @@ -14,6 +14,7 @@ #define SWIFT_SILOPTIMIZER_SEMANTICARC_CONTEXT_H #include "OwnershipLiveRange.h" +#include "SemanticARCOpts.h" #include "swift/Basic/BlotSetVector.h" #include "swift/Basic/FrozenMultiMap.h" @@ -30,6 +31,7 @@ namespace semanticarc { struct LLVM_LIBRARY_VISIBILITY Context { SILFunction &fn; + ARCTransformKind transformKind = ARCTransformKind::All; Optional 
deadEndBlocks; ValueLifetimeAnalysis::Frontier lifetimeFrontier; SmallMultiMapCache addressToExhaustiveWriteListCache; @@ -62,7 +64,44 @@ struct LLVM_LIBRARY_VISIBILITY Context { /// our LiveRange can not see through joined live ranges, we know that we /// should only be able to have a single owned value introducer for each /// consumed operand. - FrozenMultiMap joinedOwnedIntroducerToConsumedOperands; + /// + /// NOTE: To work around potential invalidation of our consuming operands when + /// adding values to edges on the CFG, we store our Operands as a + /// SILBasicBlock and an operand number. We only add values to edges and never + /// remove/modify edges so the operand number should be safe. + struct ConsumingOperandState { + PointerUnion parent; + unsigned operandNumber; + + ConsumingOperandState() : parent(nullptr), operandNumber(UINT_MAX) {} + + ConsumingOperandState(Operand *op) + : parent(), operandNumber(op->getOperandNumber()) { + if (auto *ti = dyn_cast(op->getUser())) { + parent = ti->getParent(); + } else { + parent = op->getUser(); + } + } + + ConsumingOperandState(const ConsumingOperandState &other) : + parent(other.parent), operandNumber(other.operandNumber) {} + + ConsumingOperandState &operator=(const ConsumingOperandState &other) { + parent = other.parent; + operandNumber = other.operandNumber; + return *this; + } + + ~ConsumingOperandState() = default; + + operator bool() const { + return bool(parent) && operandNumber != UINT_MAX; + } + }; + + FrozenMultiMap + joinedOwnedIntroducerToConsumedOperands; /// If set to true, then we should only run cheap optimizations that do not /// build up data structures or analyze code in depth. @@ -91,6 +130,25 @@ struct LLVM_LIBRARY_VISIBILITY Context { void verify() const; + bool shouldPerform(ARCTransformKind testKind) const { + // When asserts are enabled, we allow for specific arc transforms to be + // turned on/off via LLVM args. 
So check that if we have asserts, perform + // all optimizations otherwise. +#ifndef NDEBUG + if (transformKind == ARCTransformKind::Invalid) + return false; + return bool(testKind & transformKind); +#else + return true; +#endif + } + + void reset() { + lifetimeFrontier.clear(); + addressToExhaustiveWriteListCache.clear(); + joinedOwnedIntroducerToConsumedOperands.reset(); + } + private: static bool constructCacheValue(SILValue initialValue, diff --git a/lib/SILOptimizer/SemanticARC/CopyValueOpts.cpp b/lib/SILOptimizer/SemanticARC/CopyValueOpts.cpp index 32276f11c6ba8..a6edae7ef82af 100644 --- a/lib/SILOptimizer/SemanticARC/CopyValueOpts.cpp +++ b/lib/SILOptimizer/SemanticARC/CopyValueOpts.cpp @@ -19,6 +19,8 @@ #include "OwnershipPhiOperand.h" #include "SemanticARCOptVisitor.h" #include "swift/SIL/LinearLifetimeChecker.h" +#include "swift/SIL/OwnershipUtils.h" +#include "swift/SIL/Projection.h" using namespace swift; using namespace swift::semanticarc; @@ -222,9 +224,9 @@ bool SemanticARCOptVisitor::performGuaranteedCopyValueOptimization( }); if (canOptimizePhi) { + Context::ConsumingOperandState state(opPhi); opPhi.visitResults([&](SILValue value) { - ctx.joinedOwnedIntroducerToConsumedOperands.insert(value, - opPhi.getOperand()); + ctx.joinedOwnedIntroducerToConsumedOperands.insert(value, state); return true; }); } @@ -285,51 +287,21 @@ bool SemanticARCOptVisitor::eliminateDeadLiveRangeCopyValue( // Live Range Joining //===----------------------------------------------------------------------===// -// Handle simple checking where we do not need to form live ranges and visit a -// bunch of instructions. -static bool canSafelyJoinSimpleRange(SILValue cviOperand, - DestroyValueInst *cviOperandDestroy, - CopyValueInst *cvi) { - // We only handle cases where our copy_value has a single lifetime ending - // use. We are not working with live ranges here so we do can treat forwarding - // insts like any other value. 
- auto *cviConsumer = cvi->getSingleConsumingUse(); - if (!cviConsumer) { - return false; - } - - // Ok, we may be able to eliminate this. The main thing we need to be careful - // of here is that if the lifetime of %0 ends /after/ the lifetime of - // %1. Otherwise we would be shrinking the lifetime of the object - // potentially. Consider the following SIL that we would miscompile in such a - // case. - // - // // potential_miscompile.sil - // %0 = ... - // %1 = copy_value %0 - // apply %cviConsumer(%1) - // apply %guaranteedUser(%0) - // destroy_value %0 - // - // Easily, if we were to eliminate the copy_value, destroy_value, the object's - // lifetime could potentially be shrunk before guaranteedUser is executed, - // causing guaranteedUser to be a use-after-free. - // - // As an extra wrinkle, until all interior pointer constructs (e.x.: - // project_box) are guaranteed to be guaranted by a begin_borrow, we can not - // in general safely shrink lifetimes. So even if we think we can prove that - // all non-consuming uses of %0 are before apply %cviConsumer, we may miss - // implicit uses that are not guarded yet by a begin_borrow, resulting in - // use-after-frees. - // - // With that in mind, we only handle cases today where we can prove that - // destroy_value is strictly before the consuming use of the operand. This - // guarantees that we are not shrinking the lifetime of the underlying object. +/// Given that our copy_value and destroy_value are in different blocks +/// determine if we can eliminate the copy/destroy. +/// +/// We assume that our copy_value \p cvi has a single consuming use +/// (\p cviConsumingUse) and that the destroy_value \p cviOperandDestroy is the +/// only destroy of the copy_value's operand. +static bool canJoinIfCopyDiesInFunctionExitingBlock( + SILValue cviOperand, DestroyValueInst *cviOperandDestroy, + CopyValueInst *cvi, Operand *cviConsumingUse) { + // This is a simple optimization, so at least handle hand-offs at returns. 
// - // First we handle the simple case: where the cviConsumer is a return inst. In - // such a case, we know for sure that cviConsumer post-dominates the - // destroy_value. - auto cviConsumerIter = cviConsumer->getUser()->getIterator(); + // First if our copy_value's consuming use is a return inst, then we know that + // the copy_value is live over the destroy_value \p cviOperandDestroy so we + // can eliminate the two safely. + auto cviConsumerIter = cviConsumingUse->getUser()->getIterator(); if (isa(cviConsumerIter)) { return true; } @@ -343,23 +315,251 @@ static bool canSafelyJoinSimpleRange(SILValue cviOperand, return true; } - // Otherwise, we only support joining live ranges where the cvi and the cvi's - // operand's destroy are in the same block with the destroy_value of cvi - // operand needing to be strictly after the copy_value. This analysis can be - // made significantly stronger by using LiveRanges, but this is simple for - // now. - auto cviOperandDestroyIter = cviOperandDestroy->getIterator(); - if (cviConsumingBlock != cviOperandDestroyIter->getParent()) { + return false; +} + +static Operand *lookThroughSingleForwardingUse(Operand *use) { + auto forwardingOperand = ForwardingOperand::get(use); + if (!forwardingOperand) + return nullptr; + auto forwardedValue = (*forwardingOperand).getSingleForwardedValue(); + if (!forwardedValue) + return nullptr; + auto *singleConsumingUse = forwardedValue->getSingleConsumingUse(); + if (!singleConsumingUse) + return nullptr; + return singleConsumingUse; +} + +/// Walk from inst to the end of the inst->getParent() looking for \p use's +/// user. Every instruction that we visit that is not said user is added to +/// foundInsts if foundInsts is not nullptr. We do not include \p inst in \p +/// foundInsts. 
+static bool isUseBetweenInstAndBlockEnd( + SILInstruction *inst, Operand *use, + SmallPtrSetImpl *foundInsts = nullptr) { + auto userOfUse = use->getUser(); + auto instRegion = llvm::make_range(std::next(inst->getIterator()), + inst->getParent()->end()); + for (auto &i : instRegion) { + if (&i == userOfUse) + return true; + if (foundInsts) + foundInsts->insert(&i); + } + return false; +} + +/// Optimize assuming that \p singleCVIConsumingUse and \p dvi are in the same +/// block. +/// +/// Importantly since \p singleCVIConsumingUse and \p dvi are in the same block, +/// we know that \p cvi must be post-dominated by dvi since its only consuming +/// use is single cvi consuming use by assumption. +static bool tryJoinIfDestroyConsumingUseInSameBlock( + SemanticARCOptVisitor &ctx, CopyValueInst *cvi, DestroyValueInst *dvi, + SILValue operand, Operand *singleCVIConsumingUse) { + // First see if our destroy_value is in between singleCVIConsumingUse and the + // end of block. If this is not true, then we know the destroy_value must be + // /before/ our singleCVIConsumingUse meaning that by joining the lifetimes, + // we are not going to shrink the overall composite lifetime. + SmallPtrSet visitedInsts; + if (!isUseBetweenInstAndBlockEnd(singleCVIConsumingUse->getUser(), + &dvi->getAllOperands()[0], &visitedInsts)) { + ctx.eraseInstruction(dvi); + ctx.eraseAndRAUWSingleValueInstruction(cvi, operand); + return true; + } + + // If we reached this point, isUseBetweenInstAndBlockEnd succeeded implying + // that we found destroy_value to be after our consuming use. Noting that + // additionally, the routine places all instructions in between consuming use + // and destroy_value into visitedInsts for our use, we may still be able to + // optimize if: + // + // 1. singleCVIConsumingUse is actually a forwarding user and forms the head + // of a chain of same-block forwarding uses the last of which is /after/ + // the destroy_value. + // + // 2. 
Our copy_value's operand does not have any direct uses or live dependent + // borrow scopes in between the first forwarding use and the + // destroy_value. This ensures that we do not need to deal with splitting + // borrow scopes or having to deal with "shape"-mismatches in between uses + // of the copy_value's operand and the current running forwarded value. + // + // This choice of optimization was just an attempt to be pragmatic given we + // want to be able to run this optimization at -Onone. + // + // With that in mind, lets first check 1. + Operand *currentForwardingUse = singleCVIConsumingUse; + while (auto *op = lookThroughSingleForwardingUse(currentForwardingUse)) { + // Visited insts contain all instructions in between singleCVIConsumingUse + // and the destroy_value, so if our forwarding inst is not in VisitedInsts, + // it must not be in the region and currentForwardingUse must be the last + // use. + if (!visitedInsts.count(op->getUser())) + break; + currentForwardingUse = op; + } + + // Ok now see if we were able to find a forwarding inst that was later than + // destroy_value... + if (currentForwardingUse == singleCVIConsumingUse || + visitedInsts.count(currentForwardingUse->getUser())) { + // If not, see if this use did have a forwardedValue but that forwardedValue + // has multiple end lifetime uses. In that case, we can optimize if there + // aren't any uses/etc + auto forwardingOperand = ForwardingOperand::get(currentForwardingUse); + if (!forwardingOperand) + return false; + auto forwardedValue = (*forwardingOperand).getSingleForwardedValue(); + if (!forwardedValue) + return false; + + // If our forwarding value has a single consuming use and that use is in the + // same block as our destroy_value, bail if the single consuming use is + // before our destroy_value. 
+ if (auto *singleConsumingUse = forwardedValue->getSingleConsumingUse()) { + if (singleConsumingUse->getParentBlock() == dvi->getParentBlock() && + !isUseBetweenInstAndBlockEnd(dvi, singleConsumingUse)) { + return false; + } + } + + // If our forwarded value has multiple lifetime ending uses or a single + // consuming use that is after the destroy_value, we still need to perform + // our safety check below to know if we can optimize. + } + + // Otherwise, we looked through at least one forwarded use and our final use + // was past dvi in the current block! So we can optimize! + // + // As one last safety check, make sure that our copy_value operand does not + // have any uses in our code region. If it does, we would need to rewrite + // forwarded values so that the types match up, which is more than this humble + // optimization is trying to do here given we want to run this at -Onone. + // + // TODO: Can we make this more aggressive and by how much? E.x.: Can we allow + // debug_value users but move them to before our singleCVIConsumingUse? + for (auto *use : operand->getUses()) { + auto *user = use->getUser(); + + // First if our user is dvi, just continue. + if (user == dvi) + continue; + + // Then see if the user itself is a visitedInst. If so, we have a use that + // may require us to do some sort of transform, we can't optimize. + if (visitedInsts.count(use->getUser())) + return false; + + // Ok, we have a use that isn't in our visitedInsts region. That being said, + // we may still have a use that introduces a new BorrowScope onto our + // copy_value's operand that overlaps with our forwarding value region. In + // such a case, we can not optimize. + // + // To prove this since we know that any such scope must end at our + // destroy_value (since that is when the copy_value's operand is destroyed), + // we need to only find scopes that end within the region in between the + // singleConsumingUse (the original forwarded use) and the destroy_value.
In + // such a case, we must bail! + if (auto operand = BorrowingOperand::get(use)) + if (!operand->visitLocalEndScopeUses([&](Operand *endScopeUse) { + // Return false if we did see the relevant end scope instruction + // in the block. That means that we are going to exit early and + // return false. + return !visitedInsts.count(endScopeUse->getUser()); + })) + return false; + } + + // Ok, we now know that we can eliminate this value. + ctx.eraseInstruction(dvi); + ctx.eraseAndRAUWSingleValueInstruction(cvi, operand); + return true; +} + +/// Given that: +/// +/// 1. Our copy_value's operand has a single consuming use (and that use is a +/// destroy_value). +/// 2. Our copy_value has a single consuming use. +/// +/// try and perform various optimizations to eliminate our copy_value, +/// destroy_value. Example: +/// +/// ``` +/// %1 = copy_value %0 // in some block +/// ... +/// +/// bbN: +/// destroy_value %0 +/// br bbFunctionExistingBlock +/// +/// bbFunctionExistingBlock: +/// consumingUse %1 +/// return +/// ``` +/// +/// will be optimized to: +/// +/// ``` +/// ... +/// +/// bbN: +/// br bbFunctionExistingBlock +/// +/// bbFunctionExistingBlock: +/// consumingUse %0 +/// return +/// ``` +static bool tryJoiningIfCopyOperandHasSingleDestroyValue( + SemanticARCOptVisitor &ctx, CopyValueInst *cvi, SILValue operand) { + // First perform our quick checks to see if our operand has a single + // destroy_value and our copy_value has a single consuming use. If either are + // false, we can not optimize so bail early. + auto *dvi = operand->getSingleConsumingUserOfType(); + if (!dvi) + return false; + + auto *singleCVIConsumingUse = cvi->getSingleConsumingUse(); + if (!singleCVIConsumingUse) return false; + + // Otherwise, first check to see if our operand's consuming use is a return + // inst or is in a function exiting block and dvi is not. 
With this + information, we can conclude in both cases that singleCviConsumingUse must + post-dominate destroy_value and can eliminate the hand off traffic. + if (canJoinIfCopyDiesInFunctionExitingBlock(operand, dvi, cvi, + singleCVIConsumingUse)) { + ctx.eraseInstruction(dvi); + ctx.eraseAndRAUWSingleValueInstruction(cvi, operand); + return true; } - // TODO: This should really be llvm::find, but for some reason, the templates - // do not match up given the current state of the iterators. This impl works - // in a pinch though. - return llvm::any_of( - llvm::make_range(cviOperandDestroyIter, - cviOperandDestroyIter->getParent()->end()), - [&](const SILInstruction &val) { return &*cviConsumerIter == &val; }); + // Otherwise, try to prove that dvi and singleCVIConsumingUse are in the same + // block with dvi being strictly before singleCVIConsumingUse, that is: + // + // %operand = ... + // ... + // %copiedOperand = cvi %operand + // ... + // dvi %operand + // cviConsumer %copiedOperand + // + // In such a case, all we know is that dvi and cviConsumer are in the same + // block. Since dvi is the only destroy of %operand, we know that dvi must + // post-dominate %copiedOperand and %operand. + if (dvi->getParent() != singleCVIConsumingUse->getParentBlock()) + return false; + + // First see if our initial use is after dvi. Then we do not need to do any + // more complex work. We actually check here if we find our destroy_value in + // between our consuming use and the end block. The reason why we do this is + // so that if we fail, visitedInsts will contain all instructions in between + // the consuming use and the destroy_value. + return tryJoinIfDestroyConsumingUseInSameBlock(ctx, cvi, dvi, operand, + singleCVIConsumingUse); } // # The Problem We Are Solving @@ -411,36 +611,81 @@ bool SemanticARCOptVisitor::tryJoiningCopyValueLiveRangeWithOperand( return false; }
If it does and that - // destroy_value is strictly before the consumer of our copy_value in the same - // block as the consumer of said copy_value then we can always join the live - // ranges. + // Then we handle two different use cases: // - // Example: + // 1. First we optimize a special case where our copy_value has a single + // consuming use and our copy_value's operand has a single consuming use + // and that single use is a destroy_value. // - // ``` - // %1 = copy_value %0 - // ... - // destroy_value %0 - // apply %consumingUser(%1) - // ``` - // -> + // 2. The second is a more general optimization where our copy_value has + // multiple destroy_value, but we know that our copy_value is in the same + // block as one of those destroy_value. + if (tryJoiningIfCopyOperandHasSingleDestroyValue(*this, cvi, operand)) + return true; + + // Otherwise, use a more conservative analysis that requires our copy_value + // and destroy_value, but is looser about how we handle the consuming use: // - // ``` - // apply %consumingUser(%0) - // ``` + // 1. Since our copy_value and destroy_value are in the same block, if our + // copy_value has multiple consuming uses, we know those consuming uses + // must be outside of our current block and must be dominated by the + // copy_value, destroy_value. So we can immediately optimize. // - // DISCUSSION: We need to ensure that the consuming use of the copy_value is - // strictly after the destroy_value to ensure that we do not shrink the live - // range of the operand if the operand has any normal uses beyond our copy - // value. Otherwise, we could have normal uses /after/ the consuming use of - // our copy_value. - if (auto *dvi = operand->getSingleConsumingUserOfType()) { - if (canSafelyJoinSimpleRange(operand, dvi, cvi)) { + // 2. 
Otherwise, if we have a single consuming use and it is in the same block + // as our copy_value, destroy_value, we attempt to prove that the consuming + // use (after looking through a forwarding use chain) is later in the + // current block than the destroy_value. We use the last forwarding + // instruction in a chain of SIL instructions that end in the current + // block. Since we are looking through forwarding uses, we need to create + // new-borrow scopes at each forwarding instruction as we clone if we have + // any guaranteed elements in between our destroy_value and final + // forwarding use. + auto *singleCVIConsumingUse = cvi->getSingleConsumingUse(); + for (auto *use : operand->getConsumingUses()) { + auto *dvi = dyn_cast(use->getUser()); + if (!dvi) + continue; + + // First setup our condition... We only optimize if our copy_value and + // destroy_value are in the same block. Additionally since our destroy_value + // is destroying the operand of the copy_value, we must have that cvi is + // strictly before dvi in the block. + if (dvi->getParent() != cvi->getParent()) { + continue; + } + + // If we had multiple consuming uses of our copy_value, then we know that + // the copy_value must be live out of the current block implying that we + // can optimize without any further analysis since we know we will not be + // shrinking lifetimes of owned values. + if (singleCVIConsumingUse == nullptr) { eraseInstruction(dvi); eraseAndRAUWSingleValueInstruction(cvi, operand); return true; } + + // Then note that if our copy_value has a single consuming use, if that use + // is not in the same block as our copy_value/destroy_value, it must be live + // out of the block and thus we are not shrinking any lifetimes. 
+ if (singleCVIConsumingUse->getParentBlock() != cvi->getParent()) { + eraseInstruction(dvi); + eraseAndRAUWSingleValueInstruction(cvi, operand); + return true; + } + + // Ok, we know that all of the following instructions are in the same block + // together: + // + // 1. our copy_value (cvi). + // 2. The consumer of our copy_value (singleCVIConsumingUse). + // 3. A destroy_value of the copy_value's operand (dvi). + // + // So call our subroutine that optimizes given the destroy_value, consume + // are in the same block and that the copy_value is post-dominated by the + // destroy_value. + if (tryJoinIfDestroyConsumingUseInSameBlock(*this, cvi, dvi, operand, + singleCVIConsumingUse)) + return true; } // Otherwise, we couldn't handle this case, so return false. @@ -451,6 +696,10 @@ bool SemanticARCOptVisitor::tryJoiningCopyValueLiveRangeWithOperand( return false; } +//===----------------------------------------------------------------------===// +// Owned Copy Value Optimizations +//===----------------------------------------------------------------------===// + /// Given an owned value that is completely enclosed within its parent owned /// value and is not consumed, eliminate the copy. bool SemanticARCOptVisitor::tryPerformOwnedCopyValueOptimization( @@ -517,22 +766,26 @@ bool SemanticARCOptVisitor::tryPerformOwnedCopyValueOptimization( bool SemanticARCOptVisitor::visitCopyValueInst(CopyValueInst *cvi) { // If our copy value inst has only destroy_value users, it is a dead live // range. Try to eliminate them. - if (eliminateDeadLiveRangeCopyValue(cvi)) { + if (ctx.shouldPerform(ARCTransformKind::RedundantCopyValueElimPeephole) && + eliminateDeadLiveRangeCopyValue(cvi)) { return true; } // Then see if copy_value operand's lifetime ends after our copy_value via a // destroy_value. If so, we can join their lifetimes. 
- if (tryJoiningCopyValueLiveRangeWithOperand(cvi)) { + if (ctx.shouldPerform(ARCTransformKind::LifetimeJoiningPeephole) && + tryJoiningCopyValueLiveRangeWithOperand(cvi)) { return true; } // Then try to perform the guaranteed copy value optimization. - if (performGuaranteedCopyValueOptimization(cvi)) { + if (ctx.shouldPerform(ARCTransformKind::RedundantCopyValueElimPeephole) && + performGuaranteedCopyValueOptimization(cvi)) { return true; } - if (tryPerformOwnedCopyValueOptimization(cvi)) { + if (ctx.shouldPerform(ARCTransformKind::RedundantCopyValueElimPeephole) && + tryPerformOwnedCopyValueOptimization(cvi)) { return true; } diff --git a/lib/SILOptimizer/SemanticARC/LoadCopyToLoadBorrowOpt.cpp b/lib/SILOptimizer/SemanticARC/LoadCopyToLoadBorrowOpt.cpp index 71323e34eac8f..5562946bd50b7 100644 --- a/lib/SILOptimizer/SemanticARC/LoadCopyToLoadBorrowOpt.cpp +++ b/lib/SILOptimizer/SemanticARC/LoadCopyToLoadBorrowOpt.cpp @@ -329,6 +329,10 @@ bool SemanticARCOptVisitor::visitLoadInst(LoadInst *li) { if (ctx.onlyGuaranteedOpts) return false; + // If we are not supposed to perform this transform, bail. 
+ if (!ctx.shouldPerform(ARCTransformKind::LoadCopyToLoadBorrowPeephole)) + return false; + if (li->getOwnershipQualifier() != LoadOwnershipQualifier::Copy) return false; diff --git a/lib/SILOptimizer/SemanticARC/OwnedToGuaranteedPhiOpt.cpp b/lib/SILOptimizer/SemanticARC/OwnedToGuaranteedPhiOpt.cpp index 3e85d78adaedc..94dbda3909adc 100644 --- a/lib/SILOptimizer/SemanticARC/OwnedToGuaranteedPhiOpt.cpp +++ b/lib/SILOptimizer/SemanticARC/OwnedToGuaranteedPhiOpt.cpp @@ -19,12 +19,18 @@ #include "Context.h" #include "OwnershipPhiOperand.h" #include "Transforms.h" +#include "swift/Basic/STLExtras.h" using namespace swift; using namespace swift::semanticarc; +namespace { +using ConsumingOperandState = Context::ConsumingOperandState; +} // anonymous namespace + +template static bool canEliminatePhi( - Context::FrozenMultiMapRange optimizableIntroducerRange, + OperandRangeTy optimizableIntroducerRange, ArrayRef incomingValueOperandList, SmallVectorImpl &ownedValueIntroducerAccumulator) { for (auto incomingValueOperand : incomingValueOperandList) { @@ -161,9 +167,19 @@ bool swift::semanticarc::tryConvertOwnedPhisToGuaranteedPhis(Context &ctx) { // eliminated if it was not for the given phi. If all of them are, we can // optimize! 
{ - auto rawFoundOptimizableIntroducerArray = pair.second; - if (!canEliminatePhi(rawFoundOptimizableIntroducerArray, - incomingValueOperandList, ownedValueIntroducers)) { + std::function lambda = + [&](const Context::ConsumingOperandState &state) -> Operand * { + unsigned opNum = state.operandNumber; + if (state.parent.is()) { + SILBasicBlock *block = state.parent.get(); + return &block->getTerminator()->getAllOperands()[opNum]; + } + SILInstruction *inst = state.parent.get(); + return &inst->getAllOperands()[opNum]; + }; + auto operandsTransformed = makeTransformRange(pair.second, lambda); + if (!canEliminatePhi(operandsTransformed, incomingValueOperandList, + ownedValueIntroducers)) { continue; } } diff --git a/lib/SILOptimizer/SemanticARC/OwnershipConversionElimination.cpp b/lib/SILOptimizer/SemanticARC/OwnershipConversionElimination.cpp new file mode 100644 index 0000000000000..21319e3505fbf --- /dev/null +++ b/lib/SILOptimizer/SemanticARC/OwnershipConversionElimination.cpp @@ -0,0 +1,67 @@ +//===--- OwnershipConversionElimination.cpp -------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2020 Apple Inc. 
and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +#include "SemanticARC/SemanticARCOpts.h" +#include "SemanticARCOptVisitor.h" +#include "swift/SIL/LinearLifetimeChecker.h" + +using namespace swift; +using namespace semanticarc; + +//===----------------------------------------------------------------------===// +// Top Level Entrypoint +//===----------------------------------------------------------------------===// + +bool SemanticARCOptVisitor::visitUncheckedOwnershipConversionInst( + UncheckedOwnershipConversionInst *uoci) { + // Return false if we are supposed to only be running guaranteed opts. + if (ctx.onlyGuaranteedOpts) + return false; + + // Then check if we are running tests and shouldn't perform this optimization + // since we are testing something else. + if (!ctx.shouldPerform(ARCTransformKind::OwnershipConversionElimPeephole)) + return false; + + // Otherwise, shrink our state space so that we only consider conversions from + // owned or guaranteed to unowned. These are always legal and it is sometimes + // convenient to insert them to avoid forwarding issues when RAUWing + // values. So we eliminate them here. + if (uoci->getConversionOwnershipKind() != OwnershipKind::Unowned) + return false; + + auto op = uoci->getOperand(); + auto opKind = op.getOwnershipKind(); + if (opKind != OwnershipKind::Owned && opKind != OwnershipKind::Guaranteed) + return false; + + // Ok, we can perform our optimization. First go through all of the uses of + // uoci and see if they can accept our operand without any changes and that + // they do not consume values. 
+ SmallVector newUses; + for (auto *use : uoci->getUses()) { + if (use->isLifetimeEnding() || !use->canAcceptKind(opKind)) + return false; + newUses.push_back(use); + } + + // Ok, now we need to perform our lifetime check. + SmallVector consumingUses(op->getConsumingUses()); + SmallPtrSet visitedBlocks; + LinearLifetimeChecker checker(visitedBlocks, ctx.getDeadEndBlocks()); + if (!checker.validateLifetime(op, consumingUses, newUses)) + return false; + + // Otherwise, we can perform our rauw. + eraseAndRAUWSingleValueInstruction(uoci, op); + return true; +} diff --git a/lib/SILOptimizer/SemanticARC/OwnershipPhiOperand.h b/lib/SILOptimizer/SemanticARC/OwnershipPhiOperand.h index 88ba1007d2ea2..c54791e490ff3 100644 --- a/lib/SILOptimizer/SemanticARC/OwnershipPhiOperand.h +++ b/lib/SILOptimizer/SemanticARC/OwnershipPhiOperand.h @@ -64,6 +64,9 @@ class LLVM_LIBRARY_VISIBILITY OwnershipPhiOperand { } } + operator const Operand *() const { return op; } + operator Operand *() { return op; } + Operand *getOperand() const { return op; } SILValue getValue() const { return op->get(); } SILType getType() const { return op->get()->getType(); } diff --git a/lib/SILOptimizer/SemanticARC/SemanticARCOptVisitor.cpp b/lib/SILOptimizer/SemanticARC/SemanticARCOptVisitor.cpp index 65d6178653c35..0fe9af3ecae4f 100644 --- a/lib/SILOptimizer/SemanticARC/SemanticARCOptVisitor.cpp +++ b/lib/SILOptimizer/SemanticARC/SemanticARCOptVisitor.cpp @@ -23,6 +23,15 @@ using namespace swift; using namespace swift::semanticarc; +bool SemanticARCOptVisitor::optimizeWithoutFixedPoint() { + bool madeChange = false; + + // First process the worklist until we reach a fixed point. 
+ madeChange |= processWorklist(); + + return madeChange; +} + bool SemanticARCOptVisitor::optimize() { bool madeChange = false; diff --git a/lib/SILOptimizer/SemanticARC/SemanticARCOptVisitor.h b/lib/SILOptimizer/SemanticARC/SemanticARCOptVisitor.h index 1e9746ffab4c8..ced5d731ee552 100644 --- a/lib/SILOptimizer/SemanticARC/SemanticARCOptVisitor.h +++ b/lib/SILOptimizer/SemanticARC/SemanticARCOptVisitor.h @@ -15,6 +15,8 @@ #include "Context.h" #include "OwnershipLiveRange.h" +#include "SemanticARCOpts.h" + #include "swift/Basic/BlotSetVector.h" #include "swift/Basic/FrozenMultiMap.h" #include "swift/Basic/MultiMapCache.h" @@ -34,7 +36,7 @@ namespace semanticarc { /// visitors do, we maintain a visitedSinceLastMutation list to ensure that we /// revisit all interesting instructions in between mutations. struct LLVM_LIBRARY_VISIBILITY SemanticARCOptVisitor - : SILInstructionVisitor { + : SILValueVisitor { /// Our main worklist. We use this after an initial run through. SmallBlotSetVector worklist; @@ -56,6 +58,12 @@ struct LLVM_LIBRARY_VISIBILITY SemanticARCOptVisitor eraseAndRAUWSingleValueInstruction(i, value); })) {} + void reset() { + ctx.reset(); + worklist.clear(); + visitedSinceLastMutation.clear(); + } + DeadEndBlocks &getDeadEndBlocks() { return ctx.getDeadEndBlocks(); } /// Given a single value instruction, RAUW it with newValue, add newValue to @@ -119,9 +127,19 @@ struct LLVM_LIBRARY_VISIBILITY SemanticARCOptVisitor }); } - /// The default visitor. bool visitSILInstruction(SILInstruction *i) { - assert(!isGuaranteedForwardingValueKind(SILNodeKind(i->getKind())) && + assert((isa(i) || + !isa(i)) && + "Should have forwarding visitor for all ownership forwarding " + "non-term instructions"); + return false; + } + + /// The default visitor. 
+ bool visitValueBase(ValueBase *v) { + auto *inst = v->getDefiningInstruction(); + (void)inst; + assert((!inst || !isa(inst)) && "Should have forwarding visitor for all ownership forwarding " "instructions"); return false; @@ -130,6 +148,9 @@ struct LLVM_LIBRARY_VISIBILITY SemanticARCOptVisitor bool visitCopyValueInst(CopyValueInst *cvi); bool visitBeginBorrowInst(BeginBorrowInst *bbi); bool visitLoadInst(LoadInst *li); + bool + visitUncheckedOwnershipConversionInst(UncheckedOwnershipConversionInst *uoci); + static bool shouldVisitInst(SILInstruction *i) { switch (i->getKind()) { default: @@ -137,6 +158,7 @@ struct LLVM_LIBRARY_VISIBILITY SemanticARCOptVisitor case SILInstructionKind::CopyValueInst: case SILInstructionKind::BeginBorrowInst: case SILInstructionKind::LoadInst: + case SILInstructionKind::UncheckedOwnershipConversionInst: return true; } } @@ -149,8 +171,11 @@ struct LLVM_LIBRARY_VISIBILITY SemanticARCOptVisitor return false; \ } FORWARDING_INST(Tuple) + FORWARDING_INST(Object) FORWARDING_INST(Struct) FORWARDING_INST(Enum) + FORWARDING_INST(UncheckedValueCast) + FORWARDING_INST(ThinToThickFunction) FORWARDING_INST(OpenExistentialRef) FORWARDING_INST(Upcast) FORWARDING_INST(UncheckedRefCast) @@ -161,6 +186,7 @@ struct LLVM_LIBRARY_VISIBILITY SemanticARCOptVisitor FORWARDING_INST(UncheckedEnumData) FORWARDING_INST(MarkUninitialized) FORWARDING_INST(SelectEnum) + FORWARDING_INST(SelectValue) FORWARDING_INST(DestructureStruct) FORWARDING_INST(DestructureTuple) FORWARDING_INST(TupleExtract) @@ -175,23 +201,9 @@ struct LLVM_LIBRARY_VISIBILITY SemanticARCOptVisitor FORWARDING_INST(LinearFunctionExtract) #undef FORWARDING_INST -#define FORWARDING_TERM(NAME) \ - bool visit##NAME##Inst(NAME##Inst *cls) { \ - for (auto succValues : cls->getSuccessorBlockArgumentLists()) { \ - for (SILValue v : succValues) { \ - worklist.insert(v); \ - } \ - } \ - return false; \ - } - - FORWARDING_TERM(SwitchEnum) - FORWARDING_TERM(CheckedCastBranch) - FORWARDING_TERM(Branch) 
-#undef FORWARDING_TERM - bool processWorklist(); bool optimize(); + bool optimizeWithoutFixedPoint(); bool performGuaranteedCopyValueOptimization(CopyValueInst *cvi); bool eliminateDeadLiveRangeCopyValue(CopyValueInst *cvi); diff --git a/lib/SILOptimizer/SemanticARC/SemanticARCOpts.cpp b/lib/SILOptimizer/SemanticARC/SemanticARCOpts.cpp index b8eb67ec2f2e0..42e1bdae310e0 100644 --- a/lib/SILOptimizer/SemanticARC/SemanticARCOpts.cpp +++ b/lib/SILOptimizer/SemanticARC/SemanticARCOpts.cpp @@ -12,6 +12,7 @@ #define DEBUG_TYPE "sil-semantic-arc-opts" +#include "SemanticARCOpts.h" #include "SemanticARCOptVisitor.h" #include "Transforms.h" @@ -22,22 +23,28 @@ using namespace swift; using namespace swift::semanticarc; -namespace { - -/// An enum used so that at the command line, we can override -enum class TransformToPerformKind { - Peepholes, - OwnedToGuaranteedPhi, -}; - -} // anonymous namespace - -static llvm::cl::list TransformsToPerform( +static llvm::cl::list TransformsToPerform( llvm::cl::values( - clEnumValN(TransformToPerformKind::Peepholes, - "sil-semantic-arc-peepholes", - "Perform ARC canonicalizations and peepholes"), - clEnumValN(TransformToPerformKind::OwnedToGuaranteedPhi, + clEnumValN(ARCTransformKind::AllPeepholes, + "sil-semantic-arc-peepholes-all", + "Perform All ARC canonicalizations and peepholes"), + clEnumValN(ARCTransformKind::LoadCopyToLoadBorrowPeephole, + "sil-semantic-arc-peepholes-loadcopy-to-loadborrow", + "Perform the load [copy] to load_borrow peephole"), + clEnumValN(ARCTransformKind::RedundantBorrowScopeElimPeephole, + "sil-semantic-arc-peepholes-redundant-borrowscope-elim", + "Perform the redundant borrow scope elimination peephole"), + clEnumValN(ARCTransformKind::RedundantCopyValueElimPeephole, + "sil-semantic-arc-peepholes-redundant-copyvalue-elim", + "Perform the redundant copy_value peephole"), + clEnumValN(ARCTransformKind::LifetimeJoiningPeephole, + "sil-semantic-arc-peepholes-lifetime-joining", + "Perform the join lifetimes 
peephole"), + clEnumValN(ARCTransformKind::OwnershipConversionElimPeephole, + "sil-semantic-arc-peepholes-ownership-conversion-elim", + "Eliminate unchecked_ownership_conversion insts that are " + "not needed"), + clEnumValN(ARCTransformKind::OwnedToGuaranteedPhi, "sil-semantic-arc-owned-to-guaranteed-phi", "Perform Owned To Guaranteed Phi. NOTE: Seeded by peephole " "optimizer for compile time saving purposes, so run this " @@ -64,23 +71,53 @@ struct SemanticARCOpts : SILFunctionTransform { #ifndef NDEBUG void performCommandlineSpecifiedTransforms(SemanticARCOptVisitor &visitor) { for (auto transform : TransformsToPerform) { + visitor.ctx.transformKind = transform; + SWIFT_DEFER { + visitor.ctx.transformKind = ARCTransformKind::Invalid; + visitor.reset(); + }; switch (transform) { - case TransformToPerformKind::Peepholes: - if (performPeepholes(visitor)) { + case ARCTransformKind::LifetimeJoiningPeephole: + case ARCTransformKind::RedundantCopyValueElimPeephole: + case ARCTransformKind::RedundantBorrowScopeElimPeephole: + case ARCTransformKind::LoadCopyToLoadBorrowPeephole: + case ARCTransformKind::AllPeepholes: + case ARCTransformKind::OwnershipConversionElimPeephole: + // We never assume we are at fixed point when running these transforms. + if (performPeepholesWithoutFixedPoint(visitor)) { invalidateAnalysis(SILAnalysis::InvalidationKind::Instructions); } continue; - case TransformToPerformKind::OwnedToGuaranteedPhi: + case ARCTransformKind::OwnedToGuaranteedPhi: if (tryConvertOwnedPhisToGuaranteedPhis(visitor.ctx)) { invalidateAnalysis( SILAnalysis::InvalidationKind::BranchesAndInstructions); } continue; + case ARCTransformKind::All: + case ARCTransformKind::Invalid: + llvm_unreachable("unsupported option"); } } } #endif + bool performPeepholesWithoutFixedPoint(SemanticARCOptVisitor &visitor) { + // Add all the results of all instructions that we want to visit to the + // worklist. 
+ for (auto &block : *getFunction()) { + for (auto &inst : block) { + if (SemanticARCOptVisitor::shouldVisitInst(&inst)) { + for (SILValue v : inst.getResults()) { + visitor.worklist.insert(v); + } + } + } + } + // Then process the worklist, performing peepholes. + return visitor.optimizeWithoutFixedPoint(); + } + bool performPeepholes(SemanticARCOptVisitor &visitor) { // Add all the results of all instructions that we want to visit to the // worklist. diff --git a/lib/SILOptimizer/SemanticARC/SemanticARCOpts.h b/lib/SILOptimizer/SemanticARC/SemanticARCOpts.h new file mode 100644 index 0000000000000..16af3ce4245e6 --- /dev/null +++ b/lib/SILOptimizer/SemanticARC/SemanticARCOpts.h @@ -0,0 +1,50 @@ +//===--- SemanticARCOpts.h ------------------------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +#ifndef SWIFT_SILOPTIMIZER_SEMANTICARC_SEMANTICARCOPTS_H +#define SWIFT_SILOPTIMIZER_SEMANTICARC_SEMANTICARCOPTS_H + +#include +#include + +namespace swift { +namespace semanticarc { + +/// An enum used so that at the command line, we can override which transforms +/// we perform. +enum class ARCTransformKind : uint64_t { + Invalid = 0, + OwnedToGuaranteedPhi = 0x1, + LoadCopyToLoadBorrowPeephole = 0x2, + RedundantBorrowScopeElimPeephole = 0x4, + // TODO: Split RedundantCopyValueElimPeephole into more granular categories + // such as dead live range, guaranteed copy_value opt, etc. 
+ RedundantCopyValueElimPeephole = 0x8, + LifetimeJoiningPeephole = 0x10, + OwnershipConversionElimPeephole = 0x20, + + AllPeepholes = LoadCopyToLoadBorrowPeephole | + RedundantBorrowScopeElimPeephole | + RedundantCopyValueElimPeephole | LifetimeJoiningPeephole | + OwnershipConversionElimPeephole, + All = AllPeepholes | OwnedToGuaranteedPhi, +}; + +inline ARCTransformKind operator&(ARCTransformKind lhs, ARCTransformKind rhs) { + using UnderlyingTy = std::underlying_type::type; + return ARCTransformKind(UnderlyingTy(lhs) & UnderlyingTy(rhs)); +} + +} // namespace semanticarc +} // namespace swift + +#endif diff --git a/lib/SILOptimizer/Transforms/AccessEnforcementReleaseSinking.cpp b/lib/SILOptimizer/Transforms/AccessEnforcementReleaseSinking.cpp index f9c0eac952315..f3a8b7bf6e613 100644 --- a/lib/SILOptimizer/Transforms/AccessEnforcementReleaseSinking.cpp +++ b/lib/SILOptimizer/Transforms/AccessEnforcementReleaseSinking.cpp @@ -141,6 +141,7 @@ static bool isBarrier(SILInstruction *inst) { case BuiltinValueKind::COWBufferForReading: case BuiltinValueKind::IntInstrprofIncrement: case BuiltinValueKind::GetCurrentAsyncTask: + case BuiltinValueKind::AutoDiffCreateLinearMapContext: return false; // Handle some rare builtins that may be sensitive to object lifetime @@ -168,6 +169,8 @@ static bool isBarrier(SILInstruction *inst) { case BuiltinValueKind::CancelAsyncTask: case BuiltinValueKind::CreateAsyncTask: case BuiltinValueKind::CreateAsyncTaskFuture: + case BuiltinValueKind::AutoDiffProjectTopLevelSubcontext: + case BuiltinValueKind::AutoDiffAllocateSubcontext: return true; } } diff --git a/lib/SILOptimizer/Transforms/AllocBoxToStack.cpp b/lib/SILOptimizer/Transforms/AllocBoxToStack.cpp index b29b31b2db672..05fe7c14c12ae 100644 --- a/lib/SILOptimizer/Transforms/AllocBoxToStack.cpp +++ b/lib/SILOptimizer/Transforms/AllocBoxToStack.cpp @@ -818,28 +818,33 @@ PromotedParamCloner::visitStrongRetainInst(StrongRetainInst *Inst) { SILCloner::visitStrongRetainInst(Inst); } 
-void PromotedParamCloner::visitCopyValueInst(CopyValueInst *CVI) { +void PromotedParamCloner::visitCopyValueInst(CopyValueInst *cvi) { // If it's a copy of a promoted parameter, just drop the instruction. - auto *Tmp = CVI; - while (auto *CopyOp = dyn_cast(Tmp->getOperand())) { - Tmp = CopyOp; + auto *tmp = cvi; + while (auto *copyOp = dyn_cast(tmp->getOperand())) { + tmp = copyOp; } - if (OrigPromotedParameters.count(Tmp->getOperand())) + if (OrigPromotedParameters.count(tmp->getOperand())) return; - SILCloner::visitCopyValueInst(CVI); + SILCloner::visitCopyValueInst(cvi); } -void PromotedParamCloner::visitProjectBoxInst(ProjectBoxInst *Inst) { - // If it's a projection of a promoted parameter, drop the instruction. - // Its uses will be replaced by the promoted address. - if (OrigPromotedParameters.count(Inst->getOperand())) { - auto *origArg = cast(Inst->getOperand()); - recordFoldedValue(Inst, NewPromotedArgs[origArg->getIndex()]); +void PromotedParamCloner::visitProjectBoxInst(ProjectBoxInst *pbi) { + // If it's a projection of a promoted parameter (or a copy_value of a promoted + // parameter), drop the instruction. Its uses will be replaced by the + // promoted address. + SILValue box = pbi->getOperand(); + while (auto *copyOp = dyn_cast(box)) { + box = copyOp->getOperand(); + } + if (OrigPromotedParameters.count(box)) { + auto *origArg = cast(box); + recordFoldedValue(pbi, NewPromotedArgs[origArg->getIndex()]); return; } - SILCloner::visitProjectBoxInst(Inst); + SILCloner::visitProjectBoxInst(pbi); } // While cloning during specialization, make sure apply instructions do not have diff --git a/lib/SILOptimizer/Transforms/GenericSpecializer.cpp b/lib/SILOptimizer/Transforms/GenericSpecializer.cpp index 326cb1b6311c3..b1def68621806 100644 --- a/lib/SILOptimizer/Transforms/GenericSpecializer.cpp +++ b/lib/SILOptimizer/Transforms/GenericSpecializer.cpp @@ -28,10 +28,6 @@ using namespace swift; -// For testing during bring up. 
-static llvm::cl::opt EnableGenericSpecializerWithOwnership( - "sil-generic-specializer-enable-ownership", llvm::cl::init(false)); - namespace { class GenericSpecializer : public SILFunctionTransform { @@ -42,10 +38,6 @@ class GenericSpecializer : public SILFunctionTransform { void run() override { SILFunction &F = *getFunction(); - // TODO: We should be able to handle ownership. - if (F.hasOwnership() && !EnableGenericSpecializerWithOwnership) - return; - LLVM_DEBUG(llvm::dbgs() << "***** GenericSpecializer on function:" << F.getName() << " *****\n"); diff --git a/lib/SILOptimizer/Transforms/TempLValueOpt.cpp b/lib/SILOptimizer/Transforms/TempLValueOpt.cpp index 36303388a2a02..6fe12aaecdabc 100644 --- a/lib/SILOptimizer/Transforms/TempLValueOpt.cpp +++ b/lib/SILOptimizer/Transforms/TempLValueOpt.cpp @@ -253,6 +253,7 @@ bool TempLValueOptPass::tempLValueOpt(CopyAddrInst *copyInst) { user->eraseFromParent(); break; default: + AA->invalidateInstruction(user); use->set(destination); } } diff --git a/lib/SILOptimizer/Transforms/TempRValueElimination.cpp b/lib/SILOptimizer/Transforms/TempRValueElimination.cpp index 6f060ec7d2db2..0ca0ad518fe2e 100644 --- a/lib/SILOptimizer/Transforms/TempRValueElimination.cpp +++ b/lib/SILOptimizer/Transforms/TempRValueElimination.cpp @@ -506,6 +506,8 @@ bool TempRValueOptPass::tryOptimizeCopyIntoTemp(CopyAddrInst *copyInst) { while (!tempObj->use_empty()) { Operand *use = *tempObj->use_begin(); SILInstruction *user = use->getUser(); + aa->invalidateInstruction(user); + switch (user->getKind()) { case SILInstructionKind::DestroyAddrInst: case SILInstructionKind::DeallocStackInst: diff --git a/lib/SILOptimizer/Utils/CFGOptUtils.cpp b/lib/SILOptimizer/Utils/CFGOptUtils.cpp index 1188b449e6eeb..a26217a46a2b0 100644 --- a/lib/SILOptimizer/Utils/CFGOptUtils.cpp +++ b/lib/SILOptimizer/Utils/CFGOptUtils.cpp @@ -23,16 +23,9 @@ using namespace swift; -/// Adds a new argument to an edge between a branch and a destination -/// block. 
-/// -/// \param branch The terminator to add the argument to. -/// \param dest The destination block of the edge. -/// \param val The value to the arguments of the branch. -/// \return The created branch. The old branch is deleted. -/// The argument is appended at the end of the argument tuple. TermInst *swift::addNewEdgeValueToBranch(TermInst *branch, SILBasicBlock *dest, - SILValue val) { + SILValue val, + const InstModCallbacks &callbacks) { SILBuilderWithScope builder(branch); TermInst *newBr = nullptr; @@ -59,6 +52,7 @@ TermInst *swift::addNewEdgeValueToBranch(TermInst *branch, SILBasicBlock *dest, cbi->getLoc(), cbi->getCondition(), cbi->getTrueBB(), trueArgs, cbi->getFalseBB(), falseArgs, cbi->getTrueBBCount(), cbi->getFalseBBCount()); + callbacks.createdNewInst(newBr); } else if (auto *bi = dyn_cast(branch)) { SmallVector args; @@ -68,13 +62,13 @@ TermInst *swift::addNewEdgeValueToBranch(TermInst *branch, SILBasicBlock *dest, args.push_back(val); assert(args.size() == dest->getNumArguments()); newBr = builder.createBranch(bi->getLoc(), bi->getDestBB(), args); + callbacks.createdNewInst(newBr); } else { // At the moment we can only add arguments to br and cond_br. 
llvm_unreachable("Can't add argument to terminator"); } - branch->dropAllReferences(); - branch->eraseFromParent(); + callbacks.deleteInst(branch); return newBr; } diff --git a/lib/SILOptimizer/Utils/CMakeLists.txt b/lib/SILOptimizer/Utils/CMakeLists.txt index 40359206beeb2..fb741a85bbe2f 100644 --- a/lib/SILOptimizer/Utils/CMakeLists.txt +++ b/lib/SILOptimizer/Utils/CMakeLists.txt @@ -21,4 +21,5 @@ target_sources(swiftSILOptimizer PRIVATE SILSSAUpdater.cpp SpecializationMangler.cpp StackNesting.cpp - ValueLifetime.cpp) + ValueLifetime.cpp + OwnershipOptUtils.cpp) diff --git a/lib/SILOptimizer/Utils/CanonicalizeInstruction.cpp b/lib/SILOptimizer/Utils/CanonicalizeInstruction.cpp index 87d8b9c68cff0..56ad6bee6ce34 100644 --- a/lib/SILOptimizer/Utils/CanonicalizeInstruction.cpp +++ b/lib/SILOptimizer/Utils/CanonicalizeInstruction.cpp @@ -24,6 +24,7 @@ #include "swift/SIL/Projection.h" #include "swift/SIL/SILBuilder.h" #include "swift/SIL/SILFunction.h" +#include "swift/SIL/SILInstruction.h" #include "swift/SILOptimizer/Analysis/SimplifyInstruction.h" #include "llvm/ADT/Statistic.h" #include "llvm/Support/Debug.h" @@ -80,11 +81,6 @@ killInstAndIncidentalUses(SingleValueInstruction *inst, // intruction that wasn't erased. static Optional simplifyAndReplace(SILInstruction *inst, CanonicalizeInstruction &pass) { - // FIXME: temporarily bypass simplification untill all simplifications - // preserve ownership SIL. - if (inst->getFunction()->hasOwnership()) - return None; - SILValue result = simplifyInstruction(inst); if (!result) return None; @@ -99,7 +95,9 @@ simplifyAndReplace(SILInstruction *inst, CanonicalizeInstruction &pass) { // because the instruction and all non-replaced users will be deleted. 
auto nextII = replaceAllSimplifiedUsesAndErase( inst, result, - [&pass](SILInstruction *deleted) { pass.killInstruction(deleted); }); + [&pass](SILInstruction *deleted) { pass.killInstruction(deleted); }, + [&pass](SILInstruction *newInst) { pass.notifyNewInstruction(newInst); }, + &pass.deadEndBlocks); // Push the new instruction and any users onto the worklist. pass.notifyHasNewUsers(result); @@ -110,6 +108,54 @@ simplifyAndReplace(SILInstruction *inst, CanonicalizeInstruction &pass) { // Canonicalize Memory Operations //===----------------------------------------------------------------------===// +namespace { + +struct LoadOperation { + llvm::PointerUnion value; + + LoadOperation(SILInstruction *input) : value(nullptr) { + if (auto *li = dyn_cast(input)) { + value = li; + return; + } + + if (auto *lbi = dyn_cast(input)) { + value = lbi; + return; + } + } + + operator bool() const { return !value.isNull(); } + + SingleValueInstruction *operator*() const { + if (value.is()) + return value.get(); + return value.get(); + } + + const SingleValueInstruction *operator->() const { + if (value.is()) + return value.get(); + return value.get(); + } + + SingleValueInstruction *operator->() { + if (value.is()) + return value.get(); + return value.get(); + } + + Optional getOwnershipQualifier() const { + if (auto *lbi = value.dyn_cast()) { + return None; + } + + return value.get()->getOwnershipQualifier(); + } +}; + +} // anonymous namespace + // Replace all uses of an original struct or tuple extract instruction with the // given load instruction. The caller ensures that the load only loads the // extracted field. 
@@ -120,27 +166,29 @@ simplifyAndReplace(SILInstruction *inst, CanonicalizeInstruction &pass) { // \p loadInst has the form: // (load (struct_element_addr %base, #field) static void replaceUsesOfExtract(SingleValueInstruction *extract, - LoadInst *loadInst, + LoadOperation loadInst, CanonicalizeInstruction &pass) { assert(extract->getType() == loadInst->getType()); - SingleValueInstruction *loadedVal = loadInst; - if (loadInst->getOwnershipQualifier() == LoadOwnershipQualifier::Copy) { - // Borrow the load-copied subelement, with precisely the same scope as - // the aggregate borrow. - assert(extract->getNumOperands() == 1); - auto *origBorrow = cast(extract->getOperand(0)); - auto *newBorrow = SILBuilderWithScope(origBorrow) - .createBeginBorrow(loadInst->getLoc(), loadInst); - pass.notifyNewInstruction(newBorrow); - - assert(extract == origBorrow->getSingleNonEndingUse()->getUser()); - for (auto *origEnd : origBorrow->getEndBorrows()) { - auto *endBorrow = SILBuilderWithScope(origEnd).createEndBorrow( - origEnd->getLoc(), newBorrow); - pass.notifyNewInstruction(endBorrow); + SingleValueInstruction *loadedVal = *loadInst; + if (auto qual = loadInst.getOwnershipQualifier()) { + if (*qual == LoadOwnershipQualifier::Copy) { + // Borrow the load-copied subelement, with precisely the same scope as + // the aggregate borrow. 
+ assert(extract->getNumOperands() == 1); + auto *origBorrow = cast(extract->getOperand(0)); + auto *newBorrow = SILBuilderWithScope(origBorrow) + .createBeginBorrow(loadInst->getLoc(), *loadInst); + pass.notifyNewInstruction(newBorrow); + + assert(extract == origBorrow->getSingleNonEndingUse()->getUser()); + for (auto *origEnd : origBorrow->getEndBorrows()) { + auto *endBorrow = SILBuilderWithScope(origEnd).createEndBorrow( + origEnd->getLoc(), newBorrow); + pass.notifyNewInstruction(endBorrow); + } + loadedVal = newBorrow; } - loadedVal = newBorrow; } LLVM_DEBUG(llvm::dbgs() << "Replacing " << *extract << " with " << *loadedVal << "\n"); @@ -156,28 +204,34 @@ static void replaceUsesOfExtract(SingleValueInstruction *extract, // // TODO: Consider handling LoadBorrowInst. static SILBasicBlock::iterator -splitAggregateLoad(LoadInst *loadInst, CanonicalizeInstruction &pass) { +splitAggregateLoad(LoadOperation loadInst, CanonicalizeInstruction &pass) { // Keep track of the next iterator after any newly added or to-be-deleted // instructions. This must be valid regardless of whether the pass immediately // deletes the instructions or simply records them for later deletion. auto nextII = std::next(loadInst->getIterator()); bool needsBorrow; - switch (loadInst->getOwnershipQualifier()) { - case LoadOwnershipQualifier::Unqualified: - case LoadOwnershipQualifier::Trivial: + if (auto qual = loadInst.getOwnershipQualifier()) { + switch (*qual) { + case LoadOwnershipQualifier::Unqualified: + case LoadOwnershipQualifier::Trivial: + needsBorrow = false; + break; + case LoadOwnershipQualifier::Copy: + needsBorrow = true; + break; + case LoadOwnershipQualifier::Take: + // TODO: To handle a "take", we would need to generate additional destroys + // for any fields that aren't already extracted. This would be + // out-of-place for this transform, and I'm not sure if this a case that + // needs to be handled in CanonicalizeInstruction. 
+ return nextII; + } + } else { + // If we don't have a qual, we have a borrow. needsBorrow = false; - break; - case LoadOwnershipQualifier::Copy: - needsBorrow = true; - break; - case LoadOwnershipQualifier::Take: - // TODO: To handle a "take", we would need to generate additional destroys - // for any fields that aren't already extracted. This would be out-of-place - // for this transform, and I'm not sure if this a case that needs to be - // handled in SILGenCleanup. - return nextII; } + struct ProjInstPair { Projection proj; SingleValueInstruction *extract; @@ -191,12 +245,12 @@ splitAggregateLoad(LoadInst *loadInst, CanonicalizeInstruction &pass) { // Add load projections to a projection list. llvm::SmallVector projections; llvm::SmallVector borrows; - llvm::SmallVector destroys; - for (auto *use : getNonDebugUses(loadInst)) { + llvm::SmallVector lifetimeEndingInsts; + for (auto *use : getNonDebugUses(*loadInst)) { auto *user = use->getUser(); if (needsBorrow) { if (auto *destroy = dyn_cast(user)) { - destroys.push_back(destroy); + lifetimeEndingInsts.push_back(destroy); continue; } auto *borrow = dyn_cast(user); @@ -210,7 +264,14 @@ splitAggregateLoad(LoadInst *loadInst, CanonicalizeInstruction &pass) { borrows.push_back(borrow); user = borrowedOper->getUser(); + } else { + if (isa(user) && + !loadInst.getOwnershipQualifier().hasValue()) { + lifetimeEndingInsts.push_back(user); + continue; + } } + // If we have any non SEI, TEI instruction, don't do anything here. if (!isa(user) && !isa(user)) return nextII; @@ -252,7 +313,7 @@ splitAggregateLoad(LoadInst *loadInst, CanonicalizeInstruction &pass) { // Create a new address projection instruction and load instruction for each // unique projection. 
Projection *lastProj = nullptr; - LoadInst *lastNewLoad = nullptr; + Optional lastNewLoad; for (auto &pair : projections) { auto &proj = pair.proj; auto *extract = pair.extract; @@ -260,7 +321,7 @@ splitAggregateLoad(LoadInst *loadInst, CanonicalizeInstruction &pass) { // If this projection is the same as the last projection we processed, just // replace all uses of the projection with the load we created previously. if (lastProj && proj == *lastProj) { - replaceUsesOfExtract(extract, lastNewLoad, pass); + replaceUsesOfExtract(extract, *lastNewLoad, pass); nextII = killInstruction(extract, nextII, pass); continue; } @@ -268,45 +329,64 @@ splitAggregateLoad(LoadInst *loadInst, CanonicalizeInstruction &pass) { // This is a unique projection. Create the new address projection and load. lastProj = &proj; // Insert new instructions before the original load. - SILBuilderWithScope LoadBuilder(loadInst); + SILBuilderWithScope LoadBuilder(*loadInst); auto *projInst = proj.createAddressProjection(LoadBuilder, loadInst->getLoc(), - loadInst->getOperand()) + loadInst->getOperand(0)) .get(); pass.notifyNewInstruction(projInst); // When loading a trivial subelement, convert ownership. 
- LoadOwnershipQualifier loadOwnership = loadInst->getOwnershipQualifier(); - if (loadOwnership != LoadOwnershipQualifier::Unqualified - && projInst->getType().isTrivial(*projInst->getFunction())) { - loadOwnership = LoadOwnershipQualifier::Trivial; + Optional loadOwnership = + loadInst.getOwnershipQualifier(); + if (loadOwnership.hasValue()) { + if (*loadOwnership != LoadOwnershipQualifier::Unqualified && + projInst->getType().isTrivial(*projInst->getFunction())) + loadOwnership = LoadOwnershipQualifier::Trivial; + } else { + if (projInst->getType().isTrivial(*projInst->getFunction())) + loadOwnership = LoadOwnershipQualifier::Trivial; } - lastNewLoad = - LoadBuilder.createLoad(loadInst->getLoc(), projInst, loadOwnership); - pass.notifyNewInstruction(lastNewLoad); - - if (loadOwnership == LoadOwnershipQualifier::Copy) { - // Destroy the loaded value wherever the aggregate load was destroyed. - assert(loadInst->getOwnershipQualifier() == LoadOwnershipQualifier::Copy); - for (DestroyValueInst *destroy : destroys) { - SILBuilderWithScope(destroy).createDestroyValue(destroy->getLoc(), - lastNewLoad); - pass.notifyNewInstruction(destroy); + if (loadOwnership) { + lastNewLoad = + LoadBuilder.createLoad(loadInst->getLoc(), projInst, *loadOwnership); + } else { + lastNewLoad = LoadBuilder.createLoadBorrow(loadInst->getLoc(), projInst); + } + pass.notifyNewInstruction(**lastNewLoad); + + if (loadOwnership) { + if (*loadOwnership == LoadOwnershipQualifier::Copy) { + // Destroy the loaded value wherever the aggregate load was destroyed. 
+ assert(loadInst.getOwnershipQualifier() == + LoadOwnershipQualifier::Copy); + for (SILInstruction *destroy : lifetimeEndingInsts) { + auto *newInst = SILBuilderWithScope(destroy).createDestroyValue( + destroy->getLoc(), **lastNewLoad); + pass.notifyNewInstruction(newInst); + } + } + } else { + for (SILInstruction *destroy : lifetimeEndingInsts) { + auto *newInst = SILBuilderWithScope(destroy).createEndBorrow( + destroy->getLoc(), **lastNewLoad); + pass.notifyNewInstruction(newInst); } } - replaceUsesOfExtract(extract, lastNewLoad, pass); + replaceUsesOfExtract(extract, *lastNewLoad, pass); nextII = killInstruction(extract, nextII, pass); } + // Remove the now unused borrows. for (auto *borrow : borrows) nextII = killInstAndIncidentalUses(borrow, nextII, pass); // Erase the old load. - for (auto *destroy : destroys) + for (auto *destroy : lifetimeEndingInsts) nextII = killInstruction(destroy, nextII, pass); - return killInstAndIncidentalUses(loadInst, nextII, pass); + return killInstAndIncidentalUses(*loadInst, nextII, pass); } // Given a store within a single property struct, recursively form the parent @@ -383,6 +463,94 @@ broadenSingleElementStores(StoreInst *storeInst, return killInstruction(storeInst, nextII, pass); } +//===----------------------------------------------------------------------===// +// Simple ARC Peepholes +//===----------------------------------------------------------------------===// + +static SILBasicBlock::iterator +eliminateSimpleCopies(CopyValueInst *cvi, CanonicalizeInstruction &pass) { + auto next = std::next(cvi->getIterator()); + + // Eliminate copies that only have destroy_value uses. 
+ SmallVector destroys; + for (auto *use : getNonDebugUses(cvi)) { + if (auto *dvi = dyn_cast(use->getUser())) { + destroys.push_back(dvi); + continue; + } + return next; + } + + while (!destroys.empty()) { + next = killInstruction(destroys.pop_back_val(), next, pass); + } + + next = killInstAndIncidentalUses(cvi, next, pass); + return next; +} + +static SILBasicBlock::iterator +eliminateSimpleBorrows(BeginBorrowInst *bbi, CanonicalizeInstruction &pass) { + auto next = std::next(bbi->getIterator()); + + // We know that our borrow is completely within the lifetime of its base value + // if the borrow is never reborrowed. We check for reborrows and do not + // optimize such cases. Otherwise, we can eliminate our borrow and instead use + // our operand. + auto base = bbi->getOperand(); + auto baseOwnership = base.getOwnershipKind(); + SmallVector endBorrows; + for (auto *use : getNonDebugUses(bbi)) { + if (auto *ebi = dyn_cast(use->getUser())) { + endBorrows.push_back(ebi); + continue; + } + + // Otherwise, if we have a use that is non-lifetime ending and can accept + // our base ownership, continue. + if (!use->isLifetimeEnding() && use->canAcceptKind(baseOwnership)) + continue; + + return next; + } + + while (!endBorrows.empty()) { + next = killInstruction(endBorrows.pop_back_val(), next, pass); + } + bbi->replaceAllUsesWith(base); + pass.notifyHasNewUsers(base); + return killInstruction(bbi, next, pass); +} + +/// Delete any result having forwarding instruction that only has destroy_value +/// and debug_value uses. 
+static SILBasicBlock::iterator +eliminateUnneededForwardingUnarySingleValueInst(SingleValueInstruction *inst, + CanonicalizeInstruction &pass) { + auto next = std::next(inst->getIterator()); + + for (auto *use : getNonDebugUses(inst)) + if (!isa(use->getUser())) + return next; + deleteAllDebugUses(inst); + SILValue op = inst->getOperand(0); + inst->replaceAllUsesWith(op); + pass.notifyHasNewUsers(op); + return killInstruction(inst, next, pass); +} + +static Optional +tryEliminateUnneededForwardingInst(SILInstruction *i, + CanonicalizeInstruction &pass) { + assert(isa(i) && + "Must be an ownership forwarding inst"); + if (auto *svi = dyn_cast(i)) + if (svi->getNumOperands() == 1) + return eliminateUnneededForwardingUnarySingleValueInst(svi, pass); + + return None; +} + //===----------------------------------------------------------------------===// // Top-Level Entry Point //===----------------------------------------------------------------------===// @@ -392,11 +560,29 @@ CanonicalizeInstruction::canonicalize(SILInstruction *inst) { if (auto nextII = simplifyAndReplace(inst, *this)) return nextII.getValue(); - if (auto *loadInst = dyn_cast(inst)) - return splitAggregateLoad(loadInst, *this); + if (auto li = LoadOperation(inst)) { + return splitAggregateLoad(li, *this); + } - if (auto *storeInst = dyn_cast(inst)) + if (auto *storeInst = dyn_cast(inst)) { return broadenSingleElementStores(storeInst, *this); + } + + if (auto *cvi = dyn_cast(inst)) + return eliminateSimpleCopies(cvi, *this); + + if (auto *bbi = dyn_cast(inst)) + return eliminateSimpleBorrows(bbi, *this); + + // If we have ownership and are not in raw SIL, eliminate unneeded forwarding + // insts. We don't do this in raw SIL as not to disturb the codegen read by + // diagnostics. 
+ auto *fn = inst->getFunction(); + if (fn->hasOwnership() && fn->getModule().getStage() != SILStage::Raw) { + if (isa(inst)) + if (auto newNext = tryEliminateUnneededForwardingInst(inst, *this)) + return *newNext; + } // Skip ahead. return std::next(inst->getIterator()); diff --git a/lib/SILOptimizer/Utils/Generics.cpp b/lib/SILOptimizer/Utils/Generics.cpp index 0cecae47ce1f5..5dabe344bf763 100644 --- a/lib/SILOptimizer/Utils/Generics.cpp +++ b/lib/SILOptimizer/Utils/Generics.cpp @@ -683,6 +683,9 @@ void ReabstractionInfo::createSubstitutedAndSpecializedTypes() { TrivialArgs.resize(NumArgs); SILFunctionConventions substConv(SubstitutedType, M); + TypeExpansionContext resilienceExp = getResilienceExpansion(); + TypeExpansionContext minimalExp(ResilienceExpansion::Minimal, + TargetModule, isWholeModule); if (SubstitutedType->getNumDirectFormalResults() == 0) { // The original function has no direct result yet. Try to convert the first @@ -693,18 +696,15 @@ void ReabstractionInfo::createSubstitutedAndSpecializedTypes() { for (SILResultInfo RI : SubstitutedType->getIndirectFormalResults()) { assert(RI.isFormalIndirect()); - auto ResultTy = substConv.getSILType(RI, getResilienceExpansion()); - ResultTy = Callee->mapTypeIntoContext(ResultTy); - auto &TL = M.Types.getTypeLowering(ResultTy, - getResilienceExpansion()); - - if (TL.isLoadable() && - !RI.getReturnValueType(M, SubstitutedType, getResilienceExpansion()) - ->isVoid() && - shouldExpand(M, ResultTy)) { + TypeCategory tc = getReturnTypeCategory(RI, substConv, resilienceExp); + if (tc != NotLoadable) { Conversions.set(IdxForResult); - if (TL.isTrivial()) + if (tc == LoadableAndTrivial) TrivialArgs.set(IdxForResult); + if (resilienceExp != minimalExp && + getReturnTypeCategory(RI, substConv, minimalExp) == NotLoadable) { + hasConvertedResilientParams = true; + } break; } ++IdxForResult; @@ -717,21 +717,20 @@ void ReabstractionInfo::createSubstitutedAndSpecializedTypes() { auto IdxToInsert = IdxForParam; 
++IdxForParam; - auto ParamTy = substConv.getSILType(PI, getResilienceExpansion()); - ParamTy = Callee->mapTypeIntoContext(ParamTy); - auto &TL = M.Types.getTypeLowering(ParamTy, - getResilienceExpansion()); - - if (!TL.isLoadable()) { + TypeCategory tc = getParamTypeCategory(PI, substConv, resilienceExp); + if (tc == NotLoadable) continue; - } switch (PI.getConvention()) { case ParameterConvention::Indirect_In: case ParameterConvention::Indirect_In_Guaranteed: Conversions.set(IdxToInsert); - if (TL.isTrivial()) + if (tc == LoadableAndTrivial) TrivialArgs.set(IdxToInsert); + if (resilienceExp != minimalExp && + getParamTypeCategory(PI, substConv, minimalExp) == NotLoadable) { + hasConvertedResilientParams = true; + } break; case ParameterConvention::Indirect_In_Constant: case ParameterConvention::Indirect_Inout: @@ -749,6 +748,43 @@ void ReabstractionInfo::createSubstitutedAndSpecializedTypes() { SpecializedType = createSpecializedType(SubstitutedType, M); } +ReabstractionInfo::TypeCategory ReabstractionInfo:: +getReturnTypeCategory(const SILResultInfo &RI, + const SILFunctionConventions &substConv, + TypeExpansionContext typeExpansion) { + auto &M = Callee->getModule(); + auto ResultTy = substConv.getSILType(RI, typeExpansion); + ResultTy = Callee->mapTypeIntoContext(ResultTy); + auto &TL = M.Types.getTypeLowering(ResultTy, typeExpansion); + + if (!TL.isLoadable()) + return NotLoadable; + + if (RI.getReturnValueType(M, SubstitutedType, typeExpansion) + ->isVoid()) + return NotLoadable; + + if (!shouldExpand(M, ResultTy)) + return NotLoadable; + + return TL.isTrivial() ? 
LoadableAndTrivial : Loadable; +} + +ReabstractionInfo::TypeCategory ReabstractionInfo:: +getParamTypeCategory(const SILParameterInfo &PI, + const SILFunctionConventions &substConv, + TypeExpansionContext typeExpansion) { + auto &M = Callee->getModule(); + auto ParamTy = substConv.getSILType(PI, typeExpansion); + ParamTy = Callee->mapTypeIntoContext(ParamTy); + auto &TL = M.Types.getTypeLowering(ParamTy, typeExpansion); + + if (!TL.isLoadable()) + return NotLoadable; + + return TL.isTrivial() ? LoadableAndTrivial : Loadable; +} + /// Create a new substituted type with the updated signature. CanSILFunctionType ReabstractionInfo::createSubstitutedType(SILFunction *OrigF, @@ -1818,9 +1854,13 @@ GenericFuncSpecializer::GenericFuncSpecializer( ClonedName = Mangler.mangle(); } else { Mangle::GenericSpecializationMangler Mangler( - GenericFunc, ParamSubs, ReInfo.isSerialized(), /*isReAbstracted*/ true, - /*isInlined*/ false, ReInfo.isPrespecialized()); - ClonedName = Mangler.mangle(); + GenericFunc, ReInfo.isSerialized()); + if (ReInfo.isPrespecialized()) { + ClonedName = Mangler.manglePrespecialized(ParamSubs); + } else { + ClonedName = Mangler.mangleReabstracted(ParamSubs, + ReInfo.needAlternativeMangling()); + } } LLVM_DEBUG(llvm::dbgs() << " Specialized function " << ClonedName << '\n'); } @@ -2140,11 +2180,9 @@ class ReabstractionThunkGenerator { SpecializedFunc(SpecializedFunc), ReInfo(ReInfo), OrigPAI(OrigPAI), Loc(RegularLocation::getAutoGeneratedLocation()) { if (!ReInfo.isPartialSpecialization()) { - Mangle::GenericSpecializationMangler Mangler( - OrigF, ReInfo.getCalleeParamSubstitutionMap(), ReInfo.isSerialized(), - /*isReAbstracted*/ false); - - ThunkName = Mangler.mangle(); + Mangle::GenericSpecializationMangler Mangler(OrigF, ReInfo.isSerialized()); + ThunkName = Mangler.mangleNotReabstracted( + ReInfo.getCalleeParamSubstitutionMap()); } else { Mangle::PartialSpecializationMangler Mangler( OrigF, ReInfo.getSpecializedType(), ReInfo.isSerialized(), @@ 
-2462,14 +2500,14 @@ usePrespecialized(SILOptFunctionBuilder &funcBuilder, ApplySite apply, if (specializedReInfo.getSpecializedType() != reInfo.getSpecializedType()) continue; - Mangle::GenericSpecializationMangler mangler( - refF, reInfo.getCalleeParamSubstitutionMap(), reInfo.isSerialized(), - /*isReAbstracted*/ true, /*isInlined*/ false, - reInfo.isPrespecialized()); + SubstitutionMap subs = reInfo.getCalleeParamSubstitutionMap(); + Mangle::GenericSpecializationMangler mangler(refF, reInfo.isSerialized()); + std::string name = reInfo.isPrespecialized() ? + mangler.manglePrespecialized(subs) : + mangler.mangleReabstracted(subs, reInfo.needAlternativeMangling()); prespecializedReInfo = reInfo; - return lookupOrCreatePrespecialization(funcBuilder, refF, mangler.mangle(), - reInfo); + return lookupOrCreatePrespecialization(funcBuilder, refF, name, reInfo); } return nullptr; } diff --git a/lib/SILOptimizer/Utils/InstOptUtils.cpp b/lib/SILOptimizer/Utils/InstOptUtils.cpp index c998b7a0e8b03..f27e4f22657d9 100644 --- a/lib/SILOptimizer/Utils/InstOptUtils.cpp +++ b/lib/SILOptimizer/Utils/InstOptUtils.cpp @@ -138,6 +138,12 @@ swift::createDecrementBefore(SILValue ptr, SILInstruction *insertPt) { return builder.createReleaseValue(loc, ptr, builder.getDefaultAtomicity()); } +static bool isOSSAEndScopeWithNoneOperand(SILInstruction *i) { + if (!isa(i) && !isa(i)) + return false; + return i->getOperand(0).getOwnershipKind() == OwnershipKind::None; +} + /// Perform a fast local check to see if the instruction is dead. /// /// This routine only examines the state of the instruction at hand. @@ -178,6 +184,15 @@ bool swift::isInstructionTriviallyDead(SILInstruction *inst) { if (isa(inst)) return true; + // An ossa end scope instruction is trivially dead if its operand has + // OwnershipKind::None. This can occur after CFG simplification in the + // presence of non-payloaded or trivial payload cases of non-trivial enums. 
+ // + // Examples of ossa end_scope instructions: end_borrow, destroy_value. + if (inst->getFunction()->hasOwnership() && + isOSSAEndScopeWithNoneOperand(inst)) + return true; + if (!inst->mayHaveSideEffects()) return true; diff --git a/lib/SILOptimizer/Utils/OwnershipOptUtils.cpp b/lib/SILOptimizer/Utils/OwnershipOptUtils.cpp new file mode 100644 index 0000000000000..3208fe70fb9c3 --- /dev/null +++ b/lib/SILOptimizer/Utils/OwnershipOptUtils.cpp @@ -0,0 +1,810 @@ +//===--- OwnershipOptUtils.cpp --------------------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// +/// Ownership Utilities that rely on SILOptimizer functionality. 
+/// +//===----------------------------------------------------------------------===// + +#include "swift/SILOptimizer/Utils/OwnershipOptUtils.h" + +#include "swift/Basic/Defer.h" +#include "swift/SIL/BasicBlockUtils.h" +#include "swift/SIL/InstructionUtils.h" +#include "swift/SIL/LinearLifetimeChecker.h" +#include "swift/SIL/OwnershipUtils.h" +#include "swift/SIL/Projection.h" +#include "swift/SIL/SILArgument.h" +#include "swift/SIL/SILBuilder.h" +#include "swift/SIL/SILInstruction.h" +#include "swift/SILOptimizer/Utils/CFGOptUtils.h" +#include "swift/SILOptimizer/Utils/ValueLifetime.h" + +using namespace swift; + +//===----------------------------------------------------------------------===// +// Low Level RAUW Utility +//===----------------------------------------------------------------------===// + +static SILBasicBlock::iterator +replaceAllUsesAndEraseInner(SingleValueInstruction *svi, SILValue newValue, + std::function eraseNotify) { + assert(svi != newValue && "Cannot RAUW a value with itself"); + SILBasicBlock::iterator nextii = std::next(svi->getIterator()); + + // Only SingleValueInstructions are currently simplified. + while (!svi->use_empty()) { + Operand *use = *svi->use_begin(); + SILInstruction *user = use->getUser(); + // Erase the end of scope marker. 
+ if (isEndOfScopeMarker(user)) { + if (&*nextii == user) + ++nextii; + if (eraseNotify) + eraseNotify(user); + else + user->eraseFromParent(); + continue; + } + use->set(newValue); + } + if (eraseNotify) + eraseNotify(svi); + else + svi->eraseFromParent(); + + return nextii; +} + +//===----------------------------------------------------------------------===// +// Ownership Lifetime Extender +//===----------------------------------------------------------------------===// + +namespace { + +struct OwnershipLifetimeExtender { + OwnershipFixupContext &ctx; + + /// Lifetime extend newValue over owned oldValue assuming that our copy will + /// have its lifetime ended by oldValue's lifetime ending uses after RAUWing. + CopyValueInst * + copyAndExtendForLifetimeEndingRAUW(SILValue value, + SILInstruction *consumingPoint); + + CopyValueInst * + copyAndExtendForNonLifetimeEndingRAUW(SILValue value, + ArrayRef range) { + return copyAndExtendForNonLifetimeEndingRAUW>(value, + range); + } + + template + CopyValueInst *copyAndExtendForNonLifetimeEndingRAUW(SILValue value, + const RangeTy &range); + + template + BeginBorrowInst *copyBorrowAndExtendForRAUW(SILValue newValue, + RangeTy useRange); + + /// We are copy/borrowing new value to be over the same lifetime as old + /// value. We know that oldValue is dominated by newValue. + BeginBorrowInst *copyBorrowAndExtendForLifetimeEndingRAUW(SILValue newValue, + SILValue oldValue); +}; + +} // end anonymous namespace + +/// Lifetime extend newValue over owned oldValue assuming that our copy will +/// have its lifetime ended by oldValue's lifetime ending uses after RAUWing. 
+CopyValueInst *OwnershipLifetimeExtender::copyAndExtendForLifetimeEndingRAUW(
+ SILValue value, SILInstruction *consumingPoint) {
+ auto *newValInsertPt = value->getDefiningInsertionPoint();
+ assert(newValInsertPt);
+ CopyValueInst *copy;
+ if (!isa<SILArgument>(value)) {
+ SILBuilderWithScope::insertAfter(newValInsertPt, [&](SILBuilder &builder) {
+ copy = builder.createCopyValue(builder.getInsertionPointLoc(), value);
+ });
+ } else {
+ SILBuilderWithScope builder(newValInsertPt);
+ copy = builder.createCopyValue(newValInsertPt->getLoc(), value);
+ }
+ if (ctx.newInstNotify)
+ ctx.newInstNotify(copy);
+
+ auto *result = copy;
+ ctx.jointPostDomSetComputer.findJointPostDominatingSet(
+ newValInsertPt->getParent(), consumingPoint->getParent(),
+ // inputBlocksFoundDuringWalk.
+ [&](SILBasicBlock *loopBlock) {
+ // This must be consumingPoint->getParent() since we only have one
+ // consuming use. In this case, we know that this is the consuming
+ // point where we will need a control equivalent copy_value (and that
+ // destroy_value will be put for the out of loop value as appropriate.
+ assert(loopBlock == consumingPoint->getParent());
+ auto front = loopBlock->begin();
+ SILBuilderWithScope newBuilder(front);
+ result = newBuilder.createCopyValue(front->getLoc(), copy);
+ if (ctx.newInstNotify)
+ ctx.newInstNotify(result);
+
+ llvm_unreachable("Should never visit this!");
+ },
+ // Input blocks in joint post dom set. We don't care about these.
+ [&](SILBasicBlock *postDomBlock) { + auto front = postDomBlock->begin(); + SILBuilderWithScope newBuilder(front); + auto *dvi = newBuilder.createDestroyValue(front->getLoc(), copy); + if (ctx.newInstNotify) + ctx.newInstNotify(dvi); + }); + return result; +} + +template +CopyValueInst *OwnershipLifetimeExtender::copyAndExtendForNonLifetimeEndingRAUW( + SILValue value, const RangeTy &range) { + auto *newValInsertPt = value->getDefiningInsertionPoint(); + assert(newValInsertPt); + + CopyValueInst *copy; + + if (!isa(value)) { + SILBuilderWithScope::insertAfter(newValInsertPt, [&](SILBuilder &builder) { + copy = builder.createCopyValue(builder.getInsertionPointLoc(), value); + }); + } else { + SILBuilderWithScope builder(newValInsertPt); + copy = builder.createCopyValue(newValInsertPt->getLoc(), value); + } + if (ctx.newInstNotify) + ctx.newInstNotify(copy); + + auto opRange = makeUserRange(range); + ValueLifetimeAnalysis lifetimeAnalysis(copy, opRange); + ValueLifetimeAnalysis::Frontier frontier; + bool result = lifetimeAnalysis.computeFrontier( + frontier, ValueLifetimeAnalysis::DontModifyCFG, &ctx.deBlocks); + assert(result); + + while (!frontier.empty()) { + auto *insertPt = frontier.pop_back_val(); + SILBuilderWithScope frontierBuilder(insertPt); + auto *dvi = frontierBuilder.createDestroyValue(insertPt->getLoc(), copy); + if (ctx.newInstNotify) + ctx.newInstNotify(dvi); + } + + return copy; +} + +template +BeginBorrowInst * +OwnershipLifetimeExtender::copyBorrowAndExtendForRAUW(SILValue newValue, + RangeTy useRange) { + auto *newValInsertPt = newValue->getDefiningInsertionPoint(); + assert(newValInsertPt); + + CopyValueInst *copy = nullptr; + BeginBorrowInst *borrow = nullptr; + if (!isa(newValue)) { + SILBuilderWithScope::insertAfter(newValInsertPt, [&](SILBuilder &builder) { + auto loc = builder.getInsertionPointLoc(); + copy = builder.createCopyValue(loc, newValue); + borrow = builder.createBeginBorrow(loc, copy); + }); + } else { + SILBuilderWithScope 
builder(newValInsertPt); + auto loc = newValInsertPt->getLoc(); + copy = builder.createCopyValue(loc, newValue); + borrow = builder.createBeginBorrow(loc, copy); + } + assert(copy && borrow); + + auto opRange = makeUserRange(useRange); + ValueLifetimeAnalysis lifetimeAnalysis(copy, opRange); + ValueLifetimeAnalysis::Frontier frontier; + bool result = lifetimeAnalysis.computeFrontier( + frontier, ValueLifetimeAnalysis::DontModifyCFG, &ctx.deBlocks); + assert(result); + + while (!frontier.empty()) { + auto *insertPt = frontier.pop_back_val(); + SILBuilderWithScope frontierBuilder(insertPt); + auto *ebi = frontierBuilder.createEndBorrow(insertPt->getLoc(), borrow); + auto *dvi = frontierBuilder.createDestroyValue(insertPt->getLoc(), copy); + if (ctx.newInstNotify) { + ctx.newInstNotify(ebi); + ctx.newInstNotify(dvi); + } + } + + return borrow; +} + +/// We are copy/borrowing new value to be over the same lifetime as old +/// value. We know that oldValue is dominated by newValue. +BeginBorrowInst * +OwnershipLifetimeExtender::copyBorrowAndExtendForLifetimeEndingRAUW( + SILValue newValue, SILValue oldValue) { + auto *newValInsertPt = newValue->getDefiningInsertionPoint(); + assert(newValInsertPt); + + CopyValueInst *copy = nullptr; + if (!isa(newValue)) { + SILBuilderWithScope::insertAfter(newValInsertPt, [&](SILBuilder &builder) { + auto loc = builder.getInsertionPointLoc(); + copy = builder.createCopyValue(loc, newValue); + }); + } else { + SILBuilderWithScope builder(newValInsertPt); + auto loc = newValInsertPt->getLoc(); + copy = builder.createCopyValue(loc, newValue); + } + + // Then insert the begin_borrow at the old value point. We are going to RAUW + // this in our caller. 
+ auto *oldValInsertPt = oldValue->getDefiningInsertionPoint(); + assert(oldValInsertPt); + auto *borrow = SILBuilderWithScope(oldValInsertPt) + .createBeginBorrow(oldValInsertPt->getLoc(), copy); + if (ctx.newInstNotify) { + ctx.newInstNotify(borrow); + } + + ValueLifetimeAnalysis lifetimeAnalysis(copy, oldValue->getUses()); + decltype(lifetimeAnalysis)::Frontier frontier; + bool result = lifetimeAnalysis.computeFrontier( + frontier, ValueLifetimeAnalysis::DontModifyCFG, &ctx.deBlocks); + assert(result); + + while (!frontier.empty()) { + auto *insertPt = frontier.pop_back_val(); + SILBuilderWithScope frontierBuilder(insertPt); + auto *dvi = frontierBuilder.createDestroyValue(insertPt->getLoc(), copy); + if (ctx.newInstNotify) { + ctx.newInstNotify(dvi); + } + } + + return borrow; +} + +//===----------------------------------------------------------------------===// +// Ownership Fixup RAUW +//===----------------------------------------------------------------------===// + +/// Given an old value and a new value, lifetime extend new value as appropriate +/// so we can RAUW new value with old value and preserve ownership +/// invariants. We leave fixing up the lifetime of old value to our caller. +namespace { + +struct OwnershipRAUWUtility { + SingleValueInstruction *oldValue; + SILValue newValue; + OwnershipFixupContext &ctx; + + void rewriteReborrows(SILValue borrow, ArrayRef reborrows); + void eliminateReborrowsOfRecursiveBorrows( + ArrayRef transitiveReborrows, + SmallVectorImpl &usePoints); + + SILBasicBlock::iterator handleUnowned(); + + SILBasicBlock::iterator handleGuaranteed(); + + SILBasicBlock::iterator perform(); + + /// Insert copies/borrows as appropriate to eliminate any reborrows of + /// borrowed value, given we are going to replace it with newValue. 
+ void eliminateReborrows(BorrowedValue oldBorrowedValue, SILValue newValue);
+
+ OwnershipLifetimeExtender getLifetimeExtender() { return {ctx}; }
+
+ InstModCallbacks getCallbacks() const {
+ return InstModCallbacks(
+ ctx.eraseNotify, ctx.newInstNotify,
+ [](SILValue, SILValue) { llvm_unreachable("unhandled"); },
+ [](SingleValueInstruction *, SILValue) {
+ llvm_unreachable("unhandled");
+ });
+ }
+};
+
+} // anonymous namespace
+
+static void cleanupOperandsBeforeDeletion(
+ SILInstruction *oldValue,
+ std::function<void(SILInstruction *)> newNotifyInst) {
+ SILBuilderWithScope builder(oldValue);
+ for (auto &op : oldValue->getAllOperands()) {
+ if (!op.isLifetimeEnding()) {
+ continue;
+ }
+
+ switch (op.get().getOwnershipKind()) {
+ case OwnershipKind::Any:
+ llvm_unreachable("Invalid ownership for value");
+ case OwnershipKind::Owned: {
+ auto *dvi = builder.createDestroyValue(oldValue->getLoc(), op.get());
+ if (newNotifyInst)
+ newNotifyInst(dvi);
+ continue;
+ }
+ case OwnershipKind::Guaranteed: {
+ // Should only happen once we model destructures as true reborrows.
+ auto *ebi = builder.createEndBorrow(oldValue->getLoc(), op.get());
+ if (newNotifyInst)
+ newNotifyInst(ebi);
+ continue;
+ }
+ case OwnershipKind::None:
+ continue;
+ case OwnershipKind::Unowned:
+ llvm_unreachable("Unowned object can never be consumed?!");
+ }
+ llvm_unreachable("Covered switch isn't covered");
+ }
+}
+
+static SILPhiArgument *
+insertOwnedBaseValueAlongBranchEdge(BranchInst *bi, SILValue innerCopy,
+ const InstModCallbacks &callbacks) {
+ auto *destBB = bi->getDestBB();
+ // We need to create the phi argument before calling addNewEdgeValueToBranch
+ // since it checks that the destination block has enough arguments for the
+ // argument.
+ auto *phiArg = + destBB->createPhiArgument(innerCopy->getType(), OwnershipKind::Owned); + addNewEdgeValueToBranch(bi, destBB, innerCopy, callbacks); + + // Grab our predecessor blocks, ignoring us, add to the branch edge an + // undef corresponding to our value. + // + // We gather all predecessor blocks in a separate array to avoid + // iterator invalidation issues as we mess with terminators. + SmallVector predecessorBlocks( + destBB->getPredecessorBlocks()); + + for (auto *predBlock : predecessorBlocks) { + if (predBlock == innerCopy->getParentBlock()) + continue; + addNewEdgeValueToBranch( + predBlock->getTerminator(), destBB, + SILUndef::get(innerCopy->getType(), *destBB->getParent()), callbacks); + } + + return phiArg; +} + +static void getAllBorrowedValueUsePoints( + SILValue value, SmallVectorImpl &usePoints, + SmallVectorImpl &reborrowPoints) { + assert(value.getOwnershipKind() == OwnershipKind::Guaranteed); + + unsigned firstOffset = usePoints.size(); + llvm::copy(value->getUses(), std::back_inserter(usePoints)); + + if (usePoints.size() == firstOffset) + return; + + // NOTE: Use points resizes in this loop so usePoints.size() may be + // different every time. + for (unsigned i = firstOffset; i < usePoints.size(); ++i) { + if (auto fOperand = ForwardingOperand::get(usePoints[i])) { + fOperand->visitForwardedValues([&](SILValue transitiveValue) { + for (auto *transitiveUse : transitiveValue->getUses()) + usePoints.push_back(transitiveUse); + return true; + }); + continue; + } + + if (auto borrowingOp = BorrowingOperand::get(usePoints[i])) { + // If we have a reborrow, we have no further work to do, our reborrow is + // already a use and we will handle the reborrow separately. + if (borrowingOp->isReborrow()) + continue; + + // Otherwise, try to grab additional end scope instructions to find more + // liveness info. Stash any reborrow uses so that we can eliminate the + // reborrow before we are done processing. 
+ borrowingOp->visitLocalEndScopeUses([&](Operand *scopeEndingUse) { + if (auto scopeEndingBorrowingOp = + BorrowingOperand::get(scopeEndingUse)) { + if (scopeEndingBorrowingOp->isReborrow()) { + reborrowPoints.push_back(scopeEndingUse); + return true; + } + } + usePoints.push_back(scopeEndingUse); + return true; + }); + + // Now break up all of the reborrows + continue; + } + + // If our base guaranteed value does not have any consuming uses (consider + // function arguments), we need to be sure to include interior pointer + // operands since we may not get a use from a end_scope instruction. + if (auto intPtrOperand = InteriorPointerOperand::get(usePoints[i])) { + intPtrOperand->getImplicitUses(usePoints); + continue; + } + } +} + +void OwnershipRAUWUtility::eliminateReborrowsOfRecursiveBorrows( + ArrayRef transitiveReborrows, + SmallVectorImpl &usePoints) { + SmallVector, 8> + baseBorrowedValuePair; + + // Ok, we have transitive reborrows. + for (auto borrowingOperand : transitiveReborrows) { + // We eliminate the reborrow by creating a new copy+borrow at the reborrow + // edge from the base value and using that for the reborrow instead of the + // actual value. We of course insert an end_borrow for our original incoming + // value. + SILValue value = borrowingOperand->get(); + auto *bi = cast(borrowingOperand->getUser()); + SILBuilderWithScope reborrowBuilder(bi); + auto *innerCopy = reborrowBuilder.createCopyValue(bi->getLoc(), value); + auto *innerBorrow = + reborrowBuilder.createBeginBorrow(bi->getLoc(), innerCopy); + auto *outerEndBorrow = reborrowBuilder.createEndBorrow(bi->getLoc(), value); + if (ctx.newInstNotify) { + ctx.newInstNotify(innerCopy); + ctx.newInstNotify(innerBorrow); + ctx.newInstNotify(outerEndBorrow); + } + + // Then set our borrowing operand to take our innerBorrow instead of value + // (whose lifetime we just ended). 
+ borrowingOperand->set(innerBorrow); + // Add our outer end borrow as a use point to make sure that we extend our + // base value to this point. + usePoints.push_back(&outerEndBorrow->getAllOperands()[0]); + + // Then check if in our destination block, we have further reborrows. If we + // do, we need to recursively process them. + auto *borrowedArg = + const_cast(bi->getArgForOperand(borrowingOperand)); + auto *baseArg = + insertOwnedBaseValueAlongBranchEdge(bi, innerCopy, getCallbacks()); + baseBorrowedValuePair.emplace_back(baseArg, borrowedArg); + } + + // Now recursively update all further reborrows... + while (!baseBorrowedValuePair.empty()) { + SILPhiArgument *baseArg; + SILPhiArgument *borrowedArg; + std::tie(baseArg, borrowedArg) = baseBorrowedValuePair.pop_back_val(); + + for (auto *use : borrowedArg->getConsumingUses()) { + // If our consuming use is an end of scope marker, we need to end + // the lifetime of our base arg. + if (isEndOfScopeMarker(use->getUser())) { + SILBuilderWithScope::insertAfter(use->getUser(), [&](SILBuilder &b) { + auto *dvi = b.createDestroyValue(b.getInsertionPointLoc(), baseArg); + if (ctx.newInstNotify) + ctx.newInstNotify(dvi); + }); + continue; + } + + // Otherwise, we have a reborrow. For now our reborrows must be + // phis. Add our owned value as a new argument of that phi along our + // edge and undef along all other edges. + auto borrowingOp = *BorrowingOperand::get(use); + auto *brInst = cast(borrowingOp.op->getUser()); + auto *newBorrowedPhi = brInst->getArgForOperand(borrowingOp); + auto *newBasePhi = + insertOwnedBaseValueAlongBranchEdge(brInst, baseArg, getCallbacks()); + baseBorrowedValuePair.emplace_back(newBasePhi, newBorrowedPhi); + } + } +} + +void OwnershipRAUWUtility::rewriteReborrows( + SILValue newBorrowedValue, ArrayRef foundReborrows) { + // Each initial reborrow that we have is a use of oldValue, so we know + // that copy should be valid at the reborrow. 
+ SmallVector, 8> + baseBorrowedValuePair; + for (auto reborrow : foundReborrows) { + auto *bi = cast(reborrow.op->getUser()); + SILBuilderWithScope reborrowBuilder(bi); + auto *innerCopy = + reborrowBuilder.createCopyValue(bi->getLoc(), newBorrowedValue); + auto *innerBorrow = + reborrowBuilder.createBeginBorrow(bi->getLoc(), innerCopy); + auto *outerEndBorrow = + reborrowBuilder.createEndBorrow(bi->getLoc(), reborrow.op->get()); + if (ctx.newInstNotify) { + ctx.newInstNotify(innerCopy); + ctx.newInstNotify(innerBorrow); + ctx.newInstNotify(outerEndBorrow); + } + + reborrow->set(innerBorrow); + + auto *borrowedArg = + const_cast(bi->getArgForOperand(reborrow.op)); + auto *baseArg = + insertOwnedBaseValueAlongBranchEdge(bi, innerCopy, getCallbacks()); + baseBorrowedValuePair.emplace_back(baseArg, borrowedArg); + } + + // Now, follow through all chains of reborrows. + while (!baseBorrowedValuePair.empty()) { + SILPhiArgument *baseArg; + SILPhiArgument *borrowedArg; + std::tie(baseArg, borrowedArg) = baseBorrowedValuePair.pop_back_val(); + + for (auto *use : borrowedArg->getConsumingUses()) { + // If our consuming use is an end of scope marker, we need to end + // the lifetime of our base arg. + if (isEndOfScopeMarker(use->getUser())) { + SILBuilderWithScope::insertAfter(use->getUser(), [&](SILBuilder &b) { + auto *dvi = b.createDestroyValue(b.getInsertionPointLoc(), baseArg); + if (ctx.newInstNotify) + ctx.newInstNotify(dvi); + }); + continue; + } + + // Otherwise, we have a reborrow. For now our reborrows must be + // phis. Add our owned value as a new argument of that phi along our + // edge and undef along all other edges. 
+ auto borrowingOp = *BorrowingOperand::get(use); + auto *brInst = cast(borrowingOp.op->getUser()); + auto *newBorrowedPhi = brInst->getArgForOperand(borrowingOp); + auto *newBasePhi = + insertOwnedBaseValueAlongBranchEdge(brInst, baseArg, getCallbacks()); + baseBorrowedValuePair.emplace_back(newBasePhi, newBorrowedPhi); + } + } +} + +SILBasicBlock::iterator OwnershipRAUWUtility::handleUnowned() { + switch (newValue.getOwnershipKind()) { + case OwnershipKind::None: + llvm_unreachable("Should have been handled elsewhere"); + case OwnershipKind::Any: + llvm_unreachable("Invalid for values"); + case OwnershipKind::Unowned: + // An unowned value can always be RAUWed with another unowned value. + return replaceAllUsesAndEraseInner(oldValue, newValue, ctx.eraseNotify); + case OwnershipKind::Guaranteed: { + // If we have an unowned value that we want to replace with a guaranteed + // value, we need to ensure that the guaranteed value is live at all use + // points of the unowned value. If so, just replace and continue. + // + // TODO: Implement this. + + // Otherwise, we need to lifetime extend the borrow over all of the use + // points. To do so, we copy the value, borrow it, insert an unchecked + // ownership conversion to unowned at oldValue and then RAUW. + // + // We need to insert the conversion to ensure that we do not violate + // ownership propagation rules of forwarding insts. 
+ SmallVector oldValueUses(oldValue->getUses()); + for (auto *use : oldValueUses) { + if (auto *ti = dyn_cast(use->getUser())) { + if (ti->isFunctionExiting()) { + SILBuilderWithScope builder(ti); + auto *newInst = builder.createUncheckedOwnershipConversion( + ti->getLoc(), use->get(), OwnershipKind::Unowned); + if (ctx.newInstNotify) + ctx.newInstNotify(newInst); + use->set(newInst); + } + } + } + auto extender = getLifetimeExtender(); + SILValue borrow = + extender.copyBorrowAndExtendForRAUW(newValue, oldValue->getUses()); + SILBuilderWithScope builder(oldValue); + auto *newInst = builder.createUncheckedOwnershipConversion( + oldValue->getLoc(), borrow, OwnershipKind::Unowned); + if (ctx.newInstNotify) + ctx.newInstNotify(newInst); + return replaceAllUsesAndEraseInner(oldValue, newInst, ctx.eraseNotify); + } + case OwnershipKind::Owned: { + // If we have an unowned value that we want to replace with an owned value, + // we first check if the owned value is live over all use points of the old + // value. If so, just RAUW and continue. + // + // TODO: Implement this. + + // Otherwise, insert a copy of the owned value and lifetime extend that over + // all uses of the value and then RAUW. + SmallVector oldValueUses(oldValue->getUses()); + for (auto *use : oldValueUses) { + if (auto *ti = dyn_cast(use->getUser())) { + if (ti->isFunctionExiting()) { + SILBuilderWithScope builder(ti); + // We insert this to ensure that we can extend our owned value's + // lifetime to before the function end point. 
+ auto *newInst = builder.createUncheckedOwnershipConversion( + ti->getLoc(), use->get(), OwnershipKind::Unowned); + if (ctx.newInstNotify) + ctx.newInstNotify(newInst); + use->set(newInst); + } + } + } + auto extender = getLifetimeExtender(); + SILValue copy = extender.copyAndExtendForNonLifetimeEndingRAUW( + newValue, oldValue->getUses()); + SILBuilderWithScope builder(oldValue); + auto *newInst = builder.createUncheckedOwnershipConversion( + oldValue->getLoc(), copy, OwnershipKind::Unowned); + if (ctx.newInstNotify) + ctx.newInstNotify(newInst); + auto result = + replaceAllUsesAndEraseInner(oldValue, newInst, ctx.eraseNotify); + return result; + } + } + llvm_unreachable("covered switch isn't covered?!"); +} + +SILBasicBlock::iterator OwnershipRAUWUtility::handleGuaranteed() { + // If we want to replace a guaranteed value with a value of some other + // ownership whose def dominates our guaranteed value. We first see if all + // uses of the old guaranteed value are within the lifetime of the new + // guaranteed value. If so, we can just RAUW and move on. + // + // TODO: Implement this. + // + // Otherwise, we need to actually modify the IR. We first always first + // lifetime extend newValue to oldValue's transitive uses to set our + // workspace. + SmallVector usePoints; + SmallVector recursiveBorrowScopeReborrows; + getAllBorrowedValueUsePoints(oldValue, usePoints, + recursiveBorrowScopeReborrows); + + // If we have any transitive reborrows on sub-borrows. + if (recursiveBorrowScopeReborrows.size()) + eliminateReborrowsOfRecursiveBorrows(recursiveBorrowScopeReborrows, + usePoints); + + auto extender = getLifetimeExtender(); + SILValue newBorrowedValue = + extender.copyBorrowAndExtendForRAUW>(newValue, + usePoints); + + // Now we need to handle reborrows by eliminating the reborrows from any + // borrowing operands that use old value as well as from oldvalue itself. We + // take advantage of a few properties of reborrows: + // + // 1. 
A reborrow has to be on a BorrowedValue. This ensures that the same + // base value is propagated through chains of reborrows. (In the future + // this may not be true when destructures are introduced as reborrow + // instructions). + // + // 2. Given that, we change each reborrows into new copy+borrow from the + // owned value that we perform at the reborrow use. What is nice about + // this formulation is that it ensures that we are always working with a + // non-dominating copy value, allowing us to force our borrowing value to + // need a base phi argument (the one of our choosing). + if (auto oldValueBorrowedVal = BorrowedValue::get(oldValue)) { + SmallVector foundReborrows; + if (oldValueBorrowedVal->gatherReborrows(foundReborrows)) { + rewriteReborrows(newBorrowedValue, foundReborrows); + } + } + + // Then we need to look and see if our oldValue had any transitive uses that + + // Ok, we now have eliminated any reborrows if we had any. That means that + // the uses of oldValue should be completely within the lifetime of our new + // borrow. + return replaceAllUsesAndEraseInner(oldValue, newBorrowedValue, + ctx.eraseNotify); +} + +SILBasicBlock::iterator OwnershipRAUWUtility::perform() { + assert(oldValue->getFunction()->hasOwnership()); + assert( + OwnershipFixupContext::canFixUpOwnershipForRAUW(oldValue, newValue) && + "Should have checked if can perform this operation before calling it?!"); + // If our new value is just none, we can pass anything to do it so just RAUW + // and return. + // + // NOTE: This handles RAUWing with undef. + if (newValue.getOwnershipKind() == OwnershipKind::None) + return replaceAllUsesAndEraseInner(oldValue, newValue, ctx.eraseNotify); + assert(SILValue(oldValue).getOwnershipKind() != OwnershipKind::None); + + switch (SILValue(oldValue).getOwnershipKind()) { + case OwnershipKind::None: + // If our old value was none and our new value is not, we need to do + // something more complex that we do not support yet, so bail. 
We should + // have not called this function in such a case. + llvm_unreachable("Should have been handled elsewhere"); + case OwnershipKind::Any: + llvm_unreachable("Invalid for values"); + case OwnershipKind::Guaranteed: { + return handleGuaranteed(); + } + case OwnershipKind::Owned: { + // If we have an owned value that we want to replace with a value with any + // other non-None ownership, we need to copy the other value for a + // lifetimeEnding RAUW, then RAUW the value, and insert a destroy_value on + // the original value. + auto extender = getLifetimeExtender(); + SILValue copy = + extender.copyAndExtendForLifetimeEndingRAUW(newValue, oldValue); + cleanupOperandsBeforeDeletion(oldValue, ctx.newInstNotify); + auto result = replaceAllUsesAndEraseInner(oldValue, copy, ctx.eraseNotify); + return result; + } + case OwnershipKind::Unowned: { + return handleUnowned(); + } + } + llvm_unreachable("Covered switch isn't covered?!"); +} + +//===----------------------------------------------------------------------===// +// Ownership Fixup Context +//===----------------------------------------------------------------------===// +// +// Top level entry points to RAUW code. +// +bool OwnershipFixupContext::canFixUpOwnershipForRAUW( + SingleValueInstruction *oldValue, SILValue newValue) { + auto newOwnershipKind = newValue.getOwnershipKind(); + + // If our new kind is ValueOwnershipKind::None, then we are fine. We + // trivially support that. This check also ensures that we can always + // replace any value with a ValueOwnershipKind::None value. + if (newOwnershipKind == OwnershipKind::None) + return true; + + // If we are in Raw SIL, just bail at this point. We do not support + // ownership fixups. + if (oldValue->getModule().getStage() == SILStage::Raw) + return false; + + // If our old ownership kind is ValueOwnershipKind::None and our new kind is + // not, we may need to do more work that has not been implemented yet. So + // bail. 
+ // + // Due to our requirement that types line up, this can only occur given a + // non-trivial typed value with None ownership. This can only happen when + // oldValue is a trivial payloaded or no-payload non-trivially typed + // enum. That doesn't occur that often so we just bail on it today until we + // implement this functionality. + auto oldOwnershipKind = SILValue(oldValue).getOwnershipKind(); + if (oldOwnershipKind != OwnershipKind::None) + return true; + + // Ok, we have an old ownership kind that is OwnershipKind::None and a new + // ownership kind that is not OwnershipKind::None. In that case, for now, do + // not perform this transform. + return false; +} + +SILBasicBlock::iterator +OwnershipFixupContext::replaceAllUsesAndEraseFixingOwnership( + SingleValueInstruction *oldValue, SILValue newValue) { + OwnershipRAUWUtility utility{oldValue, newValue, *this}; + return utility.perform(); +} diff --git a/lib/Sema/BuilderTransform.cpp b/lib/Sema/BuilderTransform.cpp index fa5770d3bf869..f833a705e452d 100644 --- a/lib/Sema/BuilderTransform.cpp +++ b/lib/Sema/BuilderTransform.cpp @@ -1554,7 +1554,7 @@ Optional TypeChecker::applyResultBuilderBodyTransform( ctx.Diags.diagnose( returnStmts.front()->getReturnLoc(), - diag::result_builder_disabled_by_return, builderType); + diag::result_builder_disabled_by_return_warn, builderType); // Note that one can remove the result builder attribute. 
auto attr = func->getAttachedResultBuilder(); @@ -1671,7 +1671,7 @@ ConstraintSystem::matchResultBuilder( if (InvalidResultBuilderBodies.count(fn)) { (void)recordFix( IgnoreInvalidResultBuilderBody::duringConstraintGeneration( - *this, getConstraintLocator(fn.getBody()))); + *this, getConstraintLocator(fn.getAbstractClosureExpr()))); return getTypeMatchSuccess(); } @@ -1690,13 +1690,25 @@ ConstraintSystem::matchResultBuilder( return getTypeMatchFailure(locator); if (recordFix(IgnoreInvalidResultBuilderBody::duringPreCheck( - *this, getConstraintLocator(fn.getBody())))) + *this, getConstraintLocator(fn.getAbstractClosureExpr())))) return getTypeMatchFailure(locator); return getTypeMatchSuccess(); } case ResultBuilderBodyPreCheck::HasReturnStmt: + // Diagnostic mode means that solver couldn't reach any viable + // solution, so let's diagnose presence of a `return` statement + // in the closure body. + if (shouldAttemptFixes()) { + if (recordFix(IgnoreResultBuilderWithReturnStmts::create( + *this, builderType, + getConstraintLocator(fn.getAbstractClosureExpr())))) + return getTypeMatchFailure(locator); + + return getTypeMatchSuccess(); + } + // If the body has a return statement, suppress the transform but // continue solving the constraint system. 
return None; @@ -1744,7 +1756,7 @@ ConstraintSystem::matchResultBuilder( if (recordFix( IgnoreInvalidResultBuilderBody::duringConstraintGeneration( - *this, getConstraintLocator(fn.getBody())))) + *this, getConstraintLocator(fn.getAbstractClosureExpr())))) return getTypeMatchFailure(locator); return getTypeMatchSuccess(); diff --git a/lib/Sema/CSApply.cpp b/lib/Sema/CSApply.cpp index d1bc104a0c7eb..6df4090fe2c91 100644 --- a/lib/Sema/CSApply.cpp +++ b/lib/Sema/CSApply.cpp @@ -115,10 +115,12 @@ static ConcreteDeclRef generateDeclRefForSpecializedCXXFunctionTemplate( auto newFnType = oldFnType->substGenericArgs(subst); // The constructor type is a function type as follows: // (CType.Type) -> (Generic) -> CType - // But we only want the result of that function type because that is the - // function type with the generic params that need to be substituted: + // And a method's function type is as follows: + // (inout CType) -> (Generic) -> Void + // In either case, we only want the result of that function type because that + // is the function type with the generic params that need to be substituted: // (Generic) -> CType - if (isa(oldDecl)) + if (isa(oldDecl) || oldDecl->isInstanceMember()) newFnType = cast(newFnType->getResult().getPointer()); SmallVector newParams; unsigned i = 0; @@ -159,6 +161,7 @@ static ConcreteDeclRef generateDeclRefForSpecializedCXXFunctionTemplate( /*Async=*/false, oldDecl->hasThrows(), newParamList, newFnType->getResult(), /*GenericParams=*/nullptr, oldDecl->getDeclContext(), specialized); + newFnDecl->setSelfAccessKind(cast(oldDecl)->getSelfAccessKind()); return ConcreteDeclRef(newFnDecl); } @@ -2859,105 +2862,9 @@ namespace { if (!result) return nullptr; - // Check for ambiguous member if the base is an Optional - if (baseTy->getOptionalObjectType()) { - diagnoseAmbiguousNominalMember(baseTy, result); - } - return coerceToType(result, resultTy, cs.getConstraintLocator(expr)); } - - /// Diagnose if the base type is optional, we're referring 
to a nominal - /// type member via the dot syntax and the member name matches - /// Optional.{member name} - void diagnoseAmbiguousNominalMember(Type baseTy, Expr *result) { - if (auto baseTyUnwrapped = baseTy->lookThroughAllOptionalTypes()) { - // Return if the base type doesn't have a nominal type decl - if (!baseTyUnwrapped->getNominalOrBoundGenericNominal()) { - return; - } - - // Otherwise, continue digging - if (auto DSCE = dyn_cast(result)) { - auto calledValue = DSCE->getCalledValue(); - auto isOptional = false; - Identifier memberName; - - // Try cast the assigned value to an enum case - // - // This will always succeed if the base is Optional & the - // assigned case comes from Optional - if (auto EED = dyn_cast(calledValue)) { - isOptional = EED->getParentEnum()->isOptionalDecl(); - memberName = EED->getBaseIdentifier(); - } - - // Return if the enum case doesn't come from Optional - if (!isOptional) { - return; - } - - // Look up the enum case in the unwrapped type to check if it exists - // as a member - auto baseTyNominalDecl = baseTyUnwrapped - ->getNominalOrBoundGenericNominal(); - auto results = TypeChecker::lookupMember( - baseTyNominalDecl->getModuleContext(), baseTyUnwrapped, - DeclNameRef(memberName), defaultMemberLookupOptions); - - // Filter out any functions, instance members, enum cases with - // associated values or variables whose type does not match the - // contextual type. - results.filter([&](const LookupResultEntry entry, bool isOuter) { - if (auto member = entry.getValueDecl()) { - if (isa(member)) - return false; - if (member->isInstanceMember()) - return false; - if (auto EED = dyn_cast(member)) { - return !EED->hasAssociatedValues(); - } - if (auto VD = dyn_cast(member)) { - auto baseType = DSCE->getType()->lookThroughAllOptionalTypes(); - return VD->getInterfaceType()->isEqual(baseType); - } - } - - // Filter out anything that's not one of the above. 
We don't care - // if we have a typealias named 'none' or a struct/class named - // 'none'. - return false; - }); - - if (results.empty()) { - return; - } - auto &de = cs.getASTContext().Diags; - if (auto member = results.front().getValueDecl()) { - // Emit a diagnostic with some fix-its - auto baseTyName = baseTy->getCanonicalType().getString(); - auto baseTyUnwrappedName = baseTyUnwrapped->getString(); - auto loc = DSCE->getLoc(); - auto startLoc = DSCE->getStartLoc(); - de.diagnoseWithNotes( - de.diagnose(loc, swift::diag::optional_ambiguous_case_ref, - baseTyName, baseTyUnwrappedName, memberName.str()), - [&]() { - de.diagnose(loc, - swift::diag::optional_fixit_ambiguous_case_ref) - .fixItInsert(startLoc, "Optional"); - de.diagnose( - loc, - swift::diag::type_fixit_optional_ambiguous_case_ref, - baseTyUnwrappedName, memberName.str()) - .fixItInsert(startLoc, baseTyUnwrappedName); - }); - } - } - } - } - private: /// A list of "suspicious" optional injections that come from /// forced downcasts. 
diff --git a/lib/Sema/CSBindings.cpp b/lib/Sema/CSBindings.cpp index 61dc593265c6e..fa5f99ff77929 100644 --- a/lib/Sema/CSBindings.cpp +++ b/lib/Sema/CSBindings.cpp @@ -22,6 +22,52 @@ using namespace swift; using namespace constraints; +bool ConstraintSystem::PotentialBinding::isViableForJoin() const { + return Kind == AllowedBindingKind::Supertypes && + !BindingType->hasLValueType() && + !BindingType->hasUnresolvedType() && + !BindingType->hasTypeVariable() && + !BindingType->hasHole() && + !BindingType->hasUnboundGenericType() && + !hasDefaultedLiteralProtocol() && + !isDefaultableBinding(); +} + +bool ConstraintSystem::PotentialBindings::isDelayed() const { + if (!DelayedBy.empty()) + return true; + + if (isHole()) { + auto *locator = TypeVar->getImpl().getLocator(); + assert(locator && "a hole without locator?"); + + // Delay resolution of the code completion expression until + // the very end to give it a chance to be bound to some + // contextual type even if it's a hole. + if (locator->directlyAt()) + return true; + + // Delay resolution of the `nil` literal to a hole until + // the very end to give it a change to be bound to some + // other type, just like code completion expression which + // relies solely on contextual information. + if (locator->directlyAt()) + return true; + } + + return false; +} + +bool ConstraintSystem::PotentialBindings::involvesTypeVariables() const { + // This is effectively O(1) right now since bindings are re-computed + // on each step of the solver, but once bindings are computed + // incrementally it becomes more important to double-check that + // any adjacent type variables found previously are still unresolved. + return llvm::any_of(AdjacentVars, [](TypeVariableType *typeVar) { + return !typeVar->getImpl().getFixedType(/*record=*/nullptr); + }); +} + bool ConstraintSystem::PotentialBindings::isPotentiallyIncomplete() const { // Generic parameters are always potentially incomplete. 
if (isGenericParameter()) @@ -77,6 +123,25 @@ bool ConstraintSystem::PotentialBindings::isPotentiallyIncomplete() const { return true; } + // If there is a `bind param` constraint associated with + // current type variable, result should be aware of that + // fact. Binding set might be incomplete until + // this constraint is resolved, because we currently don't + // look-through constraints expect to `subtype` to try and + // find related bindings. + // This only affects type variable that appears one the + // right-hand side of the `bind param` constraint and + // represents result type of the closure body, because + // left-hand side gets types from overload choices. + if (llvm::any_of( + EquivalentTo, + [&](const std::pair &equivalence) { + auto *constraint = equivalence.second; + return constraint->getKind() == ConstraintKind::BindParam && + constraint->getSecondType()->isEqual(TypeVar); + })) + return true; + return false; } @@ -525,19 +590,6 @@ void ConstraintSystem::PotentialBindings::finalize( // very last moment possible, just like generic parameters. auto *locator = TypeVar->getImpl().getLocator(); - // Delay resolution of the code completion expression until - // the very end to give it a chance to be bound to some - // contextual type even if it's a hole. - if (locator->directlyAt()) - FullyBound = true; - - // Delay resolution of the `nil` literal to a hole until - // the very end to give it a change to be bound to some - // other type, just like code completion expression which - // relies solely on contextual information. 
- if (locator->directlyAt()) - FullyBound = true; - // If this type variable is associated with a code completion token // and it failed to infer any bindings let's adjust hole's locator // to point to a code completion token to avoid attempting to "fix" @@ -679,30 +731,30 @@ void ConstraintSystem::PotentialBindings::addPotentialBinding( // If this is a non-defaulted supertype binding, // check whether we can combine it with another // supertype binding by computing the 'join' of the types. - if (binding.Kind == AllowedBindingKind::Supertypes && - !binding.BindingType->hasUnresolvedType() && - !binding.BindingType->hasTypeVariable() && - !binding.BindingType->hasHole() && - !binding.BindingType->hasUnboundGenericType() && - !binding.hasDefaultedLiteralProtocol() && - !binding.isDefaultableBinding() && allowJoinMeet) { - if (lastSupertypeIndex) { - auto &lastBinding = Bindings[*lastSupertypeIndex]; - auto lastType = lastBinding.BindingType->getWithoutSpecifierType(); - auto bindingType = binding.BindingType->getWithoutSpecifierType(); - - auto join = Type::join(lastType, bindingType); - if (join && !(*join)->isAny() && - (!(*join)->getOptionalObjectType() - || !(*join)->getOptionalObjectType()->isAny())) { - // Replace the last supertype binding with the join. We're done. - lastBinding.BindingType = *join; - return; + if (binding.isViableForJoin() && allowJoinMeet) { + bool joined = false; + + auto isAcceptableJoin = [](Type type) { + return !type->isAny() && (!type->getOptionalObjectType() || + !type->getOptionalObjectType()->isAny()); + }; + + for (auto &existingBinding : Bindings) { + if (!existingBinding.isViableForJoin()) + continue; + + auto join = Type::join(existingBinding.BindingType, binding.BindingType); + + if (join && isAcceptableJoin(*join)) { + existingBinding.BindingType = *join; + joined = true; } } - // Record this as the most recent supertype index. 
- lastSupertypeIndex = Bindings.size(); + // If new binding has been joined with at least one of existing + // bindings, there is no reason to include it into the set. + if (joined) + return; } if (auto *literalProtocol = binding.getDefaultedLiteralProtocol()) @@ -746,7 +798,7 @@ bool ConstraintSystem::PotentialBindings::isViable( bool ConstraintSystem::PotentialBindings::favoredOverDisjunction( Constraint *disjunction) const { - if (isHole() || FullyBound) + if (isHole() || isDelayed()) return false; // If this bindings are for a closure and there are no holes, @@ -772,7 +824,7 @@ bool ConstraintSystem::PotentialBindings::favoredOverDisjunction( return boundType->lookThroughAllOptionalTypes()->is(); } - return !InvolvesTypeVariables; + return !involvesTypeVariables(); } ConstraintSystem::PotentialBindings @@ -841,23 +893,21 @@ ConstraintSystem::getPotentialBindingForRelationalConstraint( // of bindings for them until closure's body is opened. if (auto *typeVar = first->getAs()) { if (typeVar->getImpl().isClosureType()) { - result.InvolvesTypeVariables = true; - result.FullyBound = true; + result.DelayedBy.push_back(constraint); return None; } } - // Can't infer anything. - if (result.InvolvesTypeVariables) - return None; - // Check whether both this type and another type variable are // inferable. SmallPtrSet typeVars; findInferableTypeVars(first, typeVars); findInferableTypeVars(second, typeVars); - if (typeVars.size() > 1 && typeVars.count(typeVar)) - result.InvolvesTypeVariables = true; + + if (typeVars.erase(typeVar)) { + result.AdjacentVars.insert(typeVars.begin(), typeVars.end()); + } + return None; } @@ -886,11 +936,27 @@ ConstraintSystem::getPotentialBindingForRelationalConstraint( // If the type we'd be binding to is a dependent member, don't try to // resolve this type variable yet. 
if (type->is()) { - if (!ConstraintSystem::typeVarOccursInType(typeVar, type, - &result.InvolvesTypeVariables)) { - result.FullyBound = true; + SmallVector referencedVars; + type->getTypeVariables(referencedVars); + + bool containsSelf = false; + for (auto *var : referencedVars) { + // Add all type variables encountered in the type except + // to the current type variable. + if (var != typeVar) { + result.AdjacentVars.insert(var); + continue; + } + + containsSelf = true; } + // If inferred type doesn't contain the current type variable, + // let's mark bindings as delayed until dependent member type + // is resolved. + if (!containsSelf) + result.DelayedBy.push_back(constraint); + return None; } @@ -910,15 +976,18 @@ ConstraintSystem::getPotentialBindingForRelationalConstraint( // FIXME: this has a super-inefficient extraneous simplifyType() in it. if (auto boundType = checkTypeOfBinding(typeVar, type)) { type = *boundType; - if (type->hasTypeVariable()) - result.InvolvesTypeVariables = true; + if (type->hasTypeVariable()) { + SmallVector referencedVars; + type->getTypeVariables(referencedVars); + result.AdjacentVars.insert(referencedVars.begin(), referencedVars.end()); + } } else { auto *bindingTypeVar = type->getRValueType()->getAs(); if (!bindingTypeVar) return None; - result.InvolvesTypeVariables = true; + result.AdjacentVars.insert(bindingTypeVar); // If current type variable is associated with a code completion token // it's possible that it doesn't have enough contextual information @@ -1026,7 +1095,7 @@ bool ConstraintSystem::PotentialBindings::infer( // delaying bindings for as long as possible. 
if (isExpr(anchor) && !type->is()) { addPotentialBinding(binding->withType(LValueType::get(type))); - FullyBound = true; + DelayedBy.push_back(constraint); } // If this is a type variable representing closure result, @@ -1046,9 +1115,6 @@ bool ConstraintSystem::PotentialBindings::infer( break; } case ConstraintKind::KeyPathApplication: { - if (FullyBound) - return false; - // If this variable is in the application projected result type, mark the // result as `FullyBound` to ensure we delay binding until we've bound // other type variables in the KeyPathApplication constraint. This ensures @@ -1057,8 +1123,9 @@ bool ConstraintSystem::PotentialBindings::infer( SmallPtrSet typeVars; findInferableTypeVars(cs.simplifyType(constraint->getThirdType()), typeVars); - if (typeVars.count(TypeVar)) - FullyBound = true; + if (typeVars.count(TypeVar)) { + DelayedBy.push_back(constraint); + } break; } @@ -1100,10 +1167,6 @@ bool ConstraintSystem::PotentialBindings::infer( break; case ConstraintKind::Disjunction: - // FIXME: Recurse into these constraints to see whether this - // type variable is fully bound by any of them. - InvolvesTypeVariables = true; - // If there is additional context available via disjunction // associated with closure literal (e.g. coercion to some other // type) let's delay resolving the closure until the disjunction @@ -1111,6 +1174,7 @@ bool ConstraintSystem::PotentialBindings::infer( if (TypeVar->getImpl().isClosureType()) return true; + DelayedBy.push_back(constraint); break; case ConstraintKind::ConformsTo: @@ -1132,50 +1196,46 @@ bool ConstraintSystem::PotentialBindings::infer( case ConstraintKind::ApplicableFunction: case ConstraintKind::DynamicCallableApplicableFunction: case ConstraintKind::BindOverload: { - if (FullyBound && InvolvesTypeVariables) - return false; - - // If this variable is in the left-hand side, it is fully bound. 
- SmallPtrSet typeVars; - findInferableTypeVars(cs.simplifyType(constraint->getFirstType()), - typeVars); - if (typeVars.count(TypeVar)) - FullyBound = true; - - if (InvolvesTypeVariables) - return false; - - // If this and another type variable occur, this result involves - // type variables. - findInferableTypeVars(cs.simplifyType(constraint->getSecondType()), - typeVars); - if (typeVars.size() > 1 && typeVars.count(TypeVar)) - InvolvesTypeVariables = true; + // It's possible that type of member couldn't be determined, + // and if so it would be beneficial to bind member to a hole + // early to propagate that information down to arguments, + // result type of a call that references such a member. + if (cs.shouldAttemptFixes() && TypeVar->getImpl().canBindToHole()) { + if (ConstraintSystem::typeVarOccursInType( + TypeVar, cs.simplifyType(constraint->getSecondType()))) + break; + } + DelayedBy.push_back(constraint); break; } case ConstraintKind::ValueMember: case ConstraintKind::UnresolvedValueMember: - case ConstraintKind::ValueWitness: - // If our type variable shows up in the base type, there's - // nothing to do. - // FIXME: Can we avoid simplification here? - if (ConstraintSystem::typeVarOccursInType( - TypeVar, cs.simplifyType(constraint->getFirstType()), - &InvolvesTypeVariables)) { - return false; + case ConstraintKind::ValueWitness: { + // If current type variable represents a member type of some reference, + // it would be bound once member is resolved either to a actual member + // type or to a hole if member couldn't be found. + auto memberTy = constraint->getSecondType()->castTo(); + + if (memberTy->getImpl().hasRepresentativeOrFixed()) { + if (auto type = memberTy->getImpl().getFixedType(/*record=*/nullptr)) { + // It's possible that member has been bound to some other type variable + // instead of merged with it because it's wrapped in an l-value type. 
+ if (type->getWithoutSpecifierType()->isEqual(TypeVar)) { + DelayedBy.push_back(constraint); + break; + } + } else { + memberTy = memberTy->getImpl().getRepresentative(/*record=*/nullptr); + } } - // If the type variable is in the list of member type - // variables, it is fully bound. - // FIXME: Can we avoid simplification here? - if (ConstraintSystem::typeVarOccursInType( - TypeVar, cs.simplifyType(constraint->getSecondType()), - &InvolvesTypeVariables)) { - FullyBound = true; - } + if (memberTy == TypeVar) + DelayedBy.push_back(constraint); + break; + } case ConstraintKind::OneWayEqual: case ConstraintKind::OneWayBindParam: { @@ -1424,7 +1484,7 @@ TypeVariableBinding::fixForHole(ConstraintSystem &cs) const { // If the whole body is being ignored due to a pre-check failure, // let's not record a fix about result type since there is // just not enough context to infer it without a body. - if (cs.hasFixFor(cs.getConstraintLocator(closure->getBody()), + if (cs.hasFixFor(cs.getConstraintLocator(closure), FixKind::IgnoreInvalidResultBuilderBody)) return None; diff --git a/lib/Sema/CSDiagnostics.cpp b/lib/Sema/CSDiagnostics.cpp index 0f711343d4982..448a10c31f274 100644 --- a/lib/Sema/CSDiagnostics.cpp +++ b/lib/Sema/CSDiagnostics.cpp @@ -964,20 +964,29 @@ bool NoEscapeFuncToTypeConversionFailure::diagnoseParameterUse() const { return true; } -ASTNode MissingForcedDowncastFailure::getAnchor() const { +ASTNode InvalidCoercionFailure::getAnchor() const { auto anchor = FailureDiagnostic::getAnchor(); if (auto *assignExpr = getAsExpr(anchor)) return assignExpr->getSrc(); return anchor; } -bool MissingForcedDowncastFailure::diagnoseAsError() { +bool InvalidCoercionFailure::diagnoseAsError() { auto fromType = getFromType(); auto toType = getToType(); - emitDiagnostic(diag::missing_forced_downcast, fromType, toType) - .highlight(getSourceRange()) - .fixItReplace(getLoc(), "as!"); + emitDiagnostic(diag::cannot_coerce_to_type, fromType, toType); + + if (UseConditionalCast) { + 
emitDiagnostic(diag::missing_optional_downcast) + .highlight(getSourceRange()) + .fixItReplace(getLoc(), "as?"); + } else { + emitDiagnostic(diag::missing_forced_downcast) + .highlight(getSourceRange()) + .fixItReplace(getLoc(), "as!"); + } + return true; } @@ -1051,10 +1060,19 @@ bool MissingExplicitConversionFailure::diagnoseAsError() { if (needsParensOutside) insertAfter += ")"; - auto diagID = - useAs ? diag::missing_explicit_conversion : diag::missing_forced_downcast; - auto diag = emitDiagnostic(diagID, fromType, toType); + auto diagnose = [&]() { + if (useAs) { + return emitDiagnostic(diag::missing_explicit_conversion, fromType, + toType); + } else { + // Emit error diagnostic. + emitDiagnostic(diag::cannot_coerce_to_type, fromType, toType); + // Emit and return note suggesting as! where the fix-it will be placed. + return emitDiagnostic(diag::missing_forced_downcast); + } + }; + auto diag = diagnose(); if (!insertBefore.empty()) { diag.fixItInsert(getSourceRange().Start, insertBefore); } @@ -5744,6 +5762,12 @@ bool ThrowingFunctionConversionFailure::diagnoseAsError() { return true; } +bool AsyncFunctionConversionFailure::diagnoseAsError() { + emitDiagnostic(diag::async_functiontype_mismatch, getFromType(), + getToType()); + return true; +} + bool InOutConversionFailure::diagnoseAsError() { auto *locator = getLocator(); auto path = locator->getPath(); @@ -7052,3 +7076,74 @@ bool ReferenceToInvalidDeclaration::diagnoseAsError() { emitDiagnosticAt(decl, diag::decl_declared_here, decl->getName()); return true; } + +bool InvalidReturnInResultBuilderBody::diagnoseAsError() { + auto *closure = castToExpr(getAnchor()); + + auto returnStmts = TypeChecker::findReturnStatements(closure); + assert(!returnStmts.empty()); + + auto loc = returnStmts.front()->getReturnLoc(); + emitDiagnosticAt(loc, diag::result_builder_disabled_by_return, BuilderType); + + // Note that one can remove all of the return statements. 
+ { + auto diag = emitDiagnosticAt(loc, diag::result_builder_remove_returns); + for (auto returnStmt : returnStmts) + diag.fixItRemove(returnStmt->getReturnLoc()); + } + + return true; +} + +bool MemberMissingExplicitBaseTypeFailure::diagnoseAsError() { + auto UME = castToExpr(getAnchor()); + auto memberName = UME->getName().getBaseIdentifier().str(); + auto &DE = getASTContext().Diags; + auto &solution = getSolution(); + + auto selected = solution.getOverloadChoice(getLocator()); + auto baseType = + resolveType(selected.choice.getBaseType()->getMetatypeInstanceType()); + + SmallVector optionals; + auto baseTyUnwrapped = baseType->lookThroughAllOptionalTypes(optionals); + + if (!optionals.empty()) { + auto baseTyName = baseType->getCanonicalType().getString(); + auto baseTyUnwrappedName = baseTyUnwrapped->getString(); + auto loc = UME->getLoc(); + auto startLoc = UME->getStartLoc(); + + DE.diagnoseWithNotes( + DE.diagnose(loc, diag::optional_ambiguous_case_ref, baseTyName, + baseTyUnwrappedName, memberName), + [&]() { + DE.diagnose(UME->getDotLoc(), diag::optional_fixit_ambiguous_case_ref) + .fixItInsert(startLoc, "Optional"); + DE.diagnose(UME->getDotLoc(), + diag::type_fixit_optional_ambiguous_case_ref, + baseTyUnwrappedName, memberName) + .fixItInsert(startLoc, baseTyUnwrappedName); + }); + } else { + auto baseTypeName = baseType->getCanonicalType().getString(); + auto baseOptionalTypeName = + OptionalType::get(baseType)->getCanonicalType().getString(); + + DE.diagnoseWithNotes( + DE.diagnose(UME->getLoc(), diag::optional_ambiguous_case_ref, + baseTypeName, baseOptionalTypeName, memberName), + [&]() { + DE.diagnose(UME->getDotLoc(), + diag::type_fixit_optional_ambiguous_case_ref, + baseOptionalTypeName, memberName) + .fixItInsert(UME->getDotLoc(), baseOptionalTypeName); + DE.diagnose(UME->getDotLoc(), + diag::type_fixit_optional_ambiguous_case_ref, + baseTypeName, memberName) + .fixItInsert(UME->getDotLoc(), baseTypeName); + }); + } + return true; +} diff --git 
a/lib/Sema/CSDiagnostics.h b/lib/Sema/CSDiagnostics.h index 07c314c7048b3..de707a0b5369d 100644 --- a/lib/Sema/CSDiagnostics.h +++ b/lib/Sema/CSDiagnostics.h @@ -809,6 +809,27 @@ class ThrowingFunctionConversionFailure final : public ContextualFailure { bool diagnoseAsError() override; }; +/// Diagnose failures related to conversion between 'async' function type +/// and a synchronous one e.g. +/// +/// ```swift +/// func foo(_ t: T) async -> Void {} +/// let _: (Int) -> Void = foo // `foo` can't be implictly converted to +/// // synchronous function type `(Int) -> Void` +/// ``` +class AsyncFunctionConversionFailure final : public ContextualFailure { +public: + AsyncFunctionConversionFailure(const Solution &solution, Type fromType, + Type toType, ConstraintLocator *locator) + : ContextualFailure(solution, fromType, toType, locator) { + auto fnType1 = fromType->castTo(); + auto fnType2 = toType->castTo(); + assert(fnType1->isAsync() != fnType2->isAsync()); + } + + bool diagnoseAsError() override; +}; + /// Diagnose failures related attempt to implicitly convert types which /// do not support such implicit converstion. /// "as" or "as!" has to be specified explicitly in cases like that. @@ -1939,12 +1960,15 @@ class ArgumentMismatchFailure : public ContextualFailure { bool diagnoseMisplacedMissingArgument() const; }; -/// Replace a coercion ('as') with a forced checked cast ('as!'). -class MissingForcedDowncastFailure final : public ContextualFailure { +/// Replace a coercion ('as') with a runtime checked cast ('as!' or 'as?'). 
+class InvalidCoercionFailure final : public ContextualFailure { + bool UseConditionalCast; + public: - MissingForcedDowncastFailure(const Solution &solution, Type fromType, - Type toType, ConstraintLocator *locator) - : ContextualFailure(solution, fromType, toType, locator) {} + InvalidCoercionFailure(const Solution &solution, Type fromType, Type toType, + bool useConditionalCast, ConstraintLocator *locator) + : ContextualFailure(solution, fromType, toType, locator), + UseConditionalCast(useConditionalCast) {} ASTNode getAnchor() const override; @@ -2308,6 +2332,55 @@ class ReferenceToInvalidDeclaration final : public FailureDiagnostic { bool diagnoseAsError() override; }; +/// Diagnose use of `return` statements in a body of a result builder. +/// +/// \code +/// struct S : Builder { +/// var foo: some Builder { +/// return EmptyBuilder() +/// } +/// } +/// \endcode +class InvalidReturnInResultBuilderBody final : public FailureDiagnostic { + Type BuilderType; + +public: + InvalidReturnInResultBuilderBody(const Solution &solution, Type builderTy, + ConstraintLocator *locator) + : FailureDiagnostic(solution, locator), BuilderType(builderTy) {} + + bool diagnoseAsError() override; +}; + +/// Diagnose if the base type is optional, we're referring to a nominal +/// type member via the dot syntax and the member name matches +/// Optional.{member_name} or an unresolved `.none` inferred as a static +/// non-optional member base but could be an Optional.none. So we enforce +/// explicit type annotation to avoid ambiguity. +/// +/// \code +/// enum Enum { +/// case bar +/// static var none: Enum { .bar } +/// } +/// let _: Enum? = .none // Base inferred as Optional.none, suggest +/// // explicit type. +/// let _: Enum? = .none // Base inferred as static member Enum.none, +/// // emit warning suggesting explicit type. 
+/// let _: Enum = .none // Ok +/// \endcode +class MemberMissingExplicitBaseTypeFailure final : public FailureDiagnostic { + DeclNameRef Member; + +public: + MemberMissingExplicitBaseTypeFailure(const Solution &solution, + DeclNameRef member, + ConstraintLocator *locator) + : FailureDiagnostic(solution, locator), Member(member) {} + + bool diagnoseAsError() override; +}; + } // end namespace constraints } // end namespace swift diff --git a/lib/Sema/CSFix.cpp b/lib/Sema/CSFix.cpp index 9548272e9d815..a5eaa0cb4ea73 100644 --- a/lib/Sema/CSFix.cpp +++ b/lib/Sema/CSFix.cpp @@ -131,13 +131,14 @@ TreatRValueAsLValue *TreatRValueAsLValue::create(ConstraintSystem &cs, bool CoerceToCheckedCast::diagnose(const Solution &solution, bool asNote) const { - MissingForcedDowncastFailure failure(solution, getFromType(), getToType(), - getLocator()); + InvalidCoercionFailure failure(solution, getFromType(), getToType(), + UseConditionalCast, getLocator()); return failure.diagnose(asNote); } CoerceToCheckedCast *CoerceToCheckedCast::attempt(ConstraintSystem &cs, Type fromType, Type toType, + bool useConditionalCast, ConstraintLocator *locator) { // If any of the types has a type variable, don't add the fix. 
if (fromType->hasTypeVariable() || toType->hasTypeVariable()) @@ -159,7 +160,7 @@ CoerceToCheckedCast *CoerceToCheckedCast::attempt(ConstraintSystem &cs, return nullptr; return new (cs.getAllocator()) - CoerceToCheckedCast(cs, fromType, toType, locator); + CoerceToCheckedCast(cs, fromType, toType, useConditionalCast, locator); } bool TreatArrayLiteralAsDictionary::diagnose(const Solution &solution, @@ -1139,6 +1140,21 @@ DropThrowsAttribute *DropThrowsAttribute::create(ConstraintSystem &cs, DropThrowsAttribute(cs, fromType, toType, locator); } +bool DropAsyncAttribute::diagnose(const Solution &solution, + bool asNote) const { + AsyncFunctionConversionFailure failure(solution, getFromType(), + getToType(), getLocator()); + return failure.diagnose(asNote); +} + +DropAsyncAttribute *DropAsyncAttribute::create(ConstraintSystem &cs, + FunctionType *fromType, + FunctionType *toType, + ConstraintLocator *locator) { + return new (cs.getAllocator()) + DropAsyncAttribute(cs, fromType, toType, locator); +} + bool IgnoreContextualType::diagnose(const Solution &solution, bool asNote) const { ContextualFailure failure(solution, getFromType(), getToType(), getLocator()); @@ -1579,7 +1595,7 @@ bool IgnoreInvalidResultBuilderBody::diagnose(const Solution &solution, return true; // Already diagnosed by `matchResultBuilder`. 
} - auto *S = getAnchor().get(); + auto *S = castToExpr(getAnchor())->getBody(); class PreCheckWalker : public ASTWalker { DeclContext *DC; @@ -1644,3 +1660,113 @@ AllowRefToInvalidDecl::create(ConstraintSystem &cs, ConstraintLocator *locator) { return new (cs.getAllocator()) AllowRefToInvalidDecl(cs, locator); } + +bool IgnoreResultBuilderWithReturnStmts::diagnose(const Solution &solution, + bool asNote) const { + InvalidReturnInResultBuilderBody failure(solution, BuilderType, getLocator()); + return failure.diagnose(asNote); +} + +IgnoreResultBuilderWithReturnStmts * +IgnoreResultBuilderWithReturnStmts::create(ConstraintSystem &cs, Type builderTy, + ConstraintLocator *locator) { + return new (cs.getAllocator()) + IgnoreResultBuilderWithReturnStmts(cs, builderTy, locator); +} + +bool SpecifyBaseTypeForOptionalUnresolvedMember::diagnose( + const Solution &solution, bool asNote) const { + MemberMissingExplicitBaseTypeFailure failure(solution, MemberName, + getLocator()); + return failure.diagnose(asNote); +} + +SpecifyBaseTypeForOptionalUnresolvedMember * +SpecifyBaseTypeForOptionalUnresolvedMember::attempt( + ConstraintSystem &cs, ConstraintKind kind, Type baseTy, + DeclNameRef memberName, FunctionRefKind functionRefKind, + MemberLookupResult result, ConstraintLocator *locator) { + + if (kind != ConstraintKind::UnresolvedValueMember) + return nullptr; + + // None or only one viable candidate, there is no ambiguity. + if (result.ViableCandidates.size() <= 1) + return nullptr; + + // Only diagnose those situations for static members. + if (!baseTy->is()) + return nullptr; + + // Don't diagnose for function members e.g. Foo? = .none(0). 
+ if (functionRefKind != FunctionRefKind::Unapplied) + return nullptr; + + Type underlyingBaseType = baseTy->getMetatypeInstanceType(); + if (!underlyingBaseType->getNominalOrBoundGenericNominal()) + return nullptr; + + if (!underlyingBaseType->getOptionalObjectType()) + return nullptr; + + auto unwrappedType = underlyingBaseType->lookThroughAllOptionalTypes(); + bool allOptionalBaseCandidates = true; + auto filterViableCandidates = + [&](SmallVector &candidates, + SmallVector &viableCandidates, + bool &allOptionalBase) { + for (OverloadChoice choice : candidates) { + if (!choice.isDecl()) + continue; + + auto memberDecl = choice.getDecl(); + if (isa(memberDecl)) + continue; + if (memberDecl->isInstanceMember()) + continue; + + allOptionalBase &= bool(choice.getBaseType() + ->getMetatypeInstanceType() + ->getOptionalObjectType()); + + if (auto EED = dyn_cast(memberDecl)) { + if (!EED->hasAssociatedValues()) + viableCandidates.push_back(choice); + } else if (auto VD = dyn_cast(memberDecl)) { + if (unwrappedType->hasTypeVariable() || + VD->getInterfaceType()->isEqual(unwrappedType)) + viableCandidates.push_back(choice); + } + } + }; + + SmallVector viableCandidates; + filterViableCandidates(result.ViableCandidates, viableCandidates, + allOptionalBaseCandidates); + + // Also none or only one viable candidate after filtering candidates, there is + // no ambiguity. + if (viableCandidates.size() <= 1) + return nullptr; + + // Right now, name lookup only unwraps a single layer of optionality, which + // for cases where base type is a multi-optional type e.g. Foo?? it only + // finds optional base candidates. To produce the correct warning we perform + // an extra lookup on unwrapped type. 
+ if (!allOptionalBaseCandidates) + return new (cs.getAllocator()) + SpecifyBaseTypeForOptionalUnresolvedMember(cs, memberName, locator); + + MemberLookupResult unwrappedResult = + cs.performMemberLookup(kind, memberName, MetatypeType::get(unwrappedType), + functionRefKind, locator, + /*includeInaccessibleMembers*/ false); + SmallVector unwrappedViableCandidates; + filterViableCandidates(unwrappedResult.ViableCandidates, + unwrappedViableCandidates, allOptionalBaseCandidates); + if (unwrappedViableCandidates.empty()) + return nullptr; + + return new (cs.getAllocator()) + SpecifyBaseTypeForOptionalUnresolvedMember(cs, memberName, locator); +} diff --git a/lib/Sema/CSRanking.cpp b/lib/Sema/CSRanking.cpp index 7d637cc8dae46..ffec49862d01a 100644 --- a/lib/Sema/CSRanking.cpp +++ b/lib/Sema/CSRanking.cpp @@ -52,6 +52,9 @@ static StringRef getScoreKindName(ScoreKind kind) { case SK_DisfavoredOverload: return "disfavored overload"; + case SK_UnresolvedMemberViaOptional: + return "unwrapping optional at unresolved member base"; + case SK_ForceUnchecked: return "force of an implicitly unwrapped optional"; diff --git a/lib/Sema/CSSimplify.cpp b/lib/Sema/CSSimplify.cpp index d072fab500299..975fb1bcc04f3 100644 --- a/lib/Sema/CSSimplify.cpp +++ b/lib/Sema/CSSimplify.cpp @@ -1873,9 +1873,19 @@ ConstraintSystem::matchFunctionTypes(FunctionType *func1, FunctionType *func2, } } - // 'async' and non-'async' function types are not compatible. - if (func1->isAsync() != func2->isAsync()) - return getTypeMatchFailure(locator); + // A synchronous function can be a subtype of an 'async' function. + if (func1->isAsync() != func2->isAsync()) { + // Cannot drop 'async'. 
+ if (func1->isAsync() || kind < ConstraintKind::Subtype) { + if (!shouldAttemptFixes()) + return getTypeMatchFailure(locator); + + auto *fix = DropAsyncAttribute::create(*this, func1, func2, + getConstraintLocator(locator)); + if (recordFix(fix)) + return getTypeMatchFailure(locator); + } + } // A non-@noescape function type can be a subtype of a @noescape function // type. @@ -3551,9 +3561,26 @@ bool ConstraintSystem::repairFailures( getConstraintLocator(coercion->getSubExpr()))) return true; - // Repair a coercion ('as') with a forced checked cast ('as!'). - if (auto *coerceToCheckCastFix = CoerceToCheckedCast::attempt( - *this, lhs, rhs, getConstraintLocator(locator))) { + // If the result type of the coercion has an value to optional conversion + // we can instead suggest the conditional downcast as it is safer in + // situations like conditional binding. + auto useConditionalCast = llvm::any_of( + ConstraintRestrictions, + [&](std::tuple restriction) { + ConversionRestrictionKind restrictionKind; + Type type1, type2; + std::tie(type1, type2, restrictionKind) = restriction; + + if (restrictionKind != ConversionRestrictionKind::ValueToOptional) + return false; + + return rhs->isEqual(type1); + }); + + // Repair a coercion ('as') with a runtime checked cast ('as!' or 'as?'). + if (auto *coerceToCheckCastFix = + CoerceToCheckedCast::attempt(*this, lhs, rhs, useConditionalCast, + getConstraintLocator(locator))) { conversionsOrFixes.push_back(coerceToCheckCastFix); return true; } @@ -6750,13 +6777,12 @@ performMemberLookup(ConstraintKind constraintKind, DeclNameRef memberName, // through optional types. // // FIXME: Unify with the above code path. - if (result.ViableCandidates.empty() && - baseObjTy->is() && + if (baseObjTy->is() && constraintKind == ConstraintKind::UnresolvedValueMember) { if (auto objectType = instanceTy->getOptionalObjectType()) { // If we don't have a wrapped type yet, we can't look through the optional // type. 
- if (objectType->getAs()) { + if (objectType->getAs() && result.ViableCandidates.empty()) { MemberLookupResult result; result.OverallResult = MemberLookupResult::Unsolved; return result; @@ -7264,7 +7290,7 @@ ConstraintSystem::SolutionKind ConstraintSystem::simplifyMemberConstraint( } if (!result.UnviableCandidates.empty()) { - // Generate constraints for unvailable choices if they have a fix, + // Generate constraints for unavailable choices if they have a fix, // and disable them by default, they'd get picked up in the "salvage" mode. generateConstraints( candidates, memberTy, result.UnviableCandidates, useDC, locator, @@ -7275,6 +7301,19 @@ ConstraintSystem::SolutionKind ConstraintSystem::simplifyMemberConstraint( }); } + // Attempt to record a warning where the unresolved member could be + // ambiguous with optional member. e.g. + // enum Foo { + // case none + // } + // + // let _: Foo? = .none // Although base is inferred as Optional.none + // it could be also Foo.none. + if (auto *fix = SpecifyBaseTypeForOptionalUnresolvedMember::attempt( + *this, kind, baseObjTy, member, functionRefKind, result, locator)) { + (void)recordFix(fix); + } + if (!candidates.empty()) { addOverloadSet(candidates, locator); return SolutionKind::Solved; @@ -9560,8 +9599,8 @@ ConstraintSystem::simplifyRestrictedConstraintImpl( loc->isLastElement() || loc->isForOptionalTry()) { if (restriction == ConversionRestrictionKind::Superclass) { - if (auto *fix = - CoerceToCheckedCast::attempt(*this, fromType, toType, loc)) + if (auto *fix = CoerceToCheckedCast::attempt( + *this, fromType, toType, /*useConditionalCast*/ false, loc)) return !recordFix(fix, impact); } @@ -10208,7 +10247,8 @@ ConstraintSystem::SolutionKind ConstraintSystem::simplifyFixConstraint( case FixKind::AllowKeyPathWithoutComponents: case FixKind::IgnoreInvalidResultBuilderBody: case FixKind::SpecifyContextualTypeForNil: - case FixKind::AllowRefToInvalidDecl: { + case FixKind::AllowRefToInvalidDecl: + case 
FixKind::SpecifyBaseTypeForOptionalUnresolvedMember: { return recordFix(fix) ? SolutionKind::Error : SolutionKind::Solved; } @@ -10315,7 +10355,7 @@ ConstraintSystem::SolutionKind ConstraintSystem::simplifyFixConstraint( if (auto *fnType1 = type1->getAs()) { // If this is a contextual mismatch between two // function types which we couldn't find a more - // speficit fix for. Let's assume that such types + // specific fix for. Let's assume that such types // are competely disjoint and adjust impact of // the fix accordingly. if (auto *fnType2 = type2->getAs()) { diff --git a/lib/Sema/CSSolver.cpp b/lib/Sema/CSSolver.cpp index 1f4169b1ae347..6b1e67a5f0340 100644 --- a/lib/Sema/CSSolver.cpp +++ b/lib/Sema/CSSolver.cpp @@ -2287,7 +2287,7 @@ void DisjunctionChoice::propagateConversionInfo(ConstraintSystem &cs) const { return; auto bindings = cs.inferBindingsFor(typeVar); - if (bindings.InvolvesTypeVariables || bindings.Bindings.size() != 1) + if (bindings.involvesTypeVariables() || bindings.Bindings.size() != 1) return; auto conversionType = bindings.Bindings[0].BindingType; diff --git a/lib/Sema/ConstraintSystem.cpp b/lib/Sema/ConstraintSystem.cpp index 972e1be8748c6..38ff002a0dc03 100644 --- a/lib/Sema/ConstraintSystem.cpp +++ b/lib/Sema/ConstraintSystem.cpp @@ -2802,6 +2802,11 @@ void ConstraintSystem::resolveOverload(ConstraintLocator *locator, choice.getDecl()->getAttrs().hasAttribute()) { increaseScore(SK_DisfavoredOverload); } + + if (choice.getKind() == OverloadChoiceKind::DeclViaUnwrappedOptional && + locator->isLastElement()) { + increaseScore(SK_UnresolvedMemberViaOptional); + } } Type ConstraintSystem::simplifyTypeImpl(Type type, @@ -4408,9 +4413,16 @@ Solution::getFunctionArgApplyInfo(ConstraintLocator *locator) const { // If we didn't resolve an overload for the callee, we should be dealing // with a call of an arbitrary function expr. 
auto *call = castToExpr(anchor); + rawFnType = getType(call->getFn()); + + // If callee couldn't be resolved due to expression + // issues e.g. it's a reference to an invalid member + // let's just return here. + if (simplifyType(rawFnType)->is()) + return None; + assert(!shouldHaveDirectCalleeOverload(call) && "Should we have resolved a callee for this?"); - rawFnType = getType(call->getFn()); } // Try to resolve the function type by loading lvalues and looking through diff --git a/lib/Sema/DerivedConformanceActor.cpp b/lib/Sema/DerivedConformanceActor.cpp index ad826a7734a01..38d8365c753ff 100644 --- a/lib/Sema/DerivedConformanceActor.cpp +++ b/lib/Sema/DerivedConformanceActor.cpp @@ -48,45 +48,11 @@ static Type getPartialAsyncTaskType(ASTContext &ctx) { return Type(); } -/// Look for the default actor queue type. -static Type getDefaultActorQueueType(DeclContext *dc, SourceLoc loc) { - ASTContext &ctx = dc->getASTContext(); - UnqualifiedLookupOptions options; - options |= UnqualifiedLookupFlags::TypeLookup; - auto desc = UnqualifiedLookupDescriptor( - DeclNameRef(ctx.getIdentifier("_DefaultActorQueue")), dc, loc, options); - auto lookup = - evaluateOrDefault(ctx.evaluator, UnqualifiedLookupRequest{desc}, {}); - for (const auto &result : lookup) { - if (auto typeDecl = dyn_cast(result.getValueDecl())) - return typeDecl->getDeclaredInterfaceType(); - } - - return Type(); -} - -/// Look for the initialization function for the default actor storage. -static FuncDecl *getDefaultActorQueueCreate(DeclContext *dc, SourceLoc loc) { - ASTContext &ctx = dc->getASTContext(); - auto desc = UnqualifiedLookupDescriptor( - DeclNameRef(ctx.getIdentifier("_defaultActorQueueCreate")), dc, loc, - UnqualifiedLookupOptions()); - auto lookup = - evaluateOrDefault(ctx.evaluator, UnqualifiedLookupRequest{desc}, {}); - for (const auto &result : lookup) { - // FIXME: Validate this further, because we're assuming the exact type. 
- if (auto func = dyn_cast(result.getValueDecl())) - return func; - } - - return nullptr; -} - /// Look for the default enqueue operation. -static FuncDecl *getDefaultActorQueueEnqueue(DeclContext *dc, SourceLoc loc) { +static FuncDecl *getDefaultActorEnqueue(DeclContext *dc, SourceLoc loc) { ASTContext &ctx = dc->getASTContext(); auto desc = UnqualifiedLookupDescriptor( - DeclNameRef(ctx.getIdentifier("_defaultActorQueueEnqueuePartialTask")), + DeclNameRef(ctx.Id__defaultActorEnqueue), dc, loc, UnqualifiedLookupOptions()); auto lookup = evaluateOrDefault(ctx.evaluator, UnqualifiedLookupRequest{desc}, {}); @@ -103,19 +69,10 @@ static std::pair deriveBodyActor_enqueuePartialTask( AbstractFunctionDecl *enqueuePartialTask, void *) { // func enqueue(partialTask: PartialAsyncTask) { - // _defaultActorQueueEnqueuePartialTask( - // actor: self, queue: &self.$__actor_storage, partialTask: partialTask) + // _defaultActorEnqueue(partialTask: partialTask, actor: self) // } ASTContext &ctx = enqueuePartialTask->getASTContext(); - - // Dig out the $__actor_storage property. auto classDecl = enqueuePartialTask->getDeclContext()->getSelfClassDecl(); - VarDecl *storageVar = nullptr; - for (auto decl : classDecl->lookupDirect(ctx.Id_actorStorage)) { - storageVar = dyn_cast(decl); - if (storageVar) - break; - } // Produce an empty brace statement on failure. auto failure = [&]() -> std::pair { @@ -124,21 +81,15 @@ deriveBodyActor_enqueuePartialTask( return { body, /*isTypeChecked=*/true }; }; - if (!storageVar) { - classDecl->diagnose( - diag::concurrency_lib_missing, ctx.Id_actorStorage.str()); - return failure(); - } - // Call into the runtime to enqueue the task. 
- auto fn = getDefaultActorQueueEnqueue(classDecl, classDecl->getLoc()); + auto fn = getDefaultActorEnqueue(classDecl, classDecl->getLoc()); if (!fn) { classDecl->diagnose( - diag::concurrency_lib_missing, "_defaultActorQueueEnqueuePartialTask"); + diag::concurrency_lib_missing, ctx.Id__defaultActorEnqueue.str()); return failure(); } - // Reference to _defaultActorQueueEnqueuePartialTask. + // Reference to _defaultActorEnqueue. auto fnRef = new (ctx) DeclRefExpr(fn, DeclNameLoc(), /*Implicit=*/true); fnRef->setType(fn->getInterfaceType()); @@ -152,24 +103,6 @@ deriveBodyActor_enqueuePartialTask( selfArg = ErasureExpr::create(ctx, selfArg, ctx.getAnyObjectType(), { }); selfArg->setImplicit(); - // Address of the actor storage. - auto module = classDecl->getModuleContext(); - Expr *selfBase = new (ctx) DeclRefExpr( - selfDecl, DeclNameLoc(), /*Implicit=*/true, AccessSemantics::Ordinary, - selfType); - SubstitutionMap storageVarSubs = classDecl->getDeclaredTypeInContext() - ->getMemberSubstitutionMap(module, storageVar); - ConcreteDeclRef storageVarDeclRef(storageVar, storageVarSubs); - Type storageVarType = classDecl->mapTypeIntoContext( - storageVar->getValueInterfaceType()); - Type storageVarRefType = LValueType::get(storageVarType); - Expr *storageVarRefExpr = new (ctx) MemberRefExpr( - selfBase, SourceLoc(), storageVarDeclRef, DeclNameLoc(), - /*Implicit=*/true); - storageVarRefExpr->setType(storageVarRefType); - storageVarRefExpr = new (ctx) InOutExpr( - SourceLoc(), storageVarRefExpr, storageVarType, /*isImplicit=*/true); - // The partial asynchronous task. auto partialTaskParam = enqueuePartialTask->getParameters()->get(0); Expr *partialTask = new (ctx) DeclRefExpr( @@ -180,9 +113,8 @@ deriveBodyActor_enqueuePartialTask( // Form the call itself. 
auto call = CallExpr::createImplicit( - ctx, fnRef, { selfArg, storageVarRefExpr, partialTask }, - { ctx.getIdentifier("actor"), ctx.getIdentifier("queue"), - ctx.Id_partialTask }); + ctx, fnRef, { partialTask, selfArg }, + { ctx.Id_partialTask, ctx.getIdentifier("actor") }); call->setType(fn->getResultInterfaceType()); call->setThrows(false); @@ -204,21 +136,6 @@ static ValueDecl *deriveActor_enqueuePartialTask(DerivedConformance &derived) { } auto parentDC = derived.getConformanceContext(); - Type defaultActorQueueType = getDefaultActorQueueType( - parentDC, derived.ConformanceDecl->getLoc()); - if (!defaultActorQueueType) { - derived.Nominal->diagnose( - diag::concurrency_lib_missing, "_DefaultActorQueue"); - return nullptr; - } - - auto actorStorageCreateFn = getDefaultActorQueueCreate( - parentDC, derived.ConformanceDecl->getLoc()); - if (!actorStorageCreateFn) { - derived.Nominal->diagnose( - diag::concurrency_lib_missing, "_defaultActorQueueCreate"); - return nullptr; - } // Partial task parameter to enqueue(partialTask:). auto partialTaskParamDecl = new (ctx) ParamDecl( @@ -240,44 +157,8 @@ static ValueDecl *deriveActor_enqueuePartialTask(DerivedConformance &derived) { func->getAttrs().add(new (ctx) ActorIndependentAttr( ActorIndependentKind::Unsafe, /*IsImplicit=*/true)); - // Actor storage property and its initialization. - auto actorStorage = new (ctx) VarDecl( - /*isStatic=*/false, VarDecl::Introducer::Var, SourceLoc(), - ctx.Id_actorStorage, parentDC); - actorStorage->setInterfaceType(defaultActorQueueType); - actorStorage->setImplicit(); - actorStorage->setAccess(AccessLevel::Private); - actorStorage->getAttrs().add(new (ctx) FinalAttr(/*Implicit=*/true)); - - // Pattern binding to initialize the actor storage. - Pattern *actorStoragePattern = NamedPattern::createImplicit( - ctx, actorStorage); - actorStoragePattern = TypedPattern::createImplicit( - ctx, actorStoragePattern, defaultActorQueueType); - - // Initialization expression. 
- // FIXME: We want the equivalent of type(of: self) here, but we cannot refer - // to self, so for now we use the static type instead. - Type nominalType = derived.Nominal->getDeclaredTypeInContext(); - Expr *metatypeArg = TypeExpr::createImplicit(nominalType, ctx); - Type anyObjectMetatype = ExistentialMetatypeType::get(ctx.getAnyObjectType()); - metatypeArg = ErasureExpr::create(ctx, metatypeArg, anyObjectMetatype, { }); - Expr *actorStorageCreateFnRef = new (ctx) DeclRefExpr( - actorStorageCreateFn, DeclNameLoc(), /*Implicit=*/true); - actorStorageCreateFnRef->setType(actorStorageCreateFn->getInterfaceType()); - - auto actorStorageInit = CallExpr::createImplicit( - ctx, actorStorageCreateFnRef, { metatypeArg}, { Identifier() }); - actorStorageInit->setType(actorStorageCreateFn->getResultInterfaceType()); - actorStorageInit->setThrows(false); - - auto actorStoragePatternBinding = PatternBindingDecl::createImplicit( - ctx, StaticSpellingKind::None, actorStoragePattern, actorStorageInit, - parentDC); - actorStoragePatternBinding->setInitializerChecked(0); - derived.addMembersToConformanceContext( - { func, actorStorage, actorStoragePatternBinding }); + { func }); return func; } diff --git a/lib/Sema/DerivedConformanceDifferentiable.cpp b/lib/Sema/DerivedConformanceDifferentiable.cpp index c56f3e58753b3..ae5ea062fc7d9 100644 --- a/lib/Sema/DerivedConformanceDifferentiable.cpp +++ b/lib/Sema/DerivedConformanceDifferentiable.cpp @@ -17,6 +17,8 @@ #include "CodeSynthesis.h" #include "TypeChecker.h" +#include "TypeCheckType.h" +#include "llvm/ADT/SmallPtrSet.h" #include "swift/AST/AutoDiff.h" #include "swift/AST/Decl.h" #include "swift/AST/Expr.h" @@ -627,6 +629,49 @@ deriveDifferentiable_zeroTangentVectorInitializer(DerivedConformance &derived) { return propDecl; } +/// Pushes all the protocols inherited, directly or transitively, by `decl` to `protos`. +/// +/// Precondition: `decl` is a nominal type decl or an extension decl. 
+void getInheritedProtocols(Decl *decl, SmallPtrSetImpl &protos) { + ArrayRef inheritedTypeLocs; + if (auto *nominalDecl = dyn_cast(decl)) + inheritedTypeLocs = nominalDecl->getInherited(); + else if (auto *extDecl = dyn_cast(decl)) + inheritedTypeLocs = extDecl->getInherited(); + else + llvm_unreachable("conformance is not a nominal or an extension"); + + std::function handleInheritedType; + + auto handleProto = [&](ProtocolType *proto) -> void { + proto->getDecl()->walkInheritedProtocols([&](ProtocolDecl *p) -> TypeWalker::Action { + protos.insert(p); + return TypeWalker::Action::Continue; + }); + }; + + auto handleProtoComp = [&](ProtocolCompositionType *comp) -> void { + for (auto ty : comp->getMembers()) + handleInheritedType(ty); + }; + + handleInheritedType = [&](Type ty) -> void { + if (auto *proto = ty->getAs()) + handleProto(proto); + else if (auto *comp = ty->getAs()) + handleProtoComp(comp); + }; + + for (auto loc : inheritedTypeLocs) { + if (loc.getTypeRepr()) + handleInheritedType(TypeResolution::forStructural( + cast(decl), None, /*unboundTyOpener*/ nullptr) + .resolveType(loc.getTypeRepr())); + else + handleInheritedType(loc.getType()); + } +} + /// Return associated `TangentVector` struct for a nominal type, if it exists. /// If not, synthesize the struct. static StructDecl * @@ -646,23 +691,46 @@ getOrSynthesizeTangentVectorStruct(DerivedConformance &derived, Identifier id) { } // Otherwise, synthesize a new struct. - auto *diffableProto = C.getProtocol(KnownProtocolKind::Differentiable); - auto diffableType = TypeLoc::withoutLoc(diffableProto->getDeclaredInterfaceType()); - auto *addArithProto = C.getProtocol(KnownProtocolKind::AdditiveArithmetic); - auto addArithType = TypeLoc::withoutLoc(addArithProto->getDeclaredInterfaceType()); - // By definition, `TangentVector` must conform to `Differentiable` and - // `AdditiveArithmetic`. 
- SmallVector inherited{diffableType, addArithType}; + // Compute `tvDesiredProtos`, the set of protocols that the new `TangentVector` struct must + // inherit, by collecting all the `TangentVector` conformance requirements imposed by the + // protocols that `derived.ConformanceDecl` inherits. + // + // Note that, for example, this will always find `AdditiveArithmetic` and `Differentiable` because + // the `Differentiable` protocol itself requires that its `TangentVector` conforms to + // `AdditiveArithmetic` and `Differentiable`. + llvm::SmallPtrSet tvDesiredProtos; + llvm::SmallPtrSet conformanceInheritedProtos; + getInheritedProtocols(derived.ConformanceDecl, conformanceInheritedProtos); + auto *diffableProto = C.getProtocol(KnownProtocolKind::Differentiable); + auto *tvAssocType = diffableProto->getAssociatedType(C.Id_TangentVector); + for (auto proto : conformanceInheritedProtos) { + for (auto req : proto->getRequirementSignature()) { + if (req.getKind() != RequirementKind::Conformance) + continue; + auto *firstType = req.getFirstType()->getAs(); + if (!firstType || firstType->getAssocType() != tvAssocType) + continue; + auto tvRequiredProto = req.getSecondType()->getAs(); + if (!tvRequiredProto) + continue; + tvDesiredProtos.insert(tvRequiredProto); + } + } + SmallVector tvDesiredProtoTypeLocs; + for (auto *p : tvDesiredProtos) + tvDesiredProtoTypeLocs.push_back(TypeLoc::withoutLoc(p)); // Cache original members and their associated types for later use. 
SmallVector diffProperties; getStoredPropertiesForDifferentiation(nominal, parentDC, diffProperties); + auto synthesizedLoc = derived.ConformanceDecl->getEndLoc(); auto *structDecl = - new (C) StructDecl(SourceLoc(), C.Id_TangentVector, SourceLoc(), - /*Inherited*/ C.AllocateCopy(inherited), + new (C) StructDecl(synthesizedLoc, C.Id_TangentVector, synthesizedLoc, + /*Inherited*/ C.AllocateCopy(tvDesiredProtoTypeLocs), /*GenericParams*/ {}, parentDC); + structDecl->setBraces({synthesizedLoc, synthesizedLoc}); structDecl->setImplicit(); structDecl->copyFormalAccessFrom(nominal, /*sourceIsParentContext*/ true); diff --git a/lib/Sema/MiscDiagnostics.cpp b/lib/Sema/MiscDiagnostics.cpp index 1c8d2dd969578..3cb0bc4d6639d 100644 --- a/lib/Sema/MiscDiagnostics.cpp +++ b/lib/Sema/MiscDiagnostics.cpp @@ -653,9 +653,9 @@ static void diagSyntacticUseRestrictions(const Expr *E, const DeclContext *DC, .fixItInsert(DRE->getStartLoc(), "self."); } - DeclContext *topLevelContext = DC->getModuleScopeContext(); + DeclContext *topLevelSubcontext = DC->getModuleScopeContext(); auto descriptor = UnqualifiedLookupDescriptor( - DeclNameRef(VD->getBaseName()), topLevelContext, SourceLoc()); + DeclNameRef(VD->getBaseName()), topLevelSubcontext, SourceLoc()); auto lookup = evaluateOrDefault(Ctx.evaluator, UnqualifiedLookupRequest{descriptor}, {}); diff --git a/lib/Sema/PreCheckExpr.cpp b/lib/Sema/PreCheckExpr.cpp index 4cae305fc7ede..3b2f1e04b3302 100644 --- a/lib/Sema/PreCheckExpr.cpp +++ b/lib/Sema/PreCheckExpr.cpp @@ -1056,12 +1056,49 @@ namespace { if (isa(parent)) return finish(true, expr); + SourceLoc lastInnerParenLoc; + // Unwrap to the outermost paren in the sequence. + if (isa(parent)) { + for (;;) { + auto nextParent = parents.find(parent); + if (nextParent == parents.end()) + break; + + // e.g. `foo((&bar), x: ...)` + if (isa(nextParent->second)) { + lastInnerParenLoc = cast(parent)->getLParenLoc(); + parent = nextParent->second; + break; + } + + // e.g. 
`foo(((&bar))` + if (isa(nextParent->second)) { + lastInnerParenLoc = cast(parent)->getLParenLoc(); + parent = nextParent->second; + continue; + } + + break; + } + } + if (isa(parent) || isa(parent)) { auto call = parents.find(parent); if (call != parents.end()) { if (isa(call->getSecond()) || - isa(call->getSecond())) + isa(call->getSecond())) { + // If outermost paren is associated with a call or + // a member reference, it might be valid to have `&` + // before all of the parens. + if (lastInnerParenLoc.isValid()) { + auto &DE = getASTContext().Diags; + auto diag = DE.diagnose(expr->getStartLoc(), + diag::extraneous_address_of); + diag.fixItExchange(expr->getLoc(), lastInnerParenLoc); + } + return finish(true, expr); + } if (isa(call->getSecond())) { getASTContext().Diags.diagnose( diff --git a/lib/Sema/TypeCheckAccess.cpp b/lib/Sema/TypeCheckAccess.cpp index 37f0e007ea9c2..59ccf4f94b74c 100644 --- a/lib/Sema/TypeCheckAccess.cpp +++ b/lib/Sema/TypeCheckAccess.cpp @@ -22,6 +22,7 @@ #include "swift/AST/ASTWalker.h" #include "swift/AST/DiagnosticsSema.h" #include "swift/AST/ExistentialLayout.h" +#include "swift/AST/Import.h" #include "swift/AST/Pattern.h" #include "swift/AST/ParameterList.h" #include "swift/AST/TypeCheckRequests.h" @@ -1439,7 +1440,6 @@ class UsableFromInlineChecker : public AccessControlCheckerBase, } } }; - } // end anonymous namespace /// Returns the kind of origin, implementation-only import or SPI declaration, @@ -1459,6 +1459,40 @@ swift::getDisallowedOriginKind(const Decl *decl, if (where.isSPI()) downgradeToWarning = DowngradeToWarning::Yes; + // Even if the current module is @_implementationOnly, Swift should + // not report an error in the cases where the decl is also exported from + // a non @_implementationOnly module. Thus, we check to see if there is + // a visible access path to the Clang decl, and only error out in case + // there is none. 
+ auto filter = ModuleDecl::ImportFilter( + {ModuleDecl::ImportFilterKind::Exported, + ModuleDecl::ImportFilterKind::Default, + ModuleDecl::ImportFilterKind::SPIAccessControl, + ModuleDecl::ImportFilterKind::ShadowedByCrossImportOverlay}); + SmallVector sfImportedModules; + SF->getImportedModules(sfImportedModules, filter); + if (auto clangDecl = decl->getClangDecl()) { + for (auto redecl : clangDecl->redecls()) { + if (auto tagReDecl = dyn_cast(redecl)) { + // This is a forward declaration. We ignore visibility of those. + if (tagReDecl->getBraceRange().isInvalid()) { + continue; + } + } + auto moduleWrapper = + decl->getASTContext().getClangModuleLoader()->getWrapperForModule( + redecl->getOwningModule()); + auto visibleAccessPath = + find_if(sfImportedModules, [&moduleWrapper](auto importedModule) { + return importedModule.importedModule == moduleWrapper || + !importedModule.importedModule + ->isImportedImplementationOnly(moduleWrapper); + }); + if (visibleAccessPath != sfImportedModules.end()) { + return DisallowedOriginKind::None; + } + } + } // Implementation-only imported, cannot be reexported. return DisallowedOriginKind::ImplementationOnly; } else if (decl->isSPI() && !where.isSPI()) { diff --git a/lib/Sema/TypeCheckAttr.cpp b/lib/Sema/TypeCheckAttr.cpp index 5f8c98f3c3837..1f8d972fb83cd 100644 --- a/lib/Sema/TypeCheckAttr.cpp +++ b/lib/Sema/TypeCheckAttr.cpp @@ -1030,6 +1030,21 @@ void AttributeChecker::visitSPIAccessControlAttr(SPIAccessControlAttr *attr) { } } } + + if (auto ID = dyn_cast(D)) { + auto importedModule = ID->getModule(); + if (importedModule) { + auto path = importedModule->getModuleFilename(); + if (llvm::sys::path::extension(path) == ".swiftinterface" && + !path.endswith(".private.swiftinterface")) { + // If the module was built from the public swiftinterface, it can't + // have any SPI. 
+ diagnose(attr->getLocation(), + diag::spi_attribute_on_import_of_public_module, + importedModule->getName(), path); + } + } + } } static bool checkObjCDeclContext(Decl *D) { diff --git a/lib/Sema/TypeCheckConcurrency.cpp b/lib/Sema/TypeCheckConcurrency.cpp index dfa6b60c9d96c..f821f84139cfa 100644 --- a/lib/Sema/TypeCheckConcurrency.cpp +++ b/lib/Sema/TypeCheckConcurrency.cpp @@ -215,10 +215,7 @@ bool IsActorRequest::evaluate( // The superclass is 'NSObject', which is known to have no state and no // superclass. - if (superclassDecl->hasClangNode() && - superclassDecl->getName().is("NSObject") && - superclassDecl->getModuleContext()->getName().is("ObjectiveC") && - actorAttr != nullptr) + if (superclassDecl->isNSObject() && actorAttr != nullptr) return true; // This class cannot be an actor; complain if the 'actor' modifier was @@ -235,6 +232,35 @@ bool IsActorRequest::evaluate( return actorAttr != nullptr; } +bool IsDefaultActorRequest::evaluate( + Evaluator &evaluator, ClassDecl *classDecl) const { + // If the class isn't an actor class, it's not a default actor. + if (!classDecl->isActor()) + return false; + + // If there is a superclass, and it's an actor class, we defer + // the decision to it. + if (auto superclassDecl = classDecl->getSuperclassDecl()) { + // If the superclass is an actor, we inherit its default-actor-ness. + if (superclassDecl->isActor()) + return superclassDecl->isDefaultActor(); + + // If the superclass is not an actor class, it can only be + // a default actor if it's NSObject. (For now, other classes simply + // can't be actors at all.) We don't need to diagnose this; we + // should've done that already in isActor(). + if (!superclassDecl->isNSObject()) + return false; + } + + // If the class has explicit custom-actor methods, it's not + // a default actor. 
+ if (classDecl->hasExplicitCustomActorMethods()) + return false; + + return true; +} + static bool isDeclNotAsAccessibleAsParent(ValueDecl *decl, NominalTypeDecl *parent) { return decl->getFormalAccess() < @@ -595,6 +621,38 @@ namespace { contextStack.push_back(dc); } + /// Searches the applyStack from back to front for the inner-most CallExpr + /// and marks that CallExpr as implicitly async. + /// + /// NOTE: Crashes if no CallExpr was found. + /// + /// For example, for global actor function `curryAdd`, if we have: + /// ((curryAdd 1) 2) + /// then we want to mark the inner-most CallExpr, `(curryAdd 1)`. + /// + /// The same goes for calls to member functions, such as calc.add(1, 2), + /// aka ((add calc) 1 2), looks like this: + /// + /// (call_expr + /// (dot_syntax_call_expr + /// (declref_expr add) + /// (declref_expr calc)) + /// (tuple_expr + /// ...)) + /// + /// and we reach up to mark the CallExpr. + void markNearestCallAsImplicitlyAsync() { + assert(applyStack.size() > 0 && "not contained within an Apply?"); + + const auto End = applyStack.rend(); + for (auto I = applyStack.rbegin(); I != End; ++I) + if (auto call = dyn_cast(*I)) { + call->setImplicitlyAsync(true); + return; + } + llvm_unreachable("expected a CallExpr in applyStack!"); + } + bool shouldWalkCaptureInitializerExpressions() override { return true; } bool shouldWalkIntoTapExpression() override { return false; } @@ -635,10 +693,9 @@ namespace { if (auto memberRef = findMemberReference(partialApply->fn)) { // NOTE: partially-applied thunks are never annotated as // implicitly async, regardless of whether they are escaping. - // So, we do not pass the ApplyExpr along to checkMemberReference. 
checkMemberReference( partialApply->base, memberRef->first, memberRef->second, - partialApply->isEscaping); + partialApply->isEscaping, /*maybeImplicitAsync=*/false); partialApply->base->walk(*this); @@ -657,7 +714,7 @@ namespace { if (auto memberRef = findMemberReference(fn)) { checkMemberReference( call->getArg(), memberRef->first, memberRef->second, - /*isEscapingPartialApply=*/false, call); + /*isEscapingPartialApply=*/false, /*maybeImplicitAsync=*/true); call->getArg()->walk(*this); @@ -877,7 +934,7 @@ namespace { auto concDecl = memberRef->first; if (value == concDecl.getDecl() && !apply->implicitlyAsync()) { // then this ValueDecl appears as the called value of the ApplyExpr. - apply->setImplicitlyAsync(true); + markNearestCallAsImplicitlyAsync(); return true; } } @@ -986,7 +1043,7 @@ namespace { bool checkMemberReference( Expr *base, ConcreteDeclRef memberRef, SourceLoc memberLoc, bool isEscapingPartialApply = false, - ApplyExpr *maybeImplicitAsync = nullptr) { + bool maybeImplicitAsync = false) { if (!base || !memberRef) return false; @@ -1002,7 +1059,7 @@ namespace { if (!selfVar) { // actor-isolated non-self calls are implicitly async and thus OK. 
if (maybeImplicitAsync && isa(member)) { - maybeImplicitAsync->setImplicitlyAsync(true); + markNearestCallAsImplicitlyAsync(); return false; } ctx.Diags.diagnose( diff --git a/lib/Sema/TypeCheckDeclOverride.cpp b/lib/Sema/TypeCheckDeclOverride.cpp index 15b9627033006..af8d60e4755b1 100644 --- a/lib/Sema/TypeCheckDeclOverride.cpp +++ b/lib/Sema/TypeCheckDeclOverride.cpp @@ -728,9 +728,7 @@ static bool isNSObjectHashValue(ValueDecl *baseDecl) { if (auto baseVar = dyn_cast(baseDecl)) { if (auto classDecl = baseVar->getDeclContext()->getSelfClassDecl()) { return baseVar->getName() == ctx.Id_hashValue && - classDecl->getName().is("NSObject") && - (classDecl->getModuleContext()->getName() == ctx.Id_Foundation || - classDecl->getModuleContext()->getName() == ctx.Id_ObjectiveC); + classDecl->isNSObject(); } } return false; diff --git a/lib/Sema/TypeCheckDeclPrimary.cpp b/lib/Sema/TypeCheckDeclPrimary.cpp index 7109cad8bf31c..7c0cb04092d2b 100644 --- a/lib/Sema/TypeCheckDeclPrimary.cpp +++ b/lib/Sema/TypeCheckDeclPrimary.cpp @@ -179,6 +179,20 @@ static void checkInheritanceClause( // GenericSignatureBuilder (for protocol inheritance) or the // ConformanceLookupTable (for protocol conformance). if (inheritedTy->isAnyObject()) { + // Warn inherited AnyObject written as 'class' as deprecated + // for Swift >= 5. 
+ auto sourceRange = inherited.getSourceRange(); + bool isWrittenAsClass = + (isa(decl) || isa(decl)) && + Lexer::getTokenAtLocation(ctx.SourceMgr, sourceRange.Start) + .is(tok::kw_class); + if (ctx.LangOpts.isSwiftVersionAtLeast(5) && isWrittenAsClass) { + diags + .diagnose(sourceRange.Start, + diag::anyobject_class_inheritance_deprecated) + .fixItReplace(sourceRange, "AnyObject"); + } + if (inheritedAnyObject) { // If the first occurrence was written as 'class', downgrade the error // to a warning in such case for backward compatibility with diff --git a/lib/Sema/TypeCheckEffects.cpp b/lib/Sema/TypeCheckEffects.cpp index 04e3afbd95214..7eb7cd6845e96 100644 --- a/lib/Sema/TypeCheckEffects.cpp +++ b/lib/Sema/TypeCheckEffects.cpp @@ -985,8 +985,9 @@ class Context { } static Context forTopLevelCode(TopLevelCodeDecl *D) { - // Top-level code implicitly handles errors and 'async' calls. - return Context(/*handlesErrors=*/true, /*handlesAsync=*/true, None); + // Top-level code implicitly handles errors. + // TODO: Eventually, it will handle async as well. + return Context(/*handlesErrors=*/true, /*handlesAsync=*/false, None); } static Context forFunction(AbstractFunctionDecl *D) { diff --git a/lib/Sema/TypeCheckProtocol.cpp b/lib/Sema/TypeCheckProtocol.cpp index 95f0296b46631..86de5ba4c0ab6 100644 --- a/lib/Sema/TypeCheckProtocol.cpp +++ b/lib/Sema/TypeCheckProtocol.cpp @@ -713,8 +713,8 @@ swift::matchWitness( } // If the witness is 'async', the requirement must be. 
- if (witnessFnType->getExtInfo().isAsync() != - reqFnType->getExtInfo().isAsync()) { + if (witnessFnType->getExtInfo().isAsync() && + !reqFnType->getExtInfo().isAsync()) { return RequirementMatch(witness, MatchKind::AsyncConflict); } @@ -4849,15 +4849,11 @@ TypeChecker::containsProtocol(Type T, ProtocolDecl *Proto, DeclContext *DC, } ProtocolConformanceRef -TypeChecker::conformsToProtocol(Type T, ProtocolDecl *Proto, DeclContext *DC, - SourceLoc ComplainLoc) { +TypeChecker::conformsToProtocol(Type T, ProtocolDecl *Proto, DeclContext *DC) { // Look up conformance in the module. ModuleDecl *M = DC->getParentModule(); auto lookupResult = M->lookupConformance(T, Proto); if (lookupResult.isInvalid()) { - if (ComplainLoc.isValid()) { - diagnoseConformanceFailure(T, Proto, DC, ComplainLoc); - } return ProtocolConformanceRef::forInvalid(); } @@ -4869,16 +4865,8 @@ TypeChecker::conformsToProtocol(Type T, ProtocolDecl *Proto, DeclContext *DC, // If we have a conditional requirements that // we need to check, do so now. if (!condReqs->empty()) { - // Figure out the location of the conditional conformance. 
- auto conformanceDC = lookupResult.getConcrete()->getDeclContext(); - SourceLoc noteLoc; - if (auto ext = dyn_cast(conformanceDC)) - noteLoc = ext->getLoc(); - else - noteLoc = cast(conformanceDC)->getLoc(); - auto conditionalCheckResult = checkGenericArguments( - DC, ComplainLoc, noteLoc, T, + DC, SourceLoc(), SourceLoc(), T, {lookupResult.getRequirement()->getSelfInterfaceType()}, *condReqs, [](SubstitutableType *dependentType) { return Type(dependentType); }); switch (conditionalCheckResult) { diff --git a/lib/Sema/TypeCheckStorage.cpp b/lib/Sema/TypeCheckStorage.cpp index e58baa71aa992..fb2e14352c9dd 100644 --- a/lib/Sema/TypeCheckStorage.cpp +++ b/lib/Sema/TypeCheckStorage.cpp @@ -145,15 +145,10 @@ StoredPropertiesRequest::evaluate(Evaluator &evaluator, if (isa(decl->getModuleScopeContext())) computeLoweredStoredProperties(decl); - ASTContext &ctx = decl->getASTContext(); for (auto *member : decl->getMembers()) { if (auto *var = dyn_cast(member)) if (!var->isStatic() && var->hasStorage()) { - // Actor storage always goes at the beginning. - if (var->getName() == ctx.Id_actorStorage) - results.insert(results.begin(), var); - else - results.push_back(var); + results.push_back(var); } } diff --git a/lib/Sema/TypeChecker.h b/lib/Sema/TypeChecker.h index 2e499b059776f..46912ef8f7100 100644 --- a/lib/Sema/TypeChecker.h +++ b/lib/Sema/TypeChecker.h @@ -763,16 +763,10 @@ ProtocolConformanceRef containsProtocol(Type T, ProtocolDecl *Proto, /// \param DC The context in which to check conformance. This affects, for /// example, extension visibility. /// -/// \param ComplainLoc If valid, then this function will emit diagnostics if -/// T does not conform to the given protocol. The primary diagnostic will -/// be placed at this location, with notes for each of the protocol -/// requirements not satisfied. -/// /// \returns The protocol conformance, if \c T conforms to the /// protocol \c Proto, or \c None. 
ProtocolConformanceRef conformsToProtocol(Type T, ProtocolDecl *Proto, - DeclContext *DC, - SourceLoc ComplainLoc = SourceLoc()); + DeclContext *DC); /// This is similar to \c conformsToProtocol, but returns \c true for cases where /// the type \p T could be dynamically cast to \p Proto protocol, such as a non-final diff --git a/lib/Serialization/DeserializeSIL.cpp b/lib/Serialization/DeserializeSIL.cpp index c29900110eae2..79b2386170373 100644 --- a/lib/Serialization/DeserializeSIL.cpp +++ b/lib/Serialization/DeserializeSIL.cpp @@ -1225,8 +1225,9 @@ bool SILDeserializer::readSILInstruction(SILFunction *Fn, case SILInstructionKind::GetAsyncContinuationInst: assert(RecordKind == SIL_ONE_TYPE && "Layout should be OneType."); - ResultInst = Builder.createGetAsyncContinuation(Loc, - getSILType(MF->getType(TyID), (SILValueCategory)TyCategory, Fn)); + ResultInst = Builder.createGetAsyncContinuation( + Loc, MF->getType(TyID)->getCanonicalType(), + /*throws*/ Attr != 0); break; case SILInstructionKind::GetAsyncContinuationAddrInst: @@ -1235,7 +1236,8 @@ bool SILDeserializer::readSILInstruction(SILFunction *Fn, ResultInst = Builder.createGetAsyncContinuationAddr(Loc, getLocalValue(ValID, getSILType(MF->getType(TyID2), (SILValueCategory)TyCategory2, Fn)), - getSILType(MF->getType(TyID), (SILValueCategory)TyCategory, Fn)); + MF->getType(TyID)->getCanonicalType(), + /*throws*/ Attr != 0); break; #define ONETYPE_ONEOPERAND_INST(ID) \ diff --git a/lib/Serialization/ModuleFormat.h b/lib/Serialization/ModuleFormat.h index 0079f19e68a93..93d5b00eeedbb 100644 --- a/lib/Serialization/ModuleFormat.h +++ b/lib/Serialization/ModuleFormat.h @@ -56,7 +56,7 @@ const uint16_t SWIFTMODULE_VERSION_MAJOR = 0; /// describe what change you made. The content of this comment isn't important; /// it just ensures a conflict if two people change the module format. /// Don't worry about adhering to the 80-column limit for this line. 
-const uint16_t SWIFTMODULE_VERSION_MINOR = 587; // fingerprints in modules +const uint16_t SWIFTMODULE_VERSION_MINOR = 588; // change type of get_async_continuation[_addr] /// A standard hash seed used for all string hashes in a serialized module. /// diff --git a/lib/Serialization/Serialization.cpp b/lib/Serialization/Serialization.cpp index 5de3805dc2043..56d4d9d4f450a 100644 --- a/lib/Serialization/Serialization.cpp +++ b/lib/Serialization/Serialization.cpp @@ -2605,8 +2605,8 @@ class Serializer::DeclSerializer : public DeclVisitor { storage->hasPrivateAccessor())); if (shouldEmitFilenameForPrivate || shouldEmitPrivateDiscriminator) { - auto topLevelContext = value->getDeclContext()->getModuleScopeContext(); - if (auto *enclosingFile = dyn_cast(topLevelContext)) { + auto topLevelSubcontext = value->getDeclContext()->getModuleScopeContext(); + if (auto *enclosingFile = dyn_cast(topLevelSubcontext)) { if (shouldEmitPrivateDiscriminator) { Identifier discriminator = enclosingFile->getDiscriminatorForPrivateValue(value); diff --git a/lib/Serialization/SerializeSIL.cpp b/lib/Serialization/SerializeSIL.cpp index a0942b82b264c..4656ea43932d8 100644 --- a/lib/Serialization/SerializeSIL.cpp +++ b/lib/Serialization/SerializeSIL.cpp @@ -260,6 +260,8 @@ namespace { unsigned attrs); void writeOneTypeLayout(SILInstructionKind valueKind, unsigned attrs, SILType type); + void writeOneTypeLayout(SILInstructionKind valueKind, + unsigned attrs, CanType type); void writeOneTypeOneOperandLayout(SILInstructionKind valueKind, unsigned attrs, SILType type, @@ -587,6 +589,14 @@ void SILSerializer::writeOneTypeLayout(SILInstructionKind valueKind, (unsigned)type.getCategory()); } +void SILSerializer::writeOneTypeLayout(SILInstructionKind valueKind, + unsigned attrs, CanType type) { + unsigned abbrCode = SILAbbrCodes[SILOneTypeLayout::Code]; + SILOneTypeLayout::emitRecord(Out, ScratchRecord, abbrCode, + (unsigned) valueKind, attrs, + S.addTypeRef(type), 0); +} + void 
SILSerializer::writeOneOperandLayout(SILInstructionKind valueKind, unsigned attrs, SILValue operand) { @@ -1584,13 +1594,15 @@ void SILSerializer::writeSILInstruction(const SILInstruction &SI) { } case SILInstructionKind::GetAsyncContinuationAddrInst: { auto &gaca = cast(SI); - writeOneTypeOneOperandLayout(gaca.getKind(), 0, gaca.getType(), + writeOneTypeOneOperandLayout(gaca.getKind(), gaca.throws(), + gaca.getFormalResumeType(), gaca.getOperand()); break; } case SILInstructionKind::GetAsyncContinuationInst: { auto &gaca = cast(SI); - writeOneTypeLayout(gaca.getKind(), 0, gaca.getType()); + writeOneTypeLayout(gaca.getKind(), gaca.throws(), + gaca.getFormalResumeType()); break; } // Conversion instructions (and others of similar form). diff --git a/stdlib/cmake/modules/AddSwiftStdlib.cmake b/stdlib/cmake/modules/AddSwiftStdlib.cmake index a045cd91673f6..d603c800849b8 100644 --- a/stdlib/cmake/modules/AddSwiftStdlib.cmake +++ b/stdlib/cmake/modules/AddSwiftStdlib.cmake @@ -261,6 +261,18 @@ function(_add_target_variant_c_compile_flags) endif() endif() + # The concurrency library uses double-word atomics. MSVC's std::atomic + # uses a spin lock for this, so to get reasonable behavior we have to + # implement it ourselves using _InterlockedCompareExchange128. + # clang-cl requires us to enable the `cx16` feature to use this intrinsic. 
+ if(SWIFT_HOST_VARIANT_ARCH STREQUAL x86_64) + if(SWIFT_COMPILER_IS_MSVC_LIKE) + list(APPEND result /clang:-mcx16) + else() + list(APPEND result -mcx16) + endif() + endif() + if(${CFLAGS_SDK} STREQUAL ANDROID) if(${CFLAGS_ARCH} STREQUAL x86_64) # NOTE(compnerd) Android NDK 21 or lower will generate library calls to diff --git a/stdlib/cmake/modules/SwiftSource.cmake b/stdlib/cmake/modules/SwiftSource.cmake index 02f61bc80dba2..a8737c7d16daa 100644 --- a/stdlib/cmake/modules/SwiftSource.cmake +++ b/stdlib/cmake/modules/SwiftSource.cmake @@ -803,6 +803,7 @@ function(_compile_swift_files ${source_files} ${SWIFTFILE_DEPENDS} ${swift_ide_test_dependency} ${create_dirs_dependency_target} + ${copy_legacy_layouts_dep} COMMENT "Generating ${module_file}") if(SWIFTFILE_STATIC) @@ -859,6 +860,7 @@ function(_compile_swift_files ${SWIFTFILE_DEPENDS} ${swift_ide_test_dependency} ${obj_dirs_dependency_target} + ${copy_legacy_layouts_dep} COMMENT "Generating ${maccatalyst_module_file}") @@ -882,6 +884,7 @@ function(_compile_swift_files ${swift_compiler_tool_dep} ${source_files} ${SWIFTFILE_DEPENDS} ${create_dirs_dependency_target} + ${copy_legacy_layouts_dep} COMMENT "Generating ${sib_file}" EXCLUDE_FROM_ALL) set("${dependency_sib_target_out_var_name}" "${sib_dependency_target}" PARENT_SCOPE) @@ -898,6 +901,7 @@ function(_compile_swift_files ${swift_compiler_tool_dep} ${source_files} ${SWIFTFILE_DEPENDS} ${create_dirs_dependency_target} + ${copy_legacy_layouts_dep} COMMENT "Generating ${sibopt_file}" EXCLUDE_FROM_ALL) set("${dependency_sibopt_target_out_var_name}" "${sibopt_dependency_target}" PARENT_SCOPE) @@ -915,6 +919,7 @@ function(_compile_swift_files ${swift_compiler_tool_dep} ${source_files} ${SWIFTFILE_DEPENDS} ${create_dirs_dependency_target} + ${copy_legacy_layouts_dep} COMMENT "Generating ${sibgen_file}" EXCLUDE_FROM_ALL) set("${dependency_sibgen_target_out_var_name}" "${sibgen_dependency_target}" PARENT_SCOPE) diff --git 
a/stdlib/private/BlocksRuntimeStubs/BlocksRuntime.c b/stdlib/private/BlocksRuntimeStubs/BlocksRuntime.c new file mode 100644 index 0000000000000..7f6216c559995 --- /dev/null +++ b/stdlib/private/BlocksRuntimeStubs/BlocksRuntime.c @@ -0,0 +1,6 @@ +void +#if defined(_WIN32) +__declspec(dllexport) +#endif +_Block_release(void) { } + diff --git a/stdlib/private/BlocksRuntimeStubs/CMakeLists.txt b/stdlib/private/BlocksRuntimeStubs/CMakeLists.txt new file mode 100644 index 0000000000000..c24c956f36a19 --- /dev/null +++ b/stdlib/private/BlocksRuntimeStubs/CMakeLists.txt @@ -0,0 +1,40 @@ +list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/../../../test/cmake/modules") + +include(SwiftTestUtils) + +foreach(SDK ${SWIFT_SDKS}) + foreach(ARCH ${SWIFT_SDK_${SDK}_ARCHITECTURES}) + get_swift_test_build_flavors(build_flavors "${SDK}") + + foreach(BUILD_FLAVOR ${build_flavors}) + get_swift_test_variant_suffix(VARIANT_SUFFIX "${SDK}" "${ARCH}" "${BUILD_FLAVOR}") + + set(test_bin_dir "${SWIFT_BINARY_DIR}/test${VARIANT_SUFFIX}") + + _add_swift_target_library_single( + BlocksRuntimeStub${VARIANT_SUFFIX} + BlocksRuntimeStub + SHARED + ARCHITECTURE ${ARCH} + SDK ${SDK} + INSTALL_IN_COMPONENT dev + BlocksRuntime.c + ) + set_target_properties(BlocksRuntimeStub${VARIANT_SUFFIX} PROPERTIES + ARCHIVE_OUTPUT_DIRECTORY ${test_bin_dir} + LIBRARY_OUTPUT_DIRECTORY ${test_bin_dir} + RUNTIME_OUTPUT_DIRECTORY ${test_bin_dir} + OUTPUT_NAME BlocksRuntime) + + # When built in a unified build, ensure that we add a dependency on the + # compiler to serialize this behind the compiler. Otherwise, we would + # attempt to build this before the compiler is ready, which breaks the + # build. 
+ if(NOT SWIFT_BUILD_RUNTIME_WITH_HOST_COMPILER AND NOT BUILD_STANDALONE AND + TARGET clang) + add_dependencies(BlocksRuntimeStub${VARIANT_SUFFIX} clang) + endif() + endforeach() + endforeach() +endforeach() + diff --git a/stdlib/private/CMakeLists.txt b/stdlib/private/CMakeLists.txt index dc6d3ece54c7e..40feea28ef3c6 100644 --- a/stdlib/private/CMakeLists.txt +++ b/stdlib/private/CMakeLists.txt @@ -36,3 +36,12 @@ if(SWIFT_BUILD_SDK_OVERLAY) add_subdirectory(SwiftReflectionTest) endif() endif() + +# Keep in sync with stdlib/tools/CMakeLists.txt: swift-reflection-test is +# only used when testing dynamic stdlib. +if(SWIFT_BUILD_DYNAMIC_STDLIB AND SWIFT_INCLUDE_TESTS) + # NOTE create a stub BlocksRuntime library that can be used for the + # reflection tests + add_subdirectory(BlocksRuntimeStubs) +endif() + diff --git a/stdlib/public/Concurrency/Actor.cpp b/stdlib/public/Concurrency/Actor.cpp new file mode 100644 index 0000000000000..24fe59477c284 --- /dev/null +++ b/stdlib/public/Concurrency/Actor.cpp @@ -0,0 +1,1377 @@ +///===--- Actor.cpp - Standard actor implementation ------------------------===/// +/// +/// This source file is part of the Swift.org open source project +/// +/// Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors +/// Licensed under Apache License v2.0 with Runtime Library Exception +/// +/// See https:///swift.org/LICENSE.txt for license information +/// See https:///swift.org/CONTRIBUTORS.txt for the list of Swift project authors +/// +///===----------------------------------------------------------------------===/// +/// +/// The standard actor implementation for Swift actors. 
+/// +///===----------------------------------------------------------------------===/// + +#include "swift/Runtime/Concurrency.h" + +#include "swift/Runtime/Atomic.h" +#include "swift/Runtime/Mutex.h" +#include "swift/Runtime/ThreadLocal.h" +#include "swift/ABI/Actor.h" +#include "llvm/ADT/PointerIntPair.h" + +using namespace swift; + +/// Should we yield the thread? +static bool shouldYieldThread() { + // FIXME: system scheduler integration + return false; +} + +/*****************************************************************************/ +/*********************** DEFAULT ACTOR IMPLEMENTATION ************************/ +/*****************************************************************************/ + +namespace { + +class DefaultActorImpl; + +/// A job to process a default actor. Allocated inline in the actor. +class ProcessInlineJob : public Job { +public: + ProcessInlineJob(JobPriority priority) + : Job({JobKind::DefaultActorInline, priority}, &process) {} + + SWIFT_CC(swiftasync) + static void process(Job *job, ExecutorRef executor); + + static bool classof(const Job *job) { + return job->Flags.getKind() == JobKind::DefaultActorInline; + } +}; + +/// A job to process a default actor that's allocated separately from +/// the actor but doesn't need the override mechanics. +class ProcessOutOfLineJob : public Job { + DefaultActorImpl *Actor; +public: + ProcessOutOfLineJob(DefaultActorImpl *actor, JobPriority priority) + : Job({JobKind::DefaultActorSeparate, priority}, &process), + Actor(actor) {} + + SWIFT_CC(swiftasync) + static void process(Job *job, ExecutorRef executor); + + static bool classof(const Job *job) { + return job->Flags.getKind() == JobKind::DefaultActorSeparate; + } +}; + +/// A job to process a default actor with a new priority; allocated +/// separately from the actor. +class ProcessOverrideJob; + +/// Information about the currently-running processing job. 
+struct RunningJobInfo { + enum KindType : uint8_t { + Inline, Override, Other + }; + KindType Kind; + JobPriority Priority; + ProcessOverrideJob *OverrideJob; + + bool wasInlineJob() const { + return Kind == Inline; + } + + static RunningJobInfo forOther(JobPriority priority) { + return {Other, priority, nullptr}; + } + static RunningJobInfo forInline(JobPriority priority) { + return {Inline, priority, nullptr}; + } + static RunningJobInfo forOverride(ProcessOverrideJob *job); + + void setAbandoned(); + void setRunning(); + bool waitForActivation(); +}; + +class JobRef { + enum : uintptr_t { + NeedsPreprocessing = 0x1, + IsOverride = 0x2, + JobMask = ~uintptr_t(NeedsPreprocessing | IsOverride) + }; + + /// A Job* that may have one of the two bits above mangled into it. + uintptr_t Value; + + JobRef(Job *job, unsigned flags) + : Value(reinterpret_cast(job) | flags) {} +public: + constexpr JobRef() : Value(0) {} + + /// Return a reference to a job that's been properly preprocessed. + static JobRef getPreprocessed(Job *job) { + /// We allow null pointers here. + return { job, 0 }; + } + + /// Return a reference to a job that hasn't been preprocesssed yet. + static JobRef getUnpreprocessed(Job *job) { + assert(job && "passing a null job"); + return { job, NeedsPreprocessing }; + } + + /// Return a reference to an override job, which needs special + /// treatment during preprocessing. + static JobRef getOverride(ProcessOverrideJob *job); + + /// Is this a null reference? + operator bool() const { return Value != 0; } + + /// Does this job need to be pre-processed before we can treat + /// the job queue as a proper queue? + bool needsPreprocessing() const { + return Value & NeedsPreprocessing; + } + + /// Is this an unpreprocessed override job? + bool isOverride() const { + return Value & IsOverride; + } + + /// Given that this is an override job, return it. 
+ ProcessOverrideJob *getAsOverride() const { + assert(isOverride()); + return reinterpret_cast(Value & JobMask); + } + ProcessOverrideJob *getAsPreprocessedOverride() const; + + Job *getAsJob() const { + assert(!isOverride()); + return reinterpret_cast(Value & JobMask); + } + Job *getAsPreprocessedJob() const { + assert(!isOverride() && !needsPreprocessing()); + return reinterpret_cast(Value); + } + + bool operator==(JobRef other) const { + return Value == other.Value; + } + bool operator!=(JobRef other) const { + return Value != other.Value; + } +}; + +/// The default actor implementation. +/// +/// Ownership of the actor is subtle. Jobs are assumed to keep the actor +/// alive as long as they're executing on it; this allows us to avoid +/// retaining and releasing whenever threads are scheduled to run a job. +/// While jobs are enqueued on the actor, there is a conceptual shared +/// ownership of the currently-enqueued jobs which is passed around +/// between threads and processing jobs and managed using extra retains +/// and releases of the actor. The basic invariant is as follows: +/// +/// - Let R be 1 if there are jobs enqueued on the actor or if a job +/// is currently running on the actor; otherwise let R be 0. +/// - Let N be the number of active processing jobs for the actor. +/// - N >= R +/// - There are N - R extra retains of the actor. +/// +/// We can think of this as there being one "owning" processing job +/// and K "extra" jobs. If there is a processing job that is actively +/// running the actor, it is always the owning job; otherwise, any of +/// the N jobs may win the race to become the owning job. +/// +/// We then have the following ownership rules: +/// +/// - When we enqueue the first job on an actor, then R becomes 1, and +/// we must create a processing job so that N >= R. We do not need to +/// retain the actor. +/// - When we create an extra job to process an actor (e.g. 
because of +/// priority overrides), N increases but R remains the same. We must +/// retain the actor. +/// - When we start running an actor, our job definitively becomes the +/// owning job, but neither N nor R changes. We do not need to retain +/// the actor. +/// - When we go to start running an actor and for whatever reason we +/// don't actually do so, we are eliminating an extra processing job, +/// and so N decreases but R remains the same. We must release the +/// actor. +/// - When we are running an actor and give it up, and there are no +/// remaining jobs on it, then R becomes 0 and N decreases by 1. +/// We do not need to release the actor. +/// - When we are running an actor and give it up, and there are jobs +/// remaining on it, then R remains 1 but N is decreasing by 1. +/// We must either release the actor or create a new processing job +/// for it to maintain the balance. +class DefaultActorImpl : public HeapObject { + enum class Status { + /// The actor is not currently scheduled. Completely redundant + /// with the job list being empty. + Idle, + + /// There is currently a job scheduled to process the actor at the + /// stored max priority. + Scheduled, + + /// There is currently a thread processing the actor at the stored + /// max priority. + Running + }; + + struct Flags : public FlagSet { + enum : size_t { + Status_offset = 0, + Status_width = 2, + + HasActiveInlineJob = 2, + + MaxPriority = 8, + MaxPriority_width = JobFlags::Priority_width, + + // FIXME: add a reference to the running thread ID so that we + // can boost priorities. + }; + + /// What is the current high-level status of this actor? + FLAGSET_DEFINE_FIELD_ACCESSORS(Status_offset, Status_width, Status, + getStatus, setStatus) + + /// Is there currently an active processing job allocated inline + /// in the actor? 
+ FLAGSET_DEFINE_FLAG_ACCESSORS(HasActiveInlineJob, + hasActiveInlineJob, setHasActiveInlineJob) + + /// What is the maximum priority of jobs that are currently running + /// or enqueued on this actor? + /// + /// Note that the above isn't quite correct: we don't actually + /// lower this after we finish processing higher-priority tasks. + /// (Doing so introduces some subtleties around kicking off + /// lower-priority processing jobs.) + FLAGSET_DEFINE_FIELD_ACCESSORS(MaxPriority, MaxPriority_width, + JobPriority, + getMaxPriority, setMaxPriority) + }; + + /// This is designed to fit into two words, which can generally be + /// done lock-free on all our supported platforms. + struct alignas(2 * sizeof(void*)) State { + JobRef FirstJob; + struct Flags Flags; + }; + + swift::atomic CurrentState; + + friend class ProcessInlineJob; + union { + ProcessInlineJob JobStorage; + }; + +public: + + /// Properly construct an actor, except for the heap header. + void initialize() { + new (&CurrentState) std::atomic(State{JobRef(), Flags()}); + } + + /// Properly destruct an actor, except for the heap header. + void destroy() { + assert(CurrentState.load(std::memory_order_relaxed).Flags.getStatus() + == Status::Idle && "actor not idle during destruction?"); + } + + /// Add a job to this actor. + void enqueue(Job *job); + + /// Take over running this actor in the current thread, if possible. + bool tryAssumeThread(RunningJobInfo runner); + + /// Give up running this actor in the current thread. + void giveUpThread(RunningJobInfo runner); + + /// Claim the next job off the actor or give it up. + Job *claimNextJobOrGiveUp(bool actorIsOwned, RunningJobInfo runner); + +private: + /// Schedule an inline processing job. This can generally only be + /// done if we know nobody else is trying to do it at the same time, + /// e.g. if this thread just sucessfully transitioned the actor from + /// Idle to Scheduled. 
+ void scheduleNonOverrideProcessJob(JobPriority priority, + bool hasActiveInlineJob); + + static DefaultActorImpl *fromInlineJob(Job *job) { + assert(isa(job)); +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Winvalid-offsetof" + return reinterpret_cast( + reinterpret_cast(job) - offsetof(DefaultActorImpl, JobStorage)); +#pragma clang diagnostic pop + } + + class OverrideJobCache { + ProcessOverrideJob *Job = nullptr; + bool IsNeeded = false; +#ifndef NDEBUG + bool WasCommitted = false; +#endif + public: + OverrideJobCache() = default; + OverrideJobCache(const OverrideJobCache &) = delete; + OverrideJobCache &operator=(const OverrideJobCache &) = delete; + ~OverrideJobCache() { + assert(WasCommitted && "didn't commit override job!"); + } + + void addToState(DefaultActorImpl *actor, State &newState); + void setNotNeeded() { IsNeeded = false; } + void commit(); + }; +}; + +} /// end anonymous namespace + +static_assert(sizeof(DefaultActorImpl) <= sizeof(DefaultActor) && + alignof(DefaultActorImpl) <= alignof(DefaultActor), + "DefaultActorImpl doesn't fit in DefaultActor"); + +static DefaultActorImpl *asImpl(DefaultActor *actor) { + return reinterpret_cast(actor); +} + +static DefaultActor *asAbstract(DefaultActorImpl *actor) { + return reinterpret_cast(actor); +} + +/*****************************************************************************/ +/************************** DEFAULT ACTOR TRACKING ***************************/ +/*****************************************************************************/ + +namespace { + +enum Mode { + /// Shadow any existing frame, leaving it untouched. + ShadowExistingFrame, + + /// Update any existing frame if possible. + UpdateExistingFrame +}; + +/// A little class for tracking whether there's a frame processing +/// default actors in the current thread. +/// +/// The goal of this class is to encapsulate uses of the central variable. 
+/// We want to potentially use a more efficient access pattern than +/// ordinary thread-locals when that's available. +class DefaultActorProcessingFrame { + using ValueType = llvm::PointerIntPair; + + /// The active default actor on the current thread, if any. + /// This may still need to be tracked separately from the active + /// executor, if/when we start tracking that in thread-local storage. + static SWIFT_RUNTIME_DECLARE_THREAD_LOCAL(ValueType, ThreadLocalValue); + + ValueType SavedValue; + bool IsNeeded; + +public: + /// Flag that this thread is processing the given actor (or null, + /// for generic processing) and set up a processing frame if we + /// don't already have one. + DefaultActorProcessingFrame(DefaultActorImpl *actor, Mode mode) { + // If we should shadow an existing frame, save any value that + // it might have set. + if (mode == ShadowExistingFrame) { + SavedValue = ThreadLocalValue.get(); + IsNeeded = true; + + // If we should update an existing frame, just replace any value + // that it might have set. + } else { + IsNeeded = !ThreadLocalValue.get().getInt(); + SavedValue = ValueType(); + } + + ThreadLocalValue.set(ValueType(actor, true)); + } + + DefaultActorProcessingFrame(const DefaultActorProcessingFrame &) = delete; + DefaultActorProcessingFrame &operator=( + const DefaultActorProcessingFrame &) = delete; + + /// Return the currently active actor. + DefaultActorImpl *getActiveActor() { + return ThreadLocalValue.get().getPointer(); + } + + /// Exit the frame. This isn't a destructor intentionally, because + /// we need to be able to tail-call out of frames that might have + /// optimistically made one of these. + void exit() { + ThreadLocalValue.set(SavedValue); + } + + /// Return whether this frame was needed; if it was not, then it's + /// okay to abandon it without calling exit(). This is only meaningful + /// when constructed in the UpdateExistingFrame mode. 
+ bool isNeeded() { + return IsNeeded; + } +}; + +/// Define the thread-local. +SWIFT_RUNTIME_DECLARE_THREAD_LOCAL( + DefaultActorProcessingFrame::ValueType, + DefaultActorProcessingFrame::ThreadLocalValue); + +} /// end anonymous namespace + +/*****************************************************************************/ +/*********************** DEFAULT ACTOR IMPLEMENTATION ************************/ +/*****************************************************************************/ + +/// Given that a job is enqueued normally on a default actor, get/set +/// the next job in the actor's queue. +/// +/// Note that this must not be used on the override jobs that can appear +/// in the queue; those jobs are not actually in the actor's queue (they're +/// on the global execution queues). So the actor's actual queue flows +/// through the NextJob field on those objects rather than through +/// the SchedulerPrivate fields. +static JobRef getNextJobInQueue(Job *job) { + return *reinterpret_cast(job->SchedulerPrivate); +} +static void setNextJobInQueue(Job *job, JobRef next) { + *reinterpret_cast(job->SchedulerPrivate) = next; +} + +/// Schedule a processing job that doesn't have to be an override job. +/// +/// We can either do this with inline storage or heap-allocated. +/// To ues inline storage, we need to verify that the hasActiveInlineJob +/// flag is not set in the state and then successfully set it. The +/// argument reports that this has happened correctly. +/// +/// We should only schedule a non-override processing job at all if +/// we're transferring ownership of the jobs in it; see the ownership +/// comment on DefaultActorImpl. 
+void DefaultActorImpl::scheduleNonOverrideProcessJob(JobPriority priority, + bool hasActiveInlineJob) { + Job *job; + if (hasActiveInlineJob) { + job = new ProcessOutOfLineJob(this, priority); + } else { + job = new (&JobStorage) ProcessInlineJob(priority); + } + swift_task_enqueueGlobal(job); +} + + +namespace { + +/// A job to process a specific default actor at a higher priority than +/// it was previously running at. +/// +/// When an override job is successfully registered with an actor +/// (not enqueued there), the thread processing the actor and the +/// thread processing the override job coordinate by each calling +/// one of a set of methods on the object. +class ProcessOverrideJob : public Job { + DefaultActorImpl *Actor; + + ConditionVariable::Mutex Lock; + ConditionVariable Queue; + + /// Has the actor made a decision about this job yet? + bool IsResolvedByActor = false; + + /// Has the job made a decision about itself yet? + bool IsResolvedByJob = false; + + /// Has this job been abandoned? + bool IsAbandoned = false; + +public: + /// SchedulerPrivate in an override job is used for actually scheduling + /// the job, so the actor queue goes through this instead. + /// + /// We also use this temporarily for the list of override jobs on + /// the actor that we need to wake up. + JobRef NextJob; + +public: + ProcessOverrideJob(DefaultActorImpl *actor, JobPriority priority, + JobRef nextJob) + : Job({JobKind::DefaultActorOverride, priority}, &process), + Actor(actor), NextJob(nextJob) {} + + DefaultActorImpl *getActor() const { return Actor; } + + /// Called by the job to notify the actor that the job has chosen + /// to abandon its work. This is irrevocable: the job is not going + /// to have a thread behind it. + /// + /// This may delete the job or cause it to be deleted on another thread. 
+ void setAbandoned() { + bool shouldDelete = false; + Lock.withLock([&] { + assert(!IsResolvedByJob && "job already resolved itself"); + IsResolvedByJob = true; + IsAbandoned = true; + shouldDelete = IsResolvedByJob && IsResolvedByActor; + }); + if (shouldDelete) delete this; + } + + /// Called by the job to notify the actor that the job has successfully + /// taken over the actor and is now running it. + /// + /// This may delete the job object or cause it to be deleted on + /// another thread. + void setRunning() { + bool shouldDelete = false; + Lock.withLock([&] { + assert(!IsResolvedByJob && "job already resolved itself"); + IsResolvedByJob = true; + shouldDelete = IsResolvedByJob && IsResolvedByActor; + }); + if (shouldDelete) delete this; + } + + /// Called by the job to wait for the actor to resolve what the job + /// should do. + bool waitForActivation() { + bool isActivated = false; + Lock.withLockOrWait(Queue, [&] { + assert(!IsResolvedByJob && "job already resolved itself"); + if (IsResolvedByActor) { + isActivated = !IsAbandoned; + IsResolvedByJob = true; + return true; + } + return false; + }); + delete this; + return isActivated; + } + + /// Called by the actor to notify this job that the actor thinks it + /// should try to take over the actor. It's okay if that doesn't + /// succeed (as long as that's because some other job is going to + /// take over). + /// + /// This may delete the job or cause it to be deleted on another + /// thread. 
+ bool wakeAndActivate() { + bool shouldDelete = false; + bool mayHaveBeenActivated = false; + Lock.withLockThenNotifyAll(Queue, [&] { + assert(!IsResolvedByActor && "actor already resolved this job"); + IsResolvedByActor = true; + mayHaveBeenActivated = IsResolvedByJob && !IsAbandoned; + shouldDelete = IsResolvedByJob && IsResolvedByActor; + }); + if (shouldDelete) delete this; + return mayHaveBeenActivated; + } + + /// Called by the actor to notify this job that the actor does not + /// think it should try to take over the actor. It's okay if the + /// job successfully takes over the actor anyway. + /// + /// This may delete the job or cause it to be deleted on another + /// thread. + void wakeAndAbandon() { + bool shouldDelete = false; + Lock.withLockThenNotifyAll(Queue, [&] { + assert(!IsResolvedByActor && "actor already resolved this job"); + IsResolvedByActor = true; + IsAbandoned = true; + shouldDelete = IsResolvedByJob && IsResolvedByActor; + }); + if (shouldDelete) delete this; + } + + SWIFT_CC(swiftasync) + static void process(Job *job, ExecutorRef _executor); + + static bool classof(const Job *job) { + return job->Flags.getKind() == JobKind::DefaultActorOverride; + } +}; + +} /// end anonymous namespace + +JobRef JobRef::getOverride(ProcessOverrideJob *job) { + return JobRef(job, NeedsPreprocessing | IsOverride); +} +ProcessOverrideJob *JobRef::getAsPreprocessedOverride() const { + return cast_or_null(getAsPreprocessedJob()); +} +RunningJobInfo RunningJobInfo::forOverride(ProcessOverrideJob *job) { + return {Override, job->getPriority(), job}; +} + +/// Flag that the current processing job has been abandoned +/// and will not be running the actor. +void RunningJobInfo::setAbandoned() { + if (OverrideJob) { + OverrideJob->setAbandoned(); + OverrideJob = nullptr; + } +} + +/// Flag that the current processing job is now running the actor. 
+void RunningJobInfo::setRunning() { + if (OverrideJob) { + OverrideJob->setRunning(); + OverrideJob = nullptr; + } +} + +/// Try to wait for the current processing job to be activated, +/// if that's possible. It's okay to call this multiple times +/// (or to call setAbandoned/setRunning after it) as long as +/// it's all on a single value. +bool RunningJobInfo::waitForActivation() { + if (Kind == Override) { + // If we don't have an override job, it's because we've already + // waited for activation successfully. + if (!OverrideJob) return true; + + bool result = OverrideJob->waitForActivation(); + OverrideJob = nullptr; + return result; + } + return false; +} + +/// Wake all the overrides in the given list, activating the first +/// that exactly matches the target priority, if any. +static void wakeOverrides(ProcessOverrideJob *nextOverride, + Optional targetPriority) { + bool hasAlreadyActivated = false; + while (nextOverride) { + // We have to advance to the next override before we call one of + // the wake methods because they can delete the job immediately + // (and even if they don't, we'd still be racing with deletion). + auto cur = nextOverride; + nextOverride = cur->NextJob.getAsPreprocessedOverride(); + + if (hasAlreadyActivated || + !targetPriority || + cur->getPriority() != *targetPriority) + cur->wakeAndAbandon(); + else + hasAlreadyActivated = cur->wakeAndActivate(); + } +} + +/// Flag that an override job is needed and create it. +void DefaultActorImpl::OverrideJobCache::addToState(DefaultActorImpl *actor, + State &newState) { + IsNeeded = true; + auto newPriority = newState.Flags.getMaxPriority(); + auto nextJob = newState.FirstJob; + if (Job) { + Job->Flags.setPriority(newPriority); + Job->NextJob = nextJob; + } else { + // Override jobs are always "extra" from the perspective of our + // ownership rules and so require a retain of the actor. 
We must + do this before changing the actor state because other jobs may + race to release the actor as soon as we change the actor state. + swift_retain(actor); + Job = new ProcessOverrideJob(actor, newPriority, nextJob); + } + newState.FirstJob = JobRef::getOverride(Job); +} + +/// Schedule the override job if we created one and still need it. +/// If we created one but didn't end up needing it (which can happen +/// with a race to override), destroy it. +void DefaultActorImpl::OverrideJobCache::commit() { +#ifndef NDEBUG + assert(!WasCommitted && "committing override job multiple times"); + WasCommitted = true; +#endif + + if (Job) { + if (IsNeeded) { + swift_task_enqueueGlobal(Job); + } else { + swift_release(Job->getActor()); + delete Job; + } + } +} + +/// Preprocess the prefix of the actor's queue that hasn't already +/// been preprocessed: +/// +/// - Split the jobs into registered overrides and actual jobs. +/// - Append the actual jobs to any already-preprocessed job list. +/// +/// The returned job should become the new root of the job queue +/// (or may be immediately dequeued, in which case its successor should). +/// All of the jobs in this list are guaranteed to be non-override jobs. +static Job *preprocessQueue(JobRef first, + JobRef previousFirst, + Job *previousFirstNewJob, + ProcessOverrideJob *&overridesToWake) { + assert(previousFirst || previousFirstNewJob == nullptr); + + if (!first.needsPreprocessing()) + return first.getAsPreprocessedJob(); + + Job *firstNewJob = nullptr; + + while (first != previousFirst) { + // If we find something that doesn't need preprocessing, it must've + // been left by a previous queue-processing, which means that + // this must be our first attempt to preprocess in this processing. + // Just treat the queue from this point as a well-formed whole + // to which we need to add any new items we might've just found. 
+ if (!first.needsPreprocessing()) { + assert(!previousFirst && !previousFirstNewJob); + previousFirstNewJob = first.getAsPreprocessedJob(); + break; + } + + // If the job is an override, add it to the list of override jobs + // that we need to wake up. Note that the list of override jobs + // flows through NextJob; we must not use getNextJobInQueue because + // that touches queue-private state, and the override job is + // not enqueued on the actor, merely registered with it. + if (first.isOverride()) { + auto overrideJob = first.getAsOverride(); + first = overrideJob->NextJob; + overrideJob->NextJob = JobRef::getPreprocessed(overridesToWake); + overridesToWake = overrideJob; + continue; + } + + // If the job isn't an override, add it to the front of the list of + // jobs we're building up. Note that this reverses the order of + // jobs; since enqueue() always adds jobs to the front, reversing + // the order effectively makes the actor queue FIFO, which is what + // we want. + // FIXME: but we should also sort by priority + auto job = first.getAsJob(); + first = getNextJobInQueue(job); + setNextJobInQueue(job, JobRef::getPreprocessed(firstNewJob)); + firstNewJob = job; + } + + // If there are jobs already in the queue, put the new jobs at the end. 
+ if (!firstNewJob) { + firstNewJob = previousFirstNewJob; + } else if (previousFirstNewJob) { + auto cur = previousFirstNewJob; + while (true) { + auto next = getNextJobInQueue(cur).getAsPreprocessedJob(); + if (!next) { + setNextJobInQueue(cur, JobRef::getPreprocessed(firstNewJob)); + break; + } + cur = next; + } + firstNewJob = previousFirstNewJob; + } + + return firstNewJob; +} + +void DefaultActorImpl::giveUpThread(RunningJobInfo runner) { + auto oldState = CurrentState.load(std::memory_order_acquire); + assert(oldState.Flags.getStatus() == Status::Running); + + ProcessOverrideJob *overridesToWake = nullptr; + auto firstNewJob = preprocessQueue(oldState.FirstJob, JobRef(), nullptr, + overridesToWake); + + while (true) { + State newState = oldState; + newState.FirstJob = JobRef::getPreprocessed(firstNewJob); + if (firstNewJob) { + newState.Flags.setStatus(Status::Scheduled); + } else { + newState.Flags.setStatus(Status::Idle); + } + + // If the runner was an inline job, it's no longer active. + if (runner.wasInlineJob()) { + newState.Flags.setHasActiveInlineJob(false); + } + + bool hasMoreJobs = (bool) newState.FirstJob; + bool hasOverrideAtNewPriority = + (runner.Priority < oldState.Flags.getMaxPriority()); + bool hasActiveInlineJob = newState.Flags.hasActiveInlineJob(); + bool needsNewProcessJob = hasMoreJobs && !hasOverrideAtNewPriority; + + // If we want to create a new inline job below, be sure to claim that + // in the new state. + if (needsNewProcessJob && !hasActiveInlineJob) { + newState.Flags.setHasActiveInlineJob(true); + } + + auto firstPreprocessed = oldState.FirstJob; + if (!CurrentState.compare_exchange_weak(oldState, newState, + /*success*/ std::memory_order_release, + /*failure*/ std::memory_order_acquire)) { + // Preprocess any new queue items. + firstNewJob = preprocessQueue(oldState.FirstJob, + firstPreprocessed, + firstNewJob, + overridesToWake); + + // Try again. + continue; + } + + // The priority of the remaining work. 
+ auto newPriority = newState.Flags.getMaxPriority(); + + // Wake any overrides. + wakeOverrides(overridesToWake, newPriority); + + // This is the actor's owning job; per the ownership rules (see + // the comment on DefaultActorImpl), if there are remaining + // jobs, we need to balance out our ownership one way or another. + // We also, of course, need to ensure that there's a thread that's + // actually going to process the actor. + if (hasMoreJobs) { + // If we know that there's an override job at the new priority, + // we can let it become the owning job. We just need to release. + if (hasOverrideAtNewPriority) { + swift_release(this); + + // Otherwise, enqueue a job that will try to take over running + // with the new priority. This also ensures that there's a job + // at that priority which will actually take over the actor. + } else { + scheduleNonOverrideProcessJob(newPriority, hasActiveInlineJob); + } + } + + return; + } +} + +/// Claim the next job on the actor or give it up forever. +/// +/// The running thread doesn't need to already own the actor to do this. +/// It does need to be participating correctly in the ownership +/// scheme as a "processing job"; see the comment on DefaultActorImpl. +Job *DefaultActorImpl::claimNextJobOrGiveUp(bool actorIsOwned, + RunningJobInfo runner) { + auto oldState = CurrentState.load(std::memory_order_acquire); + + // The status had better be Running unless we're trying to acquire + // our first job. + assert(oldState.Flags.getStatus() == Status::Running || + !actorIsOwned); + + // If we don't yet own the actor, we need to try to claim the actor + // first; we cannot safely access the queue memory yet because other + // threads may concurrently be trying to do this. + if (!actorIsOwned) { + while (true) { + // A helper function when the only change we need to try is to + // update for an inline runner. 
+ auto tryUpdateForInlineRunner = [&]{ + if (!runner.wasInlineJob()) return true; + + auto newState = oldState; + newState.Flags.setHasActiveInlineJob(false); + return CurrentState.compare_exchange_weak(oldState, newState, + /*success*/ std::memory_order_relaxed, + /*failure*/ std::memory_order_acquire); + }; + + // If the actor is out of work, or its priority doesn't match our + // priority, don't try to take over the actor. + if (!oldState.FirstJob || + oldState.Flags.getMaxPriority() != runner.Priority) { + + // The only change we need here is inline-runner bookkeeping. + if (!tryUpdateForInlineRunner()) + continue; + + // We're eliminating a processing thread; balance ownership. + swift_release(this); + runner.setAbandoned(); + return nullptr; + } + + // If the actor is currently running, we'd need to wait for + // it to stop. We can do this if we're an override job; + // otherwise we need to exit. + if (oldState.Flags.getStatus() == Status::Running) { + if (!runner.waitForActivation()) { + // The only change we need here is inline-runner bookkeeping. + if (!tryUpdateForInlineRunner()) + continue; + + swift_release(this); + return nullptr; + } + + // Fall through into the compare-exchange below, but anticipate + // that the actor is now Scheduled instead of Running. + oldState.Flags.setStatus(Status::Scheduled); + } + + // Try to set the state as Running. + assert(oldState.Flags.getStatus() == Status::Scheduled); + auto newState = oldState; + newState.Flags.setStatus(Status::Running); + + // Also do our inline-runner bookkeeping. + if (runner.wasInlineJob()) + newState.Flags.setHasActiveInlineJob(false); + + if (!CurrentState.compare_exchange_weak(oldState, newState, + /*success*/ std::memory_order_relaxed, + /*failure*/ std::memory_order_acquire)) + continue; + + // If that succeeded, we can proceed to the main body. 
+ oldState = newState; + runner.setRunning(); + break; + } + } + + assert(oldState.Flags.getStatus() == Status::Running); + + // We should have taken care of the inline-job bookkeeping now. + assert(!oldState.Flags.hasActiveInlineJob() || !runner.wasInlineJob()); + + // Okay, now it's safe to look at queue state. + // Preprocess any queue items at the front of the queue. + ProcessOverrideJob *overridesToWake = nullptr; + auto newFirstJob = preprocessQueue(oldState.FirstJob, JobRef(), + nullptr, overridesToWake); + + Optional remainingJobPriority; + while (true) { + State newState = oldState; + + // If the priority we're currently running with is adequate for + // all the remaining jobs, try to dequeue something. + // FIXME: should this be an exact match in priority instead of + // potentially running jobs with too high a priority? + Job *jobToRun; + if (oldState.Flags.getMaxPriority() <= runner.Priority && + newFirstJob) { + jobToRun = newFirstJob; + newState.FirstJob = getNextJobInQueue(newFirstJob); + newState.Flags.setStatus(Status::Running); + + // Otherwise, we should give up the thread. + } else { + jobToRun = nullptr; + newState.FirstJob = JobRef::getPreprocessed(newFirstJob); + newState.Flags.setStatus(newFirstJob ? Status::Scheduled + : Status::Idle); + } + + // Try to update the queue. The changes we've made to the queue + // structure need to be made visible even if we aren't dequeuing + // anything. + auto firstPreprocessed = oldState.FirstJob; + if (!CurrentState.compare_exchange_weak(oldState, newState, + /*success*/ std::memory_order_release, + /*failure*/ std::memory_order_acquire)) { + // Preprocess any new queue items, which will have been formed + // into a linked list leading to the last head we observed. + // (The fact that that job may not be the head anymore doesn't + // matter; we're looking for an exact match with that.) 
+ newFirstJob = preprocessQueue(oldState.FirstJob, + firstPreprocessed, + newFirstJob, + overridesToWake); + + // Loop to retry updating the state. + continue; + } + + // We successfully updated the state. + + // If we're giving up the thread with jobs remaining, we need + // to release the actor, and we should wake overrides with the + // right priority. + Optional remainingJobPriority; + if (!jobToRun && newFirstJob) { + remainingJobPriority = newState.Flags.getMaxPriority(); + } + + // Wake the overrides. + wakeOverrides(overridesToWake, remainingJobPriority); + + // Per the ownership rules (see the comment on DefaultActorImpl), + // release the actor if we're giving up the thread with jobs + // remaining. We intentionally do this after wakeOverrides to + // try to get the overrides running a little faster. + if (remainingJobPriority) + swift_release(this); + + return jobToRun; + } +} + +/// The primary function for processing an actor on a thread. Start +/// processing the given default actor as the active default actor on +/// the current thread, and keep processing whatever actor we're +/// running when code returns back to us until we're not processing +/// any actors anymore. +static void processDefaultActor(DefaultActorImpl *currentActor, + RunningJobInfo runner) { + // Register that we're processing a default actor in this frame. + DefaultActorProcessingFrame frame(currentActor, ShadowExistingFrame); + + bool threadIsRunningActor = false; + while (true) { + assert(currentActor); + + // Immediately check if we've been asked to yield the thread. + if (shouldYieldThread()) + break; + + // Claim another job from the current actor. + auto job = currentActor->claimNextJobOrGiveUp(threadIsRunningActor, + runner); + + // If we failed to claim a job, we have nothing to do. + if (!job) { + // We also gave up the actor as part of failing to claim it. + // Make sure we don't try to give up the actor again. + currentActor = nullptr; + break; + } + + // Run the job. 
+ job->run(ExecutorRef::forDefaultActor(asAbstract(currentActor))); + + // The current actor may have changed after the job. + // If it's become nil, we have nothing to do. + currentActor = frame.getActiveActor(); + if (!currentActor) + break; + + // Otherwise, we know that we're running the actor on this thread. + threadIsRunningActor = true; + } + + frame.exit(); + + // If we still have an active actor, we should give it up. + if (currentActor) + currentActor->giveUpThread(runner); +} + +void ProcessInlineJob::process(Job *job, ExecutorRef _executor) { + DefaultActorImpl *actor = DefaultActorImpl::fromInlineJob(job); + + // Pull the priority out of the job before we do anything that might + // invalidate it. + auto targetPriority = job->getPriority(); + auto runner = RunningJobInfo::forInline(targetPriority); + + // FIXME: force tail call + return processDefaultActor(actor, runner); +} + +void ProcessOutOfLineJob::process(Job *job, ExecutorRef _executor) { + auto self = cast(job); + DefaultActorImpl *actor = self->Actor; + + // Pull the priority out of the job before we do anything that might + // invalidate it. + auto targetPriority = job->getPriority(); + auto runner = RunningJobInfo::forOther(targetPriority); + + delete self; + + // FIXME: force tail call + return processDefaultActor(actor, runner); +} + +void ProcessOverrideJob::process(Job *job, ExecutorRef _executor) { + auto self = cast(job); + + // Pull the actor and priority out of the job. + auto actor = self->Actor; + auto runner = RunningJobInfo::forOverride(self); + + // FIXME: force tail call + return processDefaultActor(actor, runner); +} + +void DefaultActorImpl::enqueue(Job *job) { + auto oldState = CurrentState.load(std::memory_order_relaxed); + + OverrideJobCache overrideJob; + + while (true) { + auto newState = oldState; + + // Put the job at the front of the job list (which will get + // reversed during preprocessing). 
+ setNextJobInQueue(job, oldState.FirstJob); + newState.FirstJob = JobRef::getUnpreprocessed(job); + + auto oldStatus = oldState.Flags.getStatus(); + bool wasIdle = oldStatus == Status::Idle; + + // Update the priority: the priority of the job we're adding + // if the actor was idle, or the max if not. Only the running + // thread can decrease the actor's priority once it's non-idle. + // (But note that the job we enqueue can still observe a + // lowered priority.) + auto oldPriority = oldState.Flags.getMaxPriority(); + auto newPriority = + wasIdle ? job->getPriority() + : std::max(oldPriority, job->getPriority()); + newState.Flags.setMaxPriority(newPriority); + + // If we need an override job, create it (if necessary) and + // register it with the queue. + bool needsOverride = !wasIdle && newPriority != oldPriority; + if (needsOverride) { + overrideJob.addToState(this, newState); + } else { + overrideJob.setNotNeeded(); + } + + // If we don't need an override job, then we might be able to + // create an inline job; flag that. + bool hasActiveInlineJob = newState.Flags.hasActiveInlineJob(); + if (wasIdle && !hasActiveInlineJob) + newState.Flags.setHasActiveInlineJob(true); + + // Make sure the status is at least Scheduled. We'll actually + // schedule the job below, if we succeed at this. + if (wasIdle) { + newState.Flags.setStatus(Status::Scheduled); + } + + // Try the compare-exchange, and try again if it fails. + if (!CurrentState.compare_exchange_weak(oldState, newState, + /*success*/ std::memory_order_release, + /*failure*/ std::memory_order_relaxed)) + continue; + + // Okay, we successfully updated the status. Schedule a job to + // process the actor if necessary. + + // Commit the override job if we created one. + overrideJob.commit(); + + // If the actor is currently idle, schedule it using the + // invasive job. 
+ if (wasIdle) { + assert(!needsOverride); + scheduleNonOverrideProcessJob(newPriority, hasActiveInlineJob); + } + + return; + } +} + +bool DefaultActorImpl::tryAssumeThread(RunningJobInfo runner) { + // We have to load-acquire in order to properly order accesses to + // the actor's state for the new task. + auto oldState = CurrentState.load(std::memory_order_acquire); + + // If the actor is currently idle, try to mark it as running. + while (oldState.Flags.getStatus() == Status::Idle) { + assert(!oldState.FirstJob); + auto newState = oldState; + newState.Flags.setStatus(Status::Running); + newState.Flags.setMaxPriority(runner.Priority); + + if (CurrentState.compare_exchange_weak(oldState, newState, + /*success*/ std::memory_order_relaxed, + /*failure*/ std::memory_order_acquire)) + return true; + } + + return false; +} + +void swift::swift_defaultActor_initialize(DefaultActor *_actor) { + asImpl(_actor)->initialize(); +} + +void swift::swift_defaultActor_destroy(DefaultActor *_actor) { + asImpl(_actor)->destroy(); +} + +void swift::swift_defaultActor_enqueue(Job *job, DefaultActor *_actor) { + asImpl(_actor)->enqueue(job); +} + +/*****************************************************************************/ +/****************************** ACTOR SWITCHING ******************************/ +/*****************************************************************************/ + +/// Can the current executor give up its thread? +static bool canGiveUpThreadForSwitch(ExecutorRef currentExecutor) { + // We can certainly "give up" a generic executor to try to run + // a task for an actor. + if (currentExecutor.isGeneric()) + return true; + + // If the current executor is a default actor, we know how to make + // it give up its thread. + if (currentExecutor.isDefaultActor()) + return true; + + return false; +} + +/// Tell the current executor to give up its thread, given that it +/// returned true from canGiveUpThreadForSwitch(). 
+/// +/// Note that we don't update DefaultActorProcessingFrame here; we'll +/// do that in runOnAssumedThread. +static void giveUpThreadForSwitch(ExecutorRef currentExecutor, + RunningJobInfo runner) { + if (currentExecutor.isGeneric()) + return; + + asImpl(currentExecutor.getDefaultActor())->giveUpThread(runner); +} + +/// Try to assume control of the current thread for the given executor +/// in order to run the given job. +/// +/// This doesn't actually run the job yet. +/// +/// Note that we don't update DefaultActorProcessingFrame here; we'll +/// do that in runOnAssumedThread. +static bool tryAssumeThreadForSwitch(ExecutorRef newExecutor, + RunningJobInfo runner) { + // If the new executor is generic, we don't need to do anything. + if (newExecutor.isGeneric()) { + return true; + } + + // If the new executor is a default actor, ask it to assume the thread. + if (newExecutor.isDefaultActor()) { + return asImpl(newExecutor.getDefaultActor())->tryAssumeThread(runner); + } + + return false; +} + +/// Given that we've assumed control of an executor on this thread, +/// run the given task on it. +SWIFT_CC(swiftasync) +static void runOnAssumedThread(AsyncTask *task, ExecutorRef newExecutor, + RunningJobInfo runner) { + assert(newExecutor.isGeneric() || newExecutor.isDefaultActor()); + + DefaultActorImpl *actor = newExecutor.isGeneric() + ? nullptr + : asImpl(newExecutor.getDefaultActor()); + + // Set that this actor is now the active default actor on this thread, + // and set up an actor-processing frame if there wasn't one already. + DefaultActorProcessingFrame frame(actor, UpdateExistingFrame); + + // If one already existed, we should just tail-call the task; we don't + // want these frames to potentially accumulate linearly. + if (!frame.isNeeded()) { + // FIXME: force tail call + return task->run(newExecutor); + } + + // Otherwise, run the new task. + task->run(newExecutor); + + // Leave the processing frame, and give up the current actor if + // we have one. 
+ // + // In principle, we could execute more tasks here, but that's probably + // not a reasonable thing to do in an assumed context rather than a + // dedicated actor-processing job. + actor = frame.getActiveActor(); + frame.exit(); + + if (actor) + actor->giveUpThread(runner); +} + +void swift::swift_task_switch(AsyncTask *task, ExecutorRef currentExecutor, + ExecutorRef newExecutor) { + assert(task && "no task provided"); + + // If the current executor is compatible with running the new executor, + // just continue running. + if (!currentExecutor.mustSwitchToRun(newExecutor)) { + // FIXME: force tail call + return task->run(currentExecutor); + } + + // Okay, we semantically need to switch. + auto runner = RunningJobInfo::forOther(task->getPriority()); + + // If the current executor can give up its thread, and the new executor + // can take over a thread, try to do so; but don't do this if we've + // been asked to yield the thread. + if (canGiveUpThreadForSwitch(currentExecutor) && + !shouldYieldThread() && + tryAssumeThreadForSwitch(newExecutor, runner)) { + giveUpThreadForSwitch(currentExecutor, runner); + // FIXME: force tail call + return runOnAssumedThread(task, newExecutor, runner); + } + + // Otherwise, just asynchronously enqueue the task on the given + // executor. + swift_task_enqueue(task, newExecutor); +} + +/*****************************************************************************/ +/************************* GENERIC ACTOR INTERFACES **************************/ +/*****************************************************************************/ + +void swift::swift_task_enqueue(Job *job, ExecutorRef executor) { + assert(job && "no job provided"); + + if (executor.isGeneric()) + return swift_task_enqueueGlobal(job); + + if (executor.isDefaultActor()) + return asImpl(executor.getDefaultActor())->enqueue(job); + + // FIXME: call the general method. 
+ job->run(executor); +} + +SWIFT_CC(swift) +void (*swift::swift_task_enqueueGlobal_hook)(Job *job) = nullptr; + +void swift::swift_task_enqueueGlobal(Job *job) { + assert(job && "no job provided"); + + // If the hook is defined, use it. + if (swift_task_enqueueGlobal_hook) + return swift_task_enqueueGlobal_hook(job); + + // FIXME: implement this properly + job->run(ExecutorRef::generic()); +} + diff --git a/stdlib/public/Concurrency/Actor.swift b/stdlib/public/Concurrency/Actor.swift index 212651ecfb6b3..1b5c273e6d743 100644 --- a/stdlib/public/Concurrency/Actor.swift +++ b/stdlib/public/Concurrency/Actor.swift @@ -24,32 +24,17 @@ public protocol Actor: AnyObject { func enqueue(partialTask: PartialAsyncTask) } -/// A native actor queue, which schedules partial tasks onto a serial queue. -public struct _NativeActorQueue { - // TODO: This is just a stub for now -} - -/// The default type to be used for an actor's queue when an actor does not -/// provide its own implementation of `enqueue(partialTask:)`. -public typealias _DefaultActorQueue = _NativeActorQueue +/// Called to initialize the default actor instance in an actor. +/// The implementation will call this within the actor's initializer. +@_silgen_name("swift_defaultActor_initialize") +public func _defaultActorInitialize(_ actor: AnyObject) -/// Called to create a new default actor queue instance for a class of the given -/// type. The implementation will call this within the actor's initializer to -/// initialize the actor queue. -public func _defaultActorQueueCreate( - _ actorClass: AnyObject.Type -) -> _DefaultActorQueue { - _DefaultActorQueue() -} +/// Called to destroy the default actor instance in an actor. +/// The implementation will call this within the actor's deinit. +@_silgen_name("swift_defaultActor_destroy") +public func _defaultActorDestroy(_ actor: AnyObject) /// Called by the synthesized implementation of enqueue(partialTask:). 
-/// -/// The implementation is provided with the address of the synthesized instance -/// property for the actor queue, so that it need not be at a fixed offset. -public func _defaultActorQueueEnqueuePartialTask( - actor: AnyObject, - queue: inout _DefaultActorQueue, - partialTask: PartialAsyncTask -) { - // TODO: Implement queueing. -} +@_silgen_name("swift_defaultActor_enqueue") +public func _defaultActorEnqueue(partialTask: PartialAsyncTask, + actor: AnyObject) diff --git a/stdlib/public/Concurrency/CMakeLists.txt b/stdlib/public/Concurrency/CMakeLists.txt index a2efc30b3dc51..071787bf72d69 100644 --- a/stdlib/public/Concurrency/CMakeLists.txt +++ b/stdlib/public/Concurrency/CMakeLists.txt @@ -10,7 +10,14 @@ # #===----------------------------------------------------------------------===# +set(swift_concurrency_objc_sources + SwiftNativeNSObject.mm) + +set(LLVM_OPTIONAL_SOURCES + ${swift_concurrency_objc_sources}) + add_swift_target_library(swift_Concurrency ${SWIFT_STDLIB_LIBRARY_BUILD_TYPES} IS_STDLIB + Actor.cpp Actor.swift PartialAsyncTask.swift Task.cpp @@ -21,6 +28,7 @@ add_swift_target_library(swift_Concurrency ${SWIFT_STDLIB_LIBRARY_BUILD_TYPES} I TaskStatus.cpp TaskGroup.swift Mutex.cpp + ${swift_concurrency_objc_sources} SWIFT_MODULE_DEPENDS_OSX Darwin SWIFT_MODULE_DEPENDS_IOS Darwin diff --git a/stdlib/public/Concurrency/PartialAsyncTask.swift b/stdlib/public/Concurrency/PartialAsyncTask.swift index 618ec38ef9463..80b6a01a37ca0 100644 --- a/stdlib/public/Concurrency/PartialAsyncTask.swift +++ b/stdlib/public/Concurrency/PartialAsyncTask.swift @@ -14,25 +14,39 @@ import Swift @_implementationOnly import _SwiftConcurrencyShims /// A partial task is a unit of scheduleable work. 
+@frozen public struct PartialAsyncTask { - private var context: UnsafeMutablePointer<_SwiftContext> + private var context: Builtin.Job public func run() { } } @frozen public struct UnsafeContinuation { - private var context: UnsafeRawPointer + @usableFromInline internal var context: Builtin.RawUnsafeContinuation + + @_alwaysEmitIntoClient + internal init(_ context: Builtin.RawUnsafeContinuation) { + self.context = context + } - public func resume(_: __owned T) { } + @_silgen_name("swift_continuation_resume") + public func resume(returning value: __owned T) } @frozen public struct UnsafeThrowingContinuation { - private var context: UnsafeRawPointer + @usableFromInline internal var context: Builtin.RawUnsafeContinuation + + @_alwaysEmitIntoClient + internal init(_ context: Builtin.RawUnsafeContinuation) { + self.context = context + } - public func resume(_: __owned T) { } - public func fail(_: __owned Error) { } + @_silgen_name("swift_continuation_throwingResume") + public func resume(returning: __owned T) + @_silgen_name("swift_continuation_throwingResumeWithError") + public func resume(throwing: __owned Error) } #if _runtime(_ObjC) @@ -40,30 +54,46 @@ public struct UnsafeThrowingContinuation { // Intrinsics used by SILGen to resume or fail continuations // for @_alwaysEmitIntoClient -@usableFromInline internal func _resumeUnsafeContinuation( _ continuation: UnsafeContinuation, _ value: __owned T ) { - continuation.resume(value) + continuation.resume(returning: value) } @_alwaysEmitIntoClient -@usableFromInline internal func _resumeUnsafeThrowingContinuation( _ continuation: UnsafeThrowingContinuation, _ value: __owned T ) { - continuation.resume(value) + continuation.resume(returning: value) } @_alwaysEmitIntoClient -@usableFromInline internal func _resumeUnsafeThrowingContinuationWithError( _ continuation: UnsafeThrowingContinuation, _ error: __owned Error ) { - continuation.fail(error) + continuation.resume(throwing: error) } #endif + +// Wrappers around unsafe 
continuation builtins +@_alwaysEmitIntoClient +public func withUnsafeContinuation( + _ fn: (UnsafeContinuation) -> Void +) async -> T { + return await Builtin.withUnsafeContinuation { + fn(UnsafeContinuation($0)) + } +} + +@_alwaysEmitIntoClient +public func withUnsafeThrowingContinuation( + _ fn: (UnsafeThrowingContinuation) -> Void +) async throws -> T { + return await try Builtin.withUnsafeThrowingContinuation { + fn(UnsafeThrowingContinuation($0)) + } +} diff --git a/stdlib/public/Concurrency/SwiftNativeNSObject.mm b/stdlib/public/Concurrency/SwiftNativeNSObject.mm new file mode 100644 index 0000000000000..2742f7e223068 --- /dev/null +++ b/stdlib/public/Concurrency/SwiftNativeNSObject.mm @@ -0,0 +1,70 @@ +//===--- SwiftNativeNSObject.mm - NSObject-inheriting native class --------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// +// +// Define the SwiftNativeNSObject class, which inherits from +// NSObject but uses Swift reference-counting. +// +//===----------------------------------------------------------------------===// + +#include "swift/Runtime/Config.h" + +#if SWIFT_OBJC_INTEROP +#import +#import +#include +#include +#include +#include "swift/Runtime/HeapObject.h" +#include "swift/Runtime/Metadata.h" +#include "swift/Runtime/ObjCBridge.h" + +using namespace swift; + +SWIFT_RUNTIME_STDLIB_API +@interface SwiftNativeNSObject : NSObject +{ +@private + SWIFT_HEAPOBJECT_NON_OBJC_MEMBERS; +} +@end + +@implementation SwiftNativeNSObject + ++ (id)allocWithZone: (NSZone *)zone { + // Allocate the object with swift_allocObject(). 
+ // Note that this doesn't work if called on SwiftNativeNSObject itself, + // which is not a Swift class. + auto cls = cast(reinterpret_cast(self)); + assert(cls->isTypeMetadata()); + auto result = swift_allocObject(cls, cls->getInstanceSize(), + cls->getInstanceAlignMask()); + return reinterpret_cast(result); +} + +- (id)initWithCoder: (NSCoder *)coder { + return [super init]; +} + ++ (BOOL)automaticallyNotifiesObserversForKey:(NSString *)key { + return NO; +} + +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wobjc-missing-super-calls" + +STANDARD_OBJC_METHOD_IMPLS_FOR_SWIFT_OBJECTS + +#pragma clang diagnostic pop + +@end + +#endif diff --git a/stdlib/public/Concurrency/Task.cpp b/stdlib/public/Concurrency/Task.cpp index 6570763153b23..4593bbce17045 100644 --- a/stdlib/public/Concurrency/Task.cpp +++ b/stdlib/public/Concurrency/Task.cpp @@ -20,6 +20,11 @@ #include "swift/Runtime/HeapObject.h" #include "TaskPrivate.h" +#if defined(__APPLE__) +// TODO: We shouldn't need this +#include +#endif + using namespace swift; using FutureFragment = AsyncTask::FutureFragment; @@ -303,11 +308,13 @@ AsyncTaskAndContext swift::swift_task_create_future_f( // be is the final hop. Store a signed null instead. initialContext->Parent = nullptr; initialContext->ResumeParent = &completeTask; - initialContext->ResumeParentExecutor = ExecutorRef::noPreference(); + initialContext->ResumeParentExecutor = ExecutorRef::generic(); initialContext->Flags = AsyncContextKind::Ordinary; initialContext->Flags.setShouldNotDeallocateInCallee(true); // Initialize the task-local allocator. + // TODO: consider providing an initial pre-allocated first slab to the + // allocator. _swift_task_alloc_initialize(task); return {task, initialContext}; @@ -349,9 +356,76 @@ void swift::swift_task_future_wait( // TODO: Remove this hack. 
void swift::swift_task_run(AsyncTask *taskToRun) { - taskToRun->run(ExecutorRef::noPreference()); + taskToRun->run(ExecutorRef::generic()); +} + +size_t swift::swift_task_getJobFlags(AsyncTask *task) { + return task->Flags.getOpaqueValue(); +} + +namespace { + +/// Structure that gets filled in when a task is suspended by `withUnsafeContinuation`. +struct AsyncContinuationContext { + // These fields are unnecessary for resuming a continuation. + void *Unused1; + void *Unused2; + // Storage slot for the error result, if any. + SwiftError *ErrorResult; + // Pointer to where to store a normal result. + OpaqueValue *NormalResult; + + // Executor on which to resume execution. + ExecutorRef ResumeExecutor; +}; + +static void resumeTaskAfterContinuation(AsyncTask *task, + AsyncContinuationContext *context) { +#if __APPLE__ + // TODO: Enqueue the task on the specific executor in the continuation + // context. + // + // For now, just enqueue the task resumption on the global concurrent queue + // so that we're able to return back to the caller of resume. 
+ + dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), + ^{ + task->run(context->ResumeExecutor); + }); +#else + swift_unreachable("not implemented"); +#endif +} + + +} + +SWIFT_CC(swift) +void swift::swift_continuation_resume(/* +1 */ OpaqueValue *result, + void *continuation, + const Metadata *resumeType) { + auto task = reinterpret_cast(continuation); + auto context = reinterpret_cast(task->ResumeContext); + resumeType->vw_initializeWithTake(context->NormalResult, result); + + resumeTaskAfterContinuation(task, context); } -JobFlags swift::swift_task_getJobFlags(AsyncTask *task) { - return task->Flags; +SWIFT_CC(swift) +void swift::swift_continuation_throwingResume(/* +1 */ OpaqueValue *result, + void *continuation, + const Metadata *resumeType) { + return swift_continuation_resume(result, continuation, resumeType); +} + + +SWIFT_CC(swift) +void swift::swift_continuation_throwingResumeWithError(/* +1 */ SwiftError *error, + void *continuation, + const Metadata *resumeType) { + auto task = reinterpret_cast(continuation); + auto context = reinterpret_cast(task->ResumeContext); + context->ErrorResult = error; + + resumeTaskAfterContinuation(task, context); } diff --git a/stdlib/public/Concurrency/Task.swift b/stdlib/public/Concurrency/Task.swift index 124cc96035ab6..2065ee917f82d 100644 --- a/stdlib/public/Concurrency/Task.swift +++ b/stdlib/public/Concurrency/Task.swift @@ -123,16 +123,7 @@ extension Task { /// and throwing a specific error or using `checkCancellation` the error /// thrown out of the task will be re-thrown here. public func get() async throws -> Success { - let rawResult = await taskFutureWait(on: task) - if rawResult.hadErrorResult { - // Throw the result on error. 
- throw unsafeBitCast(rawResult.storage, to: Error.self) - } - - // Take the value on success - let storagePtr = - rawResult.storage.bindMemory(to: Success.self, capacity: 1) - return UnsafeMutablePointer(mutating: storagePtr).pointee + return await try _taskFutureGetThrowing(task) } /// Attempt to cancel the task. @@ -319,6 +310,11 @@ extension Task { return Handle(task: task) } + +} + +public func _runAsyncHandler(operation: @escaping () async -> ()) { + _ = Task.runDetached(operation: operation) } // ==== Voluntary Suspension ----------------------------------------------------- @@ -349,43 +345,6 @@ extension Task { // ==== UnsafeContinuation ----------------------------------------------------- extension Task { - public struct UnsafeContinuation { - /// Return a value into the continuation and make the task schedulable. - /// - /// The task will never run synchronously, even if the task does not - /// need to be resumed on a specific executor. - /// - /// This is appropriate when the caller is something "busy", like an event - /// loop, and doesn't want to be potentially delayed by arbitrary work. - public func resume(returning: T) { - fatalError("\(#function) not implemented yet.") - } - } - - public struct UnsafeThrowingContinuation { - /// Return a value into the continuation and make the task schedulable. - /// - /// The task will never run synchronously, even if the task does not - /// need to be resumed on a specific executor. - /// - /// This is appropriate when the caller is something "busy", like an event - /// loop, and doesn't want to be potentially delayed by arbitrary work. - public func resume(returning: T) { - fatalError("\(#function) not implemented yet.") - } - - /// Resume the continuation with an error and make the task schedulable. - /// - /// The task will never run synchronously, even if the task does not - /// need to be resumed on a specific executor. 
- /// - /// This is appropriate when the caller is something "busy", like an event - /// loop, and doesn't want to be potentially delayed by arbitrary work. - public func resume(throwing: E) { - fatalError("\(#function) not implemented yet.") - } - } - /// The operation functions must resume the continuation *exactly once*. /// /// The continuation will not begin executing until the operation function returns. @@ -407,7 +366,7 @@ extension Task { /// This function returns instantly and will never suspend. /* @instantaneous */ public static func withUnsafeThrowingContinuation( - operation: (UnsafeThrowingContinuation) -> Void + operation: (UnsafeThrowingContinuation) -> Void ) async throws -> T { fatalError("\(#function) not implemented yet.") } @@ -424,12 +383,66 @@ public func runAsync(_ asyncFun: @escaping () async -> ()) { runTask(childTask.0) } -struct RawTaskFutureWaitResult { - let hadErrorResult: Bool - let storage: UnsafeRawPointer -} - @_silgen_name("swift_task_future_wait") -func taskFutureWait( +func _taskFutureWait( on task: Builtin.NativeObject -) async -> RawTaskFutureWaitResult +) async -> (hadErrorResult: Bool, storage: UnsafeRawPointer) + +public func _taskFutureGet(_ task: Builtin.NativeObject) async -> T { + let rawResult = await _taskFutureWait(on: task) + assert(!rawResult.hadErrorResult) + + // Take the value. + let storagePtr = + rawResult.storage.bindMemory(to: T.self, capacity: 1) + return UnsafeMutablePointer(mutating: storagePtr).pointee +} + +public func _taskFutureGetThrowing( + _ task: Builtin.NativeObject +) async throws -> T { + let rawResult = await _taskFutureWait(on: task) + if rawResult.hadErrorResult { + // Throw the result on error. 
+ throw unsafeBitCast(rawResult.storage, to: Error.self) + } + + // Take the value on success + let storagePtr = + rawResult.storage.bindMemory(to: T.self, capacity: 1) + return UnsafeMutablePointer(mutating: storagePtr).pointee +} + +public func _runChildTask(operation: @escaping () async throws -> T) async + -> Builtin.NativeObject +{ + let currentTask = Builtin.getCurrentAsyncTask() + + // Set up the job flags for a new task. + var flags = Task.JobFlags() + flags.kind = .task + flags.priority = getJobFlags(currentTask).priority + flags.isFuture = true + flags.isChildTask = true + + // Create the asynchronous task future. + let (task, _) = Builtin.createAsyncTaskFuture( + flags.bits, currentTask, operation) + return task +} + +#if _runtime(_ObjC) + +/// Intrinsic used by SILGen to launch a task for bridging a Swift async method +/// which was called through its ObjC-exported completion-handler-based API. +@_alwaysEmitIntoClient +@usableFromInline +internal func _runTaskForBridgedAsyncMethod(_ body: @escaping () async -> Void) { + // TODO: As a start, we should invoke Task.runDetached here, but we + // can probably do better if we're already running on behalf of a task, + // if the receiver of the method invocation is itself an Actor, or in other + // situations. + fatalError("not implemented") +} + +#endif diff --git a/stdlib/public/Concurrency/TaskAlloc.cpp b/stdlib/public/Concurrency/TaskAlloc.cpp index 37858b9312e80..4afe29167fed0 100644 --- a/stdlib/public/Concurrency/TaskAlloc.cpp +++ b/stdlib/public/Concurrency/TaskAlloc.cpp @@ -19,40 +19,27 @@ #include "TaskPrivate.h" #include "swift/Runtime/Concurrency.h" -#include "swift/Runtime/Debug.h" +#include "../runtime/StackAllocator.h" #include -#include using namespace swift; namespace { -class TaskAllocator { - // Just keep track of all allocations in a vector so that we can - // verify stack discipline. 
We should make sure the allocator - // implementation strictly verifies allocation order at least - // until we've stabilized the compiler implementation. - std::vector Allocations; +/// The size of an allocator slab. +/// +/// TODO: find the optimal value by experiment. +static constexpr size_t SlabCapacity = 1024; -public: - void *alloc(size_t size) { - void *ptr = malloc(size); - Allocations.push_back(ptr); - return ptr; - } +using TaskAllocator = StackAllocator; - void dealloc(void *ptr) { - if (Allocations.empty() || Allocations.back() != ptr) - fatalError(0, "pointer was not the last allocation on this task"); +struct GlobalAllocator { + TaskAllocator allocator; + void *spaceForFirstSlab[64]; - Allocations.pop_back(); - free(ptr); - } + GlobalAllocator() : allocator(spaceForFirstSlab, sizeof(spaceForFirstSlab)) {} }; -static_assert(sizeof(TaskAllocator) <= sizeof(AsyncTask::AllocatorPrivate), - "task allocator must fit in allocator-private slot"); - static_assert(alignof(TaskAllocator) <= alignof(decltype(AsyncTask::AllocatorPrivate)), "task allocator must not be more aligned than " "allocator-private slot"); @@ -70,8 +57,8 @@ static TaskAllocator &allocator(AsyncTask *task) { // FIXME: this fall-back shouldn't be necessary, but it's useful // for now, since the current execution tests aren't setting up a task // properly. 
- static TaskAllocator global; - return global; + static GlobalAllocator global; + return global.allocator; } void swift::_swift_task_alloc_destroy(AsyncTask *task) { diff --git a/stdlib/public/Darwin/CoreFoundation/CoreFoundation.swift b/stdlib/public/Darwin/CoreFoundation/CoreFoundation.swift index edf4ec460d24d..f9490218e15e3 100644 --- a/stdlib/public/Darwin/CoreFoundation/CoreFoundation.swift +++ b/stdlib/public/Darwin/CoreFoundation/CoreFoundation.swift @@ -12,7 +12,7 @@ @_exported import CoreFoundation -public protocol _CFObject: class, Hashable {} +public protocol _CFObject: AnyObject, Hashable {} extension _CFObject { public var hashValue: Int { return Int(bitPattern: CFHash(self)) diff --git a/stdlib/public/Differentiation/CMakeLists.txt b/stdlib/public/Differentiation/CMakeLists.txt index 1675d583880b1..f94766caf75af 100644 --- a/stdlib/public/Differentiation/CMakeLists.txt +++ b/stdlib/public/Differentiation/CMakeLists.txt @@ -18,6 +18,8 @@ add_swift_target_library(swift_Differentiation ${SWIFT_STDLIB_LIBRARY_BUILD_TYPE ArrayDifferentiation.swift OptionalDifferentiation.swift + "${SWIFT_SOURCE_DIR}/stdlib/linker-support/magic-symbols-for-install-name.c" + GYB_SOURCES FloatingPointDifferentiation.swift.gyb TgmathDerivatives.swift.gyb @@ -35,6 +37,8 @@ add_swift_target_library(swift_Differentiation ${SWIFT_STDLIB_LIBRARY_BUILD_TYPE SWIFT_MODULE_DEPENDS_WASI WASILibc SWIFT_MODULE_DEPENDS_WINDOWS CRT + C_COMPILE_FLAGS + -Dswift_Differentiation_EXPORTS SWIFT_COMPILE_FLAGS ${SWIFT_STANDARD_LIBRARY_SWIFT_FLAGS} -parse-stdlib diff --git a/stdlib/public/SwiftShims/CMakeLists.txt b/stdlib/public/SwiftShims/CMakeLists.txt index 33c49a3f8861e..926020372fb2e 100644 --- a/stdlib/public/SwiftShims/CMakeLists.txt +++ b/stdlib/public/SwiftShims/CMakeLists.txt @@ -10,6 +10,7 @@ set(sources MetadataSections.h Random.h RefCount.h + Reflection.h RuntimeShims.h RuntimeStubs.h SwiftStdbool.h diff --git a/stdlib/public/SwiftShims/Reflection.h 
b/stdlib/public/SwiftShims/Reflection.h new file mode 100644 index 0000000000000..143a630412f6a --- /dev/null +++ b/stdlib/public/SwiftShims/Reflection.h @@ -0,0 +1,36 @@ +//===--- Reflection.h - Types for access to reflection metadata. ----------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +#ifndef SWIFT_STDLIB_SHIMS_REFLECTION_H +#define SWIFT_STDLIB_SHIMS_REFLECTION_H + +#include "SwiftStdbool.h" +#include "SwiftStdint.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef void (*NameFreeFunc)(const char*); + +typedef struct _FieldReflectionMetadata { + const char* name; + NameFreeFunc freeFunc; + __swift_bool isStrong; + __swift_bool isVar; +} _FieldReflectionMetadata; + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // SWIFT_STDLIB_SHIMS_REFLECTION_H diff --git a/stdlib/public/SwiftShims/Visibility.h b/stdlib/public/SwiftShims/Visibility.h index 4ddd34a12eaf7..deeec91d27dbd 100644 --- a/stdlib/public/SwiftShims/Visibility.h +++ b/stdlib/public/SwiftShims/Visibility.h @@ -178,6 +178,11 @@ #else #define SWIFT_IMAGE_EXPORTS_swift_Concurrency 0 #endif +#if defined(swift_Differentiation_EXPORTS) +#define SWIFT_IMAGE_EXPORTS_swift_Differentiation 1 +#else +#define SWIFT_IMAGE_EXPORTS_swift_Differentiation 0 +#endif #define SWIFT_EXPORT_FROM_ATTRIBUTE(LIBRARY) \ SWIFT_MACRO_IF(SWIFT_IMAGE_EXPORTS_##LIBRARY, \ diff --git a/stdlib/public/SwiftShims/module.modulemap b/stdlib/public/SwiftShims/module.modulemap index 8ba9b93a8f9af..de62f9c36e91c 100644 --- a/stdlib/public/SwiftShims/module.modulemap +++ b/stdlib/public/SwiftShims/module.modulemap @@ -9,6 
+9,7 @@ module SwiftShims { header "MetadataSections.h" header "Random.h" header "RefCount.h" + header "Reflection.h" header "RuntimeShims.h" header "RuntimeStubs.h" header "SwiftStdbool.h" diff --git a/stdlib/public/Windows/WinSDK.swift b/stdlib/public/Windows/WinSDK.swift index 32eb9beb1e8cd..a1b482c4006fa 100644 --- a/stdlib/public/Windows/WinSDK.swift +++ b/stdlib/public/Windows/WinSDK.swift @@ -46,11 +46,11 @@ public let INVALID_SOCKET: SOCKET = SOCKET(bitPattern: -1) public let FIONBIO: Int32 = Int32(bitPattern: 0x8004667e) // WinUser.h -public let CW_USEDEFAULT: Int32 = Int32(truncatingIfNeeded: 2147483648) +public let CW_USEDEFAULT: Int32 = Int32(bitPattern: 2147483648) public let WS_OVERLAPPEDWINDOW: UINT = UINT(WS_OVERLAPPED | WS_CAPTION | WS_SYSMENU | WS_THICKFRAME | WS_MINIMIZEBOX | WS_MAXIMIZEBOX) public let WS_POPUPWINDOW: UINT = - UINT(Int32(WS_POPUP) | WS_BORDER | WS_SYSMENU) + UINT(numericCast(WS_POPUP) | WS_BORDER | WS_SYSMENU) // fileapi.h public let INVALID_FILE_ATTRIBUTES: DWORD = DWORD(bitPattern: -1) @@ -91,16 +91,17 @@ public let DPI_AWARENESS_CONTEXT_UNAWARE_GDISCALED: DPI_AWARENESS_CONTEXT = DPI_AWARENESS_CONTEXT(bitPattern: -5)! // winreg.h -public let HKEY_CLASSES_ROOT: HKEY = HKEY(bitPattern: 0x80000000)! -public let HKEY_CURRENT_USER: HKEY = HKEY(bitPattern: 0x80000001)! -public let HKEY_LOCAL_MACHINE: HKEY = HKEY(bitPattern: 0x80000002)! -public let HKEY_USERS: HKEY = HKEY(bitPattern: 0x80000003)! -public let HKEY_PERFORMANCE_DATA: HKEY = HKEY(bitPattern: 0x80000004)! -public let HKEY_PERFORMANCE_TEXT: HKEY = HKEY(bitPattern: 0x80000050)! -public let HKEY_PERFORMANCE_NLSTEXT: HKEY = HKEY(bitPattern: 0x80000060)! -public let HKEY_CURRENT_CONFIG: HKEY = HKEY(bitPattern: 0x80000005)! -public let HKEY_DYN_DATA: HKEY = HKEY(bitPattern: 0x80000006)! -public let HKEY_CURRENT_USER_LOCAL_SETTINGS: HKEY = HKEY(bitPattern: 0x80000007)! +public let HKEY_CLASSES_ROOT: HKEY = HKEY(bitPattern: UInt(0x80000000))! 
+public let HKEY_CURRENT_USER: HKEY = HKEY(bitPattern: UInt(0x80000001))! +public let HKEY_LOCAL_MACHINE: HKEY = HKEY(bitPattern: UInt(0x80000002))! +public let HKEY_USERS: HKEY = HKEY(bitPattern: UInt(0x80000003))! +public let HKEY_PERFORMANCE_DATA: HKEY = HKEY(bitPattern: UInt(0x80000004))! +public let HKEY_PERFORMANCE_TEXT: HKEY = HKEY(bitPattern: UInt(0x80000050))! +public let HKEY_PERFORMANCE_NLSTEXT: HKEY = HKEY(bitPattern: UInt(0x80000060))! +public let HKEY_CURRENT_CONFIG: HKEY = HKEY(bitPattern: UInt(0x80000005))! +public let HKEY_DYN_DATA: HKEY = HKEY(bitPattern: UInt(0x80000006))! +public let HKEY_CURRENT_USER_LOCAL_SETTINGS: HKEY = + HKEY(bitPattern: UInt(0x80000007))! // Richedit.h public let MSFTEDIT_CLASS: [WCHAR] = Array("RICHEDIT50W".utf16) diff --git a/stdlib/public/core/BridgeObjectiveC.swift b/stdlib/public/core/BridgeObjectiveC.swift index 07f4c6c1cdc32..1a10c0bae9b77 100644 --- a/stdlib/public/core/BridgeObjectiveC.swift +++ b/stdlib/public/core/BridgeObjectiveC.swift @@ -681,7 +681,7 @@ public func _conditionallyBridgeFromObjectiveC_bridgeable Index { var lo = startIndex var hi = endIndex + while true { + // Invariants at this point: + // + // * `lo <= hi` + // * all elements in `startIndex ..< lo` belong in the first partition + // * all elements in `hi ..< endIndex` belong in the second partition - // 'Loop' invariants (at start of Loop, all are true): - // * lo < hi - // * predicate(self[i]) == false, for i in startIndex ..< lo - // * predicate(self[i]) == true, for i in hi ..< endIndex - - Loop: while true { - FindLo: repeat { - while lo < hi { - if try belongsInSecondPartition(self[lo]) { break FindLo } - formIndex(after: &lo) - } - break Loop - } while false + // Find next element from `lo` that may not be in the right place. 
+ while true { + guard lo < hi else { return lo } + if try belongsInSecondPartition(self[lo]) { break } + formIndex(after: &lo) + } - FindHi: repeat { + // Find next element down from `hi` that we can swap `lo` with. + while true { formIndex(before: &hi) - while lo < hi { - if try !belongsInSecondPartition(self[hi]) { break FindHi } - formIndex(before: &hi) - } - break Loop - } while false + guard lo < hi else { return lo } + if try !belongsInSecondPartition(self[hi]) { break } + } swapAt(lo, hi) formIndex(after: &lo) } - - return lo } } diff --git a/stdlib/public/core/ExistentialCollection.swift b/stdlib/public/core/ExistentialCollection.swift index 445dd271c69ed..d22174940e1c5 100644 --- a/stdlib/public/core/ExistentialCollection.swift +++ b/stdlib/public/core/ExistentialCollection.swift @@ -1641,7 +1641,7 @@ extension AnyRandomAccessCollection { //===----------------------------------------------------------------------===// @usableFromInline -internal protocol _AnyIndexBox: class { +internal protocol _AnyIndexBox: AnyObject { var _typeID: ObjectIdentifier { get } func _unbox() -> T? diff --git a/stdlib/public/core/FloatingPoint.swift b/stdlib/public/core/FloatingPoint.swift index d4de80b8e5c5b..ba4a2325b3ca1 100644 --- a/stdlib/public/core/FloatingPoint.swift +++ b/stdlib/public/core/FloatingPoint.swift @@ -1894,9 +1894,9 @@ extension BinaryFloatingPoint { // count and significand bit count, then they must share the same encoding // for finite and infinite values. switch (Source.exponentBitCount, Source.significandBitCount) { -#if !os(macOS) && !(os(iOS) && targetEnvironment(macCatalyst)) +#if !((os(macOS) || targetEnvironment(macCatalyst)) && arch(x86_64)) case (5, 10): - guard #available(iOS 14.0, watchOS 7.0, tvOS 14.0, *) else { + guard #available(macOS 11.0, iOS 14.0, watchOS 7.0, tvOS 14.0, *) else { // Convert signaling NaN to quiet NaN by multiplying by 1. 
self = Self._convert(from: value).value * 1 break diff --git a/stdlib/public/core/FloatingPointParsing.swift.gyb b/stdlib/public/core/FloatingPointParsing.swift.gyb index a3c4b1b883bfd..2779b1c49da17 100644 --- a/stdlib/public/core/FloatingPointParsing.swift.gyb +++ b/stdlib/public/core/FloatingPointParsing.swift.gyb @@ -43,7 +43,7 @@ internal func _isspace_clocale(_ u: UTF16.CodeUnit) -> Bool { % if bits == 80: #if !(os(Windows) || os(Android)) && (arch(i386) || arch(x86_64)) % elif bits == 16: -#if !(os(macOS) && arch(x86_64)) +#if !((os(macOS) || targetEnvironment(macCatalyst)) && arch(x86_64)) % end %if bits == 16: diff --git a/stdlib/public/core/FloatingPointTypes.swift.gyb b/stdlib/public/core/FloatingPointTypes.swift.gyb index ac7ec998e675d..1e71b1bb4fac0 100644 --- a/stdlib/public/core/FloatingPointTypes.swift.gyb +++ b/stdlib/public/core/FloatingPointTypes.swift.gyb @@ -66,7 +66,7 @@ else: % if bits == 80: #if !(os(Windows) || os(Android)) && (arch(i386) || arch(x86_64)) % elif bits == 16: -#if !(os(macOS) && arch(x86_64)) +#if !((os(macOS) || targetEnvironment(macCatalyst)) && arch(x86_64)) % end ${SelfDocComment} @@ -1111,7 +1111,7 @@ extension ${Self} { % if srcBits == 80: #if !(os(Windows) || os(Android)) && (arch(i386) || arch(x86_64)) % elif srcBits == 16: -#if !(os(macOS) && arch(x86_64)) +#if !((os(macOS) || targetEnvironment(macCatalyst)) && arch(x86_64)) % end % if srcBits == bits: @@ -1352,7 +1352,13 @@ internal struct _${Self}AnyHashableBox: _AnyHashableBox { ${SelfDocComment} @frozen +% if bits == 16: +@available(iOS 14, tvOS 14, watchOS 7, *) +@available(macOS, unavailable) +@available(macCatalyst, unavailable) +% else: @available(*, unavailable, message: "${Self} is not available on target platform.") +% end public struct ${Self} { /// Creates a value initialized to zero. 
@_transparent diff --git a/stdlib/public/core/IntegerTypes.swift.gyb b/stdlib/public/core/IntegerTypes.swift.gyb index 4b4c6c7cbe716..478ef2bd1141d 100644 --- a/stdlib/public/core/IntegerTypes.swift.gyb +++ b/stdlib/public/core/IntegerTypes.swift.gyb @@ -1130,7 +1130,7 @@ public struct ${Self} % if FloatType == 'Float80': #if !(os(Windows) || os(Android)) && (arch(i386) || arch(x86_64)) % elif FloatType == 'Float16': -#if !(os(macOS) && arch(x86_64)) +#if !((os(macOS) || targetEnvironment(macCatalyst)) && arch(x86_64)) % end /// Creates an integer from the given floating-point value, rounding toward diff --git a/stdlib/public/core/KeyPath.swift b/stdlib/public/core/KeyPath.swift index 7c4c0645793af..3d58ef848e69b 100644 --- a/stdlib/public/core/KeyPath.swift +++ b/stdlib/public/core/KeyPath.swift @@ -1739,6 +1739,40 @@ internal struct KeyPathBuffer { return UnsafeMutableRawBufferPointer(mutating: data) } + internal struct Builder { + internal var buffer: UnsafeMutableRawBufferPointer + internal init(_ buffer: UnsafeMutableRawBufferPointer) { + self.buffer = buffer + } + internal mutating func pushRaw(size: Int, alignment: Int) + -> UnsafeMutableRawBufferPointer { + var baseAddress = buffer.baseAddress.unsafelyUnwrapped + var misalign = Int(bitPattern: baseAddress) % alignment + if misalign != 0 { + misalign = alignment - misalign + baseAddress = baseAddress.advanced(by: misalign) + } + let result = UnsafeMutableRawBufferPointer( + start: baseAddress, + count: size) + buffer = UnsafeMutableRawBufferPointer( + start: baseAddress + size, + count: buffer.count - size - misalign) + return result + } + internal mutating func push(_ value: T) { + let buf = pushRaw(size: MemoryLayout.size, + alignment: MemoryLayout.alignment) + buf.storeBytes(of: value, as: T.self) + } + internal mutating func pushHeader(_ header: Header) { + push(header) + // Start the components at pointer alignment + _ = pushRaw(size: RawKeyPathComponent.Header.pointerAlignmentSkew, + alignment: 4) + } 
+ } + internal struct Header { internal var _value: UInt32 @@ -2286,40 +2320,16 @@ internal func _appendingKeyPaths< count: resultSize) } - func pushRaw(size: Int, alignment: Int) - -> UnsafeMutableRawBufferPointer { - var baseAddress = destBuffer.baseAddress.unsafelyUnwrapped - var misalign = Int(bitPattern: baseAddress) % alignment - if misalign != 0 { - misalign = alignment - misalign - baseAddress = baseAddress.advanced(by: misalign) - } - let result = UnsafeMutableRawBufferPointer( - start: baseAddress, - count: size) - destBuffer = UnsafeMutableRawBufferPointer( - start: baseAddress + size, - count: destBuffer.count - size - misalign) - return result - } - func push(_ value: T) { - let buf = pushRaw(size: MemoryLayout.size, - alignment: MemoryLayout.alignment) - buf.storeBytes(of: value, as: T.self) - } + var destBuilder = KeyPathBuffer.Builder(destBuffer) // Save space for the header. let leafIsReferenceWritable = type(of: leaf).kind == .reference - let header = KeyPathBuffer.Header( + destBuilder.pushHeader(KeyPathBuffer.Header( size: resultSize - MemoryLayout.size, trivial: rootBuffer.trivial && leafBuffer.trivial, hasReferencePrefix: rootBuffer.hasReferencePrefix || leafIsReferenceWritable - ) - push(header) - // Start the components at pointer alignment - _ = pushRaw(size: RawKeyPathComponent.Header.pointerAlignmentSkew, - alignment: 4) + )) let leafHasReferencePrefix = leafBuffer.hasReferencePrefix @@ -2340,13 +2350,13 @@ internal func _appendingKeyPaths< } component.clone( - into: &destBuffer, + into: &destBuilder.buffer, endOfReferencePrefix: endOfReferencePrefix) + // Insert our endpoint type between the root and leaf components. if let type = type { - push(type) + destBuilder.push(type) } else { - // Insert our endpoint type between the root and leaf components. 
- push(Value.self as Any.Type) + destBuilder.push(Value.self as Any.Type) break } } @@ -2356,17 +2366,17 @@ internal func _appendingKeyPaths< let (component, type) = leafBuffer.next() component.clone( - into: &destBuffer, + into: &destBuilder.buffer, endOfReferencePrefix: component.header.endOfReferencePrefix) if let type = type { - push(type) + destBuilder.push(type) } else { break } } - _internalInvariant(destBuffer.isEmpty, + _internalInvariant(destBuilder.buffer.isEmpty, "did not fill entire result buffer") } diff --git a/stdlib/public/core/ReflectionMirror.swift b/stdlib/public/core/ReflectionMirror.swift index 8ed481582570d..84261c76d8065 100644 --- a/stdlib/public/core/ReflectionMirror.swift +++ b/stdlib/public/core/ReflectionMirror.swift @@ -10,6 +10,8 @@ // //===----------------------------------------------------------------------===// +import SwiftShims + @_silgen_name("swift_isClassType") internal func _isClassType(_: Any.Type) -> Bool @@ -29,8 +31,7 @@ internal func _getRecursiveChildCount(_: Any.Type) -> Int internal func _getChildMetadata( _: Any.Type, index: Int, - outName: UnsafeMutablePointer?>, - outFreeFunc: UnsafeMutablePointer + fieldMetadata: UnsafeMutablePointer<_FieldReflectionMetadata> ) -> Any.Type @_silgen_name("swift_reflectionMirror_recursiveChildOffset") @@ -281,14 +282,91 @@ public func _forEachField( for i in 0..? = nil - var freeFunc: NameFreeFunc? = nil - let childType = _getChildMetadata( - type, index: i, outName: &nameC, outFreeFunc: &freeFunc) - defer { freeFunc?(nameC) } + var field = _FieldReflectionMetadata() + let childType = _getChildMetadata(type, index: i, fieldMetadata: &field) + defer { field.freeFunc?(field.name) } + let kind = _MetadataKind(childType) + + if !body(field.name!, offset, childType, kind) { + return false + } + } + + return true +} + +/// Calls the given closure on every field of the specified type. +/// +/// If `body` returns `false` for any field, no additional fields are visited. 
+/// +/// - Parameters: +/// - type: The type to inspect. +/// - options: Options to use when reflecting over `type`. +/// - body: A closure to call with information about each field in `type`. +/// The parameters to `body` are a pointer to a C string holding the name +/// of the field, the offset of the field in bytes, the type of the field, +/// and the `_MetadataKind` of the field's type. +/// - Returns: `true` if every invocation of `body` returns `true`; otherwise, +/// `false`. +@available(macOS 9999, iOS 9999, tvOS 9999, watchOS 9999, *) +@discardableResult +@_spi(Reflection) +public func _forEachFieldWithKeyPath( + of type: Root.Type, + options: _EachFieldOptions = [], + body: (UnsafePointer, PartialKeyPath) -> Bool +) -> Bool { + // Class types not supported because the metadata does not have + // enough information to construct computed properties. + if _isClassType(type) || options.contains(.classType) { + return false + } + let ignoreUnknown = options.contains(.ignoreUnknown) + + let childCount = _getRecursiveChildCount(type) + for i in 0..(for: Leaf.Type) -> PartialKeyPath.Type { + if field.isVar { return WritableKeyPath.self } + return KeyPath.self + } + let resultSize = MemoryLayout.size + MemoryLayout.size + let partialKeyPath = _openExistential(childType, do: keyPathType) + ._create(capacityInBytes: resultSize) { + var destBuilder = KeyPathBuffer.Builder($0) + destBuilder.pushHeader(KeyPathBuffer.Header( + size: resultSize - MemoryLayout.size, + trivial: true, + hasReferencePrefix: false + )) + let component = RawKeyPathComponent( + header: RawKeyPathComponent.Header(stored: .struct, + mutable: field.isVar, + inlineOffset: UInt32(offset)), + body: UnsafeRawBufferPointer(start: nil, count: 0)) + component.clone( + into: &destBuilder.buffer, + endOfReferencePrefix: false) + } - if !body(nameC!, offset, childType, kind) { + if !body(field.name!, partialKeyPath) { return false } } diff --git a/stdlib/public/core/Runtime.swift 
b/stdlib/public/core/Runtime.swift index c1e72cdd0eae2..7df18bd78a232 100644 --- a/stdlib/public/core/Runtime.swift +++ b/stdlib/public/core/Runtime.swift @@ -293,7 +293,7 @@ internal struct _Buffer72 { } } -#if !(os(macOS) && arch(x86_64)) +#if !((os(macOS) || targetEnvironment(macCatalyst)) && arch(x86_64)) // Note that this takes a Float32 argument instead of Float16, because clang // doesn't have _Float16 on all platforms yet. @_silgen_name("swift_float16ToString") diff --git a/stdlib/public/core/SIMDVectorTypes.swift.gyb b/stdlib/public/core/SIMDVectorTypes.swift.gyb index 0002f8cdcf293..238984452630a 100644 --- a/stdlib/public/core/SIMDVectorTypes.swift.gyb +++ b/stdlib/public/core/SIMDVectorTypes.swift.gyb @@ -254,7 +254,7 @@ extension ${Self}: SIMDScalar { %for (Self, bits) in [('Float16',16), ('Float',32), ('Double',64)]: % if bits == 16: -#if !(os(macOS) && arch(x86_64)) +#if !((os(macOS) || targetEnvironment(macCatalyst)) && arch(x86_64)) @available(macOS 11.0, iOS 14.0, watchOS 7.0, tvOS 14.0, *) % end extension ${Self} : SIMDScalar { diff --git a/stdlib/public/core/UnsafeBufferPointer.swift.gyb b/stdlib/public/core/UnsafeBufferPointer.swift.gyb index a0242df85313b..ee85c9de54d33 100644 --- a/stdlib/public/core/UnsafeBufferPointer.swift.gyb +++ b/stdlib/public/core/UnsafeBufferPointer.swift.gyb @@ -499,7 +499,8 @@ extension Unsafe${Mutable}BufferPointer { @inlinable // unsafe-performance public init(rebasing slice: Slice>) { let base = slice.base.baseAddress?.advanced(by: slice.startIndex) - self.init(start: base, count: slice.count) + let count = slice.endIndex &- slice.startIndex + self.init(start: base, count: count) } % end @@ -526,7 +527,8 @@ extension Unsafe${Mutable}BufferPointer { @inlinable // unsafe-performance public init(rebasing slice: Slice>) { let base = slice.base.baseAddress?.advanced(by: slice.startIndex) - self.init(start: base, count: slice.count) + let count = slice.endIndex &- slice.startIndex + self.init(start: base, count: count) 
} /// Deallocates the memory block previously allocated at this buffer pointer’s diff --git a/stdlib/public/core/UnsafeRawBufferPointer.swift.gyb b/stdlib/public/core/UnsafeRawBufferPointer.swift.gyb index 453791f2c60d3..1a7508ccf50a3 100644 --- a/stdlib/public/core/UnsafeRawBufferPointer.swift.gyb +++ b/stdlib/public/core/UnsafeRawBufferPointer.swift.gyb @@ -512,7 +512,8 @@ extension Unsafe${Mutable}RawBufferPointer { @inlinable public init(rebasing slice: Slice) { let base = slice.base.baseAddress?.advanced(by: slice.startIndex) - self.init(start: base, count: slice.count) + let count = slice.endIndex &- slice.startIndex + self.init(start: base, count: count) } % end # !mutable @@ -539,7 +540,8 @@ extension Unsafe${Mutable}RawBufferPointer { @inlinable public init(rebasing slice: Slice) { let base = slice.base.baseAddress?.advanced(by: slice.startIndex) - self.init(start: base, count: slice.count) + let count = slice.endIndex &- slice.startIndex + self.init(start: base, count: count) } /// A pointer to the first byte of the buffer. diff --git a/stdlib/public/runtime/AutoDiffSupport.cpp b/stdlib/public/runtime/AutoDiffSupport.cpp new file mode 100644 index 0000000000000..467ae2b3e37ce --- /dev/null +++ b/stdlib/public/runtime/AutoDiffSupport.cpp @@ -0,0 +1,73 @@ +//===--- AutoDiffSupport.cpp ----------------------------------*- C++ -*---===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2019 - 2020 Apple Inc. 
and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +#include "AutoDiffSupport.h" +#include "swift/ABI/Metadata.h" +#include "swift/Runtime/HeapObject.h" + +using namespace swift; +using namespace llvm; + +SWIFT_CC(swift) +static void destroyLinearMapContext(SWIFT_CONTEXT HeapObject *obj) { + static_cast(obj)->~AutoDiffLinearMapContext(); + free(obj); +} + +/// Heap metadata for a linear map context. +static FullMetadata linearMapContextHeapMetadata = { + { + { + &destroyLinearMapContext + }, + { + /*value witness table*/ nullptr + } + }, + { + MetadataKind::Opaque + } +}; + +AutoDiffLinearMapContext::AutoDiffLinearMapContext() + : HeapObject(&linearMapContextHeapMetadata) { +} + +void *AutoDiffLinearMapContext::projectTopLevelSubcontext() const { + auto offset = alignTo( + sizeof(AutoDiffLinearMapContext), alignof(AutoDiffLinearMapContext)); + return const_cast( + reinterpret_cast(this) + offset); +} + +void *AutoDiffLinearMapContext::allocate(size_t size) { + return allocator.Allocate(size, alignof(AutoDiffLinearMapContext)); +} + +AutoDiffLinearMapContext *swift::swift_autoDiffCreateLinearMapContext( + size_t topLevelLinearMapStructSize) { + auto allocationSize = alignTo( + sizeof(AutoDiffLinearMapContext), alignof(AutoDiffLinearMapContext)) + + topLevelLinearMapStructSize; + auto *buffer = (AutoDiffLinearMapContext *)malloc(allocationSize); + return new (buffer) AutoDiffLinearMapContext; +} + +void *swift::swift_autoDiffProjectTopLevelSubcontext( + AutoDiffLinearMapContext *allocator) { + return allocator->projectTopLevelSubcontext(); +} + +void *swift::swift_autoDiffAllocateSubcontext( + AutoDiffLinearMapContext *allocator, size_t size) { + return allocator->allocate(size); +} diff 
--git a/stdlib/public/runtime/AutoDiffSupport.h b/stdlib/public/runtime/AutoDiffSupport.h new file mode 100644 index 0000000000000..7df152779e5ee --- /dev/null +++ b/stdlib/public/runtime/AutoDiffSupport.h @@ -0,0 +1,56 @@ +//===--- AutoDiffSupport.h ------------------------------------*- C++ -*---===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2019 - 2020 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +#ifndef SWIFT_RUNTIME_AUTODIFF_SUPPORT_H +#define SWIFT_RUNTIME_AUTODIFF_SUPPORT_H + +#include "swift/Runtime/HeapObject.h" +#include "swift/Runtime/Config.h" +#include "llvm/Support/Allocator.h" + +namespace swift { + +/// A data structure responsible for efficiently allocating closure contexts for +/// linear maps such as pullbacks, including rescursive branching trace enum +/// case payloads. +class AutoDiffLinearMapContext : public HeapObject { +private: + /// The underlying allocator. + // TODO: Use a custom allocator so that the initial slab can be + // tail-allocated. + llvm::BumpPtrAllocator allocator; + +public: + /// Creates a linear map context. + AutoDiffLinearMapContext(); + /// Returns the address of the tail-allocated top-level subcontext. + void *projectTopLevelSubcontext() const; + /// Allocates memory for a new subcontext. + void *allocate(size_t size); +}; + +/// Creates a linear map context with a tail-allocated top-level subcontext. +SWIFT_EXPORT_FROM(swift_Differentiation) SWIFT_CC(swift) +AutoDiffLinearMapContext *swift_autoDiffCreateLinearMapContext( + size_t topLevelSubcontextSize); + +/// Returns the address of the tail-allocated top-level subcontext. 
+SWIFT_EXPORT_FROM(swift_Differentiation) SWIFT_CC(swift) +void *swift_autoDiffProjectTopLevelSubcontext(AutoDiffLinearMapContext *); + +/// Allocates memory for a new subcontext. +SWIFT_EXPORT_FROM(swift_Differentiation) SWIFT_CC(swift) +void *swift_autoDiffAllocateSubcontext(AutoDiffLinearMapContext *, size_t size); + +} + +#endif /* SWIFT_RUNTIME_AUTODIFF_SUPPORT_H */ diff --git a/stdlib/public/runtime/CMakeLists.txt b/stdlib/public/runtime/CMakeLists.txt index 69cb3999f4e2c..7d0af072fc26f 100644 --- a/stdlib/public/runtime/CMakeLists.txt +++ b/stdlib/public/runtime/CMakeLists.txt @@ -28,6 +28,7 @@ set(swift_runtime_objc_sources set(swift_runtime_sources AnyHashableSupport.cpp Array.cpp + AutoDiffSupport.cpp BackDeployment.cpp Casting.cpp CompatibilityOverride.cpp diff --git a/stdlib/public/runtime/CompatibilityOverride.cpp b/stdlib/public/runtime/CompatibilityOverride.cpp index 000ff0dff2600..6cb58689a356b 100644 --- a/stdlib/public/runtime/CompatibilityOverride.cpp +++ b/stdlib/public/runtime/CompatibilityOverride.cpp @@ -29,7 +29,7 @@ using namespace swift; /// The definition of the contents of the override section. /// /// The runtime looks in the main executable (not any libraries!) for a -/// __swift53_hooks section and uses the hooks defined therein. This struct +/// __swift54_hooks section and uses the hooks defined therein. This struct /// defines the layout of that section. These hooks allow extending /// runtime functionality when running apps built with a more recent /// compiler. 
If additional hooks are needed, they may be added at the @@ -54,7 +54,7 @@ static OverrideSection *getOverrideSectionPtr() { swift_once(&Predicate, [](void *) { size_t Size; OverrideSectionPtr = static_cast( - lookupSection("__DATA", "__swift53_hooks", &Size)); + lookupSection("__DATA", "__swift54_hooks", &Size)); if (Size < sizeof(OverrideSection)) OverrideSectionPtr = nullptr; }, nullptr); diff --git a/stdlib/public/runtime/Metadata.cpp b/stdlib/public/runtime/Metadata.cpp index 6364f02c00936..3ce339fa85630 100644 --- a/stdlib/public/runtime/Metadata.cpp +++ b/stdlib/public/runtime/Metadata.cpp @@ -28,6 +28,7 @@ #include "swift/Runtime/Mutex.h" #include "swift/Runtime/Once.h" #include "swift/Strings.h" +#include "llvm/ADT/StringExtras.h" #include #include #include @@ -5805,30 +5806,6 @@ bool swift::_swift_debug_metadataAllocationIterationEnabled = false; const void * const swift::_swift_debug_allocationPoolPointer = &AllocationPool; std::atomic swift::_swift_debug_metadataAllocationBacktraceList; -static void checkAllocatorDebugEnvironmentVariable(void *context) { - _swift_debug_metadataAllocationIterationEnabled - = runtime::environment::SWIFT_DEBUG_ENABLE_METADATA_ALLOCATION_ITERATION(); - if (!_swift_debug_metadataAllocationIterationEnabled) { - if (runtime::environment::SWIFT_DEBUG_ENABLE_METADATA_BACKTRACE_LOGGING()) - swift::warning(RuntimeErrorFlagNone, - "Warning: SWIFT_DEBUG_ENABLE_METADATA_BACKTRACE_LOGGING " - "without SWIFT_DEBUG_ENABLE_METADATA_ALLOCATION_ITERATION " - "has no effect.\n"); - return; - } - - // Write a PoolTrailer to the end of InitialAllocationPool and shrink - // the pool accordingly. 
- auto poolCopy = AllocationPool.load(std::memory_order_relaxed); - assert(poolCopy.Begin == InitialAllocationPool.Pool); - size_t newPoolSize = InitialPoolSize - sizeof(PoolTrailer); - PoolTrailer trailer = { nullptr, newPoolSize }; - memcpy(InitialAllocationPool.Pool + newPoolSize, &trailer, - sizeof(trailer)); - poolCopy.Remaining = newPoolSize; - AllocationPool.store(poolCopy, std::memory_order_relaxed); -} - static void recordBacktrace(void *allocation) { withCurrentBacktrace([&](void **addrs, int count) { MetadataAllocationBacktraceHeader *record = @@ -5847,28 +5824,74 @@ static void recordBacktrace(void *allocation) { }); } -template -static inline void memsetScribble(Pointee *bytes, size_t totalSize) { +static inline bool scribbleEnabled() { #ifndef NDEBUG // When DEBUG is defined, always scribble. - memset(bytes, 0xAA, totalSize); + return true; #else // When DEBUG is not defined, only scribble when the // SWIFT_DEBUG_ENABLE_MALLOC_SCRIBBLE environment variable is set. - if (SWIFT_UNLIKELY( - runtime::environment::SWIFT_DEBUG_ENABLE_MALLOC_SCRIBBLE())) { - memset(bytes, 0xAA, totalSize); - } + return SWIFT_UNLIKELY( + runtime::environment::SWIFT_DEBUG_ENABLE_MALLOC_SCRIBBLE()); #endif } +static constexpr char scribbleByte = 0xAA; + +template +static inline void memsetScribble(Pointee *bytes, size_t totalSize) { + if (scribbleEnabled()) + memset(bytes, scribbleByte, totalSize); +} + +/// When scribbling is enabled, check the specified region for the scribble +/// values to detect overflows. When scribbling is disabled, this is a no-op. 
+static inline void checkScribble(char *bytes, size_t totalSize) { + if (scribbleEnabled()) + for (size_t i = 0; i < totalSize; i++) + if (bytes[i] != scribbleByte) { + const size_t maxToPrint = 16; + size_t remaining = totalSize - i; + size_t toPrint = std::min(remaining, maxToPrint); + std::string hex = toHex(llvm::StringRef{&bytes[i], toPrint}); + swift::fatalError( + 0, "corrupt metadata allocation arena detected at %p: %s%s", + &bytes[i], hex.c_str(), toPrint < remaining ? "..." : ""); + } +} + +static void checkAllocatorDebugEnvironmentVariables(void *context) { + memsetScribble(InitialAllocationPool.Pool, InitialPoolSize); + + _swift_debug_metadataAllocationIterationEnabled = + runtime::environment::SWIFT_DEBUG_ENABLE_METADATA_ALLOCATION_ITERATION(); + if (!_swift_debug_metadataAllocationIterationEnabled) { + if (runtime::environment::SWIFT_DEBUG_ENABLE_METADATA_BACKTRACE_LOGGING()) + swift::warning(RuntimeErrorFlagNone, + "Warning: SWIFT_DEBUG_ENABLE_METADATA_BACKTRACE_LOGGING " + "without SWIFT_DEBUG_ENABLE_METADATA_ALLOCATION_ITERATION " + "has no effect.\n"); + return; + } + + // Write a PoolTrailer to the end of InitialAllocationPool and shrink + // the pool accordingly. 
+ auto poolCopy = AllocationPool.load(std::memory_order_relaxed); + assert(poolCopy.Begin == InitialAllocationPool.Pool); + size_t newPoolSize = InitialPoolSize - sizeof(PoolTrailer); + PoolTrailer trailer = {nullptr, newPoolSize}; + memcpy(InitialAllocationPool.Pool + newPoolSize, &trailer, sizeof(trailer)); + poolCopy.Remaining = newPoolSize; + AllocationPool.store(poolCopy, std::memory_order_relaxed); +} + void *MetadataAllocator::Allocate(size_t size, size_t alignment) { assert(Tag != 0); assert(alignment <= alignof(void*)); assert(size % alignof(void*) == 0); static OnceToken_t getenvToken; - SWIFT_ONCE_F(getenvToken, checkAllocatorDebugEnvironmentVariable, nullptr); + SWIFT_ONCE_F(getenvToken, checkAllocatorDebugEnvironmentVariables, nullptr); // If the size is larger than the maximum, just use malloc. if (size > PoolRange::MaxPoolAllocationSize) { @@ -5899,6 +5922,7 @@ void *MetadataAllocator::Allocate(size_t size, size_t alignment) { poolSize -= sizeof(PoolTrailer); allocatedNewPage = true; allocation = new char[PoolRange::PageSize]; + memsetScribble(allocation, PoolRange::PageSize); if (SWIFT_UNLIKELY(_swift_debug_metadataAllocationIterationEnabled)) { PoolTrailer *newTrailer = (PoolTrailer *)(allocation + poolSize); @@ -5919,7 +5943,6 @@ void *MetadataAllocator::Allocate(size_t size, size_t alignment) { // If that succeeded, we've successfully allocated. 
__msan_allocated_memory(allocation, sizeWithHeader); __asan_unpoison_memory_region(allocation, sizeWithHeader); - memsetScribble(allocation, sizeWithHeader); if (SWIFT_UNLIKELY(_swift_debug_metadataAllocationIterationEnabled)) { AllocationHeader *header = (AllocationHeader *)allocation; @@ -5932,8 +5955,10 @@ void *MetadataAllocator::Allocate(size_t size, size_t alignment) { SWIFT_DEBUG_ENABLE_METADATA_BACKTRACE_LOGGING()) recordBacktrace(returnedAllocation); + checkScribble(returnedAllocation, size); return returnedAllocation; } else { + checkScribble(allocation, size); return allocation; } } @@ -5961,6 +5986,10 @@ void MetadataAllocator::Deallocate(const void *allocation, size_t size, return; } + // If we're scribbling, re-scribble the allocation so that the next call to + // Allocate sees what it expects. + memsetScribble(const_cast(allocation), size); + // Try to swap back to the pre-allocation state. If this fails, // don't bother trying again; we'll just leak the allocation. PoolRange newState = { reinterpret_cast(const_cast(allocation)), diff --git a/stdlib/public/runtime/ReflectionMirror.cpp b/stdlib/public/runtime/ReflectionMirror.cpp index ef6e746064ec2..d8dc1e1292166 100644 --- a/stdlib/public/runtime/ReflectionMirror.cpp +++ b/stdlib/public/runtime/ReflectionMirror.cpp @@ -23,6 +23,7 @@ #include "swift/Runtime/Portability.h" #include "Private.h" #include "WeakReference.h" +#include "../SwiftShims/Reflection.h" #include #include #include @@ -82,6 +83,7 @@ namespace { class FieldType { const Metadata *type; bool indirect; + bool var = false; TypeReferenceOwnership referenceOwnership; public: @@ -97,6 +99,8 @@ class FieldType { const TypeReferenceOwnership getReferenceOwnership() const { return referenceOwnership; } bool isIndirect() const { return indirect; } void setIndirect(bool value) { indirect = value; } + bool isVar() const { return var; } + void setIsVar(bool value) { var = value; } void setReferenceOwnership(TypeReferenceOwnership newOwnership) { 
referenceOwnership = newOwnership; } @@ -292,7 +296,10 @@ struct TupleImpl : ReflectionMirrorImpl { // Get the nth element. auto &elt = Tuple->getElement(i); - return FieldType(elt.Type); + FieldType result(elt.Type); + // All tuples are mutable. + result.setIsVar(true); + return result; } AnyReturn subscript(intptr_t i, const char **outName, @@ -431,6 +438,7 @@ getFieldAt(const Metadata *base, unsigned index) { auto fieldType = FieldType(typeInfo.getMetadata()); fieldType.setIndirect(field.isIndirectCase()); fieldType.setReferenceOwnership(typeInfo.getReferenceOwnership()); + fieldType.setIsVar(field.isVar()); return {name, fieldType}; } @@ -993,17 +1001,20 @@ intptr_t swift_reflectionMirror_recursiveCount(const Metadata *type) { // func _getChildMetadata( // type: Any.Type, // index: Int, -// outName: UnsafeMutablePointer?>, -// outFreeFunc: UnsafeMutablePointer +// fieldMetadata: UnsafeMutablePointer<_FieldReflectionMetadata> // ) -> Any.Type SWIFT_CC(swift) SWIFT_RUNTIME_STDLIB_API const Metadata *swift_reflectionMirror_recursiveChildMetadata( const Metadata *type, intptr_t index, - const char **outName, - void (**outFreeFunc)(const char *)) { + _FieldReflectionMetadata* field) { return call(nullptr, type, type, [&](ReflectionMirrorImpl *impl) { - return impl->recursiveChildMetadata(index, outName, outFreeFunc).getType(); + FieldType fieldInfo = impl->recursiveChildMetadata(index, &field->name, + &field->freeFunc); + + field->isStrong = fieldInfo.getReferenceOwnership().isStrong(); + field->isVar = fieldInfo.isVar(); + return fieldInfo.getType(); }); } diff --git a/stdlib/public/runtime/StackAllocator.h b/stdlib/public/runtime/StackAllocator.h new file mode 100644 index 0000000000000..01fd89c794daf --- /dev/null +++ b/stdlib/public/runtime/StackAllocator.h @@ -0,0 +1,292 @@ +//===--- StackAllocator.h - A stack allocator -----------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2020 
Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// +// +// A bump-pointer allocator that obeys a stack discipline. +// +//===----------------------------------------------------------------------===// + +#include "swift/Runtime/Debug.h" +#include "llvm/Support/Alignment.h" +#include + +namespace swift { + +/// A bump-pointer allocator that obeys a stack discipline. +/// +/// StackAllocator performs fast allocation and deallocation of memory by +/// implementing a bump-pointer allocation strategy. +/// +/// This isn't strictly a bump-pointer allocator as it uses backing slabs of +/// memory rather than relying on a boundless contiguous heap. However, it has +/// bump-pointer semantics in that it is a monotonically growing pool of memory +/// where every allocation is found by merely allocating the next N bytes in +/// the slab, or the next N bytes in the next slab. +/// +/// In contrast to a pure bump-pointer allocator, it's possible to free memory. +/// Allocations and deallocations must follow a strict stack discipline. In +/// general, slabs which become unused are _not_ freed, but reused for +/// subsequent allocations. +/// +/// It's possible to place the first slab into pre-allocated memory. +/// +/// The SlabCapacity specifies the capacity for newly allocated slabs. +template +class StackAllocator { +private: + + struct Allocation; + struct Slab; + + /// The last active allocation. + /// + /// A deallocate() must free this allocation. + Allocation *lastAllocation = nullptr; + + /// The first slab. + Slab *firstSlab; + + /// Used for unit testing. + int32_t numAllocatedSlabs = 0; + + /// True if the first slab is pre-allocated. 
+ bool firstSlabIsPreallocated; + + /// The minimal alignment of allocated memory. + static constexpr size_t alignment = alignof(std::max_align_t); + + /// If set to true, memory allocations are checked for buffer overflows and + /// use-after-free, similar to guard-malloc. + static constexpr bool guardAllocations = +#ifdef NDEBUG + false; +#else + true; +#endif + + static constexpr uintptr_t magicUninitialized = (uintptr_t)0xcdcdcdcdcdcdcdcdull; + static constexpr uintptr_t magicEndOfAllocation = (uintptr_t)0xdeadbeafdeadbeafull; + + /// A memory slab holding multiple allocations. + /// + /// This struct is actually just the slab header. The slab buffer is tail + /// allocated after Slab. + struct Slab { + /// A single linked list of all allocated slabs. + Slab *next = nullptr; + + // Capacity and offset do not include these header fields. + uint32_t capacity; + uint32_t currentOffset = 0; + + // Here starts the tail allocated memory buffer of the slab. + + Slab(size_t newCapacity) : capacity(newCapacity) { + assert((size_t)capacity == newCapacity && "capacity overflow"); + } + + /// The size of the slab header. + static size_t headerSize() { + return llvm::alignTo(sizeof(Slab), llvm::Align(alignment)); + } + + /// Return \p size with the added overhead of the slab header. + static size_t includingHeader(size_t size) { + return headerSize() + size; + } + + /// Return the payload buffer address at \p atOffset. + /// + /// Note: it's valid to call this function on a not-yet-constructed slab. + char *getAddr(size_t atOffset) { + return (char *)this + headerSize() + atOffset; + } + + /// Return true if this slab can fit an allocation of \p size. + /// + /// \p size does not include the allocation header, but must include the + /// overhead for guardAllocations (if enabled). + inline bool canAllocate(size_t size) const { + return currentOffset + Allocation::includingHeader(size) <= capacity; + } + + /// Return true, if no memory is allocated in this slab. 
+ bool isEmpty() const { return currentOffset == 0; } + + /// Allocate \p alignedSize of bytes in this slab. + /// + /// \p alignedSize does not include the allocation header, but must include + /// the overhead for guardAllocations (if enabled). + /// + /// Precondition: \p alignedSize must be aligned up to + /// StackAllocator::alignment. + /// Precondition: there must be enough space in this slab to fit the + /// allocation. + Allocation *allocate(size_t alignedSize, Allocation *lastAllocation) { + assert(llvm::isAligned(llvm::Align(alignment), alignedSize)); + assert(canAllocate(alignedSize)); + void *buffer = getAddr(currentOffset); + auto *allocation = new (buffer) Allocation(lastAllocation, this); + currentOffset += Allocation::includingHeader(alignedSize); + if (guardAllocations) { + uintptr_t *endOfCurrentAllocation = (uintptr_t *)getAddr(currentOffset); + endOfCurrentAllocation[-1] = magicEndOfAllocation; + } + return allocation; + } + + /// Deallocate \p allocation. + /// + /// Precondition: \p allocation must be an allocation in this slab. + void deallocate(Allocation *allocation) { + assert(allocation->slab == this); + if (guardAllocations) { + auto *endOfAllocation = (uintptr_t *)getAddr(currentOffset); + if (endOfAllocation[-1] != magicEndOfAllocation) + fatalError(0, "Buffer overflow in StackAllocator"); + for (auto *p = (uintptr_t *)allocation; p < endOfAllocation; ++p) + *p = magicUninitialized; + } + currentOffset = (char *)allocation - getAddr(0); + } + }; + + /// A single memory allocation. + /// + /// This struct is actually just the allocation header. The allocated + /// memory buffer is located after Allocation. + struct Allocation { + /// A single linked list of previous allocations. + Allocation *previous; + /// The containing slab. + Slab *slab; + + // Here starts the tail allocated memory. 
+ + Allocation(Allocation *previous, Slab *slab) : + previous(previous), slab(slab) {} + + void *getAllocatedMemory() { + return (char *)this + headerSize(); + } + + /// The size of the allocation header. + static size_t headerSize() { + return llvm::alignTo(sizeof(Allocation), llvm::Align(alignment)); + } + + /// Return \p size with the added overhead of the allocation header. + static size_t includingHeader(size_t size) { + return headerSize() + size; + } + }; + + // Return a slab which is suitable to allocate \p size memory. + Slab *getSlabForAllocation(size_t size) { + Slab *slab = (lastAllocation ? lastAllocation->slab : firstSlab); + if (slab) { + // Is there enough space in the current slab? + if (slab->canAllocate(size)) + return slab; + + // Is there a successor slab, which we allocated before (and became free + // in the meantime)? + if (Slab *nextSlab = slab->next) { + assert(nextSlab->isEmpty()); + if (nextSlab->canAllocate(size)) + return nextSlab; + + // No space in the next slab. Although it's empty, the size exceeds its + // capacity. + // As we have to allocate a new slab anyway, free all successor slabs + // and allocate a new one with the accumulated capacity. + size_t alreadyAllocatedCapacity = freeAllSlabs(slab->next); + size = std::max(size, alreadyAllocatedCapacity); + } + } + size_t capacity = std::max(SlabCapacity, + Allocation::includingHeader(size)); + void *slabBuffer = malloc(Slab::includingHeader(capacity)); + Slab *newSlab = new (slabBuffer) Slab(capacity); + if (slab) + slab->next = newSlab; + else + firstSlab = newSlab; + numAllocatedSlabs++; + return newSlab; + } + + /// Deallocate all slabs after \p first and set \p first to null. 
+ size_t freeAllSlabs(Slab *&first) { + size_t freedCapacity = 0; + Slab *slab = first; + first = nullptr; + while (slab) { + Slab *next = slab->next; + freedCapacity += slab->capacity; + free(slab); + numAllocatedSlabs--; + slab = next; + } + return freedCapacity; + } + +public: + /// Construct a StackAllocator without a pre-allocated first slab. + StackAllocator() : firstSlab(nullptr), firstSlabIsPreallocated(false) { } + + /// Construct a StackAllocator with a pre-allocated first slab. + StackAllocator(void *firstSlabBuffer, size_t bufferCapacity) { + char *start = (char *)llvm::alignAddr(firstSlabBuffer, + llvm::Align(alignment)); + char *end = (char *)firstSlabBuffer + bufferCapacity; + assert(start + Slab::headerSize() <= end && + "buffer for first slab too small"); + firstSlab = new (start) Slab(end - start - Slab::headerSize()); + firstSlabIsPreallocated = true; + } + + ~StackAllocator() { + if (lastAllocation) + fatalError(0, "not all allocations are deallocated"); + (void)freeAllSlabs(firstSlabIsPreallocated ? firstSlab->next : firstSlab); + assert(getNumAllocatedSlabs() == 0); + } + + /// Allocate a memory buffer of \p size. + void *alloc(size_t size) { + if (guardAllocations) + size += sizeof(uintptr_t); + size_t alignedSize = llvm::alignTo(size, llvm::Align(alignment)); + Slab *slab = getSlabForAllocation(alignedSize); + Allocation *allocation = slab->allocate(alignedSize, lastAllocation); + lastAllocation = allocation; + assert(llvm::isAddrAligned(llvm::Align(alignment), + allocation->getAllocatedMemory())); + return allocation->getAllocatedMemory(); + } + + /// Deallocate memory \p ptr. + void dealloc(void *ptr) { + if (!lastAllocation || lastAllocation->getAllocatedMemory() != ptr) + fatalError(0, "freed pointer was not the last allocation"); + + Allocation *prev = lastAllocation->previous; + lastAllocation->slab->deallocate(lastAllocation); + lastAllocation = prev; + } + + /// For unit testing. 
+ int getNumAllocatedSlabs() { return numAllocatedSlabs; } +}; + +} // namespace swift + diff --git a/stdlib/public/stubs/Assert.cpp b/stdlib/public/stubs/Assert.cpp index fa0169a5ad6b8..910b691488661 100644 --- a/stdlib/public/stubs/Assert.cpp +++ b/stdlib/public/stubs/Assert.cpp @@ -49,12 +49,12 @@ void _swift_stdlib_reportFatalErrorInFile( ) { char *log; swift_asprintf( - &log, "%.*s: %.*s%sfile %.*s, line %" PRIu32 "\n", - prefixLength, prefix, - messageLength, message, - (messageLength ? ": " : ""), + &log, "%.*s:%" PRIu32 ": %.*s%s%.*s\n", fileLength, file, - line); + line, + prefixLength, prefix, + (messageLength > 0 ? ": " : ""), + messageLength, message); swift_reportError(flags, log); free(log); @@ -89,10 +89,10 @@ void _swift_stdlib_reportUnimplementedInitializerInFile( char *log; swift_asprintf( &log, - "%.*s: %" PRIu32 ": %" PRIu32 ": Fatal error: Use of unimplemented " + "%.*s:%" PRIu32 ": Fatal error: Use of unimplemented " "initializer '%.*s' for class '%.*s'\n", fileLength, file, - line, column, + line, initNameLength, initName, classNameLength, className); diff --git a/stdlib/toolchain/legacy_layouts/CMakeLists.txt b/stdlib/toolchain/legacy_layouts/CMakeLists.txt index 0bab12b0dfc46..56b9f87bc6a09 100644 --- a/stdlib/toolchain/legacy_layouts/CMakeLists.txt +++ b/stdlib/toolchain/legacy_layouts/CMakeLists.txt @@ -1,7 +1,7 @@ add_custom_target("copy-legacy-layouts" ALL) foreach(sdk ${SWIFT_SDKS}) - foreach(arch ${SWIFT_SDK_${sdk}_ARCHITECTURES}) + foreach(arch ${SWIFT_SDK_${sdk}_ARCHITECTURES} ${SWIFT_SDK_${sdk}_MODULE_ARCHITECTURES}) set(platform "${SWIFT_SDK_${sdk}_LIB_SUBDIR}") set(input "${SWIFT_SOURCE_DIR}/stdlib/toolchain/legacy_layouts/${platform}/layouts-${arch}.yaml") diff --git a/test/AutoDiff/IRGen/runtime.swift b/test/AutoDiff/IRGen/runtime.swift new file mode 100644 index 0000000000000..d19edc7deffc9 --- /dev/null +++ b/test/AutoDiff/IRGen/runtime.swift @@ -0,0 +1,24 @@ +// RUN: %target-swift-frontend -parse-stdlib %s -emit-ir | 
%FileCheck %s + +import Swift +import _Differentiation + +struct ExamplePullbackStruct { + var pb0: (T.TangentVector) -> T.TangentVector +} + +@_silgen_name("test_context_builtins") +func test_context_builtins() { + let pbStruct = ExamplePullbackStruct(pb0: { $0 }) + let context = Builtin.autoDiffCreateLinearMapContext(Builtin.sizeof(type(of: pbStruct))) + let topLevelSubctxAddr = Builtin.autoDiffProjectTopLevelSubcontext(context) + UnsafeMutableRawPointer(topLevelSubctxAddr).storeBytes(of: pbStruct, as: type(of: pbStruct)) + let newBuffer = Builtin.autoDiffAllocateSubcontext(context, Builtin.sizeof(type(of: pbStruct))) + UnsafeMutableRawPointer(newBuffer).storeBytes(of: pbStruct, as: type(of: pbStruct)) +} + +// CHECK-LABEL: define{{.*}}@test_context_builtins() +// CHECK: entry: +// CHECK: [[CTX:%.*]] = call swiftcc %swift.refcounted* @swift_autoDiffCreateLinearMapContext({{i[0-9]+}} {{.*}}) +// CEHCK: call swiftcc i8* @swift_autoDiffProjectTopLevelSubcontext(%swift.refcounted* [[CTX]]) +// CHECK: [[BUF:%.*]] = call swiftcc i8* @swift_autoDiffAllocateSubcontext(%swift.refcounted* [[CTX]], {{i[0-9]+}} {{.*}}) diff --git a/test/AutoDiff/Parse/differentiable_attr_parse.swift b/test/AutoDiff/Parse/differentiable_attr_parse.swift index dacb9eeb3388a..ed225e12b63ed 100644 --- a/test/AutoDiff/Parse/differentiable_attr_parse.swift +++ b/test/AutoDiff/Parse/differentiable_attr_parse.swift @@ -92,6 +92,11 @@ func two(x: Float, y: Float) -> Float { return x + y } +@differentiable(wrt: $x) // ok +func two(x: Float, y: Float) -> Float { + return x + y +} + /// Bad // expected-error @+1 {{expected 'wrt:' or 'where'}} diff --git a/test/AutoDiff/SILGen/autodiff_builtins.swift b/test/AutoDiff/SILGen/autodiff_builtins.swift index f8f521c9d342f..f9806a1f7cf4d 100644 --- a/test/AutoDiff/SILGen/autodiff_builtins.swift +++ b/test/AutoDiff/SILGen/autodiff_builtins.swift @@ -152,3 +152,26 @@ func linearFunction_f_direct_arity1() -> @differentiable(linear) (Float) -> Floa // CHECK: 
[[THICK_ORIG2:%.*]] = thin_to_thick_function [[ORIG2]] : $@convention(thin) (Float) -> Float to $@callee_guaranteed (Float) -> Float // CHECK: [[LINEAR:%.*]] = linear_function [parameters 0] [[THICK_ORIG1]] : $@callee_guaranteed (Float) -> Float with_transpose [[THICK_ORIG2]] : $@callee_guaranteed (Float) -> Float // CHECK: return [[LINEAR]] : $@differentiable(linear) @callee_guaranteed (Float) -> Float + +struct ExamplePullbackStruct { + var pb0: (T.TangentVector) -> T.TangentVector +} + +@_silgen_name("test_context_builtins") +func test_context_builtins() { + let pbStruct = ExamplePullbackStruct(pb0: { $0 }) + let context = Builtin.autoDiffCreateLinearMapContext(Builtin.sizeof(type(of: pbStruct))) + let topLevelSubctxAddr = Builtin.autoDiffProjectTopLevelSubcontext(context) + UnsafeMutableRawPointer(topLevelSubctxAddr).storeBytes(of: pbStruct, as: type(of: pbStruct)) + let newBuffer = Builtin.autoDiffAllocateSubcontext(context, Builtin.sizeof(type(of: pbStruct))) + UnsafeMutableRawPointer(newBuffer).storeBytes(of: pbStruct, as: type(of: pbStruct)) +} + +// CHECK-LABEL: sil{{.*}}@test_context_builtins +// CHECK: bb0: +// CHECK: [[CTX:%.*]] = builtin "autoDiffCreateLinearMapContext"({{%.*}} : $Builtin.Word) : $Builtin.NativeObject +// CHECK: [[BORROWED_CTX:%.*]] = begin_borrow [[CTX]] : $Builtin.NativeObject +// CHECK: [[BUF:%.*]] = builtin "autoDiffProjectTopLevelSubcontext"([[BORROWED_CTX]] : $Builtin.NativeObject) : $Builtin.RawPointer +// CHECK: [[BORROWED_CTX:%.*]] = begin_borrow [[CTX]] : $Builtin.NativeObject +// CHECK: [[BUF:%.*]] = builtin "autoDiffAllocateSubcontext"([[BORROWED_CTX]] : $Builtin.NativeObject, {{.*}} : $Builtin.Word) : $Builtin.RawPointer +// CHECK: destroy_value [[CTX]] diff --git a/test/AutoDiff/SILOptimizer/activity_analysis.swift b/test/AutoDiff/SILOptimizer/activity_analysis.swift index 2a2fdcca7f650..5dc16cbf9263e 100644 --- a/test/AutoDiff/SILOptimizer/activity_analysis.swift +++ b/test/AutoDiff/SILOptimizer/activity_analysis.swift 
@@ -347,31 +347,29 @@ func testArrayUninitializedIntrinsicNested(_ x: Float, _ y: Float) -> [Float] { // CHECK: [VARIED] %11 = integer_literal $Builtin.Word, 1 // CHECK: [ACTIVE] %12 = index_addr %9 : $*Float, %11 : $Builtin.Word // CHECK: [NONE] // function_ref _finalizeUninitializedArray(_:) -// CHECK: [ACTIVE] %15 = apply %14(%7) : $@convention(thin) <τ_0_0> (@owned Array<τ_0_0>) -> @owned Array<τ_0_0> +// CHECK: [ACTIVE] [[ARRAY:%.*]] = apply %14(%7) : $@convention(thin) <τ_0_0> (@owned Array<τ_0_0>) -> @owned Array<τ_0_0> // CHECK: [USEFUL] %17 = integer_literal $Builtin.Word, 2 // CHECK: [NONE] // function_ref _allocateUninitializedArray(_:) // CHECK: [ACTIVE] %19 = apply %18(%17) : $@convention(thin) <τ_0_0> (Builtin.Word) -> (@owned Array<τ_0_0>, Builtin.RawPointer) // CHECK: [ACTIVE] (**%20**, %21) = destructure_tuple %19 : $(Array, Builtin.RawPointer) // CHECK: [VARIED] (%20, **%21**) = destructure_tuple %19 : $(Array, Builtin.RawPointer) // CHECK: [ACTIVE] %22 = pointer_to_address %21 : $Builtin.RawPointer to [strict] $*Float -// CHECK: [ACTIVE] %23 = begin_borrow %15 : $Array -// CHECK: [USEFUL] %24 = integer_literal $Builtin.IntLiteral, 0 -// CHECK: [USEFUL] %25 = metatype $@thin Int.Type +// CHECK: [USEFUL] %23 = integer_literal $Builtin.IntLiteral, 0 +// CHECK: [USEFUL] %24 = metatype $@thin Int.Type // CHECK: [NONE] // function_ref Int.init(_builtinIntegerLiteral:) -// CHECK: [USEFUL] %27 = apply %26(%24, %25) : $@convention(method) (Builtin.IntLiteral, @thin Int.Type) -> Int +// CHECK: [USEFUL] %26 = apply %25(%23, %24) : $@convention(method) (Builtin.IntLiteral, @thin Int.Type) -> Int // CHECK: [NONE] // function_ref Array.subscript.getter -// CHECK: [NONE] %29 = apply %28(%22, %27, %23) : $@convention(method) <τ_0_0> (Int, @guaranteed Array<τ_0_0>) -> @out τ_0_0 -// CHECK: [VARIED] %30 = integer_literal $Builtin.Word, 1 -// CHECK: [ACTIVE] %31 = index_addr %22 : $*Float, %30 : $Builtin.Word -// CHECK: [ACTIVE] %32 = begin_borrow %15 : $Array -// 
CHECK: [USEFUL] %33 = integer_literal $Builtin.IntLiteral, 1 -// CHECK: [USEFUL] %34 = metatype $@thin Int.Type +// CHECK: [NONE] %28 = apply %27(%22, %26, %15) : $@convention(method) <τ_0_0> (Int, @guaranteed Array<τ_0_0>) -> @out τ_0_0 +// CHECK: [VARIED] %29 = integer_literal $Builtin.Word, 1 +// CHECK: [ACTIVE] %30 = index_addr %22 : $*Float, %29 : $Builtin.Word +// CHECK: [USEFUL] %31 = integer_literal $Builtin.IntLiteral, 1 +// CHECK: [USEFUL] %32 = metatype $@thin Int.Type // CHECK: [NONE] // function_ref Int.init(_builtinIntegerLiteral:) -// CHECK: [USEFUL] %36 = apply %35(%33, %34) : $@convention(method) (Builtin.IntLiteral, @thin Int.Type) -> Int +// CHECK: [USEFUL] %34 = apply %33(%31, %32) : $@convention(method) (Builtin.IntLiteral, @thin Int.Type) -> Int // CHECK: [NONE] // function_ref Array.subscript.getter -// CHECK: [NONE] %38 = apply %37(%31, %36, %32) : $@convention(method) <τ_0_0> (Int, @guaranteed Array<τ_0_0>) -> @out τ_0_0 +// CHECK: [NONE] %36 = apply %35(%30, %34, %15) : $@convention(method) <τ_0_0> (Int, @guaranteed Array<τ_0_0>) -> @out τ_0_0 // CHECK: [NONE] // function_ref _finalizeUninitializedArray(_:) -// CHECK: [ACTIVE] %40 = apply %39(%20) : $@convention(thin) <τ_0_0> (@owned Array<τ_0_0>) -> @owned Array<τ_0_0> +// CHECK: [ACTIVE] %38 = apply %37(%20) : $@convention(thin) <τ_0_0> (@owned Array<τ_0_0>) -> @owned Array<τ_0_0> // TF-978: Test array literal initialized with `apply` indirect results. 
struct Wrapper: Differentiable { @@ -733,16 +731,13 @@ func testClassModifyAccessor(_ c: inout C) { // CHECK: [VARIED] %4 = load [copy] %3 : $*C // CHECK: [ACTIVE] %6 = begin_access [read] [static] %0 : $*C // CHECK: [VARIED] %7 = load [copy] %6 : $*C -// CHECK: [VARIED] %9 = begin_borrow %7 : $C -// CHECK: [VARIED] %10 = class_method %9 : $C, #C.float!getter : (C) -> () -> Float, $@convention(method) (@guaranteed C) -> Float -// CHECK: [VARIED] %11 = apply %10(%9) : $@convention(method) (@guaranteed C) -> Float -// CHECK: [VARIED] %14 = begin_borrow %4 : $C -// CHECK: [VARIED] %15 = class_method %14 : $C, #C.float!modify : (C) -> () -> (), $@yield_once @convention(method) (@guaranteed C) -> @yields @inout Float -// CHECK: [VARIED] (**%16**, %17) = begin_apply %15(%14) : $@yield_once @convention(method) (@guaranteed C) -> @yields @inout Float -// CHECK: [VARIED] (%16, **%17**) = begin_apply %15(%14) : $@yield_once @convention(method) (@guaranteed C) -> @yields @inout Float +// CHECK: [VARIED] %9 = class_method %7 : $C, #C.float!getter : (C) -> () -> Float, $@convention(method) (@guaranteed C) -> Float +// CHECK: [VARIED] %10 = apply %9(%7) : $@convention(method) (@guaranteed C) -> Float +// CHECK: [VARIED] %12 = class_method %4 : $C, #C.float!modify : (C) -> () -> (), $@yield_once @convention(method) (@guaranteed C) -> @yields @inout Float +// CHECK: [VARIED] (**%13**, %14) = begin_apply %12(%4) : $@yield_once @convention(method) (@guaranteed C) -> @yields @inout Float +// CHECK: [VARIED] (%13, **%14**) = begin_apply %12(%4) : $@yield_once @convention(method) (@guaranteed C) -> @yields @inout Float // CHECK: [NONE] // function_ref static Float.*= infix(_:_:) -// CHECK: [NONE] %19 = apply %18(%16, %11, %2) : $@convention(method) (@inout Float, Float, @thin Float.Type) -> () -// CHECK: [NONE] %23 = tuple () +// CHECK: [NONE] %16 = apply %15(%13, %10, %2) : $@convention(method) (@inout Float, Float, @thin Float.Type) -> () 
//===----------------------------------------------------------------------===// // Enum differentiation diff --git a/test/AutoDiff/SILOptimizer/derivative_sil.swift b/test/AutoDiff/SILOptimizer/derivative_sil.swift index b4442b6800b99..10eba87f65276 100644 --- a/test/AutoDiff/SILOptimizer/derivative_sil.swift +++ b/test/AutoDiff/SILOptimizer/derivative_sil.swift @@ -33,7 +33,6 @@ func foo(_ x: Float) -> Float { // CHECK-SIL: [[ADD_VJP_REF:%.*]] = differentiability_witness_function [vjp] [parameters 0 1] [results 0] @add // CHECK-SIL: [[ADD_DIFF_FN:%.*]] = differentiable_function [parameters 0 1] [results 0] [[ADD_ORIG_REF]] : $@convention(method) (Float, Float, @thin Float.Type) -> Float with_derivative {[[ADD_JVP_REF]] : $@convention(method) (Float, Float, @thin Float.Type) -> (Float, @owned @callee_guaranteed (Float, Float) -> Float), [[ADD_VJP_REF]] : $@convention(method) (Float, Float, @thin Float.Type) -> (Float, @owned @callee_guaranteed (Float) -> (Float, Float))} // CHECK-SIL: [[ADD_JVP_FN:%.*]] = differentiable_function_extract [jvp] [[ADD_DIFF_FN]] -// CHECK-SIL: end_borrow [[ADD_DIFF_FN]] // CHECK-SIL: [[ADD_RESULT:%.*]] = apply [[ADD_JVP_FN]]([[X]], [[X]], {{.*}}) // CHECK-SIL: ([[ORIG_RES:%.*]], [[ADD_DF:%.*]]) = destructure_tuple [[ADD_RESULT]] // CHECK-SIL: [[DF_STRUCT:%.*]] = struct $_AD__foo_bb0__DF__src_0_wrt_0 ([[ADD_DF]] : $@callee_guaranteed (Float, Float) -> Float) @@ -58,7 +57,6 @@ func foo(_ x: Float) -> Float { // CHECK-SIL: [[ADD_VJP_REF:%.*]] = differentiability_witness_function [vjp] [parameters 0 1] [results 0] @add // CHECK-SIL: [[ADD_DIFF_FN:%.*]] = differentiable_function [parameters 0 1] [results 0] [[ADD_ORIG_REF]] : $@convention(method) (Float, Float, @thin Float.Type) -> Float with_derivative {[[ADD_JVP_REF]] : $@convention(method) (Float, Float, @thin Float.Type) -> (Float, @owned @callee_guaranteed (Float, Float) -> Float), [[ADD_VJP_REF]] : $@convention(method) (Float, Float, @thin Float.Type) -> (Float, @owned 
@callee_guaranteed (Float) -> (Float, Float))} // CHECK-SIL: [[ADD_VJP_FN:%.*]] = differentiable_function_extract [vjp] [[ADD_DIFF_FN]] -// CHECK-SIL: end_borrow [[ADD_DIFF_FN]] // CHECK-SIL: [[ADD_RESULT:%.*]] = apply [[ADD_VJP_FN]]([[X]], [[X]], {{.*}}) // CHECK-SIL: ([[ORIG_RES:%.*]], [[ADD_PB:%.*]]) = destructure_tuple [[ADD_RESULT]] // CHECK-SIL: [[PB_STRUCT:%.*]] = struct $_AD__foo_bb0__PB__src_0_wrt_0 ([[ADD_PB]] : $@callee_guaranteed (Float) -> (Float, Float)) diff --git a/test/AutoDiff/SILOptimizer/differentiation_diagnostics.swift b/test/AutoDiff/SILOptimizer/differentiation_diagnostics.swift index 22d3fca6a17c3..a8e9ddcee5064 100644 --- a/test/AutoDiff/SILOptimizer/differentiation_diagnostics.swift +++ b/test/AutoDiff/SILOptimizer/differentiation_diagnostics.swift @@ -714,7 +714,7 @@ func modify(_ s: Struct, _ x: Float) -> Float { func tupleArrayLiteralInitialization(_ x: Float, _ y: Float) -> Float { // `Array<(Float, Float)>` does not conform to `Differentiable`. let array = [(x * y, x * y)] - // expected-note @+1 {{cannot differentiate through a non-differentiable argument; do you want to use 'withoutDerivative(at:)'?}} {{10-10=withoutDerivative(at: }} {{15-15=)}} + // expected-note @-1 {{cannot differentiate through a non-differentiable argument; do you want to use 'withoutDerivative(at:)'?}} {{15-15=withoutDerivative(at: }} {{31-31=)}} return array[0].0 } diff --git a/test/AutoDiff/SILOptimizer/generics.swift b/test/AutoDiff/SILOptimizer/generics.swift new file mode 100644 index 0000000000000..d58a806b863d9 --- /dev/null +++ b/test/AutoDiff/SILOptimizer/generics.swift @@ -0,0 +1,390 @@ +// RUN: %target-swift-emit-sil -verify %s | %FileCheck %s -check-prefix=CHECK-SIL + +import _Differentiation + +@_silgen_name("identity") +func identity(_ x: T) -> T { + return x +} +_ = gradient(at: Float(1), in: { x in identity(x) }) + +// Test PullbackCloner local buffer allocation. +// Verify that local buffers are immediately set to zero. 
+ +// CHECK-SIL-LABEL: sil private @AD__identity__pullback_src_0_wrt_0_{{16_Differentiation|s}}14DifferentiableRzl +// CHECK-SIL: [[ORIG_COTAN:%.*]] = alloc_stack $τ_0_0.TangentVector +// CHECK-SIL-NEXT: [[ZERO_WITNESS:%.*]] = witness_method $τ_0_0.TangentVector, #AdditiveArithmetic.zero!getter +// CHECK-SIL-NEXT: [[ORIG_COTAN_METATYPE:%.*]] = metatype $@thick τ_0_0.TangentVector.Type +// CHECK-SIL-NEXT: [[EMIT_ZERO_INDIRECT:%.*]] = apply [[ZERO_WITNESS]]<τ_0_0.TangentVector>([[ORIG_COTAN]], [[ORIG_COTAN_METATYPE]]) +// CHECK-SIL: } + +// Test TF-201: differentiate direct references to generic function. +// This involves reabstraction thunk differentiation. + +_ = gradient(at: Float(1), in: identity) + +protocol DifferentiableAdditiveArithmetic: Differentiable & AdditiveArithmetic { + @differentiable + static func + (lhs: Self, rhs: Self) -> Self +} +extension Float: DifferentiableAdditiveArithmetic {} +func generic(_ x: T) -> T { + x + x + x +} +_ = gradient(at: Float(10), in: generic) + +struct Wrapper : Differentiable { + var value: Scalar + init(_ value: Scalar) { self.value = value } +} +func generic(_ x: Wrapper) -> T { + return x.value +} +_ = gradient(at: Wrapper(1), in: generic) + +func generic2(_ x: T, _ y: Float, _ z: U) -> T { + return x +} +func foo(_ x: Wrapper) { + _ = gradient(at: Float(1), 2, x, in: generic2) +} + +// Test case where associated derivative function's requirements are met. +extension Wrapper where Scalar : Numeric { + @differentiable(wrt: self where Scalar : Differentiable & FloatingPoint) + func mean() -> Wrapper { + return self + } + + @differentiable(wrt: self where Scalar : Differentiable & FloatingPoint) + func variance() -> Wrapper { + return mean() // ok + } +} +_ = pullback(at: Wrapper(1), in: { $0.variance() }) + +// Tests TF-277. 
+protocol Layer : Differentiable { + associatedtype Output : Differentiable +} +struct SupervisedTrainer { + var model: Model + var lossFunction: @differentiable (Model.Output, Model.Output) -> Float + func fit(y: Model.Output) { + _ = gradient(at: y) { y in return self.lossFunction(y, y) } + } +} + +// Tests TF-440. +struct TF_440_Input + : Differentiable { + var input: Input + var state: State +} +struct TF_440 { + @differentiable + func applied(to input: TF_440_Input) -> Float { + return input.state + } + + @differentiable + func applied(to input: TF_440_Input) -> Float { + return input.state + } + + @differentiable + func applied(to input: TF_440_Input) -> T { + return input.input + } +} + +// Tests TF-508: differentiation requirements with dependent member types. +protocol TF_508_Proto { + associatedtype Scalar +} +extension TF_508_Proto where Scalar : FloatingPoint { + @differentiable( + where Self : Differentiable, Scalar : Differentiable, + // Conformance requirement with dependent member type. + Self.TangentVector : TF_508_Proto + ) + static func +(lhs: Self, rhs: Self) -> Self { + return lhs + } + + @differentiable( + where Self : Differentiable, Scalar : Differentiable, + // Same-type requirement with dependent member type. 
+ Self.TangentVector == Float + ) + static func -(lhs: Self, rhs: Self) -> Self { + return lhs + } +} +extension TF_508_Proto where Self : Differentiable, + Scalar : FloatingPoint & Differentiable, + Self.TangentVector : TF_508_Proto { + @derivative(of: +) + static func vjpAdd(lhs: Self, rhs: Self) + -> (value: Self, pullback: (TangentVector) -> (TangentVector, TangentVector)) { + return (lhs, { v in (v, v) }) + } +} +extension TF_508_Proto where Self : Differentiable, + Scalar : FloatingPoint & Differentiable, + Self.TangentVector == Float { + @derivative(of: -) + static func vjpSubtract(lhs: Self, rhs: Self) + -> (value: Self, pullback: (TangentVector) -> (TangentVector, TangentVector)) { + return (lhs, { v in (v, v) }) + } +} + +struct TF_508_Struct + : TF_508_Proto, AdditiveArithmetic {} +extension TF_508_Struct : Differentiable where Scalar : Differentiable { + typealias TangentVector = TF_508_Struct +} + +func TF_508() { + let x = TF_508_Struct() + // Test conformance requirement with dependent member type. + _ = pullback(at: x, in: { (x: TF_508_Struct) -> TF_508_Struct in + return x + x + }) + // Test same-type requirement with dependent member type. + _ = pullback(at: x, in: { (x: TF_508_Struct) -> TF_508_Struct in + return x - x + }) +} + +// TF-523 +struct TF_523_Struct : Differentiable & AdditiveArithmetic { + var a: Float = 1 + typealias TangentVector = TF_523_Struct +} + +@differentiable +func TF_523_f(_ x: TF_523_Struct) -> Float { + return x.a * 2 +} + +// TF-534: Thunk substitution map remapping. 
+protocol TF_534_Layer : Differentiable { + associatedtype Input : Differentiable + associatedtype Output : Differentiable + + @differentiable + func callAsFunction(_ input: Input) -> Output +} +struct TF_534_Tensor : Differentiable {} + +func TF_534( + _ model: inout Model, inputs: Model.Input +) -> TF_534_Tensor where Model.Output == TF_534_Tensor { + return valueWithPullback(at: model) { model -> Model.Output in + return model(inputs) + }.0 +} + +// TF-546: Test that SILGen linear map thunk performs correct reabstraction. +struct TF_546: AdditiveArithmetic { + var real: T + var imaginary: T + + @differentiable(where T: Differentiable, T == T.TangentVector) + init(real: T = 0, imaginary: T = 0) { + self.real = real + self.imaginary = imaginary + } +} +extension TF_546: Differentiable where T: Differentiable { + typealias TangentVector = TF_546 +} +extension TF_546 where T: Differentiable, T == T.TangentVector { + @derivative(of: init) + static func _vjpInit(real: T, imaginary: T) -> (value: TF_546, pullback: (TF_546) -> (T, T)) { + return (TF_546(real: real, imaginary: imaginary), { ($0.real, $0.imaginary) }) + } +} +let _: @differentiable(Float, Float) -> TF_546 = { r, i in + TF_546(real: r, imaginary: i) +} + +// TF-652: Test VJPCloner substitution map generic signature. +// The substitution map should have the VJP's generic signature, not the +// original function's. +struct TF_652 {} +extension TF_652 : Differentiable where Scalar : FloatingPoint {} + +@differentiable(wrt: x where Scalar: FloatingPoint) +func test(x: TF_652) -> TF_652 { + for _ in 0..<10 { + let _ = x + } + return x +} + +// TF-682: Test that SILGen linear map thunk performs correct reabstraction. +protocol TF_682_Proto { + associatedtype Scalar +} +extension TF_682_Proto where Scalar : FloatingPoint { + @differentiable( + where Self : Differentiable, Scalar : Differentiable, + // Same-type requirement with dependent member type. 
+ Self.TangentVector == Float + ) + func foo(lhs: Self) -> Self { + return lhs + } +} +extension TF_682_Proto where Self : Differentiable, + Scalar : FloatingPoint & Differentiable, + Self.TangentVector == Float { + @derivative(of: foo) + func vjpFoo(lhs: Self) -> ( + value: Self, pullback: (TangentVector) -> (TangentVector, TangentVector) + ) { + return (lhs, { v in (v, v) }) + } +} + +// NOTE(TF-1208): Differentiation regression due to changes in curry thunk generation. +/* +// TF-688: Test generic curry thunk cloning. +public struct TF_688_Struct { + var x: Scalar +} +extension TF_688_Struct: Differentiable where Scalar: Differentiable { + @differentiable + public static func id(x: Self) -> Self { + return x + } +} +@differentiable(wrt: x) +public func TF_688( + _ x: TF_688_Struct, + reduction: @differentiable (TF_688_Struct) -> TF_688_Struct = TF_688_Struct.id +) -> TF_688_Struct { + reduction(x) +} +*/ + +// TF-697: Test generic requirements of generated derivative function. +protocol TF_697_Module: Differentiable { + associatedtype Input + associatedtype Output: Differentiable + + @differentiable(wrt: self) + func callModule(_ input: Input) -> Output +} +protocol TF_697_Layer: TF_697_Module where Input: Differentiable { + @differentiable + func callLayer(_ input: Input) -> Output +} +struct TF_697_Sequential: TF_697_Module + where Layer1.Output == Layer2.Input { + var layer1: Layer1 + var layer2: Layer2 + + @differentiable(wrt: self) + func callModule(_ input: Layer1.Input) -> Layer2.Output { + layer2.callLayer(layer1.callModule(input)) + } +} +extension TF_697_Sequential: TF_697_Layer where Layer1: TF_697_Layer { + @differentiable + func callLayer(_ input: Layer1.Input) -> Layer2.Output { + layer2.callLayer(layer1.callLayer(input)) + } +} + +// TF-817: Test remapping `apply` callee types in derivative function context. 
+struct TF_817 { + func foo(_ index: Int) -> T { + fatalError() + } +} +extension TF_817: Differentiable where T: Differentiable { + @derivative(of: foo) + func vjpFoo(index: Int) -> (value: T, pullback: (T.TangentVector) -> (TangentVector)) { + fatalError() + } +} +extension TF_817 { + @differentiable(wrt: self where T: Differentiable) + public func test(index: Int) -> T { + return self.foo(0) // crash happened here + } +} + +// TF-886: Test `partial_apply` of linear map subset parameters thunk. +@differentiable +func TF_886_foo(_: Float, _: T, _: U) -> Float { + return 0 +} +@differentiable +func TF_886_bar(x: Float, y: T) -> Float { + return TF_886_foo(x, y, 0) +} + +// Test layout requirements. + +// The layout requirement is "contextual": the requirement is not on `T`, the +// differentiable function parameter/result type. +struct ContextualLayoutRequirement { + var stored: T +} +extension ContextualLayoutRequirement { + func test(_ x: T) { + let _: @differentiable (T) -> T = { _ in self.stored } + let _: @differentiable (T) -> T = { $0 } + } +} +// The layout requirement directly involves `T`, the differentiable function +// parameter/result type. +// TODO(TF-851): Uncomment the tests below after `@differentiable` function +// SILGen thunking is fixed. +/* +struct LayoutRequirement { + var stored: T +} +extension LayoutRequirement { + func test(_ x: T) { + let _: @differentiable (T) -> T = { _ in self.stored } + let _: @differentiable (T) -> T = { $0 } + } +} +*/ + +// Test superclass requirements. + +class Super: Differentiable {} + +// The superclass requirement is "contextual": the requirement is not on `T`, +// the differentiable function parameter/result type. 
+struct ContextualSuperclassRequirement { + var stored: T +} +extension ContextualSuperclassRequirement { + func test(_ x: T) { + let _: @differentiable (T) -> T = { _ in self.stored } + let _: @differentiable (T) -> T = { $0 } + } +} +// The superclass requirement directly involves `T`, the differentiable +// function parameter/result type. +// TODO(TF-851): Uncomment the tests below after `@differentiable` function +// SILGen thunking is fixed. +/* +struct SuperclassRequirement { + var stored: T +} +extension SuperclassRequirement { + func test(_ x: T) { + let _: @differentiable (T) -> T = { _ in self.stored } + let _: @differentiable (T) -> T = { $0 } + } +} +*/ diff --git a/test/AutoDiff/Sema/DerivedConformances/derived_differentiable.swift b/test/AutoDiff/Sema/DerivedConformances/derived_differentiable.swift index e933855276a4b..d99cf0b775565 100644 --- a/test/AutoDiff/Sema/DerivedConformances/derived_differentiable.swift +++ b/test/AutoDiff/Sema/DerivedConformances/derived_differentiable.swift @@ -8,7 +8,7 @@ struct GenericTangentVectorMember: Differentiable, var x: T.TangentVector } -// CHECK-AST-LABEL: internal struct GenericTangentVectorMember : Differentiable, AdditiveArithmetic where T : Differentiable +// CHECK-AST-LABEL: internal struct GenericTangentVectorMember : {{(Differentiable, AdditiveArithmetic)|(AdditiveArithmetic, Differentiable)}} where T : Differentiable // CHECK-AST: internal var x: T.TangentVector // CHECK-AST: internal init(x: T.TangentVector) // CHECK-AST: internal typealias TangentVector = GenericTangentVectorMember @@ -62,7 +62,7 @@ final class AdditiveArithmeticClass: Add // CHECK-AST-LABEL: final internal class AdditiveArithmeticClass : AdditiveArithmetic, Differentiable where T : AdditiveArithmetic, T : Differentiable { // CHECK-AST: final internal var x: T, y: T -// CHECK-AST: internal struct TangentVector : Differentiable, AdditiveArithmetic +// CHECK-AST: internal struct TangentVector : {{(Differentiable, 
AdditiveArithmetic)|(AdditiveArithmetic, Differentiable)}} // CHECK-AST: } @frozen @@ -70,7 +70,7 @@ public struct FrozenStruct: Differentiable {} // CHECK-AST-LABEL: @frozen public struct FrozenStruct : Differentiable { // CHECK-AST: internal init() -// CHECK-AST: @frozen public struct TangentVector : Differentiable, AdditiveArithmetic { +// CHECK-AST: @frozen public struct TangentVector : {{(Differentiable, AdditiveArithmetic)|(AdditiveArithmetic, Differentiable)}} { @usableFromInline struct UsableFromInlineStruct: Differentiable {} @@ -79,7 +79,7 @@ struct UsableFromInlineStruct: Differentiable {} // CHECK-AST: struct UsableFromInlineStruct : Differentiable { // CHECK-AST: internal init() // CHECK-AST: @usableFromInline -// CHECK-AST: struct TangentVector : Differentiable, AdditiveArithmetic { +// CHECK-AST: struct TangentVector : {{(Differentiable, AdditiveArithmetic)|(AdditiveArithmetic, Differentiable)}} { // Test property wrappers. @@ -96,7 +96,7 @@ struct WrappedPropertiesStruct: Differentiable { } // CHECK-AST-LABEL: internal struct WrappedPropertiesStruct : Differentiable { -// CHECK-AST: internal struct TangentVector : Differentiable, AdditiveArithmetic { +// CHECK-AST: internal struct TangentVector : {{(Differentiable, AdditiveArithmetic)|(AdditiveArithmetic, Differentiable)}} { // CHECK-AST: internal var x: Float.TangentVector // CHECK-AST: internal var y: Float.TangentVector // CHECK-AST: internal var z: Float.TangentVector @@ -111,9 +111,48 @@ class WrappedPropertiesClass: Differentiable { } // CHECK-AST-LABEL: internal class WrappedPropertiesClass : Differentiable { -// CHECK-AST: internal struct TangentVector : Differentiable, AdditiveArithmetic { +// CHECK-AST: internal struct TangentVector : {{(Differentiable, AdditiveArithmetic)|(AdditiveArithmetic, Differentiable)}} { // CHECK-AST: internal var x: Float.TangentVector // CHECK-AST: internal var y: Float.TangentVector // CHECK-AST: internal var z: Float.TangentVector // CHECK-AST: } // CHECK-AST: 
} + +protocol TangentVectorMustBeEncodable: Differentiable where TangentVector: Encodable {} + +struct AutoDeriveEncodableTV1: TangentVectorMustBeEncodable { + var x: Float +} + +// CHECK-AST-LABEL: internal struct AutoDeriveEncodableTV1 : TangentVectorMustBeEncodable { +// CHECK-AST: internal struct TangentVector : {{(Encodable, Differentiable, AdditiveArithmetic)|(Encodable, AdditiveArithmetic, Differentiable)|(Differentiable, Encodable, AdditiveArithmetic)|(AdditiveArithmetic, Encodable, Differentiable)|(Differentiable, AdditiveArithmetic, Encodable)|(AdditiveArithmetic, Differentiable, Encodable)}} { + +struct AutoDeriveEncodableTV2 { + var x: Float +} + +extension AutoDeriveEncodableTV2: TangentVectorMustBeEncodable {} + +// CHECK-AST-LABEL: extension AutoDeriveEncodableTV2 : TangentVectorMustBeEncodable { +// CHECK-AST: internal struct TangentVector : {{(Encodable, Differentiable, AdditiveArithmetic)|(Encodable, AdditiveArithmetic, Differentiable)|(Differentiable, Encodable, AdditiveArithmetic)|(AdditiveArithmetic, Encodable, Differentiable)|(Differentiable, AdditiveArithmetic, Encodable)|(AdditiveArithmetic, Differentiable, Encodable)}} { + +protocol TangentVectorP: Differentiable { + var requirement: Int { get } +} + +protocol TangentVectorConstrained: Differentiable where TangentVector: TangentVectorP {} + +struct StructWithTangentVectorConstrained: TangentVectorConstrained { + var x: Float +} + +// `extension StructWithTangentVectorConstrained.TangentVector: TangentVectorP` gives +// "error: type 'StructWithTangentVectorConstrained.TangentVector' does not conform to protocol 'TangentVectorP'", +// maybe because it typechecks the conformance before seeing the extension. But this roundabout way +// of stating the same thing works. 
+extension TangentVectorP where Self == StructWithTangentVectorConstrained.TangentVector { + var requirement: Int { 42 } +} + +// CHECK-AST-LABEL: internal struct StructWithTangentVectorConstrained : TangentVectorConstrained { +// CHECK-AST: internal struct TangentVector : {{(TangentVectorP, Differentiable, AdditiveArithmetic)|(TangentVectorP, AdditiveArithmetic, Differentiable)|(Differentiable, TangentVectorP, AdditiveArithmetic)|(AdditiveArithmetic, TangentVectorP, Differentiable)|(Differentiable, AdditiveArithmetic, TangentVectorP)|(AdditiveArithmetic, Differentiable, TangentVectorP)}} { diff --git a/test/AutoDiff/Sema/DerivedConformances/derived_differentiable_diagnostics.swift b/test/AutoDiff/Sema/DerivedConformances/derived_differentiable_diagnostics.swift new file mode 100644 index 0000000000000..889f489e339c2 --- /dev/null +++ b/test/AutoDiff/Sema/DerivedConformances/derived_differentiable_diagnostics.swift @@ -0,0 +1,15 @@ +// RUN: %target-swift-frontend -typecheck -verify %s + +import _Differentiation + +protocol TangentVectorP: Differentiable { + // expected-note @+1 {{protocol requires property 'requirement' with type 'Int'; do you want to add a stub?}} + var requirement: Int { get } +} + +protocol TangentVectorConstrained: Differentiable where TangentVector: TangentVectorP {} + +struct StructWithTangentVectorConstrained: TangentVectorConstrained { + var x: Float +} +// expected-error @-1 {{type 'StructWithTangentVectorConstrained.TangentVector' does not conform to protocol 'TangentVectorP'}} diff --git a/test/AutoDiff/compiler_crashers_fixed/sr12493-differentiable-function-extract-subst-function-type.swift b/test/AutoDiff/compiler_crashers_fixed/sr12493-differentiable-function-extract-subst-function-type.swift index 359c1d2ddb845..885cd437a048d 100644 --- a/test/AutoDiff/compiler_crashers_fixed/sr12493-differentiable-function-extract-subst-function-type.swift +++ 
b/test/AutoDiff/compiler_crashers_fixed/sr12493-differentiable-function-extract-subst-function-type.swift @@ -3,7 +3,7 @@ // SR-12493: SIL verification error regarding substituted function types and // `differentiable_function_extract` instruction. Occurs only with `-O`. -// FIXME(SR-13021): Disabled due to flakiness on Linux. +// FIXME(SR-13021): Disabled due to flakiness on Linux, likely related to TF-1197. // REQUIRES: SR13021 import _Differentiation diff --git a/test/AutoDiff/compiler_crashers_fixed/sr12650-noderivative-parameter-type-mangling.swift b/test/AutoDiff/compiler_crashers_fixed/sr12650-noderivative-parameter-type-mangling.swift index 9258d97c063c0..3148112d60c55 100644 --- a/test/AutoDiff/compiler_crashers_fixed/sr12650-noderivative-parameter-type-mangling.swift +++ b/test/AutoDiff/compiler_crashers_fixed/sr12650-noderivative-parameter-type-mangling.swift @@ -3,7 +3,7 @@ // SR-12650: IRGenDebugInfo type reconstruction crash because `@noDerivative` // parameters are not mangled. -// FIXME(SR-13021): Disabled due to flakiness on Linux. +// FIXME(SR-13021): Disabled due to flakiness on Linux, likely related to TF-1197. // REQUIRES: SR13021 import _Differentiation diff --git a/test/AutoDiff/compiler_crashers_fixed/sr12732-optimize-partial-apply-convention-thin-only.swift b/test/AutoDiff/compiler_crashers_fixed/sr12732-optimize-partial-apply-convention-thin-only.swift index 32d8a1cdc8b7a..71b7bffab5af4 100644 --- a/test/AutoDiff/compiler_crashers_fixed/sr12732-optimize-partial-apply-convention-thin-only.swift +++ b/test/AutoDiff/compiler_crashers_fixed/sr12732-optimize-partial-apply-convention-thin-only.swift @@ -6,6 +6,9 @@ // Do not rewrite `partial_apply` to `thin_to_thick_function` if the specialized // callee is not `@convention(thin)`. +// FIXME(SR-13021): Disabled due to flakiness on Linux, likely related to TF-1197. 
+// REQUIRES: SR13021 + import DifferentiationUnittest func callback(_ x: inout Tracked.TangentVector) {} diff --git a/test/AutoDiff/compiler_crashers_fixed/sr13933-vjpcloner-apply-multiple-consuming-users.swift b/test/AutoDiff/compiler_crashers_fixed/sr13933-vjpcloner-apply-multiple-consuming-users.swift new file mode 100644 index 0000000000000..0789739df3403 --- /dev/null +++ b/test/AutoDiff/compiler_crashers_fixed/sr13933-vjpcloner-apply-multiple-consuming-users.swift @@ -0,0 +1,29 @@ +// RUN: %target-build-swift %s +// REQUIRES: asserts + +// SR-13933: Fix "multiple consuming users" ownership error caused by +// `VJPCloner::visitApply` related to `@differentiable`-function-typed callees. + +import _Differentiation + +protocol P: Differentiable { + associatedtype Assoc: Differentiable +} + +struct S { + var fn: @differentiable (T.Assoc, T.Assoc) -> Float + + func method(y: T.Assoc) { + _ = gradient(at: y) { y in return self.fn(y, y) } + } +} + +// Original error: +// Begin Error in Function: 'AD__$s4main1SV6method1yy5AssocQz_tFSfAGcfU___vjp_src_0_wrt_0_4main1PRzl' +// Found over consume?! 
+// Value: %5 = copy_value %4 : $@differentiable @callee_guaranteed @substituted <τ_0_0, τ_0_1> (@in_guaranteed τ_0_0, @in_guaranteed τ_0_1) -> Float for <τ_0_0.Assoc, τ_0_0.Assoc> // users: %19, %6 +// User: %6 = convert_function %5 : $@differentiable @callee_guaranteed @substituted <τ_0_0, τ_0_1> (@in_guaranteed τ_0_0, @in_guaranteed τ_0_1) -> Float for <τ_0_0.Assoc, τ_0_0.Assoc> to $@differentiable @callee_guaranteed (@in_guaranteed τ_0_0.Assoc, @in_guaranteed τ_0_0.Assoc) -> Float // user: %7 +// Block: bb0 +// Consuming Users: +// destroy_value %5 : $@differentiable @callee_guaranteed @substituted <τ_0_0, τ_0_1> (@in_guaranteed τ_0_0, @in_guaranteed τ_0_1) -> Float for <τ_0_0.Assoc, τ_0_0.Assoc> // id: %19 +// %6 = convert_function %5 : $@differentiable @callee_guaranteed @substituted <τ_0_0, τ_0_1> (@in_guaranteed τ_0_0, @in_guaranteed τ_0_1) -> Float for <τ_0_0.Assoc, τ_0_0.Assoc> to $@differentiable @callee_guaranteed (@in_guaranteed τ_0_0.Assoc, @in_guaranteed τ_0_0.Assoc) -> Float // user: %7 diff --git a/test/AutoDiff/stdlib/simd.swift b/test/AutoDiff/stdlib/simd.swift index 3bd76f04fff30..a60c81a7c09b1 100644 --- a/test/AutoDiff/stdlib/simd.swift +++ b/test/AutoDiff/stdlib/simd.swift @@ -1,6 +1,9 @@ // RUN: %target-run-simple-swift // REQUIRES: executable_test +// Would fail due to unavailability of swift_autoDiffCreateLinearMapContext. +// UNSUPPORTED: use_os_stdlib + import _Differentiation import StdlibUnittest diff --git a/test/AutoDiff/validation-test/address_only_tangentvector.swift b/test/AutoDiff/validation-test/address_only_tangentvector.swift index a29556e29cda6..7e48ac7d65318 100644 --- a/test/AutoDiff/validation-test/address_only_tangentvector.swift +++ b/test/AutoDiff/validation-test/address_only_tangentvector.swift @@ -1,6 +1,9 @@ // RUN: %target-run-simple-swift // REQUIRES: executable_test +// Would fail due to unavailability of swift_autoDiffCreateLinearMapContext. 
+// UNSUPPORTED: use_os_stdlib + import StdlibUnittest import DifferentiationUnittest diff --git a/test/AutoDiff/validation-test/array.swift b/test/AutoDiff/validation-test/array.swift index abc4f16bf086d..860e574d2be71 100644 --- a/test/AutoDiff/validation-test/array.swift +++ b/test/AutoDiff/validation-test/array.swift @@ -1,6 +1,9 @@ // RUN: %target-run-simple-swift // REQUIRES: executable_test +// Would fail due to unavailability of swift_autoDiffCreateLinearMapContext. +// UNSUPPORTED: use_os_stdlib + import StdlibUnittest import _Differentiation diff --git a/test/AutoDiff/validation-test/inout_parameters.swift b/test/AutoDiff/validation-test/inout_parameters.swift index 626546040a900..ecfc60e345af7 100644 --- a/test/AutoDiff/validation-test/inout_parameters.swift +++ b/test/AutoDiff/validation-test/inout_parameters.swift @@ -1,6 +1,9 @@ // RUN: %target-run-simple-swift // REQUIRES: executable_test +// Would fail due to unavailability of swift_autoDiffCreateLinearMapContext. +// UNSUPPORTED: use_os_stdlib + // `inout` parameter differentiation tests. 
import DifferentiationUnittest diff --git a/test/AutoDiff/validation-test/optional-property.swift b/test/AutoDiff/validation-test/optional-property.swift index 000433ffaf96c..fadd93b26e807 100644 --- a/test/AutoDiff/validation-test/optional-property.swift +++ b/test/AutoDiff/validation-test/optional-property.swift @@ -1,5 +1,5 @@ // RUN: %target-run-simple-swift -// RUN: %target-swift-emit-sil -Xllvm -debug-only=differentiation -o /dev/null 2>&1 %s | %FileCheck %s +// RUN: %target-swift-emit-sil -Xllvm -debug-only=differentiation -module-name null -o /dev/null 2>&1 %s | %FileCheck %s // REQUIRES: executable_test // REQUIRES: asserts diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 601cdb099a6a5..dfcace2593ee3 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -1,3 +1,7 @@ +list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules") + +include(SwiftTestUtils) + function(swift_configure_lit_site_cfg source_path destination_path installed_name) if (CMAKE_CFG_INTDIR STREQUAL ".") set(SWIFT_BUILD_MODE ".") @@ -198,266 +202,231 @@ foreach(SDK ${SWIFT_SDKS}) # and one with the the macCatalyst ios-macabi triple. The build_flavors list will # have have only the "default" flavor for all SDKs and architectures except # OSX when macCatalyst support is enabled. - set(build_flavors "default") - if(SWIFT_ENABLE_MACCATALYST AND "${SDK}" STREQUAL "OSX") - list(APPEND build_flavors "ios-like" ) - endif() + get_swift_test_build_flavors(build_flavors "${SDK}") foreach(BUILD_FLAVOR ${build_flavors}) - # Configure variables for this subdirectory. 
- set(VARIANT_SUFFIX "-${SWIFT_SDK_${SDK}_LIB_SUBDIR}-${ARCH}") - get_versioned_target_triple(VARIANT_TRIPLE ${SDK} ${ARCH} "${SWIFT_SDK_${SDK}_DEPLOYMENT_VERSION}") - set(VARIANT_SDK "${SWIFT_SDK_${SDK}_ARCH_${ARCH}_PATH}") - set(DEFAULT_OSX_VARIANT_SUFFIX "") - - if(BUILD_FLAVOR STREQUAL "ios-like") - set(DEFAULT_OSX_VARIANT_SUFFIX "${VARIANT_SUFFIX}") - # Use the macCatalyst target triple and compiler resources for the iOS-like build flavor. - set(VARIANT_SUFFIX "-${SWIFT_SDK_${SDK}_LIB_SUBDIR}-maccatalyst-${ARCH}") - set(VARIANT_TRIPLE "${ARCH}-apple-ios13.0-macabi") - endif() + # Configure variables for this subdirectory. + set(VARIANT_SDK "${SWIFT_SDK_${SDK}_ARCH_${ARCH}_PATH}") + get_swift_test_variant_suffix(VARIANT_SUFFIX "${SDK}" "${ARCH}" "${BUILD_FLAVOR}") + get_swift_test_variant_suffix(DEFAULT_OSX_VARIANT_SUFFIX "${SDK}" "${ARCH}" "default") + get_swift_test_versioned_target_triple(VARIANT_TRIPLE "${SDK}" "${ARCH}" "${SWIFT_SDK_${SDK}_DEPLOYMENT_VERSION}" "${BUILD_FLAVOR}") + + # A directory where to put the xUnit-style XML test results. + set(SWIFT_TEST_RESULTS_DIR + "${CMAKE_BINARY_DIR}/${CMAKE_CFG_INTDIR}/swift-test-results/${VARIANT_TRIPLE}") + + set(command_clean_test_results_dir + COMMAND "${CMAKE_COMMAND}" -E remove_directory "${SWIFT_TEST_RESULTS_DIR}" + COMMAND "${CMAKE_COMMAND}" -E make_directory "${SWIFT_TEST_RESULTS_DIR}") + + set(test_bin_dir "${CMAKE_CURRENT_BINARY_DIR}${VARIANT_SUFFIX}") + set(validation_test_bin_dir + "${CMAKE_CURRENT_BINARY_DIR}/../validation-test${VARIANT_SUFFIX}") + + if(LLVM_ENABLE_LIBXML2) + set(SWIFT_HAVE_LIBXML2 TRUE) + else() + set(SWIFT_HAVE_LIBXML2 FALSE) + endif() - # A directory where to put the xUnit-style XML test results. 
- set(SWIFT_TEST_RESULTS_DIR - "${CMAKE_BINARY_DIR}/${CMAKE_CFG_INTDIR}/swift-test-results/${VARIANT_TRIPLE}") + swift_configure_lit_site_cfg( + "${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.in" + "${test_bin_dir}/lit.site.cfg" + "test${VARIANT_SUFFIX}.lit.site.cfg") - set(command_clean_test_results_dir - COMMAND "${CMAKE_COMMAND}" -E remove_directory "${SWIFT_TEST_RESULTS_DIR}" - COMMAND "${CMAKE_COMMAND}" -E make_directory "${SWIFT_TEST_RESULTS_DIR}") + swift_configure_lit_site_cfg( + "${CMAKE_CURRENT_SOURCE_DIR}/Unit/lit.site.cfg.in" + "${test_bin_dir}/Unit/lit.site.cfg" + "") - set(test_bin_dir "${CMAKE_CURRENT_BINARY_DIR}${VARIANT_SUFFIX}") - set(validation_test_bin_dir - "${CMAKE_CURRENT_BINARY_DIR}/../validation-test${VARIANT_SUFFIX}") + swift_configure_lit_site_cfg( + "${CMAKE_CURRENT_SOURCE_DIR}/../validation-test/lit.site.cfg.in" + "${validation_test_bin_dir}/lit.site.cfg" + "validation-test${VARIANT_SUFFIX}.lit.site.cfg") - if(LLVM_ENABLE_LIBXML2) - set(SWIFT_HAVE_LIBXML2 TRUE) - else() - set(SWIFT_HAVE_LIBXML2 FALSE) - endif() + set(test_dependencies) + get_test_dependencies("${SDK}" test_dependencies) - swift_configure_lit_site_cfg( - "${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.in" - "${test_bin_dir}/lit.site.cfg" - "test${VARIANT_SUFFIX}.lit.site.cfg") - - swift_configure_lit_site_cfg( - "${CMAKE_CURRENT_SOURCE_DIR}/Unit/lit.site.cfg.in" - "${test_bin_dir}/Unit/lit.site.cfg" - "") - - swift_configure_lit_site_cfg( - "${CMAKE_CURRENT_SOURCE_DIR}/../validation-test/lit.site.cfg.in" - "${validation_test_bin_dir}/lit.site.cfg" - "validation-test${VARIANT_SUFFIX}.lit.site.cfg") - - set(test_dependencies) - get_test_dependencies("${SDK}" test_dependencies) - - # Keep in sync with stdlib/tools/CMakeLists.txt: swift-reflection-test is - # only used when testing dynamic stdlib. 
- if(SWIFT_BUILD_DYNAMIC_STDLIB AND SWIFT_INCLUDE_TESTS AND NOT SWIFTWASM_DISABLE_REFLECTION_TEST) - # NOTE create a stub BlocksRuntime library that can be used for the - # reflection tests - file(WRITE ${test_bin_dir}/Inputs/BlocksRuntime.c - "void -#if defined(_WIN32) -__declspec(dllexport) -#endif -_Block_release(void) { }\n") - _add_swift_target_library_single( - BlocksRuntimeStub${VARIANT_SUFFIX} - BlocksRuntimeStub - SHARED - ARCHITECTURE ${ARCH} - SDK ${SDK} - INSTALL_IN_COMPONENT dev - ${test_bin_dir}/Inputs/BlocksRuntime.c) - set_target_properties(BlocksRuntimeStub${VARIANT_SUFFIX} PROPERTIES - ARCHIVE_OUTPUT_DIRECTORY ${test_bin_dir} - LIBRARY_OUTPUT_DIRECTORY ${test_bin_dir} - RUNTIME_OUTPUT_DIRECTORY ${test_bin_dir} - OUTPUT_NAME BlocksRuntime) - list(APPEND test_dependencies BlocksRuntimeStub${VARIANT_SUFFIX}) - - list(APPEND test_dependencies - "swift-test-stdlib-${SWIFT_SDK_${SDK}_LIB_SUBDIR}") - - if(BUILD_FLAVOR STREQUAL "ios-like") - # When testing the iOS-like build flavor, use the the normal macOS - # swift-reflection-test-tool. That tool runs out of process so it - # doesn't need to be build for macCatalyst. - list(APPEND test_dependencies - "swift-reflection-test${DEFAULT_OSX_VARIANT_SUFFIX}") + # Keep in sync with stdlib/tools/CMakeLists.txt: swift-reflection-test is + # only used when testing dynamic stdlib. 
+ if(SWIFT_BUILD_DYNAMIC_STDLIB AND SWIFT_INCLUDE_TESTS) + list(APPEND test_dependencies BlocksRuntimeStub${VARIANT_SUFFIX}) - endif() - - if(NOT SWIFTWASM_DISABLE_REFLECTION_TEST) - # wasm: Avoid to build swift-reflection-test because it uses unsupported linker flags for wasm-ld list(APPEND test_dependencies - "swift-reflection-test${VARIANT_SUFFIX}_signed") - endif() - endif() - - if(NOT "${COVERAGE_DB}" STREQUAL "") - list(APPEND test_dependencies "touch-covering-tests") - endif() - - set(validation_test_dependencies - "swiftStdlibCollectionUnittest-${SWIFT_SDK_${SDK}_LIB_SUBDIR}" - "swiftStdlibUnicodeUnittest-${SWIFT_SDK_${SDK}_LIB_SUBDIR}") - - set(command_upload_stdlib) - set(command_upload_swift_reflection_test) - if("${SDK}" STREQUAL "IOS" OR "${SDK}" STREQUAL "TVOS" OR "${SDK}" STREQUAL "WATCHOS") - # These are supported testing SDKs, but their implementation of - # `command_upload_stdlib` is hidden. - elseif("${SDK}" STREQUAL "ANDROID" AND NOT "${SWIFT_HOST_VARIANT}" STREQUAL "android") - # This adb setup is only needed when cross-compiling for Android, so the - # second check above makes sure we don't bother when the host is Android. - if("${SWIFT_ANDROID_DEPLOY_DEVICE_PATH}" STREQUAL "") - message(FATAL_ERROR - "When running Android host tests, you must specify the directory on the device " - "to which Swift build products will be deployed.") - endif() - - # Warning: This step will fail if you do not have an Android device - # connected via USB. See docs/Android.md for details on - # how to run the test suite for Android. - set(command_upload_stdlib - COMMAND - # Reboot the device and remove everything in its tmp - # directory. Build products and test executables are pushed - # to that directory when running the test suite. 
- $ "${SWIFT_SOURCE_DIR}/utils/android/adb_clean.py" - COMMAND - $ "${SWIFT_SOURCE_DIR}/utils/android/adb_push_built_products.py" - --ndk "${SWIFT_ANDROID_NDK_PATH}" - --destination "${SWIFT_ANDROID_DEPLOY_DEVICE_PATH}" - --destination-arch "${ARCH}" - # Build products like libswiftCore.so. - "${SWIFTLIB_DIR}/android" - # These two directories may contain the same libraries, - # but upload both to device just in case. Duplicates will be - # overwritten, and uploading doesn't take very long anyway. - "${SWIFT_ANDROID_${ARCH}_ICU_UC}" - "${SWIFT_ANDROID_${ARCH}_ICU_I18N}" - "${SWIFT_ANDROID_${ARCH}_ICU_DATA}") - endif() - add_custom_target("upload-stdlib${VARIANT_SUFFIX}" - ${command_upload_stdlib} - ${command_upload_swift_reflection_test} - COMMENT "Uploading stdlib") - - foreach(test_mode ${TEST_MODES}) - set(LIT_ARGS "${SWIFT_LIT_ARGS} ${LLVM_LIT_ARGS}") - separate_arguments(LIT_ARGS) - - if(NOT SWIFT_BUILD_STDLIB AND NOT SWIFT_PATH_TO_EXTERNAL_STDLIB_BUILD) - list(APPEND LIT_ARGS - "--param" "test_sdk_overlay_dir=${SWIFTLIB_DIR}/${SWIFT_SDK_${SDK}_LIB_SUBDIR}") + "swift-test-stdlib-${SWIFT_SDK_${SDK}_LIB_SUBDIR}") + + if(BUILD_FLAVOR STREQUAL "ios-like") + # When testing the iOS-like build flavor, use the the normal macOS + # swift-reflection-test-tool. That tool runs out of process so it + # doesn't need to be build for macCatalyst. 
+ list(APPEND test_dependencies + "swift-reflection-test${DEFAULT_OSX_VARIANT_SUFFIX}") + else() + list(APPEND test_dependencies + "swift-reflection-test${VARIANT_SUFFIX}_signed") + endif() endif() - execute_process(COMMAND - $ "-c" "import psutil" - RESULT_VARIABLE python_psutil_status - TIMEOUT 1 # second - ERROR_QUIET) - if(NOT python_psutil_status) - list(APPEND LIT_ARGS "--timeout=3000") # 50 minutes + if(NOT "${COVERAGE_DB}" STREQUAL "") + list(APPEND test_dependencies "touch-covering-tests") endif() - list(APPEND LIT_ARGS "--xunit-xml-output=${SWIFT_TEST_RESULTS_DIR}/lit-tests.xml") - - if(SWIFT_ENABLE_EXPERIMENTAL_DIFFERENTIABLE_PROGRAMMING) - list(APPEND LIT_ARGS "--param" "differentiable_programming") - endif() + set(validation_test_dependencies + "swiftStdlibCollectionUnittest-${SWIFT_SDK_${SDK}_LIB_SUBDIR}" + "swiftStdlibUnicodeUnittest-${SWIFT_SDK_${SDK}_LIB_SUBDIR}") + + set(command_upload_stdlib) + set(command_upload_swift_reflection_test) + if("${SDK}" STREQUAL "IOS" OR "${SDK}" STREQUAL "TVOS" OR "${SDK}" STREQUAL "WATCHOS") + # These are supported testing SDKs, but their implementation of + # `command_upload_stdlib` is hidden. + elseif("${SDK}" STREQUAL "ANDROID" AND NOT "${SWIFT_HOST_VARIANT}" STREQUAL "android") + # This adb setup is only needed when cross-compiling for Android, so the + # second check above makes sure we don't bother when the host is Android. + if("${SWIFT_ANDROID_DEPLOY_DEVICE_PATH}" STREQUAL "") + message(FATAL_ERROR + "When running Android host tests, you must specify the directory on the device " + "to which Swift build products will be deployed.") + endif() - if(SWIFT_ENABLE_EXPERIMENTAL_CONCURRENCY) - list(APPEND LIT_ARGS "--param" "concurrency") + # Warning: This step will fail if you do not have an Android device + # connected via USB. See docs/Android.md for details on + # how to run the test suite for Android. + set(command_upload_stdlib + COMMAND + # Reboot the device and remove everything in its tmp + # directory. 
Build products and test executables are pushed + # to that directory when running the test suite. + $ "${SWIFT_SOURCE_DIR}/utils/android/adb_clean.py" + COMMAND + $ "${SWIFT_SOURCE_DIR}/utils/android/adb_push_built_products.py" + --ndk "${SWIFT_ANDROID_NDK_PATH}" + --destination "${SWIFT_ANDROID_DEPLOY_DEVICE_PATH}" + --destination-arch "${ARCH}" + # Build products like libswiftCore.so. + "${SWIFTLIB_DIR}/android" + # These two directories may contain the same libraries, + # but upload both to device just in case. Duplicates will be + # overwritten, and uploading doesn't take very long anyway. + "${SWIFT_ANDROID_${ARCH}_ICU_UC}" + "${SWIFT_ANDROID_${ARCH}_ICU_I18N}" + "${SWIFT_ANDROID_${ARCH}_ICU_DATA}") endif() - - foreach(test_subset ${TEST_SUBSETS}) - set(directories) - set(dependencies ${test_dependencies}) - - if((test_subset STREQUAL "primary") OR - (test_subset STREQUAL "validation") OR - (test_subset STREQUAL "only_long") OR - (test_subset STREQUAL "only_stress") OR - (test_subset STREQUAL "all")) - list(APPEND directories "${test_bin_dir}") - endif() - if((test_subset STREQUAL "validation") OR - (test_subset STREQUAL "only_validation") OR - (test_subset STREQUAL "only_long") OR - (test_subset STREQUAL "only_stress") OR - (test_subset STREQUAL "all")) - list(APPEND directories "${validation_test_bin_dir}") - list(APPEND dependencies ${validation_test_dependencies}) + add_custom_target("upload-stdlib${VARIANT_SUFFIX}" + ${command_upload_stdlib} + ${command_upload_swift_reflection_test} + COMMENT "Uploading stdlib") + + foreach(test_mode ${TEST_MODES}) + set(LIT_ARGS "${SWIFT_LIT_ARGS} ${LLVM_LIT_ARGS}") + separate_arguments(LIT_ARGS) + + if(NOT SWIFT_BUILD_STDLIB AND NOT SWIFT_PATH_TO_EXTERNAL_STDLIB_BUILD) + list(APPEND LIT_ARGS + "--param" "test_sdk_overlay_dir=${SWIFTLIB_DIR}/${SWIFT_SDK_${SDK}_LIB_SUBDIR}") endif() - if("${SWIFT_SDK_${SDK}_OBJECT_FORMAT}" STREQUAL "ELF") - list(APPEND dependencies swiftImageRegistration${VARIANT_SUFFIX}) + 
execute_process(COMMAND + $ "-c" "import psutil" + RESULT_VARIABLE python_psutil_status + TIMEOUT 1 # second + ERROR_QUIET) + if(NOT python_psutil_status) + list(APPEND LIT_ARGS "--timeout=3000") # 50 minutes endif() - set(test_subset_target_suffix "-${test_subset}") - if(test_subset STREQUAL "primary") - set(test_subset_target_suffix "") - endif() + list(APPEND LIT_ARGS "--xunit-xml-output=${SWIFT_TEST_RESULTS_DIR}/lit-tests.xml") - set(test_mode_target_suffix "") - if(NOT test_mode STREQUAL "optimize_none") - set(test_mode_target_suffix "-${test_mode}") + if(SWIFT_ENABLE_EXPERIMENTAL_DIFFERENTIABLE_PROGRAMMING) + list(APPEND LIT_ARGS "--param" "differentiable_programming") endif() - set(maybe_command_upload_stdlib) - if(NOT test_mode STREQUAL "only_non_executable") - set(maybe_command_upload_stdlib ${command_upload_stdlib}) + if(SWIFT_ENABLE_EXPERIMENTAL_CONCURRENCY) + list(APPEND LIT_ARGS "--param" "concurrency") endif() - set(test_target_name - "check-swift${test_subset_target_suffix}${test_mode_target_suffix}${VARIANT_SUFFIX}") - add_custom_target("${test_target_name}" - ${maybe_command_upload_stdlib} - ${command_upload_swift_reflection_test} - ${command_clean_test_results_dir} - COMMAND - $ "${LIT}" - ${LIT_ARGS} - "--param" "swift_test_subset=${test_subset}" - "--param" "swift_test_mode=${test_mode}" - ${directories} - DEPENDS ${dependencies} - COMMENT "Running ${test_subset} Swift tests for ${VARIANT_TRIPLE}" - USES_TERMINAL) - - set(test_dependencies_target_name - "swift${test_subset_target_suffix}${test_mode_target_suffix}${VARIANT_SUFFIX}-test-depends") - add_custom_target("${test_dependencies_target_name}" - DEPENDS ${dependencies}) - - add_custom_target("${test_target_name}-custom" - ${command_upload_stdlib} - ${command_upload_swift_reflection_test} - ${command_clean_test_results_dir} - COMMAND - $ "${LIT}" - ${LIT_ARGS} - "--param" "swift_test_subset=${test_subset}" - "--param" "swift_test_mode=${test_mode}" - ${SWIFT_LIT_TEST_PATHS} - DEPENDS 
${dependencies} - COMMENT "Running ${test_subset} Swift tests for ${VARIANT_TRIPLE} from custom test locations" - USES_TERMINAL) - set_property(TARGET - "${test_target_name}" - "${test_target_name}-custom" - "${test_dependencies_target_name}" - PROPERTY FOLDER "Tests/check-swift") + foreach(test_subset ${TEST_SUBSETS}) + set(directories) + set(dependencies ${test_dependencies}) + + if((test_subset STREQUAL "primary") OR + (test_subset STREQUAL "validation") OR + (test_subset STREQUAL "only_long") OR + (test_subset STREQUAL "only_stress") OR + (test_subset STREQUAL "all")) + list(APPEND directories "${test_bin_dir}") + endif() + if((test_subset STREQUAL "validation") OR + (test_subset STREQUAL "only_validation") OR + (test_subset STREQUAL "only_long") OR + (test_subset STREQUAL "only_stress") OR + (test_subset STREQUAL "all")) + list(APPEND directories "${validation_test_bin_dir}") + list(APPEND dependencies ${validation_test_dependencies}) + endif() + + if("${SWIFT_SDK_${SDK}_OBJECT_FORMAT}" STREQUAL "ELF") + list(APPEND dependencies swiftImageRegistration${VARIANT_SUFFIX}) + endif() + + set(test_subset_target_suffix "-${test_subset}") + if(test_subset STREQUAL "primary") + set(test_subset_target_suffix "") + endif() + + set(test_mode_target_suffix "") + if(NOT test_mode STREQUAL "optimize_none") + set(test_mode_target_suffix "-${test_mode}") + endif() + + set(maybe_command_upload_stdlib) + if(NOT test_mode STREQUAL "only_non_executable") + set(maybe_command_upload_stdlib ${command_upload_stdlib}) + endif() + + set(test_target_name + "check-swift${test_subset_target_suffix}${test_mode_target_suffix}${VARIANT_SUFFIX}") + add_custom_target("${test_target_name}" + ${maybe_command_upload_stdlib} + ${command_upload_swift_reflection_test} + ${command_clean_test_results_dir} + COMMAND + $ "${LIT}" + ${LIT_ARGS} + "--param" "swift_test_subset=${test_subset}" + "--param" "swift_test_mode=${test_mode}" + ${directories} + DEPENDS ${dependencies} + COMMENT "Running 
${test_subset} Swift tests for ${VARIANT_TRIPLE}" + USES_TERMINAL) + + set(test_dependencies_target_name + "swift${test_subset_target_suffix}${test_mode_target_suffix}${VARIANT_SUFFIX}-test-depends") + add_custom_target("${test_dependencies_target_name}" + DEPENDS ${dependencies}) + + add_custom_target("${test_target_name}-custom" + ${command_upload_stdlib} + ${command_upload_swift_reflection_test} + ${command_clean_test_results_dir} + COMMAND + $ "${LIT}" + ${LIT_ARGS} + "--param" "swift_test_subset=${test_subset}" + "--param" "swift_test_mode=${test_mode}" + ${SWIFT_LIT_TEST_PATHS} + DEPENDS ${dependencies} + COMMENT "Running ${test_subset} Swift tests for ${VARIANT_TRIPLE} from custom test locations" + USES_TERMINAL) + set_property(TARGET + "${test_target_name}" + "${test_target_name}-custom" + "${test_dependencies_target_name}" + PROPERTY FOLDER "Tests/check-swift") + endforeach() endforeach() endforeach() endforeach() - endforeach() endforeach() # Add shortcuts for the default variant. diff --git a/test/ClangImporter/Dispatch_test.swift b/test/ClangImporter/Dispatch_test.swift index b3d0cd01dc561..9a610d376616a 100644 --- a/test/ClangImporter/Dispatch_test.swift +++ b/test/ClangImporter/Dispatch_test.swift @@ -15,7 +15,8 @@ func test2(_ queue: DispatchQueue) { // Make sure the dispatch types are actually distinct types! let _ = queue as DispatchSource // expected-error {{cannot convert value of type 'DispatchQueue' to type 'DispatchSource' in coercion}} - let _ = base as DispatchSource // expected-error {{'NSObjectProtocol' is not convertible to 'DispatchSource'; did you mean to use 'as!' to force downcast?}} + let _ = base as DispatchSource // expected-error {{'NSObjectProtocol' is not convertible to 'DispatchSource'}} + // expected-note@-1 {{did you mean to use 'as!' 
to force downcast?}} {{16-18=as!}} } extension dispatch_queue_t {} // expected-error {{'dispatch_queue_t' is unavailable}} diff --git a/test/ClangImporter/Security_test.swift b/test/ClangImporter/Security_test.swift index e2ca596fb9a25..247d70beb34db 100644 --- a/test/ClangImporter/Security_test.swift +++ b/test/ClangImporter/Security_test.swift @@ -6,7 +6,8 @@ import Security _ = kSecClass as CFString _ = kSecClassGenericPassword as CFString -_ = kSecClassGenericPassword as CFDictionary // expected-error {{'CFString?' is not convertible to 'CFDictionary'}} {{30-32=as!}} +_ = kSecClassGenericPassword as CFDictionary // expected-error {{'CFString?' is not convertible to 'CFDictionary'}} +// expected-note@-1 {{did you mean to use 'as!' to force downcast?}} {{30-32=as!}} func testIntegration() { // Based on code in . diff --git a/test/ClangImporter/objc_async.swift b/test/ClangImporter/objc_async.swift index f720aa10834e9..d158b45b2d9bd 100644 --- a/test/ClangImporter/objc_async.swift +++ b/test/ClangImporter/objc_async.swift @@ -8,8 +8,17 @@ import ObjCConcurrency func testSlowServer(slowServer: SlowServer) async throws { let _: Int = await slowServer.doSomethingSlow("mail") let _: Bool = await slowServer.checkAvailability() - let _: String = try await slowServer.findAnswer() + let _: String = await try slowServer.findAnswer() let _: String = await try slowServer.findAnswerFailingly() + + let (aOpt, b) = await try slowServer.findQAndA() + if let a = aOpt { // make sure aOpt is optional + print(a) + } + let _: String = b // make sure b is non-optional + + let _: String = await try slowServer.findAnswer() + let _: Void = await slowServer.doSomethingFun("jump") let _: (Int) -> Void = slowServer.completionHandler @@ -24,14 +33,19 @@ func testSlowServer(slowServer: SlowServer) async throws { let _: Int = await try slowServer.magicNumber(withSeed: 42) await slowServer.serverRestart("localhost") - await slowServer.server("localhost", atPriorityRestart: 0.8) + await 
slowServer.serverRestart("localhost", atPriority: 0.8) _ = await slowServer.allOperations() + + let _: Int = await slowServer.bestName("hello") + let _: Int = await slowServer.customize("hello") } func testSlowServerSynchronous(slowServer: SlowServer) { // synchronous version let _: Int = slowServer.doSomethingConflicted("thinking") + slowServer.poorlyNamed("hello") { (i: Int) in print(i) } + slowServer.customize(with: "hello") { (i: Int) in print(i) } } func testSlowServerOldSchool(slowServer: SlowServer) { diff --git a/test/ClangImporter/objc_bridging_custom.swift b/test/ClangImporter/objc_bridging_custom.swift index 86690621d521e..f7f193be475e5 100644 --- a/test/ClangImporter/objc_bridging_custom.swift +++ b/test/ClangImporter/objc_bridging_custom.swift @@ -218,11 +218,16 @@ func testExplicitConversion(objc: APPManufacturerInfo, swift: ManufacturerInfo) { // Bridging to Swift let _ = objc as ManufacturerInfo - let _ = objc as ManufacturerInfo // expected-error{{'APPManufacturerInfo' is not convertible to 'ManufacturerInfo'; did you mean to use 'as!' to force downcast?}} - let _ = objc as ManufacturerInfo // expected-error{{'APPManufacturerInfo' is not convertible to 'ManufacturerInfo'; did you mean to use 'as!' to force downcast?}} + let _ = objc as ManufacturerInfo // expected-error{{'APPManufacturerInfo' is not convertible to 'ManufacturerInfo'}} + // expected-note@-1 {{did you mean to use 'as!' to force downcast?}} {{16-18=as!}} + let _ = objc as ManufacturerInfo // expected-error{{'APPManufacturerInfo' is not convertible to 'ManufacturerInfo'}} + // expected-note@-1 {{did you mean to use 'as!' to force downcast?}} {{16-18=as!}} // Bridging to Objective-C let _ = swift as APPManufacturerInfo - let _ = swift as APPManufacturerInfo // expected-error{{'ManufacturerInfo' is not convertible to 'APPManufacturerInfo'; did you mean to use 'as!' 
to force downcast?}} - let _ = swift as APPManufacturerInfo // expected-error{{'ManufacturerInfo' is not convertible to 'APPManufacturerInfo'; did you mean to use 'as!' to force downcast?}} + let _ = swift as APPManufacturerInfo // expected-error{{'ManufacturerInfo' is not convertible to 'APPManufacturerInfo'}} + // expected-note@-1 {{did you mean to use 'as!' to force downcast?}} {{17-19=as!}} + let _ = swift as APPManufacturerInfo // expected-error{{'ManufacturerInfo' is not convertible to 'APPManufacturerInfo'}} + // expected-note@-1 {{did you mean to use 'as!' to force downcast?}} {{17-19=as!}} + } diff --git a/test/ClangImporter/objc_parse.swift b/test/ClangImporter/objc_parse.swift index a39a815378b99..d8751da548a99 100644 --- a/test/ClangImporter/objc_parse.swift +++ b/test/ClangImporter/objc_parse.swift @@ -540,10 +540,12 @@ func testStrangeSelectors(obj: StrangeSelectors) { func testProtocolQualified(_ obj: CopyableNSObject, cell: CopyableSomeCell, plainObj: NSObject, plainCell: SomeCell) { - _ = obj as NSObject // expected-error {{'CopyableNSObject' (aka 'NSCopying & NSObjectProtocol') is not convertible to 'NSObject'; did you mean to use 'as!' to force downcast?}} {{11-13=as!}} + _ = obj as NSObject // expected-error {{'CopyableNSObject' (aka 'NSCopying & NSObjectProtocol') is not convertible to 'NSObject'}} + // expected-note@-1 {{did you mean to use 'as!' to force downcast?}} {{11-13=as!}} _ = obj as NSObjectProtocol _ = obj as NSCopying - _ = obj as SomeCell // expected-error {{'CopyableNSObject' (aka 'NSCopying & NSObjectProtocol') is not convertible to 'SomeCell'; did you mean to use 'as!' to force downcast?}} {{11-13=as!}} + _ = obj as SomeCell // expected-error {{'CopyableNSObject' (aka 'NSCopying & NSObjectProtocol') is not convertible to 'SomeCell'}} + // expected-note@-1 {{did you mean to use 'as!' 
to force downcast?}} {{11-13=as!}} _ = cell as NSObject _ = cell as NSObjectProtocol diff --git a/test/ClangImporter/unserializable-clang-function-types.swift b/test/ClangImporter/unserializable-clang-function-types.swift deleted file mode 100644 index 7332e437876cd..0000000000000 --- a/test/ClangImporter/unserializable-clang-function-types.swift +++ /dev/null @@ -1,6 +0,0 @@ -// RUN: %target-swift-frontend -typecheck -swift-version 5 -emit-module-interface-path - -sdk %clang-importer-sdk -enable-library-evolution %s -experimental-print-full-convention -verify - -import ctypes - -public var f1 : UnserializableFunctionPointer? -// expected-error@-1 {{cannot export the underlying C type of the function type 'UnserializableFunctionPointer' (aka '@convention(c) () -> Optional'); it may use anonymous types or types defined outside of a module}} diff --git a/test/Concurrency/Runtime/async_task_priority_basic.swift b/test/Concurrency/Runtime/async_task_priority_basic.swift index 7c1510b77fbf5..fbe823a805615 100644 --- a/test/Concurrency/Runtime/async_task_priority_basic.swift +++ b/test/Concurrency/Runtime/async_task_priority_basic.swift @@ -1,10 +1,16 @@ -// RUN: %target-run-simple-swift(-Xfrontend -enable-experimental-concurrency) | %FileCheck %s --dump-input always +// RUN: %target-run-simple-swift(-Xfrontend -enable-experimental-concurrency %import-libdispatch) | %FileCheck %s --dump-input always // REQUIRES: executable_test // REQUIRES: concurrency -// REQUIRES: OS=macosx +// REQUIRES: libdispatch import Dispatch +#if canImport(Darwin) +import Darwin +#elseif canImport(Glibc) +import Glibc +#endif + // ==== ------------------------------------------------------------------------ // MARK: "Infrastructure" for the tests diff --git a/test/Concurrency/Runtime/basic_future.swift b/test/Concurrency/Runtime/basic_future.swift index dc72c89e2784e..b95793f86bcc9 100644 --- a/test/Concurrency/Runtime/basic_future.swift +++ b/test/Concurrency/Runtime/basic_future.swift @@ -1,12 
+1,17 @@ -// RUN: %target-run-simple-swift(-Xfrontend -enable-experimental-concurrency) +// RUN: %target-run-simple-swift(-Xfrontend -enable-experimental-concurrency %import-libdispatch) // REQUIRES: executable_test // REQUIRES: concurrency -// REQUIRES: OS=macosx -// XFAIL: CPU=arm64e +// REQUIRES: libdispatch import Dispatch +#if canImport(Darwin) +import Darwin +#elseif canImport(Glibc) +import Glibc +#endif + extension DispatchQueue { func async(execute: @escaping () async throws -> R) -> Task.Handle { let handle = Task.runDetached(operation: execute) diff --git a/test/Concurrency/Runtime/future_fibonacci.swift b/test/Concurrency/Runtime/future_fibonacci.swift index f7296800be529..12d4294fb112c 100644 --- a/test/Concurrency/Runtime/future_fibonacci.swift +++ b/test/Concurrency/Runtime/future_fibonacci.swift @@ -1,12 +1,17 @@ -// RUN: %target-run-simple-swift(-Xfrontend -enable-experimental-concurrency) +// RUN: %target-run-simple-swift(-Xfrontend -enable-experimental-concurrency %import-libdispatch) // REQUIRES: executable_test // REQUIRES: concurrency -// REQUIRES: OS=macosx -// XFAIL: CPU=arm64e +// REQUIRES: libdispatch import Dispatch +#if canImport(Darwin) +import Darwin +#elseif canImport(Glibc) +import Glibc +#endif + extension DispatchQueue { func async(execute: @escaping () async throws -> R) -> Task.Handle { let handle = Task.runDetached(operation: execute) diff --git a/test/Concurrency/actor_call_implicitly_async.swift b/test/Concurrency/actor_call_implicitly_async.swift index f70743362e49e..e7211aff66091 100644 --- a/test/Concurrency/actor_call_implicitly_async.swift +++ b/test/Concurrency/actor_call_implicitly_async.swift @@ -184,4 +184,34 @@ func blender(_ peeler : () -> Void) { @OrangeActor func quinoa() async { rice() // expected-error {{call is 'async' but is not marked with 'await'}} +} + +/////////// +// check various curried applications to ensure we mark the right expression. 
+ +actor class Calculator { + func addCurried(_ x : Int) -> ((Int) -> Int) { + return { (_ y : Int) in x + y } + } + + func add(_ x : Int, _ y : Int) -> Int { + return x + y + } +} + +@BananaActor func bananaAdd(_ x : Int) -> ((Int) -> Int) { + return { (_ y : Int) in x + y } +} + +@OrangeActor func doSomething() async { + let _ = (await bananaAdd(1))(2) + let _ = await (await bananaAdd(1))(2) // expected-warning{{no calls to 'async' functions occur within 'await' expression}} + + let calc = Calculator() + + let _ = (await calc.addCurried(1))(2) + let _ = await (await calc.addCurried(1))(2) // expected-warning{{no calls to 'async' functions occur within 'await' expression}} + + let plusOne = await calc.addCurried(await calc.add(0, 1)) + let _ = plusOne(2) } \ No newline at end of file diff --git a/test/Concurrency/async_tasks.swift b/test/Concurrency/async_tasks.swift index bfe83cc68a667..a7be5449250ef 100644 --- a/test/Concurrency/async_tasks.swift +++ b/test/Concurrency/async_tasks.swift @@ -43,7 +43,7 @@ func buyVegetables(shoppingList: [String]) async throws -> [Vegetable] { func test_unsafeContinuations() async { // the closure should not allow async operations; // after all: if you have async code, just call it directly, without the unsafe continuation - let _: String = Task.withUnsafeContinuation { continuation in // expected-error{{cannot convert value of type '(_) async -> ()' to expected argument type '(Task.UnsafeContinuation) -> Void'}} + let _: String = Task.withUnsafeContinuation { continuation in // expected-error{{invalid conversion from 'async' function of type '(UnsafeContinuation) async -> Void' to synchronous function type '(UnsafeContinuation) -> Void'}} let s = await someAsyncFunc() // rdar://70610141 for getting a better error message here continuation.resume(returning: s) } @@ -54,11 +54,11 @@ func test_unsafeContinuations() async { } func test_unsafeThrowingContinuations() async { - let _: String = try await 
Task.withUnsafeThrowingContinuation { continuation in + let _: String = await try Task.withUnsafeThrowingContinuation { continuation in continuation.resume(returning: "") } - let _: String = try await Task.withUnsafeThrowingContinuation { continuation in + let _: String = await try Task.withUnsafeThrowingContinuation { continuation in continuation.resume(throwing: MyError()) } diff --git a/test/Constraints/ErrorBridging.swift b/test/Constraints/ErrorBridging.swift index 81a8a7675fc60..aa64ca78841a7 100644 --- a/test/Constraints/ErrorBridging.swift +++ b/test/Constraints/ErrorBridging.swift @@ -37,7 +37,8 @@ var ns4 = compo as NSError ns4 = compo // expected-error{{cannot assign value of type 'HairyError & Runcible' to type 'NSError'}} let e1 = ns1 as? FooError -let e1fix = ns1 as FooError // expected-error{{did you mean to use 'as!'}} {{17-19=as!}} +let e1fix = ns1 as FooError // expected-error {{'NSError' is not convertible to 'FooError'}} +// expected-note@-1{{did you mean to use 'as!' to force downcast?}} {{17-19=as!}} let esub = ns1 as Error let esub2 = ns1 as? 
Error // expected-warning{{conditional cast from 'NSError' to 'Error' always succeeds}} diff --git a/test/Constraints/async.swift b/test/Constraints/async.swift index 51ad6292df886..a11d463e905d1 100644 --- a/test/Constraints/async.swift +++ b/test/Constraints/async.swift @@ -5,14 +5,14 @@ func doAsynchronously() async { } func doSynchronously() { } -func testNonConversions() async { - let _: () -> Void = doAsynchronously // expected-error{{cannot convert value of type '() async -> ()' to specified type '() -> Void'}} - let _: () async -> Void = doSynchronously // expected-error{{cannot convert value of type '() -> ()' to specified type '() async -> Void'}} +func testConversions() async { + let _: () -> Void = doAsynchronously // expected-error{{invalid conversion from 'async' function of type '() async -> ()' to synchronous function type '() -> Void'}} + let _: () async -> Void = doSynchronously // okay } // Overloading @available(swift, deprecated: 4.0, message: "synchronous is no fun") -func overloadedSame() -> String { "synchronous" } +func overloadedSame(_: Int = 0) -> String { "synchronous" } func overloadedSame() async -> String { "asynchronous" } @@ -94,3 +94,24 @@ func testPassAsyncClosure() { let b = takesAsyncClosure { overloadedSame() } // expected-warning{{synchronous is no fun}} let _: Double = b // expected-error{{convert value of type 'String'}} } + +struct FunctionTypes { + var syncNonThrowing: () -> Void + var syncThrowing: () throws -> Void + var asyncNonThrowing: () async -> Void + var asyncThrowing: () async throws -> Void + + mutating func demonstrateConversions() { + // Okay to add 'async' and/or 'throws' + asyncNonThrowing = syncNonThrowing + asyncThrowing = syncThrowing + syncThrowing = syncNonThrowing + asyncThrowing = asyncNonThrowing + + // Error to remove 'async' or 'throws' + syncNonThrowing = asyncNonThrowing // expected-error{{invalid conversion}} + syncThrowing = asyncThrowing // expected-error{{invalid conversion}} + 
syncNonThrowing = syncThrowing // expected-error{{invalid conversion}} + asyncNonThrowing = syncThrowing // expected-error{{invalid conversion}} + } +} diff --git a/test/Constraints/bridging-nsnumber-and-nsvalue.swift.gyb b/test/Constraints/bridging-nsnumber-and-nsvalue.swift.gyb index 52995ab17df95..487d01ed4f2a9 100644 --- a/test/Constraints/bridging-nsnumber-and-nsvalue.swift.gyb +++ b/test/Constraints/bridging-nsnumber-and-nsvalue.swift.gyb @@ -57,17 +57,17 @@ func bridgeNSNumberBackToSpecificType(object: ${ObjectType}, dictBoth: [${ObjectType}: ${ObjectType}], set: Set<${ObjectType}>) { % for Type in ValueTypes: - _ = object as ${Type} // expected-error{{use 'as!'}} + _ = object as ${Type} // expected-error{{is not convertible to}} expected-note {{use 'as!'}} _ = object is ${Type} _ = object as? ${Type} _ = object as! ${Type} - _ = optional as ${Type}? // expected-error{{use 'as!'}} + _ = optional as ${Type}? // expected-error{{is not convertible to}} expected-note {{use 'as!'}} _ = optional is ${Type}? _ = optional as? ${Type}? _ = optional as! ${Type}? - _ = optional as ${Type} // expected-error{{use 'as!'}} + _ = optional as ${Type} // expected-error{{is not convertible to}} expected-note {{use 'as!'}} _ = optional is ${Type} _ = optional as? ${Type} _ = optional as! ${Type} @@ -82,7 +82,7 @@ func bridgeNSNumberBackToSpecificType(object: ${ObjectType}, _ = dictKeys as? [${Type}: Any] _ = dictKeys as! [${Type}: Any] - _ = dictKeys as [${Type}: AnyObject] // expected-error{{use 'as!'}} + _ = dictKeys as [${Type}: AnyObject] // expected-error{{is not convertible to}} expected-note {{use 'as!'}} _ = dictKeys is [${Type}: AnyObject] _ = dictKeys as? [${Type}: AnyObject] _ = dictKeys as! [${Type}: AnyObject] @@ -92,7 +92,7 @@ func bridgeNSNumberBackToSpecificType(object: ${ObjectType}, _ = dictValues as? [AnyHashable: ${Type}] _ = dictValues as! 
[AnyHashable: ${Type}] - _ = dictValues as [NSObject: ${Type}] // expected-error{{use 'as!'}} + _ = dictValues as [NSObject: ${Type}] // expected-error{{is not convertible to}} expected-note {{use 'as!'}} _ = dictValues is [NSObject: ${Type}] _ = dictValues as? [NSObject: ${Type}] _ = dictValues as! [NSObject: ${Type}] @@ -107,7 +107,7 @@ func bridgeNSNumberBackToSpecificType(object: ${ObjectType}, _ = dictBoth as? [${Type}: ${ObjectType}] _ = dictBoth as! [${Type}: ${ObjectType}] - _ = dictBoth as [${Type}: ${Type}] // expected-error{{use 'as!'}} + _ = dictBoth as [${Type}: ${Type}] // expected-error{{is not convertible to}} expected-note {{use 'as!'}} _ = dictBoth is [${Type}: ${Type}] _ = dictBoth as? [${Type}: ${Type}] _ = dictBoth as! [${Type}: ${Type}] diff --git a/test/Constraints/bridging.swift b/test/Constraints/bridging.swift index ebca997e18627..bf894ce00e29e 100644 --- a/test/Constraints/bridging.swift +++ b/test/Constraints/bridging.swift @@ -124,16 +124,24 @@ func arrayToNSArray() { // NSArray -> Array func nsArrayToArray(_ nsa: NSArray) { var arr1: [AnyObject] = nsa // expected-error{{'NSArray' is not implicitly convertible to '[AnyObject]'; did you mean to use 'as' to explicitly convert?}} {{30-30= as [AnyObject]}} - var _: [BridgedClass] = nsa // expected-error{{'NSArray' is not convertible to '[BridgedClass]'}} {{30-30= as! [BridgedClass]}} - var _: [OtherClass] = nsa // expected-error{{'NSArray' is not convertible to '[OtherClass]'}} {{28-28= as! [OtherClass]}} - var _: [BridgedStruct] = nsa // expected-error{{'NSArray' is not convertible to '[BridgedStruct]'}} {{31-31= as! [BridgedStruct]}} - var _: [NotBridgedStruct] = nsa // expected-error{{use 'as!' to force downcast}} + var _: [BridgedClass] = nsa // expected-error{{'NSArray' is not convertible to '[BridgedClass]'}} + // expected-note@-1{{did you mean to use 'as!' to force downcast?}} {{30-30= as! 
[BridgedClass]}} + var _: [OtherClass] = nsa // expected-error{{'NSArray' is not convertible to '[OtherClass]'}} + // expected-note@-1{{did you mean to use 'as!' to force downcast?}} {{28-28= as! [OtherClass]}} + var _: [BridgedStruct] = nsa // expected-error{{'NSArray' is not convertible to '[BridgedStruct]'}} + // expected-note@-1{{did you mean to use 'as!' to force downcast?}} {{31-31= as! [BridgedStruct]}} + var _: [NotBridgedStruct] = nsa // expected-error{{'NSArray' is not convertible to '[NotBridgedStruct]'}} + // expected-note@-1{{did you mean to use 'as!' to force downcast?}} {{34-34= as! [NotBridgedStruct]}} var _: [AnyObject] = nsa as [AnyObject] - var _: [BridgedClass] = nsa as [BridgedClass] // expected-error{{'NSArray' is not convertible to '[BridgedClass]'; did you mean to use 'as!' to force downcast?}} {{31-33=as!}} - var _: [OtherClass] = nsa as [OtherClass] // expected-error{{'NSArray' is not convertible to '[OtherClass]'; did you mean to use 'as!' to force downcast?}} {{29-31=as!}} - var _: [BridgedStruct] = nsa as [BridgedStruct] // expected-error{{'NSArray' is not convertible to '[BridgedStruct]'; did you mean to use 'as!' to force downcast?}} {{32-34=as!}} - var _: [NotBridgedStruct] = nsa as [NotBridgedStruct] // expected-error{{use 'as!' to force downcast}} + var _: [BridgedClass] = nsa as [BridgedClass] // expected-error{{'NSArray' is not convertible to '[BridgedClass]'}} + // expected-note@-1{{did you mean to use 'as!' to force downcast?}} {{31-33=as!}} + var _: [OtherClass] = nsa as [OtherClass] // expected-error{{'NSArray' is not convertible to '[OtherClass]'}} + // expected-note@-1{{did you mean to use 'as!' to force downcast?}} {{29-31=as!}} + var _: [BridgedStruct] = nsa as [BridgedStruct] // expected-error{{'NSArray' is not convertible to '[BridgedStruct]'}} + // expected-note@-1{{did you mean to use 'as!' 
to force downcast?}} {{32-34=as!}} + var _: [NotBridgedStruct] = nsa as [NotBridgedStruct] // expected-error{{'NSArray' is not convertible to '[NotBridgedStruct]'}} + // expected-note@-1{{did you mean to use 'as!' to force downcast?}} {{35-37=as!}} var arr6: Array = nsa as Array arr6 = arr1 @@ -211,7 +219,8 @@ func rdar18330319(_ s: String, d: [String : AnyObject]) { func rdar19551164a(_ s: String, _ a: [String]) {} func rdar19551164b(_ s: NSString, _ a: NSArray) { rdar19551164a(s, a) // expected-error{{'NSString' is not implicitly convertible to 'String'; did you mean to use 'as' to explicitly convert?}}{{18-18= as String}} - // expected-error@-1{{'NSArray' is not convertible to '[String]'; did you mean to use 'as!' to force downcast?}}{{21-21= as! [String]}} + // expected-error@-1{{'NSArray' is not convertible to '[String]'}} + // expected-note@-2 {{did you mean to use 'as!' to force downcast?}}{{21-21= as! [String]}} } // rdar://problem/19695671 diff --git a/test/Constraints/casts.swift b/test/Constraints/casts.swift index c63dc7354d225..36f78e3d2df87 100644 --- a/test/Constraints/casts.swift +++ b/test/Constraints/casts.swift @@ -252,11 +252,13 @@ func test_coercions_with_overloaded_operator(str: String, optStr: String?, veryO _ = (str ?? "") as Int // expected-error {{cannot convert value of type 'String' to type 'Int' in coercion}} _ = (optStr ?? "") as Int // expected-error {{cannot convert value of type 'String' to type 'Int' in coercion}} - _ = (optStr ?? "") as Int? // expected-error {{'String' is not convertible to 'Int?'; did you mean to use 'as!' to force downcast?}} + _ = (optStr ?? "") as Int? // expected-error {{'String' is not convertible to 'Int?'}} + // expected-note@-1 {{did you mean to use 'as!' 
to force downcast?}} {{22-24=as!}} _ = (str ^^^ "") as Int // expected-error {{cannot convert value of type 'String' to type 'Int' in coercion}} _ = (optStr ^^^ "") as Int // expected-error {{cannot convert value of type 'String' to type 'Int' in coercion}} - _ = (optStr ^^^ "") as Int? // expected-error {{'String' is not convertible to 'Int?'; did you mean to use 'as!' to force downcast?}} + _ = (optStr ^^^ "") as Int? // expected-error {{'String' is not convertible to 'Int?'}} + // expected-note@-1 {{did you mean to use 'as!' to force downcast?}} {{23-25=as!}} _ = ([] ?? []) as String // expected-error {{cannot convert value of type '[Any]' to type 'String' in coercion}} _ = ([""] ?? []) as [Int: Int] // expected-error {{cannot convert value of type '[String]' to type '[Int : Int]' in coercion}} @@ -290,7 +292,8 @@ func test_compatibility_coercions(_ arr: [Int], _ optArr: [Int]?, _ dict: [Strin // expected-note@-1 {{arguments to generic parameter 'Element' ('Int' and 'String') are expected to be equal}} _ = dict as [String: String] // expected-error {{cannot convert value of type '[String : Int]' to type '[String : String]' in coercion}} // expected-note@-1 {{arguments to generic parameter 'Value' ('Int' and 'String') are expected to be equal}} - _ = dict as [String: String]? // expected-error {{'[String : Int]' is not convertible to '[String : String]?'; did you mean to use 'as!' to force downcast?}} + _ = dict as [String: String]? // expected-error {{'[String : Int]' is not convertible to '[String : String]?'}} + // expected-note@-1 {{did you mean to use 'as!' to force downcast?}} {{12-14=as!}} _ = (dict as [String: Int]?) as [String: Int] // expected-error {{value of optional type '[String : Int]?' must be unwrapped to a value of type '[String : Int]'}} // expected-note@-1 {{coalesce using '??' to provide a default when the optional value contains 'nil'}} // expected-note@-2 {{force-unwrap using '!' 
to abort execution if the optional value contains 'nil'}} diff --git a/test/Constraints/casts_objc.swift b/test/Constraints/casts_objc.swift index d988aa147b3d3..9261a9e190e9a 100644 --- a/test/Constraints/casts_objc.swift +++ b/test/Constraints/casts_objc.swift @@ -81,7 +81,8 @@ func optionalityMismatchingCasts(f: CGFloat, n: NSNumber, fooo: CGFloat???, nooo: NSNumber???) { _ = f as NSNumber? _ = f as NSNumber?? - let _ = fooo as NSNumber?? // expected-error{{'CGFloat???' is not convertible to 'NSNumber??'; did you mean to use 'as!' to force downcast?}} + let _ = fooo as NSNumber?? // expected-error{{'CGFloat???' is not convertible to 'NSNumber??'}} + //expected-note@-1 {{did you mean to use 'as!' to force downcast?}} {{16-18=as!}} let _ = fooo as NSNumber???? // okay: injects extra optionals } diff --git a/test/Constraints/closures.swift b/test/Constraints/closures.swift index 0b77ce6ac87b2..e99c89febf661 100644 --- a/test/Constraints/closures.swift +++ b/test/Constraints/closures.swift @@ -1042,3 +1042,17 @@ let explicitUnboundResult2: (Array) -> Array = { let explicitUnboundResult3: (Array) -> Array = { (arr: Array) -> Array in [true] } + +// rdar://problem/71525503 - Assertion failed: (!shouldHaveDirectCalleeOverload(call) && "Should we have resolved a callee for this?") +func test_inout_with_invalid_member_ref() { + struct S { + static func createS(_ arg: inout Int) -> S { S() } + } + class C { + static subscript(s: (Int) -> Void) -> Bool { get { return false } } + } + + let _: Bool = C[{ .createS(&$0) }] + // expected-error@-1 {{value of tuple type 'Void' has no member 'createS'}} + // expected-error@-2 {{cannot pass immutable value as inout argument: '$0' is immutable}} +} diff --git a/test/Constraints/fixes.swift b/test/Constraints/fixes.swift index 09d18f68bb91c..98f9624122788 100644 --- a/test/Constraints/fixes.swift +++ b/test/Constraints/fixes.swift @@ -77,7 +77,8 @@ func forgotOptionalBang(_ a: A, obj: AnyObject) { func forgotAnyObjectBang(_ obj: 
AnyObject) { var a = A() - a = obj // expected-error{{'AnyObject' is not convertible to 'A'; did you mean to use 'as!' to force downcast?}}{{10-10= as! A}} + a = obj // expected-error{{'AnyObject' is not convertible to 'A'}} + //expected-note@-1 {{did you mean to use 'as!' to force downcast?}} {{10-10= as! A}} _ = a } diff --git a/test/Constraints/lvalues.swift b/test/Constraints/lvalues.swift index 165c752f7db21..3e31776a00027 100644 --- a/test/Constraints/lvalues.swift +++ b/test/Constraints/lvalues.swift @@ -266,3 +266,23 @@ func testWritePrefixIterator() { var underflow = (1..<10).makeIterator() var (writtenCount, afterLastWritten) = a.writePrefix(from: underflow) // expected-error {{passing value of type 'IndexingIterator<(Range)>' to an inout parameter requires explicit '&'}} {{62-62=&}} } + +// rdar://problem/71356981 - wrong error message for state passed as inout with ampersand within parentheses +func look_through_parens_when_checking_inout() { + struct Point { + var x: Int = 0 + var y: Int = 0 + } + + func modifyPoint(_ point: inout Point, _: Int = 42) {} + func modifyPoint(_ point: inout Point, msg: String) {} + func modifyPoint(source: inout Point) {} + + var point = Point(x: 0, y: 0) + modifyPoint((&point)) // expected-error {{use of extraneous '&}} {{16-17=(}} {{15-16=&}} + modifyPoint(((&point))) // expected-error {{use of extraneous '&}} {{17-18=(}} {{15-16=&}} + modifyPoint(source: (&point)) // expected-error {{use of extraneous '&}} {{24-25=(}} {{23-24=&}} + modifyPoint(source: ((&point))) // expected-error {{use of extraneous '&}} {{25-26=(}} {{23-24=&}} + modifyPoint((&point), 0) // expected-error {{use of extraneous '&}} {{16-17=(}} {{15-16=&}} + modifyPoint((&point), msg: "") // expected-error {{use of extraneous '&}} {{16-17=(}} {{15-16=&}} +} diff --git a/test/Constraints/rdar45511837.swift b/test/Constraints/rdar45511837.swift index 73d3b20271fa2..0bd2524aebda9 100644 --- a/test/Constraints/rdar45511837.swift +++ 
b/test/Constraints/rdar45511837.swift @@ -18,7 +18,8 @@ class Foo { lazy var foo: () -> Void = { // TODO: improve diagnostic message - _ = self.foobar + nil // expected-error {{'Bar' is not convertible to 'String'; did you mean to use 'as!' to force downcast?}} - // expected-error@-1 {{'nil' is not compatible with expected argument type 'String'}} + _ = self.foobar + nil // expected-error {{'Bar' is not convertible to 'String'}} + // expected-note@-1 {{did you mean to use 'as!' to force downcast?}} + // expected-error@-2 {{'nil' is not compatible with expected argument type 'String'}} } } diff --git a/test/Constraints/rdar71858936.swift b/test/Constraints/rdar71858936.swift new file mode 100644 index 0000000000000..30af9b6368ca0 --- /dev/null +++ b/test/Constraints/rdar71858936.swift @@ -0,0 +1,28 @@ +// RUN: %target-typecheck-verify-swift + +@propertyWrapper +@dynamicMemberLookup +struct Binding { + var wrappedValue: Value + + init(get: @escaping () -> Value, set: @escaping (Value) -> Void) { + self.wrappedValue = get() + } + + subscript(dynamicMember keyPath: WritableKeyPath) -> Binding { + get { fatalError() } + } +} + +class S { + var value: String = "" + var buffer: String? = nil + + var body: String { + let binding = Binding( + get: { self.buffer ?? 
self.value }, + set: { self.buffer = $0 } + ) + return binding.wrappedValue + } +} diff --git a/test/Constraints/result_builder_diags.swift b/test/Constraints/result_builder_diags.swift index 08c59e638ce96..6979a214e6622 100644 --- a/test/Constraints/result_builder_diags.swift +++ b/test/Constraints/result_builder_diags.swift @@ -286,6 +286,21 @@ struct MyTuplifiedStruct { } } +func test_invalid_return_type_in_body() { + tuplify(true) { _ -> (Void, Int) in + tuplify(false) { condition in + if condition { + return 42 // expected-error {{cannot use explicit 'return' statement in the body of result builder 'TupleBuilder'}} + // expected-note@-1 {{remove 'return' statements to apply the result builder}} {{9-16=}} + } else { + 1 + } + } + + 42 + } +} + // Check that we're performing syntactic use diagnostics. func acceptMetatype(_: T.Type) -> Bool { true } diff --git a/test/Demangle/Inputs/manglings.txt b/test/Demangle/Inputs/manglings.txt index 294e7a08a34c2..bc81a7c74b28f 100644 --- a/test/Demangle/Inputs/manglings.txt +++ b/test/Demangle/Inputs/manglings.txt @@ -349,6 +349,7 @@ $S1T19protocol_resilience17ResilientProtocolPTl ---> associated type descriptor $S18resilient_protocol21ResilientBaseProtocolTL ---> protocol requirements base descriptor for resilient_protocol.ResilientBaseProtocol $S1t1PP10AssocType2_AA1QTn ---> associated conformance descriptor for t.P.AssocType2: t.Q $S1t1PP10AssocType2_AA1QTN ---> default associated conformance accessor for t.P.AssocType2: t.Q +$s4Test6testityyxlFAA8MystructV_TB5 ---> generic specialization of Test.testit(A) -> () $sSD5IndexVy__GD ---> $sSD5IndexVy__GD $s4test3StrCACycfC ---> {T:$s4test3StrCACycfc} test.Str.__allocating_init() -> test.Str $s18keypaths_inlinable13KeypathStructV8computedSSvpACTKq ---> key path getter for keypaths_inlinable.KeypathStruct.computed : Swift.String : keypaths_inlinable.KeypathStruct, serialized diff --git a/test/Driver/tools_directory.swift b/test/Driver/tools_directory.swift index 
d5e1536955dc1..bd879d25395c6 100644 --- a/test/Driver/tools_directory.swift +++ b/test/Driver/tools_directory.swift @@ -6,18 +6,18 @@ // RUN: %swiftc_driver -### -target x86_64-linux-unknown -tools-directory /Something/obviously/fake %s 2>&1 | %FileCheck -check-prefix BINUTILS %s // CLANGSUB: swift -// CLANGSUB-SAME: -o [[OBJECTFILE:.*]] +// CLANGSUB: -o [[OBJECTFILE:.*]] // CLANGSUB: swift-autolink-extract{{(\.exe)?"?}} [[OBJECTFILE]] -// CLANGSUB-SAME: -o {{"?}}[[AUTOLINKFILE:.*]] +// CLANGSUB: -o {{"?}}[[AUTOLINKFILE:.*]] // CLANGSUB: {{[^ ]+(\\\\|/)}}Inputs{{/|\\\\}}fake-toolchain{{/|\\\\}}clang // CLANGSUB-DAG: [[OBJECTFILE]] // CLANGSUB-DAG: @[[AUTOLINKFILE]] // CLANGSUB: -o tools_directory // BINUTILS: swift -// BINUTILS-SAME: -o [[OBJECTFILE:.*]] +// BINUTILS: -o [[OBJECTFILE:.*]] // BINUTILS: swift-autolink-extract{{(\.exe)?"?}} [[OBJECTFILE]] -// BINUTILS-SAME: -o {{"?}}[[AUTOLINKFILE:.*]] +// BINUTILS: -o {{"?}}[[AUTOLINKFILE:.*]] // BINUTILS: clang // BINUTILS-DAG: [[OBJECTFILE]] // BINUTILS-DAG: @[[AUTOLINKFILE]] @@ -31,6 +31,6 @@ // RUN: %swiftc_driver -### -target x86_64-apple-macosx10.9 -tools-directory %S/Inputs/fake-toolchain %s 2>&1 | %FileCheck -check-prefix LDSUB %s // LDSUB: swift -// LDSUB-SAME: -o [[OBJECTFILE:.*]] +// LDSUB: -o [[OBJECTFILE:.*]] // LDSUB: {{[^ ]+(\\\\|/)}}Inputs{{/|\\\\}}fake-toolchain{{(\\\\|/)ld"?}} [[OBJECTFILE]] // LDSUB: -o tools_directory diff --git a/test/Frontend/missing_files.swift b/test/Frontend/missing_files.swift new file mode 100644 index 0000000000000..13dad1be4ef03 --- /dev/null +++ b/test/Frontend/missing_files.swift @@ -0,0 +1,17 @@ +// RUN: %empty-directory(%t) + +// RUN: not %target-swift-frontend -c -parse-as-library /tmp/SOMETHING_DOES_NOT_EXIST_1.swift -primary-file %s /tmp/SOMETHING_DOES_NOT_EXIST_2.swift -o %t/out.o 2> %t/error1.output +// RUN: not test -f %t/out.o +// RUN: %FileCheck %s -input-file %t/error1.output --check-prefixes=CHECK + +// RUN: not %target-swift-frontend -c -parse-as-library 
-primary-file /tmp/SOMETHING_DOES_NOT_EXIST_1.swift -primary-file %s /tmp/SOMETHING_DOES_NOT_EXIST_2.swift -o %t/out1.o -o %t/out2.o 2> %t/error2.output +// RUN: not test -f %t/out1.o +// RUN: not test -f %t/out2.o +// RUN: %FileCheck %s -input-file %t/error2.output --check-prefixes=CHECK + +// CHECK-DAG: :0: error: error opening input file '{{[/\\]}}tmp{{[/\\]}}SOMETHING_DOES_NOT_EXIST_1.swift' ({{.*}}) +// CHECK-DAG: :0: error: error opening input file '{{[/\\]}}tmp{{[/\\]}}SOMETHING_DOES_NOT_EXIST_2.swift' ({{.*}}) + +public var x = INVALID_DECL +// CHECK-NOT: INVALID_DECL + diff --git a/test/IDE/print_clang_objc_async.swift b/test/IDE/print_clang_objc_async.swift index 251d12ac87d01..17c2d1db6c068 100644 --- a/test/IDE/print_clang_objc_async.swift +++ b/test/IDE/print_clang_objc_async.swift @@ -13,10 +13,15 @@ // CHECK-DAG: func doSomethingDangerous(_ operation: String) async throws -> String // CHECK-DAG: func checkAvailability(completionHandler: @escaping (Bool) -> Void) // CHECK-DAG: func checkAvailability() async -> Bool +// CHECK-DAG: func anotherExample() async -> String +// CHECK-DAG: func finalExample() async -> String +// CHECK-DAG: func replyingOperation(_ operation: String) async -> String // CHECK-DAG: func findAnswer(completionHandler handler: @escaping (String?, Error?) -> Void) // CHECK-DAG: func findAnswer() async throws -> String // CHECK-DAG: func findAnswerFailingly(completionHandler handler: @escaping (String?, Error?) -> Void) throws // CHECK-DAG: func findAnswerFailingly() async throws -> String +// CHECK-DAG: func findQAndA() async throws -> (String?, String) +// CHECK-DAG: func findQuestionableAnswers() async throws -> (String, String?) 
// CHECK-DAG: func doSomethingFun(_ operation: String) async // CHECK: {{^[}]$}} diff --git a/test/IRGen/actor_class.swift b/test/IRGen/actor_class.swift new file mode 100644 index 0000000000000..35fd0c525b5a9 --- /dev/null +++ b/test/IRGen/actor_class.swift @@ -0,0 +1,49 @@ +// RUN: %target-swift-frontend -emit-ir %s -swift-version 5 -enable-experimental-concurrency | %target-FileCheck %s +// REQUIRES: concurrency + +// rdar_72047158 +// XFAIL: CPU=arm64e + +// CHECK: %T11actor_class7MyClassC = type <{ %swift.refcounted, [10 x i8*], %TSi }> + +// CHECK-objc-LABEL: @"$s11actor_class7MyClassCMm" = global +// CHECK-objc-SAME: %objc_class* @"OBJC_METACLASS_$__TtCs12_SwiftObject" + +// CHECK: @"$s11actor_class7MyClassCMf" = internal global +// CHECK-SAME: @"$s11actor_class7MyClassCfD" +// CHECK-objc-SAME: %objc_class* @"OBJC_CLASS_$__TtCs12_SwiftObject" +// CHECK-nonobjc-SAME: %swift.type* null, +// Flags: uses Swift refcounting +// CHECK-SAME: i32 2, +// Instance size +// CHECK-64-SAME: i32 104, +// CHECK-32-SAME: i32 52, +// Alignment mask +// CHECK-SAME: i16 15, +// Field offset for 'x' +// CHECK-objc-SAME: [[INT]] {{48|96}}, + +public actor class MyClass { + public var x: Int + public init() { self.x = 0 } +} + +// CHECK-LABEL: define {{.*}}void @"$s11actor_class7MyClassC7enqueue11partialTasky12_Concurrency012PartialAsyncG0V_tF" +// CHECK: swift_retain +// CHECK: [[T0:%.*]] = bitcast %T11actor_class7MyClassC* %1 to {{.*}}* +// CHECK-NEXT: call swiftcc void @swift_defaultActor_enqueue(%swift.job* %0, {{.*}}* [[T0]]) + +// CHECK-LABEL: define {{.*}}@"$s11actor_class7MyClassC1xSivg" +// CHECK: [[T0:%.*]] = getelementptr inbounds %T11actor_class7MyClassC, %T11actor_class7MyClassC* %0, i32 0, i32 2 +// CHECK: [[T1:%.*]] = getelementptr inbounds %TSi, %TSi* [[T0]], i32 0, i32 0 +// CHECK: load [[INT]], [[INT]]* [[T1]], align 16 + +// CHECK-LABEL: define {{.*}}swiftcc %T11actor_class7MyClassC* @"$s11actor_class7MyClassCACycfc" +// FIXME: need to do this initialization! 
+// CHECK-NOT: swift_defaultActor_initialize +// CHECK-LABEL: ret %T11actor_class7MyClassC* + +// CHECK-LABEL: define {{.*}}swiftcc %swift.refcounted* @"$s11actor_class7MyClassCfd" +// FIXME: need to do this destruction! +// CHECK-NOT: swift_defaultActor_destroy +// CHECK-LABEL: ret diff --git a/test/IRGen/actor_class_forbid_objc_assoc_objects.swift b/test/IRGen/actor_class_forbid_objc_assoc_objects.swift new file mode 100644 index 0000000000000..45c47575dc011 --- /dev/null +++ b/test/IRGen/actor_class_forbid_objc_assoc_objects.swift @@ -0,0 +1,25 @@ +// RUN: %target-swift-frontend -enable-experimental-concurrency -emit-ir %s | %FileCheck %s + +// REQUIRES: concurrency +// REQUIRES: objc_interop + +import _Concurrency + +// CHECK: @_METACLASS_DATA__TtC37actor_class_forbid_objc_assoc_objects5Actor = internal constant { {{.*}} } { i32 [[METAFLAGS:1153]], +// CHECK: @_DATA__TtC37actor_class_forbid_objc_assoc_objects5Actor = internal constant { {{.*}} } { i32 [[OBJECTFLAGS:1152|1216]], +final actor class Actor { +} + +// CHECK: @_METACLASS_DATA__TtC37actor_class_forbid_objc_assoc_objects6Actor2 = internal constant { {{.*}} } { i32 [[METAFLAGS]], +// CHECK: @_DATA__TtC37actor_class_forbid_objc_assoc_objects6Actor2 = internal constant { {{.*}} } { i32 [[OBJECTFLAGS]], +actor class Actor2 { +} + +// CHECK: @_METACLASS_DATA__TtC37actor_class_forbid_objc_assoc_objects6Actor3 = internal constant { {{.*}} } { i32 [[METAFLAGS]], +// CHECK: @_DATA__TtC37actor_class_forbid_objc_assoc_objects6Actor3 = internal constant { {{.*}} } { i32 [[OBJECTFLAGS]], +class Actor3 : Actor2 {} + +actor class GenericActor { + var state: T + init(state: T) { self.state = state } +} diff --git a/test/IRGen/actor_class_objc.swift b/test/IRGen/actor_class_objc.swift new file mode 100644 index 0000000000000..41a7b6d815302 --- /dev/null +++ b/test/IRGen/actor_class_objc.swift @@ -0,0 +1,53 @@ +// RUN: %target-swift-frontend -emit-ir %s -swift-version 5 -enable-experimental-concurrency | 
%target-FileCheck %s +// REQUIRES: concurrency +// REQUIRES: objc_interop + +// rdar_72047158 +// XFAIL: CPU=arm64e + +import Foundation + +// CHECK: %T16actor_class_objc7MyClassC = type <{ %swift.refcounted, [10 x i8*], %TSi }> + +// CHECK-LABEL: @"OBJC_METACLASS_$__TtC16actor_class_objc7MyClass" = global +// Metaclass is an instance of the root class. +// CHECK-SAME: %objc_class* @"OBJC_METACLASS_$_NSObject", +// Metaclass superclass is the metaclass of the superclass. +// CHECK-SAME: %objc_class* @"OBJC_METACLASS_$_SwiftNativeNSObject", + +// CHECK: @"$s16actor_class_objc7MyClassCMf" = internal global +// CHECK-SAME: @"$s16actor_class_objc7MyClassCfD" +// CHECK-SAME: @"OBJC_METACLASS_$__TtC16actor_class_objc7MyClass" +// CHECK-SAME: @"OBJC_CLASS_$_SwiftNativeNSObject" +// Flags: uses Swift refcounting +// CHECK-SAME: i32 2, +// Instance size +// CHECK-64-SAME: i32 104, +// CHECK-32-SAME: i32 52, +// Alignment mask +// CHECK-SAME: i16 15, +// Field offset for 'x' +// CHECK-64-SAME: i64 96, +// CHECK-32-SAME: i32 48, + +public actor class MyClass: NSObject { + public var x: Int + public override init() { self.x = 0 } +} + +// CHECK-LABEL: define {{.*}}void @"$s16actor_class_objc7MyClassC7enqueue11partialTasky12_Concurrency012PartialAsyncH0V_tF" +// CHECK: [[T0:%.*]] = bitcast %T16actor_class_objc7MyClassC* %1 to %objc_object* +// CHECK-NEXT: call swiftcc void @swift_defaultActor_enqueue(%swift.job* %0, %objc_object* [[T0]]) + +// CHECK-LABEL: define {{.*}} @"$s16actor_class_objc7MyClassC1xSivg" +// CHECK: [[T0:%.*]] = getelementptr inbounds %T16actor_class_objc7MyClassC, %T16actor_class_objc7MyClassC* %0, i32 0, i32 2 +// CHECK: [[T1:%.*]] = getelementptr inbounds %TSi, %TSi* [[T0]], i32 0, i32 0 +// CHECK: load [[INT]], [[INT]]* [[T1]], align 16 + +// CHECK-LABEL: define {{.*}}swiftcc %T16actor_class_objc7MyClassC* @"$s16actor_class_objc7MyClassCACycfc" +// FIXME: need to do this initialization! 
+// CHECK-NOT: swift_defaultActor_initialize +// CHECK-LABEL: ret %T16actor_class_objc7MyClassC* + +// FIXME: need to do this destruction! +// CHECK-NOT: swift_defaultActor_destroy diff --git a/test/IRGen/alloc_box.swift b/test/IRGen/alloc_box.swift index 0812e3f6feed8..e98c127c6e883 100644 --- a/test/IRGen/alloc_box.swift +++ b/test/IRGen/alloc_box.swift @@ -1,4 +1,4 @@ -// RUN: %target-swift-frontend -primary-file %s -emit-ir -o - | %FileCheck %s +// RUN: %target-swift-frontend -Xllvm -sil-disable-pass=SILGenCleanup -primary-file %s -emit-ir -o - | %FileCheck %s func f() -> Bool? { return nil } diff --git a/test/IRGen/async.swift b/test/IRGen/async.swift index 40cbe55607c76..1c9a7bac4cef8 100644 --- a/test/IRGen/async.swift +++ b/test/IRGen/async.swift @@ -15,11 +15,11 @@ public class SomeClass {} public func task_future_wait(_ task: __owned SomeClass) async throws -> Int // CHECK: define{{.*}} swiftcc void @"$s5async8testThisyyAA9SomeClassCnYF"(%swift.task* %0, %swift.executor* %1, %swift.context* %2) -// CHECK-64: call swiftcc i8* @swift_task_alloc(%swift.task* %0, i64 64) +// CHECK-64: call swiftcc i8* @swift_task_alloc(%swift.task* %{{[0-9]+}}, i64 64) // CHECK: tail call swiftcc void @swift_task_future_wait( public func testThis(_ task: __owned SomeClass) async { do { - let _ = try await task_future_wait(task) + let _ = await try task_future_wait(task) } catch _ { print("error") } diff --git a/test/IRGen/async/builtins.sil b/test/IRGen/async/builtins.sil index 221a0faec81a8..0a0edb56ef255 100644 --- a/test/IRGen/async/builtins.sil +++ b/test/IRGen/async/builtins.sil @@ -11,9 +11,12 @@ import Swift // CHECK-LABEL: define hidden swiftcc void @get_task(%swift.task* %0, %swift.executor* %1, %swift.context* %2) sil hidden [ossa] @get_task : $@async @convention(thin) () -> @owned Builtin.NativeObject { bb0: - // CHECK: [[TASK:%.*]] = bitcast %swift.task* %0 to %swift.refcounted* + // CHECK: [[TASKLOC:%.*]] = alloca %swift.task* + // CHECK: store %swift.task* %0, 
%swift.task** [[TASKLOC]] + // CHECK: [[TASK:%.*]] = load %swift.task*, %swift.task** [[TASKLOC]] + // CHECK: [[TASKRC:%.*]] = bitcast %swift.task* [[TASK]] to %swift.refcounted* %0 = builtin "getCurrentAsyncTask"() : $Builtin.NativeObject - // CHECK-NEXT: [[TASK_COPY:%.*]] = call %swift.refcounted* @swift_retain(%swift.refcounted* returned [[TASK]]) + // CHECK-NEXT: [[TASK_COPY:%.*]] = call %swift.refcounted* @swift_retain(%swift.refcounted* returned [[TASKRC]]) %1 = copy_value %0 : $Builtin.NativeObject end_lifetime %0 : $Builtin.NativeObject return %1 : $Builtin.NativeObject diff --git a/test/IRGen/async/get_async_continuation.sil b/test/IRGen/async/get_async_continuation.sil new file mode 100644 index 0000000000000..c24a9603af731 --- /dev/null +++ b/test/IRGen/async/get_async_continuation.sil @@ -0,0 +1,145 @@ +// RUN: %target-swift-frontend -enable-experimental-concurrency -enable-objc-interop -primary-file %s -emit-ir -sil-verify-all -disable-llvm-optzns -disable-swift-specific-llvm-optzns | %FileCheck %s +// RUN: %target-swift-frontend -enable-experimental-concurrency -enable-objc-interop -primary-file %s -emit-ir -sil-verify-all + +// REQUIRES: concurrency + +import Builtin +import Swift +import _Concurrency + +sil @not_async_test : $@convention(thin) () -> () { +bb0: + %0 = tuple () + return %0 : $() +} + +// CHECK-LABEL: define{{.*}} @async_continuation( +// CHECK: [[tsk_addr:%.*]] = alloca %swift.task* +// CHECK: [[exe_addr:%.*]] = alloca %swift.executor* +// CHECK: [[ctxt_addr:%.*]] = alloca %swift.context* +// CHECK: [[cont_context:%.*]] = alloca %swift.async_continuation_context +// CHECK: [[result_storage:%.*]] = alloca i32 +// CHECK: call token @llvm.coro.id.async +// CHECK: call i8* @llvm.coro.begin( +// Create a Builtin.RawUnsafeContinuation. +// CHECK: [[tsk:%.*]] = load %swift.task*, %swift.task** [[tsk_addr]] +// CHECK: [[continuation:%.*]] = bitcast %swift.task* [[tsk]] to i8* +// Initialize the async continuation context. 
+// CHECK: [[context_addr:%.*]] = getelementptr inbounds %swift.async_continuation_context, %swift.async_continuation_context* [[cont_context]], i32 0, i32 0 +// CHECK: [[ctxt:%.*]] = load %swift.context*, %swift.context** [[ctxt_addr]] +// CHECK: store %swift.context* [[ctxt]], %swift.context** [[context_addr]] +// CHECK: [[error_addr:%.*]] = getelementptr inbounds %swift.async_continuation_context, %swift.async_continuation_context* [[cont_context]], i32 0, i32 2 +// CHECK: store %swift.error* null, %swift.error** [[error_addr]] +// CHECK: [[result_addr:%.*]] = getelementptr inbounds %swift.async_continuation_context, %swift.async_continuation_context* [[cont_context]], i32 0, i32 3 +// CHECK: [[result_storage_as_opaque:%.*]] = bitcast i32* [[result_storage]] to %swift.opaque* +// CHECK: store %swift.opaque* [[result_storage_as_opaque]], %swift.opaque** [[result_addr]] +// CHECK: [[exectuor_addr:%.*]] = getelementptr inbounds %swift.async_continuation_context, %swift.async_continuation_context* [[cont_context]], i32 0, i32 4 +// CHECK: [[exe:%.*]] = load %swift.executor*, %swift.executor** [[exe_addr]] +// CHECK: store %swift.executor* [[exe]], %swift.executor** [[exectuor_addr]] +// Initialize the async task with the continuation function and async continuation context. +// CHECK: [[task_continuation_fn_addr:%.*]] = getelementptr inbounds %swift.task, %swift.task* [[tsk]], i32 0, i32 4 +// CHECK: [[continuation_fn:%.*]] = call i8* @llvm.coro.async.resume() +// CHECK: store i8* [[continuation_fn]], i8** [[task_continuation_fn_addr]] +// CHECK: [[task_resume_context_addr:%.*]] = getelementptr inbounds %swift.task, %swift.task* [[tsk]], i32 0, i32 5 +// CHECK: [[cont_context2:%.*]] = bitcast %swift.async_continuation_context* [[cont_context]] to %swift.context* +// CHECK: store %swift.context* [[cont_context2]], %swift.context** [[task_resume_context_addr]] +// Initialize the synchronization variable. 
+// CHECK: [[synchronization_addr:%.*]] = getelementptr inbounds %swift.async_continuation_context, %swift.async_continuation_context* [[cont_context]], i32 0, i32 1 +// CHECK: store atomic {{(i64|i32)}} 0, {{(i64|i32)}}* [[synchronization_addr]] release +// Do some stuff. +// CHECK: call swiftcc void @not_async_test() +// Arrive at the await_async_continuation point. +// CHECK: [[synchronization_addr_before_await:%.*]] = getelementptr inbounds %swift.async_continuation_context, %swift.async_continuation_context* [[cont_context]], i32 0, i32 1 +// CHECK: [[first_at_sync_pt:%.*]] = cmpxchg {{(i64|i32)}}* [[synchronization_addr_before_await]], {{(i64|i32)}} 0, {{(i64|i32)}} 1 release acquire +// CHECK: [[first_at_sync_pt_bool:%.*]] = extractvalue { {{(i64|i32)}}, i1 } [[first_at_sync_pt]], 1 +// CHECK: br i1 [[first_at_sync_pt_bool]], label %await.async.abort, label %await.async.maybe.resume + +// Abort if we are the first to arrive at the await/or continuation point -- +// we must wait on the other to arrive. +// CHECK: await.async.abort: +// CHECK: br label %coro.end + +// CHECK: coro.end: +// CHECK: call i1 @llvm.coro.end( +// CHECK: unreachable + +// CHECK: await.async.maybe.resume: +// CHECK: call { i8*, i8*, i8* } (i8*, i8*, ...) @llvm.coro.suspend.async({{.*}} @__swift_async_resume_project_context +// Abort if we are the first to arrive at the continuation point we must wait +// on the await to arrive. 
+// CHECK: [[first_at_sync_pt:%.*]] = cmpxchg {{(i64|i32)}}* [[synchronization_addr_before_await]], {{(i64|i32)}} 0, {{(i64|i32)}} 1 release acquire +// CHECK: [[first_at_sync_pt_bool:%.*]] = extractvalue { {{(i64|i32)}}, i1 } [[first_at_sync_pt]], 1 +// CHECK: br i1 [[first_at_sync_pt_bool]], label %await.async.abort, label %await.async.resume + +// CHECK: await.async.resume: +// CHECK: br label %await.async.normal + +// CHECK: await.async.normal: +// CHECK: [[result_addr_addr:%.*]] = getelementptr inbounds %swift.async_continuation_context, %swift.async_continuation_context* [[cont_context]], i32 0, i32 3 +// CHECK: [[result_addr:%.*]] = load %swift.opaque*, %swift.opaque** [[result_addr_addr]] +// CHECK: [[typed_result_addr:%.*]] = bitcast %swift.opaque* [[result_addr]] to i32* +// CHECK: [[result_value:%.*]] = load i32, i32* [[typed_result_addr]] +// CHECK: br label %[[result_bb:[0-9]+]] + +// CHECK: [[result_bb]]: +// CHECK: phi i32 [ [[result_value]], %await.async.normal ] + +sil @async_continuation : $@async () -> () { +entry: + %c = get_async_continuation Builtin.Int32 + %f = function_ref @not_async_test : $@convention(thin) () -> () + apply %f() : $@convention(thin) () -> () + await_async_continuation %c : $Builtin.RawUnsafeContinuation, resume bb1 + +bb1(%r : $Builtin.Int32): + %t = tuple() + return %t : $() +} + +sil @async_continuation_throws : $@async () -> () { +entry: + %c = get_async_continuation [throws] Builtin.Int32 + %f = function_ref @not_async_test : $@convention(thin) () -> () + apply %f() : $@convention(thin) () -> () + await_async_continuation %c : $Builtin.RawUnsafeContinuation, resume bb1, error bb2 +bb1(%r : $Builtin.Int32): + br bb3 +bb2(%e : $Error): + br bb3 + +bb3: + %t = tuple() + return %t : $() +} + +sil @async_continuation_addr : $@async () -> () { +entry: + %a = alloc_stack $Builtin.Int32 + %c = get_async_continuation_addr Builtin.Int32, %a : $*Builtin.Int32 + %f = function_ref @not_async_test : $@convention(thin) () -> () + 
apply %f() : $@convention(thin) () -> () + await_async_continuation %c : $Builtin.RawUnsafeContinuation, resume bb1 +bb1: + dealloc_stack %a : $*Builtin.Int32 + %t = tuple() + return %t : $() +} + +sil @async_continuation_throws_addr : $@async () -> () { +entry: + %a = alloc_stack $Builtin.Int32 + %c = get_async_continuation_addr [throws] Builtin.Int32, %a : $*Builtin.Int32 + %f = function_ref @not_async_test : $@convention(thin) () -> () + apply %f() : $@convention(thin) () -> () + await_async_continuation %c : $Builtin.RawUnsafeContinuation, resume bb1, error bb2 +bb1: + dealloc_stack %a : $*Builtin.Int32 + br bb3 +bb2(%e : $Error): + dealloc_stack %a : $*Builtin.Int32 + br bb3 + +bb3: + %t = tuple() + return %t : $() +} diff --git a/test/IRGen/async/hop_to_executor.sil b/test/IRGen/async/hop_to_executor.sil new file mode 100644 index 0000000000000..dde2a3c5adbcf --- /dev/null +++ b/test/IRGen/async/hop_to_executor.sil @@ -0,0 +1,40 @@ +// RUN: %target-swift-frontend -enable-experimental-concurrency -primary-file %s -module-name=test -disable-llvm-optzns -disable-swift-specific-llvm-optzns -emit-ir -sil-verify-all | %FileCheck %s + +// REQUIRES: concurrency + +// This test fails on 32bit archs. 
+// UNSUPPORTED: PTRSIZE=32 + +sil_stage canonical + +import Builtin +import Swift +import _Concurrency + +final actor class MyActor { +} + +// CHECK-LABEL: define{{.*}} void @test_simple(%swift.task* %0, %swift.executor* %1, %swift.context* %2) +// CHECK: [[CTX:%[0-9]+]] = bitcast %swift.context* %2 +// CHECK: [[ACTOR_ADDR:%[0-9]+]] = getelementptr {{.*}} [[CTX]], i32 0, i32 6 +// CHECK: [[ACTOR:%[0-9]+]] = load %T4test7MyActorC*, %T4test7MyActorC** [[ACTOR_ADDR]] +// CHECK: [[RESUME:%[0-9]+]] = call i8* @llvm.coro.async.resume() +// CHECK: [[CAST_ACTOR:%[0-9]+]] = bitcast %T4test7MyActorC* [[ACTOR]] to %swift.executor* +// CHECK: call {{.*}} @llvm.coro.suspend.async(i8* [[RESUME]], i8* bitcast (i8* (i8*)* @__swift_async_resume_get_context to i8*), i8* bitcast (void (i8*, %swift.executor*, %swift.task*, %swift.executor*, %swift.context*)* @__swift_suspend_point to i8*), i8* [[RESUME]], %swift.executor* [[CAST_ACTOR]], %swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) +sil @test_simple : $@convention(method) @async (@guaranteed MyActor) -> () { +bb0(%0 : $MyActor): + hop_to_executor %0 : $MyActor + %3 = tuple () + return %3 : $() +} + +// CHECK-LABEL: define internal void @__swift_suspend_point(i8* %0, %swift.executor* %1, %swift.task* %2, %swift.executor* %3, %swift.context* %4) +// CHECK: [[RESUME_ADDR:%[0-9]+]] = getelementptr inbounds %swift.task, %swift.task* %2, i32 0, i32 4 +// CHECK: store i8* %0, i8** [[RESUME_ADDR]] +// CHECK: [[CTXT_ADDR:%[0-9]+]] = getelementptr inbounds %swift.task, %swift.task* %2, i32 0, i32 5 +// CHECK: store %swift.context* %4, %swift.context** [[CTXT_ADDR]] +// CHECK: tail call swiftcc void @swift_task_switch(%swift.task* %2, %swift.executor* %3, %swift.executor* %1) +// CHECK: ret void + +sil_vtable MyActor { +} diff --git a/test/IRGen/async/partial_apply.sil b/test/IRGen/async/partial_apply.sil index 8f7068cd469d3..7fbc5b97bc558 100644 --- a/test/IRGen/async/partial_apply.sil +++ 
b/test/IRGen/async/partial_apply.sil @@ -2,7 +2,6 @@ // RUN: %target-swift-frontend -emit-module -enable-library-evolution -emit-module-path=%t/resilient_struct.swiftmodule -module-name=resilient_struct %S/../../Inputs/resilient_struct.swift // RUN: %target-swift-frontend -I %t -emit-ir %s | %FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-%target-ptrsize -// REQUIRES: CPU=x86_64 // REQUIRES: concurrency import Builtin diff --git a/test/IRGen/async/partial_apply_forwarder.sil b/test/IRGen/async/partial_apply_forwarder.sil index 466aae534137c..ebccadd0e04d0 100644 --- a/test/IRGen/async/partial_apply_forwarder.sil +++ b/test/IRGen/async/partial_apply_forwarder.sil @@ -2,7 +2,6 @@ // RUN: %target-swift-frontend -enable-experimental-concurrency -disable-objc-interop -primary-file %s -emit-ir | %FileCheck %s -DINT=i%target-ptrsize --check-prefixes=CHECK,CHECK-native // REQUIRES: concurrency -// UNSUPPORTED: CPU=arm64e sil_stage canonical diff --git a/test/IRGen/async/run-call-classinstance-int64-to-void.sil b/test/IRGen/async/run-call-classinstance-int64-to-void.sil index e4b086c6b2001..f6d721a6dd609 100644 --- a/test/IRGen/async/run-call-classinstance-int64-to-void.sil +++ b/test/IRGen/async/run-call-classinstance-int64-to-void.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin diff --git a/test/IRGen/async/run-call-classinstance-void-to-void.sil b/test/IRGen/async/run-call-classinstance-void-to-void.sil index d9c729cc6d7d1..d1b5ae658de62 100644 --- a/test/IRGen/async/run-call-classinstance-void-to-void.sil +++ b/test/IRGen/async/run-call-classinstance-void-to-void.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin diff --git a/test/IRGen/async/run-call-existential-to-void.sil b/test/IRGen/async/run-call-existential-to-void.sil index 
8811cbeb1ddfc..eeaaecdfac73c 100644 --- a/test/IRGen/async/run-call-existential-to-void.sil +++ b/test/IRGen/async/run-call-existential-to-void.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin diff --git a/test/IRGen/async/run-call-generic-to-generic.sil b/test/IRGen/async/run-call-generic-to-generic.sil index a1909edcefea0..6ddfdce3a99cb 100644 --- a/test/IRGen/async/run-call-generic-to-generic.sil +++ b/test/IRGen/async/run-call-generic-to-generic.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin diff --git a/test/IRGen/async/run-call-generic-to-void.sil b/test/IRGen/async/run-call-generic-to-void.sil index 973df57826ffe..45bfb2c4fed95 100644 --- a/test/IRGen/async/run-call-generic-to-void.sil +++ b/test/IRGen/async/run-call-generic-to-void.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin diff --git a/test/IRGen/async/run-call-genericEquatable-x2-to-bool.sil b/test/IRGen/async/run-call-genericEquatable-x2-to-bool.sil index 3fd0c1427f371..06bc79f92bdc8 100644 --- a/test/IRGen/async/run-call-genericEquatable-x2-to-bool.sil +++ b/test/IRGen/async/run-call-genericEquatable-x2-to-bool.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin diff --git a/test/IRGen/async/run-call-int64-and-int64-to-void.sil b/test/IRGen/async/run-call-int64-and-int64-to-void.sil index 31ee136974a8b..6fa39cb5a1010 100644 --- a/test/IRGen/async/run-call-int64-and-int64-to-void.sil +++ b/test/IRGen/async/run-call-int64-and-int64-to-void.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: 
use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin diff --git a/test/IRGen/async/run-call-int64-to-void.sil b/test/IRGen/async/run-call-int64-to-void.sil index 475f1dc09853f..677dd37bcf891 100644 --- a/test/IRGen/async/run-call-int64-to-void.sil +++ b/test/IRGen/async/run-call-int64-to-void.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin diff --git a/test/IRGen/async/run-call-protocolextension_instance-void-to-int64.sil b/test/IRGen/async/run-call-protocolextension_instance-void-to-int64.sil index 4df2bc086742d..f687f07258e2e 100644 --- a/test/IRGen/async/run-call-protocolextension_instance-void-to-int64.sil +++ b/test/IRGen/async/run-call-protocolextension_instance-void-to-int64.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin import Swift diff --git a/test/IRGen/async/run-call-protocolwitness_instance-void-to-int64.sil b/test/IRGen/async/run-call-protocolwitness_instance-void-to-int64.sil index d7f73ce866460..078367453da62 100644 --- a/test/IRGen/async/run-call-protocolwitness_instance-void-to-int64.sil +++ b/test/IRGen/async/run-call-protocolwitness_instance-void-to-int64.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin import Swift diff --git a/test/IRGen/async/run-call-resilient-classinstance-void-to-void.sil b/test/IRGen/async/run-call-resilient-classinstance-void-to-void.sil index 02441131c9c3b..dd93d0beb6145 100644 --- a/test/IRGen/async/run-call-resilient-classinstance-void-to-void.sil +++ b/test/IRGen/async/run-call-resilient-classinstance-void-to-void.sil @@ -12,7 +12,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin 
diff --git a/test/IRGen/async/run-call-resilient-protocolinstance-void-to-void.swift b/test/IRGen/async/run-call-resilient-protocolinstance-void-to-void.swift index 330facd73c9bd..4ca531fbd4de8 100644 --- a/test/IRGen/async/run-call-resilient-protocolinstance-void-to-void.swift +++ b/test/IRGen/async/run-call-resilient-protocolinstance-void-to-void.swift @@ -12,7 +12,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import _Concurrency import ResilientProtocol diff --git a/test/IRGen/async/run-call-struct_five_bools-to-void.sil b/test/IRGen/async/run-call-struct_five_bools-to-void.sil index 1ad1a3bbe4582..0ef4d38240d3d 100644 --- a/test/IRGen/async/run-call-struct_five_bools-to-void.sil +++ b/test/IRGen/async/run-call-struct_five_bools-to-void.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin diff --git a/test/IRGen/async/run-call-structinstance-int64-to-void.sil b/test/IRGen/async/run-call-structinstance-int64-to-void.sil index 5449493e5ba95..d8b5e07c52203 100644 --- a/test/IRGen/async/run-call-structinstance-int64-to-void.sil +++ b/test/IRGen/async/run-call-structinstance-int64-to-void.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin import Swift diff --git a/test/IRGen/async/run-call-void-throws-to-int-throwing.sil b/test/IRGen/async/run-call-void-throws-to-int-throwing.sil index 5f2cd9a42003f..69404d1e9f2e6 100644 --- a/test/IRGen/async/run-call-void-throws-to-int-throwing.sil +++ b/test/IRGen/async/run-call-void-throws-to-int-throwing.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin diff --git 
a/test/IRGen/async/run-call-void-throws-to-int-throwing_call-async-nothrow_call-sync-throw.sil b/test/IRGen/async/run-call-void-throws-to-int-throwing_call-async-nothrow_call-sync-throw.sil index f35a2462b9a36..b0ecc1ca3a16e 100644 --- a/test/IRGen/async/run-call-void-throws-to-int-throwing_call-async-nothrow_call-sync-throw.sil +++ b/test/IRGen/async/run-call-void-throws-to-int-throwing_call-async-nothrow_call-sync-throw.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin import Swift diff --git a/test/IRGen/async/run-call-void-throws-to-int-throwing_call-async-throw.sil b/test/IRGen/async/run-call-void-throws-to-int-throwing_call-async-throw.sil index 6dd180887a612..5aa58383b29d2 100644 --- a/test/IRGen/async/run-call-void-throws-to-int-throwing_call-async-throw.sil +++ b/test/IRGen/async/run-call-void-throws-to-int-throwing_call-async-throw.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin diff --git a/test/IRGen/async/run-call-void-throws-to-int-throwing_call-sync-nothrow_call-async-throw.sil b/test/IRGen/async/run-call-void-throws-to-int-throwing_call-sync-nothrow_call-async-throw.sil index ac44411758f0d..020562ee89d81 100644 --- a/test/IRGen/async/run-call-void-throws-to-int-throwing_call-sync-nothrow_call-async-throw.sil +++ b/test/IRGen/async/run-call-void-throws-to-int-throwing_call-sync-nothrow_call-async-throw.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin diff --git a/test/IRGen/async/run-call-void-throws-to-int-throwing_call-sync-throw.sil b/test/IRGen/async/run-call-void-throws-to-int-throwing_call-sync-throw.sil index e265b1fd75d2d..a268c6ab6db07 100644 --- 
a/test/IRGen/async/run-call-void-throws-to-int-throwing_call-sync-throw.sil +++ b/test/IRGen/async/run-call-void-throws-to-int-throwing_call-sync-throw.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin diff --git a/test/IRGen/async/run-call-void-to-existential.sil b/test/IRGen/async/run-call-void-to-existential.sil index 9dc245041d479..afdb71e07f7cd 100644 --- a/test/IRGen/async/run-call-void-to-existential.sil +++ b/test/IRGen/async/run-call-void-to-existential.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin diff --git a/test/IRGen/async/run-call-void-to-int64-and-int64.sil b/test/IRGen/async/run-call-void-to-int64-and-int64.sil index 7c354c73208c2..2f7c941dba602 100644 --- a/test/IRGen/async/run-call-void-to-int64-and-int64.sil +++ b/test/IRGen/async/run-call-void-to-int64-and-int64.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin diff --git a/test/IRGen/async/run-call-void-to-int64.sil b/test/IRGen/async/run-call-void-to-int64.sil index fd1c4a3a67858..032ed04f32be1 100644 --- a/test/IRGen/async/run-call-void-to-int64.sil +++ b/test/IRGen/async/run-call-void-to-int64.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin diff --git a/test/IRGen/async/run-call-void-to-struct_large.sil b/test/IRGen/async/run-call-void-to-struct_large.sil index 10f1afe9ccadf..7afb1a5007c43 100644 --- a/test/IRGen/async/run-call-void-to-struct_large.sil +++ b/test/IRGen/async/run-call-void-to-struct_large.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: 
CPU=arm64e import Builtin import Swift diff --git a/test/IRGen/async/run-call_generic-protocolwitness_instance-generic-to-int64-and-generic.sil b/test/IRGen/async/run-call_generic-protocolwitness_instance-generic-to-int64-and-generic.sil index c3a9fbfe1735c..facd88e049a05 100644 --- a/test/IRGen/async/run-call_generic-protocolwitness_instance-generic-to-int64-and-generic.sil +++ b/test/IRGen/async/run-call_generic-protocolwitness_instance-generic-to-int64-and-generic.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin import Swift diff --git a/test/IRGen/async/run-call_generic-protocolwitness_instance-void-to-int64.sil b/test/IRGen/async/run-call_generic-protocolwitness_instance-void-to-int64.sil index 6367891014abf..de67a1c435ad7 100644 --- a/test/IRGen/async/run-call_generic-protocolwitness_instance-void-to-int64.sil +++ b/test/IRGen/async/run-call_generic-protocolwitness_instance-void-to-int64.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin import Swift diff --git a/test/IRGen/async/run-convertfunction-int64-to-void.sil b/test/IRGen/async/run-convertfunction-int64-to-void.sil index 7dda53421140e..53e8001513fa8 100644 --- a/test/IRGen/async/run-convertfunction-int64-to-void.sil +++ b/test/IRGen/async/run-convertfunction-int64-to-void.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin import Swift diff --git a/test/IRGen/async/run-partialapply-capture-class-to-void.sil b/test/IRGen/async/run-partialapply-capture-class-to-void.sil index 2fe81ff70bff5..f150079949ad3 100644 --- a/test/IRGen/async/run-partialapply-capture-class-to-void.sil +++ b/test/IRGen/async/run-partialapply-capture-class-to-void.sil @@ -10,7 +10,6 @@ // REQUIRES: 
swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin import Swift diff --git a/test/IRGen/async/run-partialapply-capture-generic_conformer-and-generic-to-void.sil b/test/IRGen/async/run-partialapply-capture-generic_conformer-and-generic-to-void.sil index 1292308ec8075..18ed05137b8c7 100644 --- a/test/IRGen/async/run-partialapply-capture-generic_conformer-and-generic-to-void.sil +++ b/test/IRGen/async/run-partialapply-capture-generic_conformer-and-generic-to-void.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin import Swift diff --git a/test/IRGen/async/run-partialapply-capture-inout-generic-and-in-generic-to-generic.sil b/test/IRGen/async/run-partialapply-capture-inout-generic-and-in-generic-to-generic.sil index c09bcb94ef04e..f0b113eadaf4b 100644 --- a/test/IRGen/async/run-partialapply-capture-inout-generic-and-in-generic-to-generic.sil +++ b/test/IRGen/async/run-partialapply-capture-inout-generic-and-in-generic-to-generic.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin import Swift diff --git a/test/IRGen/async/run-partialapply-capture-int64-int64-throws-to-int64.sil b/test/IRGen/async/run-partialapply-capture-int64-int64-throws-to-int64.sil index 42961d66b1674..d85580cd673eb 100644 --- a/test/IRGen/async/run-partialapply-capture-int64-int64-throws-to-int64.sil +++ b/test/IRGen/async/run-partialapply-capture-int64-int64-throws-to-int64.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin import Swift diff --git a/test/IRGen/async/run-partialapply-capture-int64-int64-to-int64.sil b/test/IRGen/async/run-partialapply-capture-int64-int64-to-int64.sil index 
df55ebf1a8ab4..df216f94dabdb 100644 --- a/test/IRGen/async/run-partialapply-capture-int64-int64-to-int64.sil +++ b/test/IRGen/async/run-partialapply-capture-int64-int64-to-int64.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin import Swift diff --git a/test/IRGen/async/run-partialapply-capture-int64-to-generic.sil b/test/IRGen/async/run-partialapply-capture-int64-to-generic.sil index 180488f9d9247..3cea647215ed3 100644 --- a/test/IRGen/async/run-partialapply-capture-int64-to-generic.sil +++ b/test/IRGen/async/run-partialapply-capture-int64-to-generic.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin import Swift diff --git a/test/IRGen/async/run-partialapply-capture-struct_classinstance_classinstance-and-int64-to-int64.sil b/test/IRGen/async/run-partialapply-capture-struct_classinstance_classinstance-and-int64-to-int64.sil index 052c2f8c8f163..ff51a42e4fd1c 100644 --- a/test/IRGen/async/run-partialapply-capture-struct_classinstance_classinstance-and-int64-to-int64.sil +++ b/test/IRGen/async/run-partialapply-capture-struct_classinstance_classinstance-and-int64-to-int64.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin import Swift diff --git a/test/IRGen/async/run-partialapply-capture-structgeneric_classinstance_to_struct_and_error.sil b/test/IRGen/async/run-partialapply-capture-structgeneric_classinstance_to_struct_and_error.sil index 7286331630346..16b769ddd6db4 100644 --- a/test/IRGen/async/run-partialapply-capture-structgeneric_classinstance_to_struct_and_error.sil +++ b/test/IRGen/async/run-partialapply-capture-structgeneric_classinstance_to_struct_and_error.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // 
REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin import Swift diff --git a/test/IRGen/async/run-partialapply-capture-structgeneric_polymorphic_constrained-to-void.sil b/test/IRGen/async/run-partialapply-capture-structgeneric_polymorphic_constrained-to-void.sil index 41bcbeaa77559..107dcbc937a1f 100644 --- a/test/IRGen/async/run-partialapply-capture-structgeneric_polymorphic_constrained-to-void.sil +++ b/test/IRGen/async/run-partialapply-capture-structgeneric_polymorphic_constrained-to-void.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin import Swift diff --git a/test/IRGen/async/run-partialapply-capture-type_structgeneric_polymorphic_constrained-to-void.sil b/test/IRGen/async/run-partialapply-capture-type_structgeneric_polymorphic_constrained-to-void.sil index 9f2112c2f8306..64c45df3ad5f3 100644 --- a/test/IRGen/async/run-partialapply-capture-type_structgeneric_polymorphic_constrained-to-void.sil +++ b/test/IRGen/async/run-partialapply-capture-type_structgeneric_polymorphic_constrained-to-void.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin import Swift diff --git a/test/IRGen/async/run-partialapply-capture-type_thin-and-classinstance-to-void.sil b/test/IRGen/async/run-partialapply-capture-type_thin-and-classinstance-to-void.sil index 41ec9afe6d3da..2cb041614665d 100644 --- a/test/IRGen/async/run-partialapply-capture-type_thin-and-classinstance-to-void.sil +++ b/test/IRGen/async/run-partialapply-capture-type_thin-and-classinstance-to-void.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin import Swift diff --git a/test/IRGen/async/run-switch-executor.swift 
b/test/IRGen/async/run-switch-executor.swift new file mode 100644 index 0000000000000..a13a60b7a5b1c --- /dev/null +++ b/test/IRGen/async/run-switch-executor.swift @@ -0,0 +1,54 @@ +// RUN: %empty-directory(%t) +// RUN: %target-build-swift -Xfrontend -enable-experimental-concurrency %s -module-name main -o %t/main +// RUN: %target-codesign %t/main +// RUN: %target-run %t/main | %FileCheck %s + +// REQUIRES: executable_test +// REQUIRES: concurrency +// UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e + +// Currently this test just checks if nothing crashes. +// TODO: also check if the current executor is the correct one. + +final actor class MyActor { + var p: Int + + @inline(never) + init(p: Int) { + self.p = p + } + + @inline(never) + func callee() async -> Int { + print("callee") + return p + } + + @inline(never) + func testit() async -> Int { + print("don't switch") + let x = await callee() + let otherActor = MyActor(p: 12) + print("switch") + let y = await otherActor.callee() + print("switch back") + return x + y + p + } +} + +// CHECK: run +// CHECK: don't switch +// CHECK: callee +// CHECK: switch +// CHECK: callee +// CHECK: switch back +// CHECK: 66 + +runAsync { + let a = MyActor(p: 27) + print("run") + await print(a.testit()) +} + + diff --git a/test/IRGen/async/run-thintothick-int64-to-void.sil b/test/IRGen/async/run-thintothick-int64-to-void.sil index 580df10310168..0dc38a8cc4e23 100644 --- a/test/IRGen/async/run-thintothick-int64-to-void.sil +++ b/test/IRGen/async/run-thintothick-int64-to-void.sil @@ -10,7 +10,6 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib -// UNSUPPORTED: CPU=arm64e import Builtin import Swift diff --git a/test/IRGen/builtins.swift b/test/IRGen/builtins.swift index 43adb03237981..e14cbc2e01deb 100644 --- a/test/IRGen/builtins.swift +++ b/test/IRGen/builtins.swift @@ -759,9 +759,10 @@ func generic_unsafeGuaranteed_test(_ t : T) -> T { } // CHECK-LABEL: define {{.*}} 
@{{.*}}unsafeGuaranteed_test -// CHECK: [[LOCAL:%.*]] = alloca %swift.refcounted* +// CHECK: [[LOCAL1:%.*]] = alloca %swift.refcounted* +// CHECK: [[LOCAL2:%.*]] = alloca %swift.refcounted* // CHECK: call %swift.refcounted* @swift_retain(%swift.refcounted* returned %0) -// CHECK: store %swift.refcounted* %0, %swift.refcounted** [[LOCAL]] +// CHECK: store %swift.refcounted* %0, %swift.refcounted** [[LOCAL2]] // CHECK-NOT: call void @swift_release(%swift.refcounted* %0) // CHECK: ret %swift.refcounted* %0 func unsafeGuaranteed_test(_ x: Builtin.NativeObject) -> Builtin.NativeObject { diff --git a/test/IRGen/class_forbid_objc_assoc_objects.swift b/test/IRGen/class_forbid_objc_assoc_objects.swift new file mode 100644 index 0000000000000..23fcaa8e49c57 --- /dev/null +++ b/test/IRGen/class_forbid_objc_assoc_objects.swift @@ -0,0 +1,79 @@ +// RUN: %target-swift-frontend -emit-ir %s | %FileCheck %s + +// REQUIRES: objc_interop + +// CHECK: @_METACLASS_DATA__TtC31class_forbid_objc_assoc_objects24AllowedToHaveAssocObject = internal constant { {{.*}} } { i32 129, +// CHECK: @_DATA__TtC31class_forbid_objc_assoc_objects24AllowedToHaveAssocObject = internal constant { {{.*}} } { i32 128, +final class AllowedToHaveAssocObject { +} + +// CHECK: @_METACLASS_DATA__TtC31class_forbid_objc_assoc_objects24UnableToHaveAssocObjects = internal constant { {{.*}} } { i32 1153, +// CHECK: @_DATA__TtC31class_forbid_objc_assoc_objects24UnableToHaveAssocObjects = internal constant { {{.*}} } { i32 1152, +@_semantics("objc.forbidAssociatedObjects") +final class UnableToHaveAssocObjects { +} + +// Class Metadata For Generic Metadata +// +// CHECK: [[CLASS_METADATA:@[0-9][0-9]*]] = internal constant <{ {{.*}} }> <{ {{.*}} { i32 1152, +// +// Generic Metadata Pattern +// +// CHECK: @"$s31class_forbid_objc_assoc_objects31UnableToHaveAssocObjectsGenericCMP" = internal constant {{.*}}[[CLASS_METADATA]] +@_semantics("objc.forbidAssociatedObjects") +final class UnableToHaveAssocObjectsGeneric { + var 
state: T + init(state: T) { self.state = state } +} + +// This should be normal. +// +// CHECK: @_METACLASS_DATA__TtC31class_forbid_objc_assoc_objects40UnsoundAbleToHaveAssocObjectsParentClass = internal constant { {{.*}} } { i32 129, +// CHECK: @_DATA__TtC31class_forbid_objc_assoc_objects40UnsoundAbleToHaveAssocObjectsParentClass = internal constant { {{.*}} } { i32 128, +class UnsoundAbleToHaveAssocObjectsParentClass { +} + +// This should have assoc object constraints +// +// CHECK: @_METACLASS_DATA__TtC31class_forbid_objc_assoc_objects39UnsoundUnableToHaveAssocObjectsSubClass = internal constant { {{.*}} } { i32 1153, +// CHECK: @_DATA__TtC31class_forbid_objc_assoc_objects39UnsoundUnableToHaveAssocObjectsSubClass = internal constant { {{.*}} } { i32 1152, +@_semantics("objc.forbidAssociatedObjects") +final class UnsoundUnableToHaveAssocObjectsSubClass : UnsoundAbleToHaveAssocObjectsParentClass { +} + +// CHECK: @_DATA__TtC31class_forbid_objc_assoc_objects41UnsoundAbleToHaveAssocObjectsParentClass2 = internal constant { {{.*}} } { i32 1152, +@_semantics("objc.forbidAssociatedObjects") +class UnsoundAbleToHaveAssocObjectsParentClass2 { +} + +// This has normal metadata. We must at runtime add the flags of the subclass to +// the child. 
+// +// CHECK: @_DATA__TtC31class_forbid_objc_assoc_objects40UnsoundUnableToHaveAssocObjectsSubClass2 = internal constant { {{.*}} } { i32 128, +final class UnsoundUnableToHaveAssocObjectsSubClass2 : UnsoundAbleToHaveAssocObjectsParentClass2 { +} + +// CHECK: @_DATA__TtC31class_forbid_objc_assoc_objects40UnsoundUnableToHaveAssocObjectsSubClass3 = internal constant { {{.*}} } { i32 128, +class UnsoundUnableToHaveAssocObjectsSubClass3 : UnsoundAbleToHaveAssocObjectsParentClass2 { +} + +class GenericAbleToHaveAssocObjectsParentClass { + public var state: T + init(state: T) { self.state = state } +} + +@_semantics("objc.forbidAssociatedObjects") +final class GenericUnableToHaveAssocObjectsSubClass : GenericAbleToHaveAssocObjectsParentClass { +} + +@_semantics("objc.forbidAssociatedObjects") +class GenericAbleToHaveAssocObjectsParentClass2 { + public var state: T + init(state: T) { self.state = state } +} + +final class GenericUnableToHaveAssocObjectsSubClass2 : GenericAbleToHaveAssocObjectsParentClass2 { +} + +class GenericUnableToHaveAssocObjectsSubClass3 : GenericAbleToHaveAssocObjectsParentClass2 { +} diff --git a/test/IRGen/class_resilience.swift b/test/IRGen/class_resilience.swift index 5f1e2212559fd..76886c0e6e83f 100644 --- a/test/IRGen/class_resilience.swift +++ b/test/IRGen/class_resilience.swift @@ -58,7 +58,7 @@ // -- field offset vector offset: // CHECK-SAME: i32 0, // -- superclass: -// CHECK-SAME: @"{{got.|__imp_}}$s15resilient_class22ResilientOutsideParentCMn" +// CHECK-SAME: @"{{got.|\\01__imp__?}}$s15resilient_class22ResilientOutsideParentCMn" // -- singleton metadata initialization cache: // CHECK-SAME: @"$s16class_resilience14ResilientChildCMl" // -- resilient pattern: @@ -69,17 +69,17 @@ // CHECK-SAME: i32 2, // CHECK-SAME: %swift.method_override_descriptor { // -- base class: -// CHECK-SAME: @"{{got.|__imp_}}$s15resilient_class22ResilientOutsideParentCMn" +// CHECK-SAME: @"{{got.|\\01__imp__?}}$s15resilient_class22ResilientOutsideParentCMn" // -- 
base method: -// CHECK-SAME: @"{{got.|__imp_}}$s15resilient_class22ResilientOutsideParentC8getValueSiyFTq" +// CHECK-SAME: @"{{got.|\\01__imp__?}}$s15resilient_class22ResilientOutsideParentC8getValueSiyFTq" // -- implementation: // CHECK-SAME: @"$s16class_resilience14ResilientChildC8getValueSiyF" // CHECK-SAME: } // CHECK-SAME: %swift.method_override_descriptor { // -- base class: -// CHECK-SAME: @"{{got.|__imp_}}$s15resilient_class22ResilientOutsideParentCMn" +// CHECK-SAME: @"{{got.|\\01__imp__?}}$s15resilient_class22ResilientOutsideParentCMn" // -- base method: -// CHECK-SAME: @"{{got.|__imp_}}$s15resilient_class22ResilientOutsideParentCACycfCTq" +// CHECK-SAME: @"{{got.|\\01__imp__?}}$s15resilient_class22ResilientOutsideParentCACycfCTq" // -- implementation: // CHECK-SAME: @"$s16class_resilience14ResilientChildCACycfC" // CHECK-SAME: } diff --git a/test/IRGen/dllimport.swift b/test/IRGen/dllimport.swift index 98fdc11c24591..acfa3284b470c 100644 --- a/test/IRGen/dllimport.swift +++ b/test/IRGen/dllimport.swift @@ -46,7 +46,7 @@ public func g() { // CHECK-NO-OPT-DAG: declare dllimport void @swift_deallocClassInstance(%swift.refcounted*, i32, i32) // CHECK-OPT-DAG: declare dllimport %swift.refcounted* @swift_retain(%swift.refcounted* returned) local_unnamed_addr -// CHECK-OPT-DAG: @"__imp_$s9dllexport1pMp" = external externally_initialized constant %swift.protocol* +// CHECK-OPT-DAG: @"\01__imp_{{_?}}$s9dllexport1pMp" = external externally_initialized constant %swift.protocol* // CHECK-OPT-DAG: declare dllimport swiftcc i8* @"$s9dllexport2ciAA1cCvau"() // CHECK-OPT-DAG: declare dllimport void @swift_deallocClassInstance(%swift.refcounted*, i32, i32) // CHECK-OPT-DAG: declare dllimport swiftcc %swift.refcounted* @"$s9dllexport1cCfd"(%T9dllexport1cC* swiftself) diff --git a/test/IRGen/keypath_witness_overrides.swift b/test/IRGen/keypath_witness_overrides.swift index 4a6fc364e91d0..7e2b81bf72fde 100644 --- a/test/IRGen/keypath_witness_overrides.swift +++ 
b/test/IRGen/keypath_witness_overrides.swift @@ -5,7 +5,7 @@ import protocol_overrides // CHECK: @keypath = private global -// CHECK-SAME: %swift.method_descriptor** @"{{got.|__imp_}}$s18protocol_overrides14OriginalGetterPy7ElementQz5IndexQzcigTq" +// CHECK-SAME: %swift.method_descriptor** @"{{got.|\\01__imp__?}}$s18protocol_overrides14OriginalGetterPy7ElementQz5IndexQzcigTq" public func getWritableKeyPath(_ c: OS, index: OS.Index) -> AnyKeyPath where OS.Index: Hashable { let keypath = \OS.[index] diff --git a/test/IRGen/keypaths.sil b/test/IRGen/keypaths.sil index cd1ea10eaec5b..efee4b9078b4b 100644 --- a/test/IRGen/keypaths.sil +++ b/test/IRGen/keypaths.sil @@ -171,7 +171,7 @@ sil_vtable C2 {} // CHECK-SAME: , // -- computed, get-only, identified by (indirected) function pointer, no args // CHECK-SAME: , -// CHECK-SAME: @{{got.|__imp_}}k_id +// CHECK-SAME: @{{got.|"\\01__imp__?}}k_id // CHECK-SAME: void (%TSi*, %T8keypaths1SV*)* {{.*}}@k_get{{.*}} // -- %l: computed diff --git a/test/IRGen/keypaths_external.sil b/test/IRGen/keypaths_external.sil index 109ce35759c9d..333edc8d458dd 100644 --- a/test/IRGen/keypaths_external.sil +++ b/test/IRGen/keypaths_external.sil @@ -41,13 +41,13 @@ sil @s_equals : $@convention(thin) (UnsafeRawPointer, sil @s_hash : $@convention(thin) (UnsafeRawPointer) -> Int // -- %t -// CHECK: [[KP_T:@keypath(\..*)?]] = private global <{ {{.*}} }> <{ {{.*}} i32 1, {{.*}} @"{{got.|__imp_}}$s23keypaths_external_other1GV1xxvpMV" +// CHECK: [[KP_T:@keypath(\..*)?]] = private global <{ {{.*}} }> <{ {{.*}} i32 1, {{.*}} @"{{got.|\\01__imp__?}}$s23keypaths_external_other1GV1xxvpMV" // CHECK-SAME: @"symbolic x" // -- computed get-only property, identified by indirect pointer // CHECK-SAME: // -- %u -// CHECK: [[KP_U:@keypath(\..*)?]] = private global <{ {{.*}} }> <{ {{.*}} i32 3, {{.*}} @"{{got.|__imp_}}$s23keypaths_external_other1GVyxqd__cSHRd__luipMV" +// CHECK: [[KP_U:@keypath(\..*)?]] = private global <{ {{.*}} }> <{ {{.*}} i32 3, {{.*}} 
@"{{got.|\\01__imp__?}}$s23keypaths_external_other1GVyxqd__cSHRd__luipMV" // CHECK-SAME: @"symbolic q_" // CHECK-SAME: @"symbolic x" // CHECK-SAME: @"get_witness_table diff --git a/test/IRGen/objc_super.swift b/test/IRGen/objc_super.swift index 645e75c53cc53..a2c1478ba1690 100644 --- a/test/IRGen/objc_super.swift +++ b/test/IRGen/objc_super.swift @@ -5,6 +5,8 @@ // REQUIRES: CPU=x86_64 // REQUIRES: objc_interop +// REQUIRES: rdar_72091795 + import gizmo // CHECK: [[CLASS:%objc_class]] = type @@ -136,5 +138,5 @@ class GenericRuncer : Gizmo { } // CHECK: define internal swiftcc void [[PARTIAL_FORWARDING_THUNK]](%swift.refcounted* swiftself %0) {{.*}} { -// CHECK: @"$ss12StaticStringV14withUTF8BufferyxxSRys5UInt8VGXElFxAFXEfU_" +// CHECK: @"$ss12StaticStringV14withUTF8BufferyxxSRys5UInt8VGXElFxAFXEfU_yt_Tgq5" // CHECK: } diff --git a/test/IRGen/pre_specialize.swift b/test/IRGen/pre_specialize.swift index 0f3979855bc64..303b9744314d7 100644 --- a/test/IRGen/pre_specialize.swift +++ b/test/IRGen/pre_specialize.swift @@ -1,34 +1,34 @@ // RUN: %empty-directory(%t) // Module A code generation. 
-// RUN: %target-swift-frontend -emit-ir -primary-file %S/Inputs/pre_specialize_module.swift -module-name A | %FileCheck %s -check-prefix=CHECK-A -check-prefix=CHECK-A-FRAG -// RUN: %target-swift-frontend -O -emit-ir -primary-file %S/Inputs/pre_specialize_module.swift -module-name A | %FileCheck %s -check-prefix=CHECK-A -check-prefix=CHECK-A-FRAG -// RUN: %target-swift-frontend -enable-library-evolution -emit-ir -primary-file %S/Inputs/pre_specialize_module.swift -module-name A | %FileCheck %s -check-prefix=CHECK-A -check-prefix=CHECK-A-RES -// RUN: %target-swift-frontend -O -enable-library-evolution -emit-ir -primary-file %S/Inputs/pre_specialize_module.swift -module-name A | %FileCheck %s -check-prefix=CHECK-A -check-prefix=CHECK-A-RES +// RUN: %target-swift-frontend -enable-experimental-prespecialization -emit-ir -primary-file %S/Inputs/pre_specialize_module.swift -module-name A | %FileCheck %s -check-prefix=CHECK-A -check-prefix=CHECK-A-FRAG +// RUN: %target-swift-frontend -enable-experimental-prespecialization -O -emit-ir -primary-file %S/Inputs/pre_specialize_module.swift -module-name A | %FileCheck %s -check-prefix=CHECK-A -check-prefix=CHECK-A-FRAG +// RUN: %target-swift-frontend -enable-experimental-prespecialization -enable-library-evolution -emit-ir -primary-file %S/Inputs/pre_specialize_module.swift -module-name A | %FileCheck %s -check-prefix=CHECK-A -check-prefix=CHECK-A-RES +// RUN: %target-swift-frontend -enable-experimental-prespecialization -O -enable-library-evolution -emit-ir -primary-file %S/Inputs/pre_specialize_module.swift -module-name A | %FileCheck %s -check-prefix=CHECK-A -check-prefix=CHECK-A-RES // Module B code generation with A.swiftmodule. 
// RUN: %empty-directory(%t) -// RUN: %target-build-swift -Xfrontend -validate-tbd-against-ir=missing -emit-module -emit-module-path=%t/A.swiftmodule -module-name A %S/Inputs/pre_specialize_module.swift -emit-library -o %t/%target-library-name(A) -// RUN: %target-swift-frontend -I %t -emit-ir -primary-file %S/Inputs/pre_specialize_module_B.swift -module-name B | %FileCheck %s -check-prefix=CHECK-B -// RUN: %target-swift-frontend -I %t -O -emit-ir -primary-file %S/Inputs/pre_specialize_module_B.swift -module-name B | %FileCheck %s -check-prefix=CHECK-B -// RUN: %target-build-swift -I %t -Xfrontend -validate-tbd-against-ir=missing -emit-module -emit-module-path=%t/B.swiftmodule -module-name B %S/Inputs/pre_specialize_module_B.swift -emit-library -o %t/%target-library-name(B) -L %t -lA -// RUN: %target-build-swift -swift-version 5 -I %t -Xfrontend -validate-tbd-against-ir=all -enable-library-evolution -emit-module-interface-path %t/B.swiftinterface -module-name B %S/Inputs/pre_specialize_module_B.swift -emit-library -o %t/%target-library-name(B) -L %t -lA +// RUN: %target-build-swift -Xfrontend -enable-experimental-prespecialization -Xfrontend -validate-tbd-against-ir=missing -emit-module -emit-module-path=%t/A.swiftmodule -module-name A %S/Inputs/pre_specialize_module.swift -emit-library -o %t/%target-library-name(A) +// RUN: %target-swift-frontend -enable-experimental-prespecialization -I %t -emit-ir -primary-file %S/Inputs/pre_specialize_module_B.swift -module-name B | %FileCheck %s -check-prefix=CHECK-B +// RUN: %target-swift-frontend -enable-experimental-prespecialization -I %t -O -emit-ir -primary-file %S/Inputs/pre_specialize_module_B.swift -module-name B | %FileCheck %s -check-prefix=CHECK-B +// RUN: %target-build-swift -Xfrontend -enable-experimental-prespecialization -I %t -Xfrontend -validate-tbd-against-ir=missing -emit-module -emit-module-path=%t/B.swiftmodule -module-name B %S/Inputs/pre_specialize_module_B.swift -emit-library -o 
%t/%target-library-name(B) -L %t -lA +// RUN: %target-build-swift -Xfrontend -enable-experimental-prespecialization -swift-version 5 -I %t -Xfrontend -validate-tbd-against-ir=all -enable-library-evolution -emit-module-interface-path %t/B.swiftinterface -module-name B %S/Inputs/pre_specialize_module_B.swift -emit-library -o %t/%target-library-name(B) -L %t -lA // Module B code generation with A.swiftmodule with library evolution. // RUN: %empty-directory(%t) -// RUN: %target-build-swift -enable-library-evolution -Xfrontend -validate-tbd-against-ir=all -emit-module -emit-module-path=%t/A.swiftmodule -module-name A %S/Inputs/pre_specialize_module.swift -emit-library -o %t/%target-library-name(A) -// RUN: %target-swift-frontend -I %t -emit-ir -primary-file %S/Inputs/pre_specialize_module_B.swift -module-name B | %FileCheck %s -check-prefix=CHECK-B -// RUN: %target-swift-frontend -I %t -O -emit-ir -primary-file %S/Inputs/pre_specialize_module_B.swift -module-name B | %FileCheck %s -check-prefix=CHECK-B -// RUN: %target-build-swift -I %t -Xfrontend -validate-tbd-against-ir=missing -emit-module -emit-module-path=%t/B.swiftmodule -module-name B %S/Inputs/pre_specialize_module_B.swift -emit-library -o %t/%target-library-name(B) -L %t -lA -// RUN: %target-build-swift -swift-version 5 -I %t -Xfrontend -validate-tbd-against-ir=all -enable-library-evolution -emit-module-interface-path %t/B.swiftinterface -module-name B %S/Inputs/pre_specialize_module_B.swift -emit-library -o %t/%target-library-name(B) -L %t -lA +// RUN: %target-build-swift -Xfrontend -enable-experimental-prespecialization -enable-library-evolution -Xfrontend -validate-tbd-against-ir=all -emit-module -emit-module-path=%t/A.swiftmodule -module-name A %S/Inputs/pre_specialize_module.swift -emit-library -o %t/%target-library-name(A) +// RUN: %target-swift-frontend -enable-experimental-prespecialization -I %t -emit-ir -primary-file %S/Inputs/pre_specialize_module_B.swift -module-name B | %FileCheck %s 
-check-prefix=CHECK-B +// RUN: %target-swift-frontend -enable-experimental-prespecialization -I %t -O -emit-ir -primary-file %S/Inputs/pre_specialize_module_B.swift -module-name B | %FileCheck %s -check-prefix=CHECK-B +// RUN: %target-build-swift -Xfrontend -enable-experimental-prespecialization -I %t -Xfrontend -validate-tbd-against-ir=missing -emit-module -emit-module-path=%t/B.swiftmodule -module-name B %S/Inputs/pre_specialize_module_B.swift -emit-library -o %t/%target-library-name(B) -L %t -lA +// RUN: %target-build-swift -Xfrontend -enable-experimental-prespecialization -swift-version 5 -I %t -Xfrontend -validate-tbd-against-ir=all -enable-library-evolution -emit-module-interface-path %t/B.swiftinterface -module-name B %S/Inputs/pre_specialize_module_B.swift -emit-library -o %t/%target-library-name(B) -L %t -lA // Module B code generation with A.swiftinterface with library evolution. // RUN: %empty-directory(%t) -// RUN: %target-build-swift -enable-library-evolution -Xfrontend -validate-tbd-against-ir=all -emit-module-interface-path %t/A.swiftinterface -module-name A %S/Inputs/pre_specialize_module.swift -emit-library -o %t/%target-library-name(A) -swift-version 5 -// RUN: %target-swift-frontend -I %t -emit-ir -primary-file %S/Inputs/pre_specialize_module_B.swift -module-name B | %FileCheck %s -check-prefix=CHECK-B -// RUN: %target-swift-frontend -I %t -O -emit-ir -primary-file %S/Inputs/pre_specialize_module_B.swift -module-name B | %FileCheck %s -check-prefix=CHECK-B -// RUN: %target-build-swift -I %t -Xfrontend -validate-tbd-against-ir=missing -emit-module -emit-module-path=%t/B.swiftmodule -module-name B %S/Inputs/pre_specialize_module_B.swift -emit-library -o %t/%target-library-name(B) -L %t -lA -// RUN: %target-build-swift -swift-version 5 -I %t -Xfrontend -validate-tbd-against-ir=all -enable-library-evolution -emit-module-interface-path %t/B.swiftinterface -module-name B %S/Inputs/pre_specialize_module_B.swift -emit-library -o 
%t/%target-library-name(B) -L %t -lA +// RUN: %target-build-swift -Xfrontend -enable-experimental-prespecialization -enable-library-evolution -Xfrontend -validate-tbd-against-ir=all -emit-module-interface-path %t/A.swiftinterface -module-name A %S/Inputs/pre_specialize_module.swift -emit-library -o %t/%target-library-name(A) -swift-version 5 +// RUN: %target-swift-frontend -enable-experimental-prespecialization -I %t -emit-ir -primary-file %S/Inputs/pre_specialize_module_B.swift -module-name B | %FileCheck %s -check-prefix=CHECK-B +// RUN: %target-swift-frontend -enable-experimental-prespecialization -I %t -O -emit-ir -primary-file %S/Inputs/pre_specialize_module_B.swift -module-name B | %FileCheck %s -check-prefix=CHECK-B +// RUN: %target-build-swift -Xfrontend -enable-experimental-prespecialization -I %t -Xfrontend -validate-tbd-against-ir=missing -emit-module -emit-module-path=%t/B.swiftmodule -module-name B %S/Inputs/pre_specialize_module_B.swift -emit-library -o %t/%target-library-name(B) -L %t -lA +// RUN: %target-build-swift -Xfrontend -enable-experimental-prespecialization -swift-version 5 -I %t -Xfrontend -validate-tbd-against-ir=all -enable-library-evolution -emit-module-interface-path %t/B.swiftinterface -module-name B %S/Inputs/pre_specialize_module_B.swift -emit-library -o %t/%target-library-name(B) -L %t -lA // Module A tests // -------------- @@ -104,39 +104,39 @@ // Fragile .swiftmodule // RUN: %empty-directory(%t) -// RUN: %target-build-swift -emit-module -emit-module-path=%t/A.swiftmodule -module-name A %S/Inputs/pre_specialize_module.swift -// RUN: %target-build-swift -I %t -emit-module -emit-module-path=%t/B.swiftmodule -module-name B %S/Inputs/pre_specialize_module_B.swift -// RUN: %target-swift-frontend -O -I %t -emit-ir -primary-file %s -module-name C | %FileCheck %s -check-prefix=CHECK-C +// RUN: %target-build-swift -Xfrontend -enable-experimental-prespecialization -emit-module -emit-module-path=%t/A.swiftmodule -module-name A 
%S/Inputs/pre_specialize_module.swift +// RUN: %target-build-swift -Xfrontend -enable-experimental-prespecialization -I %t -emit-module -emit-module-path=%t/B.swiftmodule -module-name B %S/Inputs/pre_specialize_module_B.swift +// RUN: %target-swift-frontend -enable-experimental-prespecialization -O -I %t -emit-ir -primary-file %s -module-name C | %FileCheck %s -check-prefix=CHECK-C // Fragile optimized .swiftmodule // RUN: %empty-directory(%t) -// RUN: %target-build-swift -O -emit-module -emit-module-path=%t/A.swiftmodule -module-name A %S/Inputs/pre_specialize_module.swift -// RUN: %target-build-swift -O -I %t -emit-module -emit-module-path=%t/B.swiftmodule -module-name B %S/Inputs/pre_specialize_module_B.swift -// RUN: %target-swift-frontend -O -I %t -emit-ir -primary-file %s -module-name C | %FileCheck %s -check-prefix=CHECK-C +// RUN: %target-build-swift -Xfrontend -enable-experimental-prespecialization -O -emit-module -emit-module-path=%t/A.swiftmodule -module-name A %S/Inputs/pre_specialize_module.swift +// RUN: %target-build-swift -Xfrontend -enable-experimental-prespecialization -O -I %t -emit-module -emit-module-path=%t/B.swiftmodule -module-name B %S/Inputs/pre_specialize_module_B.swift +// RUN: %target-swift-frontend -enable-experimental-prespecialization -O -I %t -emit-ir -primary-file %s -module-name C | %FileCheck %s -check-prefix=CHECK-C // Resilient .swiftmodule // RUN: %empty-directory(%t) -// RUN: %target-build-swift -enable-library-evolution -emit-module -emit-module-path=%t/A.swiftmodule -module-name A %S/Inputs/pre_specialize_module.swift -// RUN: %target-build-swift -enable-library-evolution -I %t -emit-module -emit-module-path=%t/B.swiftmodule -module-name B %S/Inputs/pre_specialize_module_B.swift -// RUN: %target-swift-frontend -O -I %t -emit-ir -primary-file %s -module-name C | %FileCheck %s -check-prefix=CHECK-C +// RUN: %target-build-swift -Xfrontend -enable-experimental-prespecialization -enable-library-evolution -emit-module 
-emit-module-path=%t/A.swiftmodule -module-name A %S/Inputs/pre_specialize_module.swift +// RUN: %target-build-swift -Xfrontend -enable-experimental-prespecialization -enable-library-evolution -I %t -emit-module -emit-module-path=%t/B.swiftmodule -module-name B %S/Inputs/pre_specialize_module_B.swift +// RUN: %target-swift-frontend -enable-experimental-prespecialization -O -I %t -emit-ir -primary-file %s -module-name C | %FileCheck %s -check-prefix=CHECK-C // Resilient optimized .swiftmodule // RUN: %empty-directory(%t) -// RUN: %target-build-swift -O -enable-library-evolution -emit-module -emit-module-path=%t/A.swiftmodule -module-name A %S/Inputs/pre_specialize_module.swift -// RUN: %target-build-swift -O -enable-library-evolution -I %t -emit-module -emit-module-path=%t/B.swiftmodule -module-name B %S/Inputs/pre_specialize_module_B.swift -// RUN: %target-swift-frontend -O -I %t -emit-ir -primary-file %s -module-name C | %FileCheck %s -check-prefix=CHECK-C +// RUN: %target-build-swift -Xfrontend -enable-experimental-prespecialization -O -enable-library-evolution -emit-module -emit-module-path=%t/A.swiftmodule -module-name A %S/Inputs/pre_specialize_module.swift +// RUN: %target-build-swift -Xfrontend -enable-experimental-prespecialization -O -enable-library-evolution -I %t -emit-module -emit-module-path=%t/B.swiftmodule -module-name B %S/Inputs/pre_specialize_module_B.swift +// RUN: %target-swift-frontend -enable-experimental-prespecialization -O -I %t -emit-ir -primary-file %s -module-name C | %FileCheck %s -check-prefix=CHECK-C // .swiftinterface // RUN: %empty-directory(%t) -// RUN: %target-build-swift -c -enable-library-evolution -emit-module-interface-path %t/A.swiftinterface -module-name A %S/Inputs/pre_specialize_module.swift -o %t/A.o -swift-version 5 -// RUN: %target-build-swift -c -enable-library-evolution -I %t -emit-module-interface-path %t/B.swiftinterface -module-name B %S/Inputs/pre_specialize_module_B.swift -o %t/B.o -swift-version 5 -// RUN: 
%target-swift-frontend -O -I %t -emit-ir -primary-file %s -module-name C | %FileCheck %s -check-prefix=CHECK-C +// RUN: %target-build-swift -Xfrontend -enable-experimental-prespecialization -c -enable-library-evolution -emit-module-interface-path %t/A.swiftinterface -module-name A %S/Inputs/pre_specialize_module.swift -o %t/A.o -swift-version 5 +// RUN: %target-build-swift -Xfrontend -enable-experimental-prespecialization -c -enable-library-evolution -I %t -emit-module-interface-path %t/B.swiftinterface -module-name B %S/Inputs/pre_specialize_module_B.swift -o %t/B.o -swift-version 5 +// RUN: %target-swift-frontend -enable-experimental-prespecialization -O -I %t -emit-ir -primary-file %s -module-name C | %FileCheck %s -check-prefix=CHECK-C // Optimized .swiftinterface // RUN: %empty-directory(%t) -// RUN: %target-build-swift -O -c -enable-library-evolution -emit-module-interface-path %t/A.swiftinterface -module-name A %S/Inputs/pre_specialize_module.swift -o %t/A.o -swift-version 5 -// RUN: %target-build-swift -O -c -enable-library-evolution -I %t -emit-module-interface-path %t/B.swiftinterface -module-name B %S/Inputs/pre_specialize_module_B.swift -o %t/B.o -swift-version 5 -// RUN: %target-swift-frontend -O -I %t -emit-ir -primary-file %s -module-name C | %FileCheck %s -check-prefix=CHECK-C +// RUN: %target-build-swift -Xfrontend -enable-experimental-prespecialization -O -c -enable-library-evolution -emit-module-interface-path %t/A.swiftinterface -module-name A %S/Inputs/pre_specialize_module.swift -o %t/A.o -swift-version 5 +// RUN: %target-build-swift -Xfrontend -enable-experimental-prespecialization -O -c -enable-library-evolution -I %t -emit-module-interface-path %t/B.swiftinterface -module-name B %S/Inputs/pre_specialize_module_B.swift -o %t/B.o -swift-version 5 +// RUN: %target-swift-frontend -enable-experimental-prespecialization -O -I %t -emit-ir -primary-file %s -module-name C | %FileCheck %s -check-prefix=CHECK-C import A import B diff --git 
a/test/IRGen/prespecialized-metadata/class-fileprivate-2argument-1_distinct_use-1st_argument_generic_class-2nd_argument_distinct_generic_class.swift b/test/IRGen/prespecialized-metadata/class-fileprivate-2argument-1_distinct_use-1st_argument_generic_class-2nd_argument_distinct_generic_class.swift index 1ed2abde1ffcc..10bcfb296a3f5 100644 --- a/test/IRGen/prespecialized-metadata/class-fileprivate-2argument-1_distinct_use-1st_argument_generic_class-2nd_argument_distinct_generic_class.swift +++ b/test/IRGen/prespecialized-metadata/class-fileprivate-2argument-1_distinct_use-1st_argument_generic_class-2nd_argument_distinct_generic_class.swift @@ -69,7 +69,7 @@ // CHECK-apple-SAME: }*, // CHECK-apple-SAME: i8*, // CHECK-apple-SAME: i8* -// CHECK-apple-SAME: }* @_DATA__TtC4mainP[[UNIQUE_ID_2:[0-9A-Z_]+]]5Value to [[INT]] +// CHECK-apple-SAME: }* @"_DATA_$s4main5Value[[UNIQUE_ID_1]]LLCyAA9Argument1ACLLCySiGAA9Argument2ACLLCySSGGMf" to [[INT]] // CHECK-apple-SAME: ), // CHECK-apple-SAME: [[INT]] 2 // CHECK-apple-SAME: ), diff --git a/test/IRGen/prespecialized-metadata/class-fileprivate-2argument-1_distinct_use-1st_argument_generic_class-2nd_argument_same_generic_class_different_value.swift b/test/IRGen/prespecialized-metadata/class-fileprivate-2argument-1_distinct_use-1st_argument_generic_class-2nd_argument_same_generic_class_different_value.swift index 2f85269c2504d..22c9db89768d2 100644 --- a/test/IRGen/prespecialized-metadata/class-fileprivate-2argument-1_distinct_use-1st_argument_generic_class-2nd_argument_same_generic_class_different_value.swift +++ b/test/IRGen/prespecialized-metadata/class-fileprivate-2argument-1_distinct_use-1st_argument_generic_class-2nd_argument_same_generic_class_different_value.swift @@ -67,7 +67,7 @@ // CHECK-apple-SAME: }*, // CHECK-apple-SAME: i8*, // CHECK-apple-SAME: i8* -// CHECK-apple-SAME: }* @_DATA__TtC4mainP[[UNIQUE_ID_2:[0-9A-Z_]+]]5Value to [[INT]] +// CHECK-apple-SAME: }* 
@"_DATA_$s4main5Value[[UNIQUE_ID_1]]LLCyAA9Argument1ACLLCySiGAFySSGGMf" to [[INT]] // CHECK-apple-SAME: ), // CHECK-apple-SAME: [[INT]] 2 // CHECK-apple-SAME: ), diff --git a/test/IRGen/prespecialized-metadata/class-fileprivate-2argument-1_distinct_use-1st_argument_generic_class-2nd_argument_same_generic_class_same_value.swift b/test/IRGen/prespecialized-metadata/class-fileprivate-2argument-1_distinct_use-1st_argument_generic_class-2nd_argument_same_generic_class_same_value.swift index 6babf415dd10d..6caae6a57c8ef 100644 --- a/test/IRGen/prespecialized-metadata/class-fileprivate-2argument-1_distinct_use-1st_argument_generic_class-2nd_argument_same_generic_class_same_value.swift +++ b/test/IRGen/prespecialized-metadata/class-fileprivate-2argument-1_distinct_use-1st_argument_generic_class-2nd_argument_same_generic_class_same_value.swift @@ -67,7 +67,7 @@ // CHECK-apple-SAME: }*, // CHECK-apple-SAME: i8*, // CHECK-apple-SAME: i8* -// CHECK-apple-SAME: }* @_DATA__TtC4mainP[[UNIQUE_ID_2:[0-9A-Z_]+]]5Value to [[INT]] +// CHECK-apple-SAME: }* @"_DATA_$s4main5Value[[UNIQUE_ID_1]]LLCyAA9Argument1ACLLCySiGAGGMf" to [[INT]] // CHECK-apple-SAME: ), // CHECK-apple-SAME: [[INT]] 2 // CHECK-apple-SAME: ), diff --git a/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1arg-2ancs-1distinct_use-1st_anc_gen-1arg-1st_arg_con_int-2nd_anc_gen-1st-arg_con_double.swift b/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1arg-2ancs-1distinct_use-1st_anc_gen-1arg-1st_arg_con_int-2nd_anc_gen-1st-arg_con_double.swift index ffcd6d796368c..ecfa120c180d7 100644 --- a/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1arg-2ancs-1distinct_use-1st_anc_gen-1arg-1st_arg_con_int-2nd_anc_gen-1st-arg_con_double.swift +++ b/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1arg-2ancs-1distinct_use-1st_anc_gen-1arg-1st_arg_con_int-2nd_anc_gen-1st-arg_con_double.swift @@ -126,7 +126,7 @@ // CHECK-apple-SAME: }*, // CHECK-apple-SAME: i8*, // CHECK-apple-SAME: 
i8* -// CHECK-apple-SAME: }* @_DATA__TtC4mainP[[UNIQUE_ID_2:[0-9A-Za-z_]+]]5Value to [[INT]] +// CHECK-apple-SAME: }* @"_DATA_$s4main5Value[[UNIQUE_ID_1]]CySSGMf" to [[INT]] // CHECK-apple-SAME: ), // CHECK-apple-SAME: [[INT]] 2 // CHECK-apple-SAME: ), diff --git a/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1arg-2ancs-1distinct_use-1st_anc_gen-1arg-1st_arg_con_int-2nd_anc_gen-1st-arg_subclass_arg.swift b/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1arg-2ancs-1distinct_use-1st_anc_gen-1arg-1st_arg_con_int-2nd_anc_gen-1st-arg_subclass_arg.swift index 3cf9fd7cd99a2..ad39316fba1eb 100644 --- a/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1arg-2ancs-1distinct_use-1st_anc_gen-1arg-1st_arg_con_int-2nd_anc_gen-1st-arg_subclass_arg.swift +++ b/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1arg-2ancs-1distinct_use-1st_anc_gen-1arg-1st_arg_con_int-2nd_anc_gen-1st-arg_subclass_arg.swift @@ -118,7 +118,7 @@ // CHECK-apple-SAME: }*, // CHECK-apple-SAME: i8*, // CHECK-apple-SAME: i8* -// CHECK-apple-SAME: }* @_DATA__TtC4mainP[[UNIQUE_ID_2:[0-9A-Za-z_]+]]5Value to [[INT]] +// CHECK-apple-SAME: }* @"_DATA_$s4main5Value[[UNIQUE_ID_1]]CySSGMf" to [[INT]] // CHECK-apple-SAME: ), // CHECK-apple-SAME: [[INT]] 2 // CHECK-apple-SAME: ), diff --git a/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1arg-2ancs-1distinct_use-1st_anc_gen-1arg-1st_arg_subclass_arg-2nd_anc_gen-1st-arg_con_int.swift b/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1arg-2ancs-1distinct_use-1st_anc_gen-1arg-1st_arg_subclass_arg-2nd_anc_gen-1st-arg_con_int.swift index d2b8879a8d834..9921b6e73967d 100644 --- a/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1arg-2ancs-1distinct_use-1st_anc_gen-1arg-1st_arg_subclass_arg-2nd_anc_gen-1st-arg_con_int.swift +++ 
b/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1arg-2ancs-1distinct_use-1st_anc_gen-1arg-1st_arg_subclass_arg-2nd_anc_gen-1st-arg_con_int.swift @@ -122,7 +122,7 @@ // CHECK-apple-SAME: }*, // CHECK-apple-SAME: i8*, // CHECK-apple-SAME: i8* -// CHECK-apple-SAME: }* @_DATA__TtC4mainP[[UNIQUE_ID_1]]5Value to [[INT]] +// CHECK-apple-SAME: }* @"_DATA_$s4main5Value[[UNIQUE_ID_1]]LLCySSGMf" to [[INT]] // CHECK-apple-SAME: ), // CHECK-apple-SAME: [[INT]] 2 // CHECK-apple-SAME: ), diff --git a/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1arg-2ancs-1distinct_use-1st_anc_gen-1arg-1st_arg_subcls_arg-2nd_anc_gen-1st-arg_subcls_arg.swift b/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1arg-2ancs-1distinct_use-1st_anc_gen-1arg-1st_arg_subcls_arg-2nd_anc_gen-1st-arg_subcls_arg.swift index 5e41fb587464e..a5e19d935e5ad 100644 --- a/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1arg-2ancs-1distinct_use-1st_anc_gen-1arg-1st_arg_subcls_arg-2nd_anc_gen-1st-arg_subcls_arg.swift +++ b/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1arg-2ancs-1distinct_use-1st_anc_gen-1arg-1st_arg_subcls_arg-2nd_anc_gen-1st-arg_subcls_arg.swift @@ -114,7 +114,7 @@ // CHECK-apple-SAME: }*, // CHECK-apple-SAME: i8*, // CHECK-apple-SAME: i8* -// CHECK-apple-SAME: }* @_DATA__TtC4mainP[[UNIQUE_ID_1]]5Value to [[INT]] +// CHECK-apple-SAME: }* @"_DATA_$s4main5Value[[UNIQUE_ID_1]]LLCySiGMf" to [[INT]] // CHECK-apple-SAME: ), // CHECK-apple-SAME: [[INT]] 2 // CHECK-apple-SAME: ), diff --git a/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1ancestor-1distinct_use-1st_ancestor_generic-1argument-1st_argument_constant_int.swift b/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1ancestor-1distinct_use-1st_ancestor_generic-1argument-1st_argument_constant_int.swift index 1b3e85f0ba59a..c5891d4d412b4 100644 --- 
a/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1ancestor-1distinct_use-1st_ancestor_generic-1argument-1st_argument_constant_int.swift +++ b/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1ancestor-1distinct_use-1st_ancestor_generic-1argument-1st_argument_constant_int.swift @@ -112,7 +112,7 @@ // CHECK-apple-SAME: }*, // CHECK-apple-SAME: i8*, // CHECK-apple-SAME: i8* -// CHECK-apple-SAME: }* @_DATA__TtC4mainP33_4D007063F2EFC1988130B7D42A21EE4C5Value to [[INT]] +// CHECK-apple-SAME: }* @"_DATA_$s4main5Value[[UNIQUE_ID_1]]CySSGMf" to [[INT]] // CHECK-apple-SAME: ), // CHECK-apple-SAME: [[INT]] 2 // CHECK-apple-SAME: ), diff --git a/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1ancestor-1distinct_use-1st_ancestor_generic-1argument-1st_argument_subclass_argument.swift b/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1ancestor-1distinct_use-1st_ancestor_generic-1argument-1st_argument_subclass_argument.swift index 8e08ba643b58a..91ef8215d5dff 100644 --- a/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1ancestor-1distinct_use-1st_ancestor_generic-1argument-1st_argument_subclass_argument.swift +++ b/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1ancestor-1distinct_use-1st_ancestor_generic-1argument-1st_argument_subclass_argument.swift @@ -56,7 +56,7 @@ // : }*, // : i8*, // : i8* -// : }* @_DATA__TtC4mainP33_496329636AC05466637A72F247DC6ABC9Ancestor1 to i64 +// : }* @"_DATA_$s4main9Ancestor1[[UNIQUE_ID_1]]CySiGMf" to i64 // : ), // : i64 2 // : ), @@ -184,7 +184,7 @@ // CHECK-apple-SAME: }*, // CHECK-apple-SAME: i8*, // CHECK-apple-SAME: i8* -// CHECK-apple-SAME: }* @_DATA__TtC4mainP33_496329636AC05466637A72F247DC6ABC5Value to [[INT]] +// CHECK-apple-SAME: }* @"_DATA_$s4main5Value[[UNIQUE_ID_1]]CySiGMf" to [[INT]] // CHECK-apple-SAME: ), // CHECK-apple-SAME: [[INT]] 2 // CHECK-apple-SAME: ), diff --git 
a/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1ancestor-1distinct_use-1st_ancestor_generic-1argument-1st_argument_superclass.swift b/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1ancestor-1distinct_use-1st_ancestor_generic-1argument-1st_argument_superclass.swift index 2bc5e3cecfedb..a9f01470e49c2 100644 --- a/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1ancestor-1distinct_use-1st_ancestor_generic-1argument-1st_argument_superclass.swift +++ b/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1ancestor-1distinct_use-1st_ancestor_generic-1argument-1st_argument_superclass.swift @@ -108,7 +108,7 @@ // CHECK-apple-SAME: }*, // CHECK-apple-SAME: i8*, // CHECK-apple-SAME: i8* -// CHECK-apple-SAME: }* @_DATA__TtC4mainP[[UNIQUE_ID_1]]5Value to [[INT]] +// CHECK-apple-SAME: }* @"_DATA_$s4main5Value[[UNIQUE_ID_1]]LLCyAA9Ancestor1ACLLCySSGGMf" to [[INT]] // CHECK-apple-SAME: ), // CHECK-apple-SAME: [[INT]] 2 // CHECK-apple-SAME: ), diff --git a/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1ancestor-1distinct_use-1st_ancestor_nongeneric-external-nonresilient.swift b/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1ancestor-1distinct_use-1st_ancestor_nongeneric-external-nonresilient.swift index 57d845df0081d..f4eb1b81f3736 100644 --- a/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1ancestor-1distinct_use-1st_ancestor_nongeneric-external-nonresilient.swift +++ b/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1ancestor-1distinct_use-1st_ancestor_nongeneric-external-nonresilient.swift @@ -81,7 +81,7 @@ import TestModule // CHECK-apple-SAME: }*, // CHECK-apple-SAME: i8*, // CHECK-apple-SAME: i8* -// CHECK-apple-SAME: }* @_DATA__TtC4mainP[[UNIQUE_ID_1]]5Value to [[INT]] +// CHECK-apple-SAME: }* @"_DATA_$s4main5Value[[UNIQUE_ID_1]]LLCySiGMf" to [[INT]] // CHECK-apple-SAME: ), // 
CHECK-apple-SAME: [[INT]] 2 // CHECK-apple-SAME: ), diff --git a/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1ancestor-1distinct_use-1st_ancestor_nongeneric-fileprivate.swift b/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1ancestor-1distinct_use-1st_ancestor_nongeneric-fileprivate.swift index 7670c9b354281..943ccfc4d571d 100644 --- a/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1ancestor-1distinct_use-1st_ancestor_nongeneric-fileprivate.swift +++ b/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1ancestor-1distinct_use-1st_ancestor_nongeneric-fileprivate.swift @@ -160,7 +160,7 @@ // CHECK-apple-SAME: }*, // CHECK-apple-SAME: i8*, // CHECK-apple-SAME: i8* -// CHECK-apple-SAME: }* @_DATA__TtC4mainP[[UNIQUE_ID_1]]5Value to [[INT]] +// CHECK-apple-SAME: }* @"_DATA_$s4main5Value[[UNIQUE_ID_1]]LLCySiGMf" to [[INT]] // CHECK-apple-SAME: ), // CHECK-apple-SAME: [[INT]] 2 // CHECK-apple-SAME: ), diff --git a/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1distinct_use-1st_argument_generic_class-1argument.swift b/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1distinct_use-1st_argument_generic_class-1argument.swift index 00dd9d1cbeca9..54a53156a11c8 100644 --- a/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1distinct_use-1st_argument_generic_class-1argument.swift +++ b/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1distinct_use-1st_argument_generic_class-1argument.swift @@ -53,7 +53,7 @@ // : }*, // : i8*, // : i8* -// : }* @_DATA__TtC4mainP33_7FA9B79F85D716E7DB33358C0057E87D9Argument1 to i64 +// : }* @"_DATA_$s4main9Argument1[[UNIQUE_ID_1]]CySiGMf" to i64 // : ), // : i64 2 // : ), @@ -148,7 +148,7 @@ // CHECK-apple-SAME: }*, // CHECK-apple-SAME: i8*, // CHECK-apple-SAME: i8* -// CHECK-apple-SAME: }* @_DATA__TtC4mainP33_7FA9B79F85D716E7DB33358C0057E87D5Value to [[INT]] 
+// CHECK-apple-SAME: }* @"_DATA_$s4main5Value[[UNIQUE_ID_1]]CyAA9Argument1ACLLCySiGGMf" to [[INT]] // CHECK-apple-SAME: ), // CHECK-apple-SAME: [[INT]] 2 // CHECK-apple-SAME: ), diff --git a/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1distinct_use.swift b/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1distinct_use.swift index 1cb12fd863f9f..c04d60b8b3cc4 100644 --- a/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1distinct_use.swift +++ b/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1distinct_use.swift @@ -51,7 +51,7 @@ // CHECK-apple-SAME: { i32, i32, [1 x { [[INT]]*, i8*, i8*, i32, i32 }] }*, // CHECK-apple-SAME: i8*, // CHECK-apple-SAME: i8* -// CHECK-apple-SAME: }* @_DATA__TtC4mainP[[UNIQUE_ID_2:[0-9A-Z_]+]]5Value to [[INT]] +// CHECK-apple-SAME: }* @"_DATA_$s4main5Value[[UNIQUE_ID_1]]CySiGMf" to [[INT]] // CHECK-apple-SAME: ), // CHECK-apple-SAME: [[INT]] 2 // CHECK-apple-SAME: ), diff --git a/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1distinct_use_generic_class.swift b/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1distinct_use_generic_class.swift index 165bb084d3263..073698972f209 100644 --- a/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1distinct_use_generic_class.swift +++ b/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1distinct_use_generic_class.swift @@ -73,7 +73,7 @@ // CHECK-apple-SAME: }*, // CHECK-apple-SAME: i8*, // CHECK-apple-SAME: i8* -// CHECK-apple-SAME: }* @_DATA__TtC4mainP[[UNIQUE_ID_1]]5Value to [[INT]] +// CHECK-apple-SAME: }* @"_DATA_$s4main5Value[[UNIQUE_ID_1]]LLCyAA3BoxACLLCySiGGMf" to [[INT]] // CHECK-apple-SAME: ), // CHECK-apple-SAME: [[INT]] 2 // CHECK-apple-SAME: ), diff --git a/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1distinct_use_generic_class_specialized_at_generic_class.swift 
b/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1distinct_use_generic_class_specialized_at_generic_class.swift index 839ad5c975ca7..ec049d0df68af 100644 --- a/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1distinct_use_generic_class_specialized_at_generic_class.swift +++ b/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1distinct_use_generic_class_specialized_at_generic_class.swift @@ -72,7 +72,7 @@ // CHECK-apple-SAME: }*, // CHECK-apple-SAME: i8*, // CHECK-apple-SAME: i8* -// CHECK-apple-SAME: }* @_DATA__TtC4mainP[[UNIQUE_ID_1]]5Value to [[INT]] +// CHECK-apple-SAME: }* @"_DATA_$s4main5Value[[UNIQUE_ID_1]]LLCyAA3BoxACLLCyAA5InnerACLLCySiGGGMf" to [[INT]] // CHECK-apple-SAME: ), // CHECK-apple-SAME: [[INT]] 2 // CHECK-apple-SAME: ), diff --git a/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1distinct_use_generic_enum.swift b/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1distinct_use_generic_enum.swift index 79017124c9905..e8e553c86a3ab 100644 --- a/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1distinct_use_generic_enum.swift +++ b/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1distinct_use_generic_enum.swift @@ -71,7 +71,7 @@ // CHECK-apple-SAME: }*, // CHECK-apple-SAME: i8*, // CHECK-apple-SAME: i8* -// CHECK-apple-SAME: }* @_DATA__TtC4mainP[[UNIQUE_ID_2:[a-zA-Z0-9_]+]]5Value to [[INT]] +// CHECK-apple-SAME: }* @"_DATA_$s4main5Value[[UNIQUE_ID_1]]CyAA6EitherACLLOySiGGMf" to [[INT]] // CHECK-apple-SAME: ), // CHECK-apple-SAME: [[INT]] 2 // CHECK-apple-SAME: ), diff --git a/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1distinct_use_generic_struct.swift b/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1distinct_use_generic_struct.swift index 6a5d625712422..ca71bd884d584 100644 --- 
a/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1distinct_use_generic_struct.swift +++ b/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-1distinct_use_generic_struct.swift @@ -71,7 +71,7 @@ // CHECK-apple-SAME: }*, // CHECK-apple-SAME: i8*, // CHECK-apple-SAME: i8* -// CHECK-apple-SAME: }* @_DATA__TtC4mainP[[UNIQUE_ID_2:[a-zA-Z0-9_]+]]5Value to [[INT]] +// CHECK-apple-SAME: }* @"_DATA_$s4main5Value[[UNIQUE_ID_1]]CyAA4LeftACLLVySiGGMf" to [[INT]] // CHECK-apple-SAME: ), // CHECK-apple-SAME: [[INT]] 2 // CHECK-apple-SAME: ), diff --git a/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-2ancestor-1du-1st_ancestor_generic-fileprivate-2nd_ancestor_nongeneric.swift b/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-2ancestor-1du-1st_ancestor_generic-fileprivate-2nd_ancestor_nongeneric.swift index d80cfd56a7cf5..d2e63cb5a268a 100644 --- a/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-2ancestor-1du-1st_ancestor_generic-fileprivate-2nd_ancestor_nongeneric.swift +++ b/test/IRGen/prespecialized-metadata/class-fileprivate-inmodule-1argument-2ancestor-1du-1st_ancestor_generic-fileprivate-2nd_ancestor_nongeneric.swift @@ -119,7 +119,7 @@ // CHECK-apple-SAME: }*, // CHECK-apple-SAME: i8*, // CHECK-apple-SAME: i8* -// CHECK-apple-SAME: }* @_DATA__TtC4mainP[[UNIQUE_ID_1]]5Value to [[INT]] +// CHECK-apple-SAME: }* @"_DATA_$s4main5Value[[UNIQUE_ID_1]]LLCySiGMf" to [[INT]] // CHECK-apple-SAME: ), // CHECK-apple-SAME: [[INT]] 2 // CHECK-apple-SAME: ), diff --git a/test/IRGen/prespecialized-metadata/class-with-differently-mangled-method-list.swift b/test/IRGen/prespecialized-metadata/class-with-differently-mangled-method-list.swift new file mode 100644 index 0000000000000..2b7835494771f --- /dev/null +++ b/test/IRGen/prespecialized-metadata/class-with-differently-mangled-method-list.swift @@ -0,0 +1,27 @@ +// RUN: %target-swift-frontend(mock-sdk: %clang-importer-sdk) 
-enable-objc-interop -enable-library-evolution -prespecialize-generic-metadata -target %module-target-future -emit-ir %s | %FileCheck %s -DINT=i%target-ptrsize -DALIGNMENT=%target-alignment --check-prefix=CHECK --check-prefix=CHECK-%target-vendor + +// REQUIRES: VENDOR=apple || OS=linux-gnu +// REQUIRES: objc_interop +// UNSUPPORTED: CPU=i386 && OS=ios +// UNSUPPORTED: CPU=armv7 && OS=ios +// UNSUPPORTED: CPU=armv7s && OS=ios + +import Foundation + +struct S{} +class Clazz { + @objc func foo() {} +} + + +@inline(never) +func consume(_ t: T) { + withExtendedLifetime(t) { t in } +} + +func doit() { + // CHECK: @"_INSTANCE_METHODS_$s4main5ClazzCyAA1SVGMf" = internal constant + consume(Clazz.self) +} + +doit() diff --git a/test/IRGen/property_descriptor.sil b/test/IRGen/property_descriptor.sil index df52659869d45..7da3ce2abacba 100644 --- a/test/IRGen/property_descriptor.sil +++ b/test/IRGen/property_descriptor.sil @@ -54,7 +54,7 @@ sil_property #ExternalGeneric.rw ( // CHECK: @"$s19property_descriptor15ExternalGenericV10computedROxvpMV" = // -- 0x0108_0000 - computed, readonly, has arguments, identified by indirect // CHECK-SAME: <{ , -// CHECK-SAME: @{{got.|__imp_}}id_computed +// CHECK-SAME: @{{got.|"\\01__imp__?}}id_computed // CHECK-SAME: [[GET_COMPUTEDRO:@keypath_get[.0-9]*]]{{(\.ptrauth)?}} // CHECK-SAME: [[GET_ARG_LAYOUT_COMPUTEDRO:@keypath_get_arg_layout[.0-9]*]]{{(\.ptrauth)?}} // -- default witness table @@ -68,7 +68,7 @@ sil_property #ExternalGeneric.computedRO ( // CHECK: @"$s19property_descriptor15ExternalGenericV10computedRWxvpMV" = // -- 0x01c8_0000 - computed, settable, mutating, has arguments, indirect id // CHECK-SAME: <{ , -// CHECK-SAME: @{{got.|__imp_}}id_computed +// CHECK-SAME: @{{got.|"\\01__imp__?}}id_computed // CHECK-SAME: [[GET_COMPUTEDRW:@keypath_get[.0-9]*]]{{(\.ptrauth)?}} // CHECK-SAME: [[SET_COMPUTEDRW:@keypath_set[.0-9]*]]{{(\.ptrauth)?}} // CHECK-SAME: [[GET_ARG_LAYOUT_COMPUTEDRW:@keypath_get_arg_layout[.0-9]*]]{{(\.ptrauth)?}} @@ 
-83,7 +83,7 @@ sil_property #ExternalGeneric.computedRW ( // CHECK: @"$s19property_descriptor15ExternalGenericVyxqd__cSHRd__luipMV" = // -- 0x01c8_0000 - computed, settable, mutating, has arguments, indirect id // CHECK-SAME: <{ , -// CHECK-SAME: @{{got.|__imp_}}id_computed +// CHECK-SAME: @{{got.|"\\01__imp__?}}id_computed // CHECK-SAME: [[GET_SUBSCRIPT:@keypath_get[.0-9]*]]{{(\.ptrauth)?}} // CHECK-SAME: [[SET_SUBSCRIPT:@keypath_set[.0-9]*]]{{(\.ptrauth)?}} // CHECK-SAME: [[GET_ARG_LAYOUT_SUBSCRIPT:@keypath_get_arg_layout[.0-9]*]]{{(\.ptrauth)?}} diff --git a/test/IRGen/protocol_conformance_records.swift b/test/IRGen/protocol_conformance_records.swift index fce8f2d76d559..bd6bee52c0b02 100644 --- a/test/IRGen/protocol_conformance_records.swift +++ b/test/IRGen/protocol_conformance_records.swift @@ -57,7 +57,7 @@ public struct NativeGenericType: Runcible { // -- protocol descriptor // CHECK-SAME: [[RUNCIBLE]] // -- type metadata -// CHECK-SAME: @"{{got.|__imp_}}$sSiMn" +// CHECK-SAME: @"{{got.|\\01__imp__?}}$sSiMn" // -- witness table // CHECK-SAME: @"$sSi28protocol_conformance_records8RuncibleAAWP" // -- reserved @@ -73,7 +73,7 @@ extension Int: Runcible { // -- protocol descriptor // CHECK-SAME: [[RUNCIBLE]] // -- nominal type descriptor -// CHECK-SAME: @"{{got.|__imp_}}$s16resilient_struct4SizeVMn" +// CHECK-SAME: @"{{got.|\\01__imp__?}}$s16resilient_struct4SizeVMn" // -- witness table // CHECK-SAME: @"$s16resilient_struct4SizeV28protocol_conformance_records8RuncibleADWP" // -- reserved @@ -132,9 +132,9 @@ extension NativeGenericType : Spoon where T: Spoon { // Retroactive conformance // CHECK-LABEL: @"$sSi18resilient_protocol22OtherResilientProtocol0B20_conformance_recordsMc" ={{ dllexport | protected | }}constant // -- protocol descriptor -// CHECK-SAME: @"{{got.|__imp_}}$s18resilient_protocol22OtherResilientProtocolMp" +// CHECK-SAME: @"{{got.|\\01__imp__?}}$s18resilient_protocol22OtherResilientProtocolMp" // -- nominal type descriptor -// CHECK-SAME: 
@"{{got.|__imp_}}$sSiMn" +// CHECK-SAME: @"{{got.|\\01__imp__?}}$sSiMn" // -- witness table pattern // CHECK-SAME: i32 0, // -- flags diff --git a/test/IRGen/protocol_resilience.sil b/test/IRGen/protocol_resilience.sil index ca055c97834fd..60a1b3228c181 100644 --- a/test/IRGen/protocol_resilience.sil +++ b/test/IRGen/protocol_resilience.sil @@ -116,13 +116,13 @@ protocol InternalProtocol { // CHECK-SAME: i32 3, // -- type metadata for associated type -// CHECK-SAME: @"{{got.|__imp_}}$s1T18resilient_protocol24ProtocolWithRequirementsPTl" +// CHECK-SAME: @"{{got.|\\01__imp__?}}$s1T18resilient_protocol24ProtocolWithRequirementsPTl" // CHECK-SAME: @"symbolic Si" -// CHECK-SAME: @"{{got.|__imp_}}$s18resilient_protocol24ProtocolWithRequirementsP5firstyyFTq" +// CHECK-SAME: @"{{got.|\\01__imp__?}}$s18resilient_protocol24ProtocolWithRequirementsP5firstyyFTq" // CHECK-SAME: @firstWitness -// CHECK-SAME: @"{{got.|__imp_}}$s18resilient_protocol24ProtocolWithRequirementsP6secondyyFTq" +// CHECK-SAME: @"{{got.|\\01__imp__?}}$s18resilient_protocol24ProtocolWithRequirementsP6secondyyFTq" // CHECK-SAME: @secondWitness // -- number of witness table entries diff --git a/test/IRGen/protocol_resilience_descriptors.swift b/test/IRGen/protocol_resilience_descriptors.swift index 69b7bbfcc92a5..5f03cd53755bb 100644 --- a/test/IRGen/protocol_resilience_descriptors.swift +++ b/test/IRGen/protocol_resilience_descriptors.swift @@ -46,7 +46,7 @@ import resilient_protocol // Resilient witness tables // ---------------------------------------------------------------------------- // CHECK-USAGE-LABEL: $s31protocol_resilience_descriptors34ConformsToProtocolWithRequirementsVyxG010resilient_A00fgH0AAMc" = -// CHECK-USAGE-SAME: {{got.|__imp_}}$s1T18resilient_protocol24ProtocolWithRequirementsPTl +// CHECK-USAGE-SAME: {{got.|\\01__imp__?}}$s1T18resilient_protocol24ProtocolWithRequirementsPTl // CHECK-USAGE-SAME: @"symbolic x" public struct ConformsToProtocolWithRequirements : ProtocolWithRequirements 
{ diff --git a/test/IRGen/ptrauth-partial-apply.sil b/test/IRGen/ptrauth-partial-apply.sil index 52e6a651aa0bb..ed2c59eecf25f 100644 --- a/test/IRGen/ptrauth-partial-apply.sil +++ b/test/IRGen/ptrauth-partial-apply.sil @@ -30,9 +30,9 @@ bb0(%0 : $@convention(thin) (Builtin.Int32, Builtin.Int32) -> (), %1 : $Builtin. // CHECK: [[T0:%.*]] = bitcast %swift.refcounted* %0 to [[CTXT_TY:<{ %swift.refcounted, i32, i32, i8\* }>]]* // CHECK: [[SLOT:%.*]] = getelementptr inbounds [[CTXT_TY]], [[CTXT_TY]]* [[T0]], i32 0, i32 3 // CHECK: [[T0:%.*]] = load i8*, i8** [[SLOT]], align 8 +// CHECK: [[T1:%.*]] = ptrtoint i8** [[SLOT]] to i64 +// CHECK: [[DISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 [[T1]], i64 7185) // CHECK: [[FN:%.*]] = bitcast i8* [[T0]] to void (i32, i32)* -// CHECK: [[T0:%.*]] = ptrtoint i8** [[SLOT]] to i64 -// CHECK: [[DISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 [[T0]], i64 7185) // CHECK: call swiftcc void [[FN]](i32 {{.*}}, i32 {{.*}}) [ "ptrauth"(i32 1, i64 [[DISC]]) ] sil @test_thick_indirect : $@convention(thin) (@callee_owned (Builtin.Int32, Builtin.Int32) -> (), Builtin.Int32) -> @owned @callee_owned () -> () { @@ -58,7 +58,7 @@ bb0(%0 : $@callee_owned (Builtin.Int32, Builtin.Int32) -> (), %1 : $Builtin.Int3 // CHECK: [[T0:%.*]] = bitcast %swift.refcounted* %0 to <{ %swift.refcounted, i32, i32, %swift.refcounted*, i8* }>* // CHECK: [[SLOT:%.*]] = getelementptr inbounds <{ %swift.refcounted, i32, i32, %swift.refcounted*, i8* }>, <{ %swift.refcounted, i32, i32, %swift.refcounted*, i8* }>* [[T0]], i32 0, i32 4 // CHECK: [[T0:%.*]] = load i8*, i8** [[SLOT]], align 8 +// CHECK: [[T1:%.*]] = ptrtoint i8** [[SLOT]] to i64 +// CHECK: [[DISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 [[T1]], i64 7185) // CHECK: [[FN:%.*]] = bitcast i8* [[T0]] to void (i32, i32, %swift.refcounted*)* -// CHECK: [[T0:%.*]] = ptrtoint i8** [[SLOT]] to i64 -// CHECK: [[DISC:%.*]] = call i64 @llvm.ptrauth.blend.i64(i64 [[T0]], i64 7185) // CHECK: call swiftcc void 
[[FN]](i32 {{.*}}, i32 {{.*}}, %swift.refcounted* {{.*}}) [ "ptrauth"(i32 1, i64 [[DISC]]) ] diff --git a/test/IRGen/unmanaged_objc_throw_func.swift b/test/IRGen/unmanaged_objc_throw_func.swift index 43dd9c2bce925..c8ad72381d033 100644 --- a/test/IRGen/unmanaged_objc_throw_func.swift +++ b/test/IRGen/unmanaged_objc_throw_func.swift @@ -1,5 +1,6 @@ // RUN: %target-swift-frontend -emit-ir %s | %FileCheck %s // REQUIRES: objc_interop +// REQUIRES: optimized_stdlib import Foundation @@ -67,5 +68,5 @@ import Foundation // CHECK-NEXT: %[[T12:.+]] = phi i{{32|64}} [ 0, %[[L7]] ], [ %[[T5]], %[[L2]] ] // CHECK-NEXT: %[[T13:.+]] = bitcast %T25unmanaged_objc_throw_func9SR_9035_CC* %{{.+}} to i8* // CHECK-NEXT: call void @llvm.objc.release(i8* %[[T13]]) -// CHECK-NEXT: %[[T14:.+]] = inttoptr i{{32|64}} %[[T12]] to %struct.__CFArray** -// CHECK-NEXT: ret %struct.__CFArray** %[[T14]] +// CHECK-NEXT: %[[T14:.+]] = inttoptr i{{32|64}} %[[T12]] to %struct.__CFArray* +// CHECK-NEXT: ret %struct.__CFArray* %[[T14]] diff --git a/test/IRGen/vtable_non_overridden.sil b/test/IRGen/vtable_non_overridden.sil index 39cc2dac4fd5a..617e6dbc6ea38 100644 --- a/test/IRGen/vtable_non_overridden.sil +++ b/test/IRGen/vtable_non_overridden.sil @@ -25,8 +25,8 @@ sil_vtable InternalA { } // -- we should still generate method descriptors for the elided methods -// CHECK-LABEL: @"$s21vtable_non_overridden9InternalAC3fooyyFTq" = -// CHECK-LABEL: @"$s21vtable_non_overridden9InternalAC3basyyFTq" = +// CHECK-LABEL: @"$s21vtable_non_overridden9InternalAC3fooyyFTq" = hidden constant %swift.method_descriptor {{.*}} section "{{(__TEXT,__const|\.rodata|\.rdata)}}" +// CHECK-LABEL: @"$s21vtable_non_overridden9InternalAC3basyyFTq" = hidden constant %swift.method_descriptor {{.*}} section "{{(__TEXT,__const|\.rodata|\.rdata)}}" // -- only overridden entries in internal method descriptor table // CHECK-LABEL: @"$s21vtable_non_overridden9InternalACMn" = diff --git 
a/test/Inputs/clang-importer-sdk/usr/include/ObjCConcurrency.h b/test/Inputs/clang-importer-sdk/usr/include/ObjCConcurrency.h index b9fe4c206a612..316717c1206ff 100644 --- a/test/Inputs/clang-importer-sdk/usr/include/ObjCConcurrency.h +++ b/test/Inputs/clang-importer-sdk/usr/include/ObjCConcurrency.h @@ -8,12 +8,19 @@ -(void)allOperationsWithCompletionHandler:(void (^)(NSArray *))completion; @end +typedef void (^CompletionHandler)(NSString * _Nullable, NSString * _Nullable_result, NSError * _Nullable); + @interface SlowServer : NSObject -(void)doSomethingSlow:(NSString *)operation completionHandler:(void (^)(NSInteger))handler; -(void)doSomethingDangerous:(NSString *)operation completionHandler:(void (^ _Nullable)(NSString *_Nullable, NSError * _Nullable))handler; -(void)checkAvailabilityWithCompletionHandler:(void (^)(BOOL isAvailable))completionHandler; +-(void)anotherExampleWithCompletionBlock:(void (^)(NSString *))block; +-(void)finalExampleWithReplyTo:(void (^)(NSString *))block; +-(void)replyingOperation:(NSString *)operation replyTo:(void (^)(NSString *))block; -(void)findAnswerAsynchronously:(void (^)(NSString *_Nullable, NSError * _Nullable))handler __attribute__((swift_name("findAnswer(completionHandler:)"))); -(BOOL)findAnswerFailinglyWithError:(NSError * _Nullable * _Nullable)error completion:(void (^)(NSString *_Nullable, NSError * _Nullable))handler __attribute__((swift_name("findAnswerFailingly(completionHandler:)"))); +-(void)findQAndAWithCompletionHandler:(void (^)(NSString *_Nullable_result, NSString *_Nullable answer, NSError * _Nullable))handler; +-(void)findQuestionableAnswersWithCompletionHandler:(CompletionHandler)handler; -(void)doSomethingFun:(NSString *)operation then:(void (^)(void))completionHandler; -(void)getFortuneAsynchronouslyWithCompletionHandler:(void (^)(NSString *_Nullable, NSError * _Nullable))handler; -(void)getMagicNumberAsynchronouslyWithSeed:(NSInteger)seed completionHandler:(void (^)(NSInteger, NSError * 
_Nullable))handler; @@ -27,6 +34,11 @@ -(NSInteger)doSomethingConflicted:(NSString *)operation; -(void)server:(NSString *)name restartWithCompletionHandler:(void (^)(void))block; -(void)server:(NSString *)name atPriority:(double)priority restartWithCompletionHandler:(void (^)(void))block; + +-(void)poorlyNamed:(NSString *)operation completionHandler:(void (^)(NSInteger))handler __attribute__((swift_async_name("bestName(_:)"))); + +-(void)customizedWithString:(NSString *)operation completionHandler:(void (^)(NSInteger))handler __attribute__((swift_name("customize(with:completionHandler:)"))) __attribute__((swift_async_name("customize(_:)"))); + @end @protocol RefrigeratorDelegate diff --git a/test/Interop/C/implementation-only-imports/Inputs/module.modulemap b/test/Interop/C/implementation-only-imports/Inputs/module.modulemap index dd845b25d568c..cd5b5d67c7df1 100644 --- a/test/Interop/C/implementation-only-imports/Inputs/module.modulemap +++ b/test/Interop/C/implementation-only-imports/Inputs/module.modulemap @@ -1,9 +1,9 @@ module UserA { - header "user_a.h" + header "user-a.h" export * } module UserB { - header "user_b.h" + header "user-b.h" export * } diff --git a/test/Interop/C/implementation-only-imports/Inputs/use-module-a.swift b/test/Interop/C/implementation-only-imports/Inputs/use-module-a.swift new file mode 100644 index 0000000000000..f806673847548 --- /dev/null +++ b/test/Interop/C/implementation-only-imports/Inputs/use-module-a.swift @@ -0,0 +1 @@ +@_exported import UserA diff --git a/test/Interop/C/implementation-only-imports/Inputs/use-module-b.swift b/test/Interop/C/implementation-only-imports/Inputs/use-module-b.swift new file mode 100644 index 0000000000000..7708e676ab393 --- /dev/null +++ b/test/Interop/C/implementation-only-imports/Inputs/use-module-b.swift @@ -0,0 +1 @@ +@_exported import UserB diff --git a/test/Interop/C/implementation-only-imports/Inputs/user-a.h b/test/Interop/C/implementation-only-imports/Inputs/user-a.h new file mode 
100644 index 0000000000000..4f6d821f9b7fb --- /dev/null +++ b/test/Interop/C/implementation-only-imports/Inputs/user-a.h @@ -0,0 +1,6 @@ +#ifndef TEST_INTEROP_C_IMPLEMENTATION_ONLY_IMPORTS_INPUTS_USER_A_H +#define TEST_INTEROP_C_IMPLEMENTATION_ONLY_IMPORTS_INPUTS_USER_A_H + +#include "helper.h" + +#endif // TEST_INTEROP_C_IMPLEMENTATION_ONLY_IMPORTS_INPUTS_USER_A_H diff --git a/test/Interop/C/implementation-only-imports/Inputs/user-b.h b/test/Interop/C/implementation-only-imports/Inputs/user-b.h new file mode 100644 index 0000000000000..7b7dbc7a6fd1e --- /dev/null +++ b/test/Interop/C/implementation-only-imports/Inputs/user-b.h @@ -0,0 +1,6 @@ +#ifndef TEST_INTEROP_C_IMPLEMENTATION_ONLY_IMPORTS_INPUTS_USER_B_H +#define TEST_INTEROP_C_IMPLEMENTATION_ONLY_IMPORTS_INPUTS_USER_B_H + +#include "helper.h" + +#endif // TEST_INTEROP_C_IMPLEMENTATION_ONLY_IMPORTS_INPUTS_USER_B_H diff --git a/test/Interop/C/implementation-only-imports/Inputs/user_a.h b/test/Interop/C/implementation-only-imports/Inputs/user_a.h deleted file mode 100644 index bebbd757c5561..0000000000000 --- a/test/Interop/C/implementation-only-imports/Inputs/user_a.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef TEST_INTEROP_C_IMPLEMENTATION_ONLY_IMPORTS_INPUTS_USERA_H -#define TEST_INTEROP_C_IMPLEMENTATION_ONLY_IMPORTS_INPUTS_USERA_H - -#include "helper.h" - -#endif // TEST_INTEROP_C_IMPLEMENTATION_ONLY_IMPORTS_INPUTS_USERA_H diff --git a/test/Interop/C/implementation-only-imports/Inputs/user_b.h b/test/Interop/C/implementation-only-imports/Inputs/user_b.h deleted file mode 100644 index 2beac678bf1d2..0000000000000 --- a/test/Interop/C/implementation-only-imports/Inputs/user_b.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef TEST_INTEROP_C_IMPLEMENTATION_ONLY_IMPORTS_INPUTS_USERB_H -#define TEST_INTEROP_C_IMPLEMENTATION_ONLY_IMPORTS_INPUTS_USERB_H - -#include "helper.h" - -#endif // TEST_INTEROP_C_IMPLEMENTATION_ONLY_IMPORTS_INPUTS_USERB_H diff --git 
a/test/Interop/C/implementation-only-imports/check-function-transitive-visibility-inversed.swift b/test/Interop/C/implementation-only-imports/check-function-transitive-visibility-inversed.swift new file mode 100644 index 0000000000000..6746f49d7a49b --- /dev/null +++ b/test/Interop/C/implementation-only-imports/check-function-transitive-visibility-inversed.swift @@ -0,0 +1,21 @@ +// RUN: %empty-directory(%t) +// RUN: mkdir %t/use_module_a %t/use_module_b +// RUN: %target-swift-frontend -enable-library-evolution -swift-version 5 -emit-module -o %t/use_module_a/UseModuleA.swiftmodule %S/Inputs/use-module-a.swift -I %S/Inputs +// RUN: %target-swift-frontend -enable-library-evolution -swift-version 5 -emit-module -o %t/use_module_b/UseModuleB.swiftmodule %S/Inputs/use-module-b.swift -I %S/Inputs + +// RUN: %target-swift-frontend -typecheck -swift-version 5 -I %t/use_module_a -I %t/use_module_b -I %S/Inputs %s + +// Swift should consider all sources for a decl and recognize that the +// decl is not hidden behind @_implementationOnly in all modules. + +// This test, as well as `check-function-transitive-visibility.swift` +// ensures that Swift looks into the transitive visible modules as well +// when looking for the `getFortyTwo()` decl. 
+ +import UseModuleA +@_implementationOnly import UseModuleB + +@inlinable +public func callFortyTwo() -> CInt { + return getFortyTwo() +} diff --git a/test/Interop/C/implementation-only-imports/check-function-transitive-visibility.swift b/test/Interop/C/implementation-only-imports/check-function-transitive-visibility.swift new file mode 100644 index 0000000000000..35515bfbcde54 --- /dev/null +++ b/test/Interop/C/implementation-only-imports/check-function-transitive-visibility.swift @@ -0,0 +1,21 @@ +// RUN: %empty-directory(%t) +// RUN: mkdir %t/use_module_a %t/use_module_b +// RUN: %target-swift-frontend -enable-library-evolution -swift-version 5 -emit-module -o %t/use_module_a/UseModuleA.swiftmodule %S/Inputs/use-module-a.swift -I %S/Inputs +// RUN: %target-swift-frontend -enable-library-evolution -swift-version 5 -emit-module -o %t/use_module_b/UseModuleB.swiftmodule %S/Inputs/use-module-b.swift -I %S/Inputs + +// RUN: %target-swift-frontend -typecheck -swift-version 5 -I %t/use_module_a -I %t/use_module_b -I %S/Inputs %s + +// Swift should consider all sources for a decl and recognize that the +// decl is not hidden behind @_implementationOnly in all modules. + +// This test, as well as `check-function-transitive-visibility-inversed.swift` +// ensures that Swift looks into the transitive visible modules as well +// when looking for the `getFortyTwo()` decl. 
+ +import UseModuleA +@_implementationOnly import UseModuleB + +@inlinable +public func callFortyTwo() -> CInt { + return getFortyTwo() +} diff --git a/test/Interop/C/implementation-only-imports/check-function-visibility-inversed.swift b/test/Interop/C/implementation-only-imports/check-function-visibility-inversed.swift new file mode 100644 index 0000000000000..974349324ea83 --- /dev/null +++ b/test/Interop/C/implementation-only-imports/check-function-visibility-inversed.swift @@ -0,0 +1,17 @@ +// RUN: %empty-directory(%t) +// RUN: %target-swift-frontend -emit-module -o %t/FortyTwo.swiftmodule -I %S/Inputs %s + +// Swift should consider all sources for a decl and recognize that the +// decl is not hidden behind @_implementationOnly in all modules. + +// This test, as well as `check-function-visibility.swift` +// checks that the `getFortyTwo` decl can be found when at least one of the +// modules is not `@_implementationOnly`. + +import UserA +@_implementationOnly import UserB + +@_inlineable +public func callFortyTwo() -> CInt { + return getFortyTwo() +} diff --git a/test/Interop/C/implementation-only-imports/check-function-visibility.swift b/test/Interop/C/implementation-only-imports/check-function-visibility.swift new file mode 100644 index 0000000000000..b427725e1e155 --- /dev/null +++ b/test/Interop/C/implementation-only-imports/check-function-visibility.swift @@ -0,0 +1,17 @@ +// RUN: %empty-directory(%t) +// RUN: %target-swift-frontend -emit-module -o %t/FortyTwo.swiftmodule -I %S/Inputs %s + +// Swift should consider all sources for a decl and recognize that the +// decl is not hidden behind @_implementationOnly in all modules. + +// This test, as well as `check-function-visibility-inversed.swift` +// checks that the `getFortyTwo()` decl can be found when at least one of the +// modules is not `@_implementationOnly`. 
+ +@_implementationOnly import UserA +import UserB + +@_inlineable +public func callFortyTwo() -> CInt { + return getFortyTwo() +} diff --git a/test/Interop/C/implementation-only-imports/prefer-a-visible-symbol-over-implementation-only-ones.swift b/test/Interop/C/implementation-only-imports/prefer-a-visible-symbol-over-implementation-only-ones.swift deleted file mode 100644 index a7a04c655547c..0000000000000 --- a/test/Interop/C/implementation-only-imports/prefer-a-visible-symbol-over-implementation-only-ones.swift +++ /dev/null @@ -1,27 +0,0 @@ -// RUN: %empty-directory(%t) -// RUN: %target-swift-frontend -emit-module -o %t/FortyTwo.swiftmodule -I %S/Inputs %s - -// REQUIRES: SR-13785 - -// TODO: Fix @_implementationOnly to consider all symbol sources - -// If a symbol comes from two modules, one of which is marked as -// @_implementationOnly, Swift may choose the @_implementationOnly source -// and then error out due to the symbol being hidden. - -// Swift should consider all sources for the symbol and recognize that the -// symbol is not hidden behind @_implementationOnly in all modules. - -// E.g: -// In this test case, UserA and UserB both textually include `helper.h`, -// therefore both export `getFortyTwo()`. -// This test verifies that even though Swift chooses UserA.getFortyTwo(), we -// shouldn't get an error, because the symbol is also exported from UserB. 
- -@_implementationOnly import UserA -import UserB - -@_inlineable -public func callFortyTwo() -> CInt { - return getFortyTwo() -} diff --git a/test/Interop/C/modules/print-qualified-clang-types/Inputs/module.modulemap b/test/Interop/C/modules/print-qualified-clang-types/Inputs/module.modulemap index bc5f5f4f50bab..6e3bd2f43ad45 100644 --- a/test/Interop/C/modules/print-qualified-clang-types/Inputs/module.modulemap +++ b/test/Interop/C/modules/print-qualified-clang-types/Inputs/module.modulemap @@ -1,13 +1,7 @@ module ForeignA { - // Nest the header in a sub-module to make sure these are handled correctly. - module Sub { - header "foreign-a.h" - } + header "foreign-a.h" } module ForeignB { - // Nest the header in a sub-module to make sure these are handled correctly. - module Sub { - header "foreign-b.h" - } + header "foreign-b.h" } diff --git a/test/Interop/C/modules/print-qualified-clang-types/Inputs/textual-header.h b/test/Interop/C/modules/print-qualified-clang-types/Inputs/textual-header.h index a53012248d706..e99c9349c57a9 100644 --- a/test/Interop/C/modules/print-qualified-clang-types/Inputs/textual-header.h +++ b/test/Interop/C/modules/print-qualified-clang-types/Inputs/textual-header.h @@ -1 +1 @@ -typedef struct {} ForeignStruct; +struct ForeignStruct {}; diff --git a/test/Interop/C/modules/print-qualified-clang-types/print-qualified-clang-types.swift b/test/Interop/C/modules/print-qualified-clang-types/print-qualified-clang-types.swift index 13a78745481e8..a14102af9b264 100644 --- a/test/Interop/C/modules/print-qualified-clang-types/print-qualified-clang-types.swift +++ b/test/Interop/C/modules/print-qualified-clang-types/print-qualified-clang-types.swift @@ -34,3 +34,5 @@ // RUN: %target-swift-frontend -typecheck -swift-version 5 %t/main_module/MainModule.swiftinterface -I %t/helper_module -I %S/Inputs // CHECK: public func funcTakingForeignStruct(_ param: ForeignB.ForeignStruct) + +// REQUIRES: SR-13032 diff --git 
a/test/Interop/Cxx/class/constructors-irgen.swift b/test/Interop/Cxx/class/constructors-irgen.swift index 7e83794fc5e4e..6c13f75cee119 100644 --- a/test/Interop/Cxx/class/constructors-irgen.swift +++ b/test/Interop/Cxx/class/constructors-irgen.swift @@ -1,7 +1,7 @@ // Target-specific tests for C++ constructor call code generation. // RUN: %swift -module-name Swift -target x86_64-apple-macosx10.9 -dump-clang-diagnostics -I %S/Inputs -enable-cxx-interop -emit-ir %s -parse-stdlib -parse-as-library -disable-legacy-type-info | %FileCheck %s -check-prefix=ITANIUM_X64 -// RUN: %swift -module-name Swift -target armv7-none-linux-androideabi -dump-clang-diagnostics -I %S/Inputs -enable-cxx-interop -emit-ir %s -parse-stdlib -parse-as-library -disable-legacy-type-info | %FileCheck %s -check-prefix=ITANIUM_ARM +// RUN: %swift -module-name Swift -target armv7-unknown-linux-androideabi -dump-clang-diagnostics -I %S/Inputs -enable-cxx-interop -emit-ir %s -parse-stdlib -parse-as-library -disable-legacy-type-info | %FileCheck %s -check-prefix=ITANIUM_ARM // RUN: %swift -module-name Swift -target x86_64-unknown-windows-msvc -dump-clang-diagnostics -I %S/Inputs -enable-cxx-interop -emit-ir %s -parse-stdlib -parse-as-library -disable-legacy-type-info | %FileCheck %s -check-prefix=MICROSOFT_X64 import Constructors diff --git a/test/Interop/Cxx/implementation-only-imports/Inputs/decl-a.h b/test/Interop/Cxx/implementation-only-imports/Inputs/decl-a.h new file mode 100644 index 0000000000000..72998faab7e62 --- /dev/null +++ b/test/Interop/Cxx/implementation-only-imports/Inputs/decl-a.h @@ -0,0 +1,6 @@ +#ifndef TEST_INTEROP_CXX_IMPLEMENTATION_ONLY_IMPORTS_INPUTS_DECL_A_H +#define TEST_INTEROP_CXX_IMPLEMENTATION_ONLY_IMPORTS_INPUTS_DECL_A_H + +inline int getFortySomething() { return 42; }; + +#endif // TEST_INTEROP_CXX_IMPLEMENTATION_ONLY_IMPORTS_INPUTS_DECL_A_H diff --git a/test/Interop/Cxx/implementation-only-imports/Inputs/decl-b.h 
b/test/Interop/Cxx/implementation-only-imports/Inputs/decl-b.h new file mode 100644 index 0000000000000..35839a51051af --- /dev/null +++ b/test/Interop/Cxx/implementation-only-imports/Inputs/decl-b.h @@ -0,0 +1,6 @@ +#ifndef TEST_INTEROP_CXX_IMPLEMENTATION_ONLY_IMPORTS_INPUTS_DECL_B_H +#define TEST_INTEROP_CXX_IMPLEMENTATION_ONLY_IMPORTS_INPUTS_DECL_B_H + +inline int getFortySomething() { return 46; }; + +#endif // TEST_INTEROP_CXX_IMPLEMENTATION_ONLY_IMPORTS_INPUTS_DECL_B_H diff --git a/test/Interop/Cxx/implementation-only-imports/Inputs/helper.h b/test/Interop/Cxx/implementation-only-imports/Inputs/helper.h new file mode 100644 index 0000000000000..5c4d405bb5d61 --- /dev/null +++ b/test/Interop/Cxx/implementation-only-imports/Inputs/helper.h @@ -0,0 +1,20 @@ +#ifndef TEST_INTEROP_CXX_IMPLEMENTATION_ONLY_IMPORTS_INPUTS_HELPER_H +#define TEST_INTEROP_CXX_IMPLEMENTATION_ONLY_IMPORTS_INPUTS_HELPER_H + +inline int getFortyTwo() { return 42; } + +class MagicWrapper { +public: + int _number; + MagicWrapper(){_number = 2;}; + MagicWrapper(int number) : _number(number){}; + MagicWrapper operator - (MagicWrapper other) { + return MagicWrapper{_number - other._number}; + } +}; + +inline MagicWrapper operator + (MagicWrapper lhs, MagicWrapper rhs) { + return MagicWrapper{lhs._number + rhs._number}; +} + +#endif // TEST_INTEROP_CXX_IMPLEMENTATION_ONLY_IMPORTS_INPUTS_HELPER_H diff --git a/test/Interop/Cxx/implementation-only-imports/Inputs/module.modulemap b/test/Interop/Cxx/implementation-only-imports/Inputs/module.modulemap new file mode 100644 index 0000000000000..b3d9edc456782 --- /dev/null +++ b/test/Interop/Cxx/implementation-only-imports/Inputs/module.modulemap @@ -0,0 +1,24 @@ +module UserA { + header "user-a.h" + export * +} + +module UserB { + header "user-b.h" + export * +} + +module UserC { + header "user-c.h" + export * +} + +module DeclA { + header "decl-a.h" + export * +} + +module DeclB { + header "decl-b.h" + export * +} diff --git 
a/test/Interop/Cxx/implementation-only-imports/Inputs/use-module-a.swift b/test/Interop/Cxx/implementation-only-imports/Inputs/use-module-a.swift new file mode 100644 index 0000000000000..f806673847548 --- /dev/null +++ b/test/Interop/Cxx/implementation-only-imports/Inputs/use-module-a.swift @@ -0,0 +1 @@ +@_exported import UserA diff --git a/test/Interop/Cxx/implementation-only-imports/Inputs/use-module-b.swift b/test/Interop/Cxx/implementation-only-imports/Inputs/use-module-b.swift new file mode 100644 index 0000000000000..7708e676ab393 --- /dev/null +++ b/test/Interop/Cxx/implementation-only-imports/Inputs/use-module-b.swift @@ -0,0 +1 @@ +@_exported import UserB diff --git a/test/Interop/Cxx/implementation-only-imports/Inputs/user-a.h b/test/Interop/Cxx/implementation-only-imports/Inputs/user-a.h new file mode 100644 index 0000000000000..fb7daccebef6d --- /dev/null +++ b/test/Interop/Cxx/implementation-only-imports/Inputs/user-a.h @@ -0,0 +1,6 @@ +#ifndef TEST_INTEROP_CXX_IMPLEMENTATION_ONLY_IMPORTS_INPUTS_USER_A_H +#define TEST_INTEROP_CXX_IMPLEMENTATION_ONLY_IMPORTS_INPUTS_USER_A_H + +#include "helper.h" + +#endif // TEST_INTEROP_CXX_IMPLEMENTATION_ONLY_IMPORTS_INPUTS_USER_A_H diff --git a/test/Interop/Cxx/implementation-only-imports/Inputs/user-b.h b/test/Interop/Cxx/implementation-only-imports/Inputs/user-b.h new file mode 100644 index 0000000000000..1daeacb030ee3 --- /dev/null +++ b/test/Interop/Cxx/implementation-only-imports/Inputs/user-b.h @@ -0,0 +1,6 @@ +#ifndef TEST_INTEROP_CXX_IMPLEMENTATION_ONLY_IMPORTS_INPUTS_USER_B_H +#define TEST_INTEROP_CXX_IMPLEMENTATION_ONLY_IMPORTS_INPUTS_USER_B_H + +#include "helper.h" + +#endif // TEST_INTEROP_CXX_IMPLEMENTATION_ONLY_IMPORTS_INPUTS_USER_B_H diff --git a/test/Interop/Cxx/implementation-only-imports/Inputs/user-c.h b/test/Interop/Cxx/implementation-only-imports/Inputs/user-c.h new file mode 100644 index 0000000000000..db273238536a2 --- /dev/null +++ 
b/test/Interop/Cxx/implementation-only-imports/Inputs/user-c.h @@ -0,0 +1,6 @@ +#ifndef TEST_INTEROP_CXX_IMPLEMENTATION_ONLY_IMPORTS_INPUTS_USER_C_H +#define TEST_INTEROP_CXX_IMPLEMENTATION_ONLY_IMPORTS_INPUTS_USER_C_H + +class MagicWrapper; + +#endif // TEST_INTEROP_CXX_IMPLEMENTATION_ONLY_IMPORTS_INPUTS_USER_C_H diff --git a/test/Interop/Cxx/implementation-only-imports/check-constructor-visibility-inversed.swift b/test/Interop/Cxx/implementation-only-imports/check-constructor-visibility-inversed.swift new file mode 100644 index 0000000000000..40ed6da45458f --- /dev/null +++ b/test/Interop/Cxx/implementation-only-imports/check-constructor-visibility-inversed.swift @@ -0,0 +1,17 @@ +// RUN: %empty-directory(%t) +// RUN: %target-swift-frontend -emit-module -o %t/FortyTwo.swiftmodule -I %S/Inputs %s -enable-cxx-interop + +// Swift should consider all sources for a decl and recognize that the +// decl is not hidden behind @_implementationOnly in all modules. + +// This test, as well as `check-constructor-visibility.swift` checks +// that the constructor decl can be found when at least one of the +// modules is not `@_implementationOnly`. + +@_implementationOnly import UserA +import UserB + +@_inlineable +public func createAWrapper() { + let _ = MagicWrapper() +} diff --git a/test/Interop/Cxx/implementation-only-imports/check-constructor-visibility.swift b/test/Interop/Cxx/implementation-only-imports/check-constructor-visibility.swift new file mode 100644 index 0000000000000..e2fe2fe0ee3c2 --- /dev/null +++ b/test/Interop/Cxx/implementation-only-imports/check-constructor-visibility.swift @@ -0,0 +1,17 @@ +// RUN: %empty-directory(%t) +// RUN: %target-swift-frontend -emit-module -o %t/FortyTwo.swiftmodule -I %S/Inputs %s -enable-cxx-interop + +// Swift should consider all sources for a decl and recognize that the +// decl is not hidden behind @_implementationOnly in all modules. 
+ +// This test, as well as `check-constructor-visibility-inversed.swift` checks +// that the constructor decl can be found when at least one of the +// modules is not `@_implementationOnly`. + +import UserA +@_implementationOnly import UserB + +@_inlineable +public func createAWrapper() { + let _ = MagicWrapper() +} diff --git a/test/Interop/Cxx/implementation-only-imports/check-decls-are-identical.swift b/test/Interop/Cxx/implementation-only-imports/check-decls-are-identical.swift new file mode 100644 index 0000000000000..d43ce85a1c15b --- /dev/null +++ b/test/Interop/Cxx/implementation-only-imports/check-decls-are-identical.swift @@ -0,0 +1,15 @@ +// RUN: %empty-directory(%t) +// RUN: not %target-swift-frontend -emit-module -o %t/FortyTwo.swiftmodule -I %S/Inputs -enable-cxx-interop %s 2>&1 | %FileCheck %s + +// This test checks that Swift recognizes that the DeclA and DeclB provide +// different implementations for `getFortySomething()` + +@_implementationOnly import DeclA +import DeclB + +@_inlineable +public func callFortySomething() -> CInt { + return getFortySomething() +} + +// CHECK: 'getFortySomething' has different definitions in different modules diff --git a/test/Interop/Cxx/implementation-only-imports/check-function-transitive-visibility-inversed.swift b/test/Interop/Cxx/implementation-only-imports/check-function-transitive-visibility-inversed.swift new file mode 100644 index 0000000000000..77def9dd320ae --- /dev/null +++ b/test/Interop/Cxx/implementation-only-imports/check-function-transitive-visibility-inversed.swift @@ -0,0 +1,21 @@ +// RUN: %empty-directory(%t) +// RUN: mkdir %t/use_module_a %t/use_module_b +// RUN: %target-swift-frontend -enable-library-evolution -swift-version 5 -emit-module -o %t/use_module_a/UseModuleA.swiftmodule %S/Inputs/use-module-a.swift -I %S/Inputs -enable-cxx-interop +// RUN: %target-swift-frontend -enable-library-evolution -swift-version 5 -emit-module -o %t/use_module_b/UseModuleB.swiftmodule 
%S/Inputs/use-module-b.swift -I %S/Inputs -enable-cxx-interop + +// RUN: %target-swift-frontend -typecheck -swift-version 5 -I %t/use_module_a -I %t/use_module_b -I %S/Inputs -enable-cxx-interop %s + +// Swift should consider all sources for a decl and recognize that the +// decl is not hidden behind @_implementationOnly in all modules. + +// This test, as well as `check-function-transitive-visibility.swift` +// ensures that Swift looks into the transitive visible modules as well +// when looking for the `getFortyTwo()` decl. + +@_implementationOnly import UseModuleA +import UseModuleB + +@inlinable +public func callFortyTwo() -> CInt { + return getFortyTwo() +} diff --git a/test/Interop/Cxx/implementation-only-imports/check-function-transitive-visibility.swift b/test/Interop/Cxx/implementation-only-imports/check-function-transitive-visibility.swift new file mode 100644 index 0000000000000..efb75d73401d2 --- /dev/null +++ b/test/Interop/Cxx/implementation-only-imports/check-function-transitive-visibility.swift @@ -0,0 +1,22 @@ +// RUN: %empty-directory(%t) +// RUN: mkdir %t/use_module_a %t/use_module_b +// RUN: %target-swift-frontend -enable-library-evolution -swift-version 5 -emit-module -o %t/use_module_a/UseModuleA.swiftmodule %S/Inputs/use-module-a.swift -I %S/Inputs -enable-cxx-interop +// RUN: %target-swift-frontend -enable-library-evolution -swift-version 5 -emit-module -o %t/use_module_b/UseModuleB.swiftmodule %S/Inputs/use-module-b.swift -I %S/Inputs -enable-cxx-interop + +// RUN: %target-swift-frontend -typecheck -swift-version 5 -I %t/use_module_a -I %t/use_module_b -I %S/Inputs -enable-cxx-interop %s + + +// Swift should consider all sources for a decl and recognize that the +// decl is not hidden behind @_implementationOnly in all modules. + +// This test, as well as `check-function-transitive-visibility-inversed.swift` +// ensures that Swift looks into the transitive visible modules as well +// when looking for the `getFortyTwo()` decl. 
+ +import UseModuleA +@_implementationOnly import UseModuleB + +@inlinable +public func callFortyTwo() -> CInt { + return getFortyTwo() +} diff --git a/test/Interop/Cxx/implementation-only-imports/check-function-visibility-inversed.swift b/test/Interop/Cxx/implementation-only-imports/check-function-visibility-inversed.swift new file mode 100644 index 0000000000000..0f24a16bf8567 --- /dev/null +++ b/test/Interop/Cxx/implementation-only-imports/check-function-visibility-inversed.swift @@ -0,0 +1,17 @@ +// RUN: %empty-directory(%t) +// RUN: %target-swift-frontend -emit-module -o %t/FortyTwo.swiftmodule -I %S/Inputs -enable-cxx-interop %s + +// Swift should consider all sources for a decl and recognize that the +// decl is not hidden behind @_implementationOnly in all modules. + +// This test, as well as `check-function-visibility.swift` +// checks that the `getFortyTwo()` decl can be found when at least one of the +// modules is not `@_implementationOnly`. + +import UserA +@_implementationOnly import UserB + +@_inlineable +public func callFortyTwo() -> CInt { + return getFortyTwo() +} diff --git a/test/Interop/Cxx/implementation-only-imports/check-function-visibility.swift b/test/Interop/Cxx/implementation-only-imports/check-function-visibility.swift new file mode 100644 index 0000000000000..2b3d6989851d0 --- /dev/null +++ b/test/Interop/Cxx/implementation-only-imports/check-function-visibility.swift @@ -0,0 +1,17 @@ +// RUN: %empty-directory(%t) +// RUN: %target-swift-frontend -emit-module -o %t/FortyTwo.swiftmodule -I %S/Inputs -enable-cxx-interop %s + +// Swift should consider all sources for a decl and recognize that the +// decl is not hidden behind @_implementationOnly in all modules. + +// This test, as well as `check-function-visibility-inversed.swift` +// checks that the `getFortyTwo()` decl can be found when at least one of the +// modules is not `@_implementationOnly`. 
+ +@_implementationOnly import UserA +import UserB + +@_inlineable +public func callFortyTwo() -> CInt { + return getFortyTwo() +} diff --git a/test/Interop/Cxx/implementation-only-imports/check-operator-visibility-inversed.swift b/test/Interop/Cxx/implementation-only-imports/check-operator-visibility-inversed.swift new file mode 100644 index 0000000000000..bec834521a146 --- /dev/null +++ b/test/Interop/Cxx/implementation-only-imports/check-operator-visibility-inversed.swift @@ -0,0 +1,29 @@ +// RUN: %empty-directory(%t) +// RUN: %target-swift-frontend -emit-module -o %t/FortyTwo.swiftmodule -I %S/Inputs %s -enable-cxx-interop + +// Swift should consider all sources for a decl and recognize that the +// decl is not hidden behind @_implementationOnly in all modules. + +// This test, as well as `check-operator-visibility.swift` checks +// that the operator decl can be found when at least one of the +// modules is not `@_implementationOnly`. + + +import UserA +@_implementationOnly import UserB + +// Operator `+` is a non-member function. +@_inlineable +public func addWrappers() { + let wrapperA = MagicWrapper() + let wrapperB = MagicWrapper() + let _ = wrapperA + wrapperB +} + +// Operator `-` is a member function. +@_inlineable +public func subtractWrappers() { + var wrapperA = MagicWrapper() + let wrapperB = MagicWrapper() + let _ = wrapperA - wrapperB +} diff --git a/test/Interop/Cxx/implementation-only-imports/check-operator-visibility.swift b/test/Interop/Cxx/implementation-only-imports/check-operator-visibility.swift new file mode 100644 index 0000000000000..40ea146ce442e --- /dev/null +++ b/test/Interop/Cxx/implementation-only-imports/check-operator-visibility.swift @@ -0,0 +1,28 @@ +// RUN: %empty-directory(%t) +// RUN: %target-swift-frontend -emit-module -o %t/FortyTwo.swiftmodule -I %S/Inputs %s -enable-cxx-interop + +// Swift should consider all sources for a decl and recognize that the +// decl is not hidden behind @_implementationOnly in all modules. 
+ +// This test, as well as `check-operator-visibility-inversed.swift` checks +// that the operator decl can be found when at least one of the +// modules is not `@_implementationOnly`. + +@_implementationOnly import UserA +import UserB + +// Operator `+` is a non-member function. +@_inlineable +public func addWrappers() { + let wrapperA = MagicWrapper() + let wrapperB = MagicWrapper() + let _ = wrapperA + wrapperB +} + +// Operator `-` is a member function. +@_inlineable +public func subtractWrappers() { + var wrapperA = MagicWrapper() + let wrapperB = MagicWrapper() + let _ = wrapperA - wrapperB +} diff --git a/test/Interop/Cxx/implementation-only-imports/skip-forward-declarations.swift b/test/Interop/Cxx/implementation-only-imports/skip-forward-declarations.swift new file mode 100644 index 0000000000000..5655581e3df2c --- /dev/null +++ b/test/Interop/Cxx/implementation-only-imports/skip-forward-declarations.swift @@ -0,0 +1,15 @@ +// RUN: %empty-directory(%t) +// RUN: not %target-swift-frontend -emit-module -o %t/FortyTwo.swiftmodule -I %S/Inputs -enable-cxx-interop %s 2>&1 | %FileCheck %s + +// This test checks that forward declarations are not considered +// when determining the visibility of the decl. 
+ +@_implementationOnly import UserA +import UserC + +@_inlineable +public func createAWrapper() { + let _ = MagicWrapper() +} + +// CHECK: struct 'MagicWrapper' cannot be used in an '@inlinable' function because 'UserA' was imported implementation-only diff --git a/test/Interop/Cxx/templates/Inputs/class-template-in-namespace.h b/test/Interop/Cxx/templates/Inputs/class-template-in-namespace.h new file mode 100644 index 0000000000000..f479597a846ed --- /dev/null +++ b/test/Interop/Cxx/templates/Inputs/class-template-in-namespace.h @@ -0,0 +1,8 @@ +namespace Space { + +template struct Ship; +template struct Ship {}; + +using Orbiter = Ship; + +} // namespace Space diff --git a/test/Interop/Cxx/templates/Inputs/function-templates.h b/test/Interop/Cxx/templates/Inputs/function-templates.h index 8b3dfb52f3e42..97574281ddb8b 100644 --- a/test/Interop/Cxx/templates/Inputs/function-templates.h +++ b/test/Interop/Cxx/templates/Inputs/function-templates.h @@ -21,6 +21,11 @@ template R returns_template(T a, U b) { // Same here: template void cannot_infer_template() {} +struct HasVariadicMemeber { + void test1(...) {} + void test2(int, ...) {} +}; + // TODO: We should support these types. Until then, make sure we don't crash when importing. template void testPackExpansion(Ts...) { } @@ -64,4 +69,19 @@ void cassini(T, U) { } template void magellan(T&) { } -} +} // namespace Orbiters + +// We can't import these (and may never be able to in the case of "_Atomic"), +// but don't crash while trying. 
+namespace Unimportable { + +template struct Dependent {}; +template void takesDependent(Dependent d) {} + +void takesAtomic(_Atomic(int) a) {} + +struct HasImposibleMember { + void memberTakesAtomic(_Atomic(int) a) {} +}; + +} // namespace Unimportable diff --git a/test/Interop/Cxx/templates/Inputs/member-templates.h b/test/Interop/Cxx/templates/Inputs/member-templates.h new file mode 100644 index 0000000000000..acbea958a4606 --- /dev/null +++ b/test/Interop/Cxx/templates/Inputs/member-templates.h @@ -0,0 +1,31 @@ +struct HasMemberTemplates { + template T add(T a, T b) { return a + b; } + + template T addTwoTemplates(T a, U b) { return a + b; } + + template int addAll(int a, T b, U c) { return a + b + c; } + + template T passThrough(T val) { return val; } + + template T passThroughConst(const T val) { return val; } + + template T passThroughOnConst(T val) const { return val; } + + template T passThroughConstOnConst(const T val) const { + return val; + } + + template void doNothingConstRef(const T &val) {} + + template void make42Ref(T &val) {} +}; + +template struct TemplateClassWithMemberTemplates { + T value; + + template void setValue(U val) { value = val; } + + TemplateClassWithMemberTemplates(T val) : value(val) {} +}; + +using IntWrapper = TemplateClassWithMemberTemplates; diff --git a/test/Interop/Cxx/templates/Inputs/module.modulemap b/test/Interop/Cxx/templates/Inputs/module.modulemap index 38b80f8a37736..2c687d92dbeca 100644 --- a/test/Interop/Cxx/templates/Inputs/module.modulemap +++ b/test/Interop/Cxx/templates/Inputs/module.modulemap @@ -57,3 +57,11 @@ module ClassTemplateTemplateParameter { module ClassTemplateWithTypedef { header "class-template-with-typedef.h" } + +module ClassTemplateInNamespace { + header "class-template-in-namespace.h" +} + +module MemberTemplates { + header "member-templates.h" +} diff --git a/test/Interop/Cxx/templates/class-template-in-namespace-module-interface.swift 
b/test/Interop/Cxx/templates/class-template-in-namespace-module-interface.swift new file mode 100644 index 0000000000000..afefbdb3c2f41 --- /dev/null +++ b/test/Interop/Cxx/templates/class-template-in-namespace-module-interface.swift @@ -0,0 +1,8 @@ +// RUN: %target-swift-ide-test -print-module -module-to-print=ClassTemplateInNamespace -I %S/Inputs -source-filename=x -enable-cxx-interop | %FileCheck %s + +// CHECK: enum Space { +// CHECK: struct __CxxTemplateInstN5Space4ShipIJFvbEEEE { +// CHECK: init() +// CHECK: } +// CHECK: typealias Orbiter = Space.__CxxTemplateInstN5Space4ShipIJFvbEEEE +// CHECK: } diff --git a/test/Interop/Cxx/templates/function-template-module-interface.swift b/test/Interop/Cxx/templates/function-template-module-interface.swift index b8307a70ee237..351d92bc2a5c3 100644 --- a/test/Interop/Cxx/templates/function-template-module-interface.swift +++ b/test/Interop/Cxx/templates/function-template-module-interface.swift @@ -7,6 +7,13 @@ // CHECK: func returns_template(_ a: T, _ b: U) -> R // CHECK: func cannot_infer_template() +// CHECK: struct HasVariadicMemeber { +// CHECK: @available(*, unavailable, message: "Variadic function is unavailable") +// CHECK: mutating func test1(_ varargs: Any...) +// CHECK: @available(*, unavailable, message: "Variadic function is unavailable") +// CHECK: mutating func test2(_: Int32, _ varargs: Any...) 
+// CHECK: } + // CHECK: func lvalueReference(_ ref: UnsafeMutablePointer) // CHECK: func constLvalueReference(_: UnsafePointer) // CHECK: func forwardingReference(_: UnsafeMutablePointer) diff --git a/test/Interop/Cxx/templates/member-templates-module-interface.swift b/test/Interop/Cxx/templates/member-templates-module-interface.swift new file mode 100644 index 0000000000000..0045dfa3c196b --- /dev/null +++ b/test/Interop/Cxx/templates/member-templates-module-interface.swift @@ -0,0 +1,21 @@ +// RUN: %target-swift-ide-test -print-module -module-to-print=MemberTemplates -I %S/Inputs -source-filename=x -enable-cxx-interop | %FileCheck %s + +// CHECK: struct HasMemberTemplates { +// CHECK: mutating func add(_ a: T, _ b: T) -> T +// CHECK: mutating func addTwoTemplates(_ a: T, _ b: U) -> T +// CHECK: mutating func addAll(_ a: Int32, _ b: T, _ c: U) -> Int32 +// CHECK: mutating func passThrough(_ val: T) -> T +// CHECK: mutating func passThroughConst(_ val: T) -> T +// CHECK: mutating func passThroughOnConst(_ val: T) -> T +// CHECK: mutating func passThroughConstOnConst(_ val: T) -> T +// CHECK: mutating func doNothingConstRef(_ val: UnsafePointer) +// CHECK: mutating func make42Ref(_ val: UnsafeMutablePointer) +// CHECK: } + +// CHECK: struct __CxxTemplateInst32TemplateClassWithMemberTemplatesIiE { +// CHECK: var value: Int32 +// CHECK: init(_ val: Int32) +// CHECK: mutating func setValue(_ val: U) +// CHECK: } + +// CHECK: typealias IntWrapper = __CxxTemplateInst32TemplateClassWithMemberTemplatesIiE diff --git a/test/Interop/Cxx/templates/member-templates-silgen.swift b/test/Interop/Cxx/templates/member-templates-silgen.swift new file mode 100644 index 0000000000000..06d79cd851931 --- /dev/null +++ b/test/Interop/Cxx/templates/member-templates-silgen.swift @@ -0,0 +1,50 @@ +// RUN: %target-swift-emit-sil %s -I %S/Inputs -enable-cxx-interop | %FileCheck %s + +// We can't yet call member functions correctly on Windows (SR-13129). 
+// XFAIL: OS=windows-msvc +// REQUIRES: fixing-after-30630 + +import MemberTemplates + +// CHECK-LABEL: sil hidden @$s4main9basicTestyyF : $@convention(thin) () -> () + +// CHECK: [[ADD:%.*]] = function_ref @_ZN18HasMemberTemplates3addIiEET_S1_S1_ : $@convention(c) (Int32, Int32, @inout HasMemberTemplates) -> Int32 +// CHECK: apply [[ADD]]({{.*}}) : $@convention(c) (Int32, Int32, @inout HasMemberTemplates) -> Int32 + +// CHECK: [[ADD_TWO_TEMPLATES:%.*]] = function_ref @_ZN18HasMemberTemplates15addTwoTemplatesIiiEET_S1_T0_ : $@convention(c) (Int32, Int32, @inout HasMemberTemplates) -> Int32 // user: %26 +// CHECK: apply [[ADD_TWO_TEMPLATES]]({{.*}}) : $@convention(c) (Int32, Int32, @inout HasMemberTemplates) -> Int32 + +// CHECK: [[ADD_ALL:%.*]] = function_ref @_ZN18HasMemberTemplates6addAllIiiEEiiT_T0_ : $@convention(c) (Int32, Int32, Int32, @inout HasMemberTemplates) -> Int32 // user: %39 +// CHECK: apply [[ADD_ALL]]({{.*}}) : $@convention(c) (Int32, Int32, Int32, @inout HasMemberTemplates) -> Int32 + +// CHECK: [[DO_NOTHING:%.*]] = function_ref @_ZN18HasMemberTemplates17doNothingConstRefIiEEvRKT_ : $@convention(c) (UnsafePointer, @inout HasMemberTemplates) -> () // user: %48 +// CHECK: apply [[DO_NOTHING]]({{.*}}) : $@convention(c) (UnsafePointer, @inout HasMemberTemplates) -> () + +// CHECK-LABEL: end sil function '$s4main9basicTestyyF' +func basicTest() { + var i: Int32 = 0 + var obj = HasMemberTemplates() + obj.add(i, i) + obj.addTwoTemplates(i, i) + obj.addAll(i, i, i) + obj.doNothingConstRef(&i) +} + +// CHECK-LABEL: sil hidden_external [clang HasMemberTemplates._ZN18HasMemberTemplates3addIiEET_S1_S1_] @_ZN18HasMemberTemplates3addIiEET_S1_S1_ : $@convention(c) (Int32, Int32, @inout HasMemberTemplates) -> Int32 + +// CHECK-LABEL: sil hidden_external [clang HasMemberTemplates._ZN18HasMemberTemplates15addTwoTemplatesIiiEET_S1_T0_] @_ZN18HasMemberTemplates15addTwoTemplatesIiiEET_S1_T0_ : $@convention(c) (Int32, Int32, @inout HasMemberTemplates) -> Int32 + +// 
CHECK-LABEL: sil hidden_external [clang HasMemberTemplates._ZN18HasMemberTemplates6addAllIiiEEiiT_T0_] @_ZN18HasMemberTemplates6addAllIiiEEiiT_T0_ : $@convention(c) (Int32, Int32, Int32, @inout HasMemberTemplates) -> Int32 + +// CHECK-LABEL: sil hidden_external [clang HasMemberTemplates._ZN18HasMemberTemplates17doNothingConstRefIiEEvRKT_] @_ZN18HasMemberTemplates17doNothingConstRefIiEEvRKT_ : $@convention(c) (UnsafePointer, @inout HasMemberTemplates) -> () + +// CHECK-LABEL: sil hidden @$s4main12testSetValueyyF : $@convention(thin) () -> () + +// CHECK: [[SET_VALUE:%.*]] = function_ref @_ZN32TemplateClassWithMemberTemplatesIiE8setValueIlEEvT_ : $@convention(c) (Int, @inout __CxxTemplateInst32TemplateClassWithMemberTemplatesIiE) -> () +// CHECK: apply [[SET_VALUE]]({{.*}}) : $@convention(c) (Int, @inout __CxxTemplateInst32TemplateClassWithMemberTemplatesIiE) -> () + +// CHECK-LABEL: end sil function '$s4main12testSetValueyyF' +func testSetValue() { + var w = IntWrapper(11) + w.setValue(42) +} diff --git a/test/Interop/Cxx/templates/member-templates.swift b/test/Interop/Cxx/templates/member-templates.swift new file mode 100644 index 0000000000000..0a062f14535d4 --- /dev/null +++ b/test/Interop/Cxx/templates/member-templates.swift @@ -0,0 +1,26 @@ +// RUN: %target-run-simple-swift(-I %S/Inputs -Xfrontend -enable-cxx-interop) +// +// REQUIRES: executable_test +// +// We can't yet call member functions correctly on Windows (SR-13129). 
+// XFAIL: OS=windows-msvc +// REQUIRES: fixing-after-30630 + +import MemberTemplates +import StdlibUnittest + +var TemplatesTestSuite = TestSuite("Member Templates") + +TemplatesTestSuite.test("Set value - IntWrapper") { + var w = IntWrapper(11) + w.setValue(42) + expectEqual(w.value, 42) +} + +TemplatesTestSuite.test("Templated Add") { + var h = HasMemberTemplates() + expectEqual(h.add(2, 1), 3) + expectEqual(h.addTwoTemplates(2, 1), 3) +} + +runAllTests() diff --git a/test/Interpreter/actor_class_forbid_objc_assoc_objects.swift b/test/Interpreter/actor_class_forbid_objc_assoc_objects.swift new file mode 100644 index 0000000000000..62a7333237907 --- /dev/null +++ b/test/Interpreter/actor_class_forbid_objc_assoc_objects.swift @@ -0,0 +1,178 @@ +// RUN: %empty-directory(%t) +// RUN: %target-swiftc_driver -Xfrontend -enable-experimental-concurrency %s -o %t/out +// RUN: %target-run %t/out + +// REQUIRES: concurrency +// REQUIRES: objc_interop +// REQUIRES: executable_test + +import ObjectiveC +import _Concurrency +import StdlibUnittest + +defer { runAllTests() } + +var Tests = TestSuite("Actor.AssocObject") + +@available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) +final actor class Actor { +} + +if #available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) { + Tests.test("final class crash when set assoc object") + .crashOutputMatches("objc_setAssociatedObject called on instance") + .code { + expectCrashLater() + let x = Actor() + objc_setAssociatedObject(x, "myKey", "myValue", .OBJC_ASSOCIATION_RETAIN) + } +} + +@available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) +actor class Actor2 { +} + +if #available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) { + Tests.test("non-final class crash when set assoc object") + .crashOutputMatches("objc_setAssociatedObject called on instance") + .code { + expectCrashLater() + let x = Actor2() + objc_setAssociatedObject(x, "myKey", "myValue", .OBJC_ASSOCIATION_RETAIN) + } +} + +@available(macOS 10.4.4, iOS 
12.2, watchOS 5.2, tvOS 12.2, *) +class Actor3 : Actor2 {} + +if #available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) { + Tests.test("non-final subclass crash when set assoc object") + .crashOutputMatches("objc_setAssociatedObject called on instance") + .code { + expectCrashLater() + let x = Actor3() + objc_setAssociatedObject(x, "myKey", "myValue", .OBJC_ASSOCIATION_RETAIN) + } +} + +@available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) +final class Actor3Final : Actor2 {} + +if #available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) { + Tests.test("final subclass crash when set assoc object") + .crashOutputMatches("objc_setAssociatedObject called on instance") + .code { + expectCrashLater() + let x = Actor3Final() + objc_setAssociatedObject(x, "myKey", "myValue", .OBJC_ASSOCIATION_RETAIN) + } +} + +@available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) +class Actor4 : Actor2 { + var state: T + init(state: T) { self.state = state } +} + +if #available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) { + Tests.test("generic subclass crash when set assoc object") + .crashOutputMatches("objc_setAssociatedObject called on instance") + .code { + expectCrashLater() + let x = Actor4(state: 5) + objc_setAssociatedObject(x, "myKey", "myValue", .OBJC_ASSOCIATION_RETAIN) + } +} + +@available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) +actor class Actor5 { + var state: T + init(state: T) { self.state = state } +} + +if #available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) { + Tests.test("base generic class crash when set assoc object") + .crashOutputMatches("objc_setAssociatedObject called on instance") + .code { + expectCrashLater() + let x = Actor5(state: 5) + objc_setAssociatedObject(x, "myKey", "myValue", .OBJC_ASSOCIATION_RETAIN) + } + + Tests.test("base generic class metatype crash when set assoc object") + .crashOutputMatches("objc_setAssociatedObject called on instance") + .code { + expectCrashLater() + let x = 
Actor5.self + objc_setAssociatedObject(x, "myKey", "myValue", .OBJC_ASSOCIATION_RETAIN) + } +} + +@available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) +class Actor6 : Actor5 { + override init(state: T) { super.init(state: state) } +} + +if #available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) { + Tests.test("sub-generic class base generic class crash when set assoc object") + .crashOutputMatches("objc_setAssociatedObject called on instance") + .code { + expectCrashLater() + let x = Actor6(state: 5) + objc_setAssociatedObject(x, "myKey", "myValue", .OBJC_ASSOCIATION_RETAIN) + } +} + +@available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) +final class Actor6Final : Actor5 { + override init(state: T) { super.init(state: state) } +} + +if #available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) { + Tests.test("final sub-generic class base generic class crash when set assoc object") + .crashOutputMatches("objc_setAssociatedObject called on instance") + .code { + expectCrashLater() + let x = Actor6Final(state: 5) + objc_setAssociatedObject(x, "myKey", "myValue", .OBJC_ASSOCIATION_RETAIN) + } + + Tests.test("final sub-generic class base generic class crash when set assoc object2") + .code { + let x = Actor6Final(state: 5) + print(type(of: x)) + } + + Tests.test("final sub-generic class metatype, base generic class crash when set assoc object") + .crashOutputMatches("objc_setAssociatedObject called on instance") + .code { + expectCrashLater() + let x = Actor6Final.self + objc_setAssociatedObject(x, "myKey", "myValue", .OBJC_ASSOCIATION_RETAIN) + } +} + +@available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) +actor class ActorNSObjectSubKlass : NSObject {} + +if #available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) { + Tests.test("no crash when inherit from nsobject") + .code { + let x = ActorNSObjectSubKlass() + objc_setAssociatedObject(x, "myKey", "myValue", .OBJC_ASSOCIATION_RETAIN) + } +} + +@available(macOS 10.4.4, iOS 12.2, 
watchOS 5.2, tvOS 12.2, *) +actor class ActorNSObjectSubKlassGeneric : NSObject { + var state: T + init(state: T) { self.state = state } +} + +if #available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) { + Tests.test("no crash when generic inherit from nsobject") + .code { + let x = ActorNSObjectSubKlassGeneric(state: 5) + objc_setAssociatedObject(x, "myKey", "myValue", .OBJC_ASSOCIATION_RETAIN) + } +} diff --git a/test/Interpreter/actor_subclass_metatypes.swift b/test/Interpreter/actor_subclass_metatypes.swift new file mode 100644 index 0000000000000..90cdabfdb291c --- /dev/null +++ b/test/Interpreter/actor_subclass_metatypes.swift @@ -0,0 +1,46 @@ +// RUN: %empty-directory(%t) +// RUN: %target-swiftc_driver -Xfrontend -enable-experimental-concurrency %s -o %t/out +// RUN: %target-run %t/out + +// REQUIRES: concurrency +// REQUIRES: objc_interop +// REQUIRES: executable_test + +import ObjectiveC +import _Concurrency +import StdlibUnittest + +defer { runAllTests() } + +var Tests = TestSuite("Actor.SubClass.Metatype") + +actor class Actor5 { + var state: T + init(state: T) { self.state = state } +} + +Tests.test("base generic class") + .code { + let x = Actor5(state: 5) + print(type(of: x)) +} + +class Actor6 : Actor5 { + override init(state: T) { super.init(state: state) } +} + +Tests.test("non-final sub-generic class parent generic class crash") + .code { + let x = Actor6(state: 5) + print(type(of: x)) +} + +final class Actor6Final : Actor5 { + override init(state: T) { super.init(state: state) } +} + +Tests.test("final sub-generic class parent generic class crash") + .code { + let x = Actor6Final(state: 5) + print(type(of: x)) +} diff --git a/test/Interpreter/class_forbid_objc_assoc_objects.swift b/test/Interpreter/class_forbid_objc_assoc_objects.swift new file mode 100644 index 0000000000000..37e438a2cd66a --- /dev/null +++ b/test/Interpreter/class_forbid_objc_assoc_objects.swift @@ -0,0 +1,239 @@ +// RUN: %target-run-simple-swift +// RUN: 
%target-run-simple-swift(-O) + +// REQUIRES: objc_interop +// REQUIRES: executable_test + +import ObjectiveC +import StdlibUnittest + +defer { runAllTests() } + +var Tests = TestSuite("AssocObject") + +@available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) +final class AllowedToHaveAssocObject { +} + +if #available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) { + Tests.test("no crash when set assoc object, assign") { + let x = AllowedToHaveAssocObject() + objc_setAssociatedObject(x, "myKey", "myValue", .OBJC_ASSOCIATION_ASSIGN) + } + + Tests.test("no crash when set assoc object, copy") { + let x = AllowedToHaveAssocObject() + objc_setAssociatedObject(x, "myKey", "myValue", .OBJC_ASSOCIATION_COPY) + } + + Tests.test("no crash when set assoc object, copy_nonatomic") { + let x = AllowedToHaveAssocObject() + objc_setAssociatedObject(x, "myKey", "myValue", .OBJC_ASSOCIATION_COPY_NONATOMIC) + } + + Tests.test("no crash when set assoc object, retain") { + let x = AllowedToHaveAssocObject() + objc_setAssociatedObject(x, "myKey", "myValue", .OBJC_ASSOCIATION_RETAIN) + } + + Tests.test("no crash when set assoc object, retain_nonatomic") { + let x = AllowedToHaveAssocObject() + objc_setAssociatedObject(x, "myKey", "myValue", .OBJC_ASSOCIATION_RETAIN_NONATOMIC) + } +} + +@available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) +@_semantics("objc.forbidAssociatedObjects") +final class UnableToHaveAssocObjects { +} + +if #available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) { + Tests.test("crash when set assoc object, assign") + .crashOutputMatches("objc_setAssociatedObject called on instance") + .code { + expectCrashLater() + let x = UnableToHaveAssocObjects() + objc_setAssociatedObject(x, "myKey", "myValue", .OBJC_ASSOCIATION_ASSIGN) + } + + Tests.test("crash when set assoc object, copy") + .crashOutputMatches("objc_setAssociatedObject called on instance") + .code { + expectCrashLater() + let x = UnableToHaveAssocObjects() + 
objc_setAssociatedObject(x, "myKey", "myValue", .OBJC_ASSOCIATION_COPY) + } + + Tests.test("crash when set assoc object, copy_nonatomic") + .crashOutputMatches("objc_setAssociatedObject called on instance") + .code { + expectCrashLater() + let x = UnableToHaveAssocObjects() + objc_setAssociatedObject(x, "myKey", "myValue", .OBJC_ASSOCIATION_COPY_NONATOMIC) + } + + Tests.test("crash when set assoc object, retain") + .crashOutputMatches("objc_setAssociatedObject called on instance") + .code { + expectCrashLater() + let x = UnableToHaveAssocObjects() + objc_setAssociatedObject(x, "myKey", "myValue", .OBJC_ASSOCIATION_RETAIN) + } + + Tests.test("crash when set assoc object, retain_nonatomic") + .crashOutputMatches("objc_setAssociatedObject called on instance") + .code { + expectCrashLater() + let x = UnableToHaveAssocObjects() + objc_setAssociatedObject(x, "myKey", "myValue", .OBJC_ASSOCIATION_RETAIN_NONATOMIC) + } +} + +@available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) +@_semantics("objc.forbidAssociatedObjects") +final class UnableToHaveAssocObjectsGeneric { + var state: T + + init(state: T) { self.state = state } +} + +if #available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) { + Tests.test("crash when set assoc object (generic)") + .crashOutputMatches("objc_setAssociatedObject called on instance") + .code { + expectCrashLater() + let x = UnableToHaveAssocObjectsGeneric(state: 5) + objc_setAssociatedObject(x, "myKey", "myValue", .OBJC_ASSOCIATION_RETAIN) + } +} + +// In this case, we mark the child. This is unsound since we will get different +// answers since the type checker isn't enforcing this. 
+ +@available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) +class UnsoundAbleToHaveAssocObjectsParentClass { +} + +@available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) +@_semantics("objc.forbidAssociatedObjects") +final class UnsoundUnableToHaveAssocObjectsSubClass : UnsoundAbleToHaveAssocObjectsParentClass { +} + +if #available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) { + Tests.test("no crash when set assoc object set only on child subclass, but assoc to parent") + .code { + let x = UnsoundAbleToHaveAssocObjectsParentClass() + objc_setAssociatedObject(x, "myKey", "myValue", .OBJC_ASSOCIATION_RETAIN) + } + + Tests.test("crash when set assoc object set only on child subclass") + .crashOutputMatches("objc_setAssociatedObject called on instance") + .code { + expectCrashLater() + let x = UnsoundUnableToHaveAssocObjectsSubClass() + objc_setAssociatedObject(x, "myKey", "myValue", .OBJC_ASSOCIATION_RETAIN) + } +} + +// In this case, we mark the parent. It seems like the bit is propagated... I am +// not sure. 
+@available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) +@_semantics("objc.forbidAssociatedObjects") +class UnsoundAbleToHaveAssocObjectsParentClass2 { +} + +@available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) +final class UnsoundUnableToHaveAssocObjectsSubClass2 : UnsoundAbleToHaveAssocObjectsParentClass2 { +} + +if #available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) { + Tests.test("crash when set assoc object set only on parent class") + .crashOutputMatches("objc_setAssociatedObject called on instance") + .code { + expectCrashLater() + let x = UnsoundUnableToHaveAssocObjectsSubClass2() + objc_setAssociatedObject(x, "myKey", "myValue", .OBJC_ASSOCIATION_RETAIN) + } +} + +@available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) +class UnsoundUnableToHaveAssocObjectsSubClass3 : UnsoundAbleToHaveAssocObjectsParentClass2 { +} + +if #available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) { + Tests.test("crash when set assoc object set only on parent class, child not final") + .crashOutputMatches("objc_setAssociatedObject called on instance") + .code { + expectCrashLater() + let x = UnsoundUnableToHaveAssocObjectsSubClass3() + objc_setAssociatedObject(x, "myKey", "myValue", .OBJC_ASSOCIATION_RETAIN) + } +} + +// More Generic Tests + +// In this case, we mark the child. This is unsound since we will get different +// answers since the type checker isn't enforcing this. 
+@available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) +class GenericAbleToHaveAssocObjectsParentClass { + public var state: T + init(state: T) { self.state = state } +} + +@available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) +@_semantics("objc.forbidAssociatedObjects") +final class GenericUnableToHaveAssocObjectsSubClass : GenericAbleToHaveAssocObjectsParentClass { +} + +if #available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) { + Tests.test("no crash when set assoc object set only on child subclass, but assoc to parent") + .code { + let x = GenericAbleToHaveAssocObjectsParentClass(state: 5) + objc_setAssociatedObject(x, "myKey", "myValue", .OBJC_ASSOCIATION_RETAIN) + } + + Tests.test("crash when set assoc object set only on child subclass") + .crashOutputMatches("objc_setAssociatedObject called on instance") + .code { + expectCrashLater() + let x = GenericUnableToHaveAssocObjectsSubClass(state: 5) + objc_setAssociatedObject(x, "myKey", "myValue", .OBJC_ASSOCIATION_RETAIN) + } +} + +// In this case, we mark the parent. It seems like the bit is propagated... I am +// not sure. 
+@available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) +@_semantics("objc.forbidAssociatedObjects") +class GenericAbleToHaveAssocObjectsParentClass2 { + public var state: T + init(state: T) { self.state = state } +} + +@available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) +final class GenericUnableToHaveAssocObjectsSubClass2 : GenericAbleToHaveAssocObjectsParentClass2 { +} + +if #available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) { + Tests.test("crash when set assoc object set only on parent class") + .crashOutputMatches("objc_setAssociatedObject called on instance") + .code { + expectCrashLater() + let x = GenericUnableToHaveAssocObjectsSubClass2(state: 5) + objc_setAssociatedObject(x, "myKey", "myValue", .OBJC_ASSOCIATION_RETAIN) + } +} + +@available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) +class GenericUnableToHaveAssocObjectsSubClass3 : GenericAbleToHaveAssocObjectsParentClass2 { +} + +if #available(macOS 10.4.4, iOS 12.2, watchOS 5.2, tvOS 12.2, *) { + Tests.test("crash when set assoc object set only on parent class, child not final") + .crashOutputMatches("objc_setAssociatedObject called on instance") + .code { + expectCrashLater() + let x = GenericUnableToHaveAssocObjectsSubClass3(state: 5) + objc_setAssociatedObject(x, "myKey", "myValue", .OBJC_ASSOCIATION_RETAIN) + } +} diff --git a/test/ModuleInterface/loading-remarks.swift b/test/ModuleInterface/loading-remarks.swift new file mode 100644 index 0000000000000..a389696f369b0 --- /dev/null +++ b/test/ModuleInterface/loading-remarks.swift @@ -0,0 +1,14 @@ +/// Test the -Rmodule-loading flag. +// RUN: %empty-directory(%t) + +/// Create a simple module and interface. +// RUN: echo 'public func publicFunction() {}' > %t/TestModule.swift +// RUN: %target-swift-frontend -typecheck %t/TestModule.swift -emit-module-interface-path %t/TestModule.swiftinterface -swift-version 5 + +/// Use -Rmodule-loading in a client and look for the diagnostics output. 
+// RUN: %target-swift-frontend -typecheck %s -I %t -Rmodule-loading 2>&1 | %FileCheck %s + +import TestModule +// CHECK: remark: loaded module at {{.*}}SwiftShims-{{.*}}.pcm +// CHECK: remark: loaded module at {{.*}}Swift.swiftmodule{{.*}}.swiftmodule +// CHECK: remark: loaded module at {{.*}}TestModule.swiftinterface diff --git a/test/ModuleInterface/result_builders.swift b/test/ModuleInterface/result_builders.swift index 031551c1497b9..20985284e0e80 100644 --- a/test/ModuleInterface/result_builders.swift +++ b/test/ModuleInterface/result_builders.swift @@ -63,4 +63,7 @@ public protocol ProtocolWithBuilderProperty { // CHECK: @ResultBuilders.TupleBuilder var myVar: Self.Assoc { get } @TupleBuilder var myVar: Assoc { get } + + // CHECK: @ResultBuilders.TupleBuilder func myFunc(_ t1: T1, _ t2: T2) -> (T1, T2) + @TupleBuilder func myFunc(_ t1: T1, _ t2: T2) -> (T1, T2) } diff --git a/test/Parse/ConditionalCompilation/armAndroidTarget.swift b/test/Parse/ConditionalCompilation/armAndroidTarget.swift index 8dcd3bbe2b7e7..5372b7416deed 100644 --- a/test/Parse/ConditionalCompilation/armAndroidTarget.swift +++ b/test/Parse/ConditionalCompilation/armAndroidTarget.swift @@ -1,5 +1,5 @@ -// RUN: %swift -typecheck %s -verify -target armv7-none-linux-androideabi -disable-objc-interop -parse-stdlib -// RUN: %swift-ide-test -test-input-complete -source-filename=%s -target armv7-none-linux-androideabi +// RUN: %swift -typecheck %s -verify -target armv7-unknown-linux-androideabi -disable-objc-interop -parse-stdlib +// RUN: %swift-ide-test -test-input-complete -source-filename=%s -target armv7-unknown-linux-androideabi #if os(Linux) // This block should not parse. 
diff --git a/test/Parse/dollar_identifier.swift b/test/Parse/dollar_identifier.swift index 3a39bf8975cbe..287ca04f951bc 100644 --- a/test/Parse/dollar_identifier.swift +++ b/test/Parse/dollar_identifier.swift @@ -62,14 +62,48 @@ func escapedDollarAnd() { `$abc` = 3 } +// Test that we disallow user-defined $-prefixed identifiers. However, the error +// should not be emitted on $-prefixed identifiers that are not considered +// declarations. + func $declareWithDollar() { // expected-error{{cannot declare entity named '$declareWithDollar'}} - var $foo = 17 // expected-error{{cannot declare entity named '$foo'}} - // expected-warning@-1 {{initialization of variable '$foo' was never used; consider replacing with assignment to '_' or removing it}} + var $foo: Int { // expected-error{{cannot declare entity named '$foo'}} + get { 0 } + set($value) {} // expected-error{{cannot declare entity named '$value'}} + } func $bar() { } // expected-error{{cannot declare entity named '$bar'}} func wibble( $a: Int, // expected-error{{cannot declare entity named '$a'}} $b c: Int) { } // expected-error{{cannot declare entity named '$b'}} -} + let _: (Int) -> Int = { + [$capture = 0] // expected-error{{cannot declare entity named '$capture'}} + $a in // expected-error{{cannot declare entity named '$a'}} + $capture + } + let ($a: _, _) = (0, 0) // expected-error{{cannot declare entity named '$a'}} + $label: if true { // expected-error{{cannot declare entity named '$label'}} + break $label + } + switch 0 { + @$dollar case _: // expected-error {{unknown attribute '$dollar'}} + break + } + if #available($Dummy 9999, *) {} // expected-warning {{unrecognized platform name '$Dummy'}} + @_swift_native_objc_runtime_base($Dollar) + class $Class {} // expected-error{{cannot declare entity named '$Class'; the '$' prefix is reserved}} + enum $Enum {} // expected-error{{cannot declare entity named '$Enum'; the '$' prefix is reserved}} + struct $Struct { // expected-error{{cannot declare entity named 
'$Struct'; the '$' prefix is reserved}} + @_projectedValueProperty($dummy) + let property: Never + } +} +protocol $Protocol {} // expected-error {{cannot declare entity named '$Protocol'; the '$' prefix is reserved}} +precedencegroup $Precedence { // expected-error {{cannot declare entity named '$Precedence'; the '$' prefix is reserved}} + higherThan: $Precedence // expected-error {{cycle in 'higherThan' relation}} +} +infix operator **: $Precedence +#$UnknownDirective() // expected-error {{use of unknown directive '#$UnknownDirective'}} + // SR-13232 @propertyWrapper diff --git a/test/Parse/try.swift b/test/Parse/try.swift index 4fb7223f9d5df..83187df89622b 100644 --- a/test/Parse/try.swift +++ b/test/Parse/try.swift @@ -182,7 +182,8 @@ let _: Int = try? foo() // expected-error {{value of optional type 'Int?' not un class X {} func test(_: X) {} func producesObject() throws -> AnyObject { return X() } -test(try producesObject()) // expected-error {{'AnyObject' is not convertible to 'X'; did you mean to use 'as!' to force downcast?}} {{26-26= as! X}} +test(try producesObject()) // expected-error {{'AnyObject' is not convertible to 'X'}} +// expected-note@-1{{did you mean to use 'as!' to force downcast?}} {{26-26= as! X}} _ = "a\(try maybeThrow())b" _ = try "a\(maybeThrow())b" diff --git a/test/Parse/try_swift5.swift b/test/Parse/try_swift5.swift index ec057527ed988..c7b2180b3eb7d 100644 --- a/test/Parse/try_swift5.swift +++ b/test/Parse/try_swift5.swift @@ -183,7 +183,8 @@ let _: Int = try? foo() // expected-error {{value of optional type 'Int?' not un class X {} func test(_: X) {} func producesObject() throws -> AnyObject { return X() } -test(try producesObject()) // expected-error {{'AnyObject' is not convertible to 'X'; did you mean to use 'as!' to force downcast?}} {{26-26= as! X}} +test(try producesObject()) // expected-error {{'AnyObject' is not convertible to 'X'}} +// expected-note@-1{{did you mean to use 'as!' to force downcast?}} {{26-26= as! 
X}} _ = "a\(try maybeThrow())b" _ = try "a\(maybeThrow())b" @@ -263,7 +264,7 @@ let _: Int??? = try? producer.produceDoubleOptionalInt() // good let _: String = try? producer.produceDoubleOptionalInt() // expected-error {{cannot convert value of type 'Int??' to specified type 'String'}} // rdar://problem/46742002 -protocol Dummy : class {} +protocol Dummy : AnyObject {} class F { func wait() throws -> T { fatalError() } diff --git a/test/PlaygroundTransform/implicit_return_never.swift b/test/PlaygroundTransform/implicit_return_never.swift index dca3f752bd9ee..c94577632789c 100644 --- a/test/PlaygroundTransform/implicit_return_never.swift +++ b/test/PlaygroundTransform/implicit_return_never.swift @@ -13,7 +13,7 @@ func f() -> Int { fatalError() -// CRASH-CHECK: {{[fF]}}atal error: file {{.*}}/main.swift, line [[@LINE-1]] +// CRASH-CHECK: {{.*}}/main.swift:[[@LINE-1]]: Fatal error } f() diff --git a/test/PlaygroundTransform/placeholder.swift b/test/PlaygroundTransform/placeholder.swift index 734237ad1ef02..291a562e96673 100644 --- a/test/PlaygroundTransform/placeholder.swift +++ b/test/PlaygroundTransform/placeholder.swift @@ -10,12 +10,11 @@ // status doesn't reflect whether its child process crashed or not. So "not // --crash %target-run ..." always fails when testing for the iOS Simulator. // not.py also works on win32, where ! does not. -// Checking for ".[Ff]atal" because of d03a575279c. 
func f(crash crash: Bool) -> Int { if crash { return <#T#> - // CRASH-CHECK: {{[fF]}}atal error: attempt to evaluate editor placeholder: file {{.*}}/main.swift, line [[@LINE-1]] + // CRASH-CHECK: {{.*}}/main.swift:[[@LINE-1]]: Fatal error: attempt to evaluate editor placeholder } else { return 42 } diff --git a/test/SIL/Parser/async.sil b/test/SIL/Parser/async.sil index 6516b47029eea..d8f3db691d2fd 100644 --- a/test/SIL/Parser/async.sil +++ b/test/SIL/Parser/async.sil @@ -37,15 +37,15 @@ bb0(%fn : $@async () -> ()): sil @async_continuation : $@async () -> () { // CHECK-NEXT: bb entry: - // CHECK-NEXT: [[CONT:%.*]] = get_async_continuation $Builtin.Int32 - %c = get_async_continuation $Builtin.Int32 + // CHECK-NEXT: [[CONT:%.*]] = get_async_continuation Builtin.Int32 + %c = get_async_continuation Builtin.Int32 // CHECK-NEXT: // function_ref // CHECK-NEXT: function_ref %f = function_ref @not_async_test : $@convention(thin) () -> () // CHECK-NEXT: apply apply %f() : $@convention(thin) () -> () - // CHECK-NEXT: await_async_continuation [[CONT]] : $UnsafeContinuation, resume [[RESUME:bb[0-9]+]] - await_async_continuation %c : $UnsafeContinuation, resume bb1 + // CHECK-NEXT: await_async_continuation [[CONT]] : $Builtin.RawUnsafeContinuation, resume [[RESUME:bb[0-9]+]] + await_async_continuation %c : $Builtin.RawUnsafeContinuation, resume bb1 // CHECK-NEXT: {{^ *$}} // CHECK-NEXT: [[RESUME]]([[RVALUE:%.*]] : $Builtin.Int32): bb1(%r : $Builtin.Int32): @@ -56,15 +56,15 @@ bb1(%r : $Builtin.Int32): sil @async_continuation_throws : $@async () -> () { // CHECK-NEXT: bb entry: - // CHECK-NEXT: [[CONT:%.*]] = get_async_continuation [throws] $Builtin.Int32 - %c = get_async_continuation [throws] $Builtin.Int32 + // CHECK-NEXT: [[CONT:%.*]] = get_async_continuation [throws] Builtin.Int32 + %c = get_async_continuation [throws] Builtin.Int32 // CHECK-NEXT: // function_ref // CHECK-NEXT: function_ref %f = function_ref @not_async_test : $@convention(thin) () -> () // CHECK-NEXT: apply 
apply %f() : $@convention(thin) () -> () - // CHECK-NEXT: await_async_continuation [[CONT]] : $UnsafeThrowingContinuation, resume [[RESUME:bb[0-9]+]], error [[ERROR:bb[0-9]+]] - await_async_continuation %c : $UnsafeThrowingContinuation, resume bb1, error bb2 + // CHECK-NEXT: await_async_continuation [[CONT]] : $Builtin.RawUnsafeContinuation, resume [[RESUME:bb[0-9]+]], error [[ERROR:bb[0-9]+]] + await_async_continuation %c : $Builtin.RawUnsafeContinuation, resume bb1, error bb2 // CHECK-NEXT: {{^ *$}} // CHECK-NEXT: [[RESUME]]([[RVALUE:%.*]] : $Builtin.Int32): bb1(%r : $Builtin.Int32): @@ -86,15 +86,15 @@ sil @async_continuation_addr : $@async () -> () { entry: // CHECK: [[SLOT:%.*]] = alloc_stack %a = alloc_stack $Builtin.Int32 - // CHECK-NEXT: [[CONT:%.*]] = get_async_continuation_addr $Builtin.Int32, [[SLOT]] : $*Builtin.Int32 - %c = get_async_continuation_addr $Builtin.Int32, %a : $*Builtin.Int32 + // CHECK-NEXT: [[CONT:%.*]] = get_async_continuation_addr Builtin.Int32, [[SLOT]] : $*Builtin.Int32 + %c = get_async_continuation_addr Builtin.Int32, %a : $*Builtin.Int32 // CHECK-NEXT: // function_ref // CHECK-NEXT: function_ref %f = function_ref @not_async_test : $@convention(thin) () -> () // CHECK-NEXT: apply apply %f() : $@convention(thin) () -> () - // CHECK-NEXT: await_async_continuation [[CONT]] : $UnsafeContinuation, resume [[RESUME:bb[0-9]+]] - await_async_continuation %c : $UnsafeContinuation, resume bb1 + // CHECK-NEXT: await_async_continuation [[CONT]] : $Builtin.RawUnsafeContinuation, resume [[RESUME:bb[0-9]+]] + await_async_continuation %c : $Builtin.RawUnsafeContinuation, resume bb1 // CHECK-NEXT: {{^ *$}} // CHECK-NEXT: [[RESUME]]: bb1: @@ -108,15 +108,15 @@ sil @async_continuation_throws_addr : $@async () -> () { entry: // CHECK: [[SLOT:%.*]] = alloc_stack %a = alloc_stack $Builtin.Int32 - // CHECK-NEXT: [[CONT:%.*]] = get_async_continuation_addr [throws] $Builtin.Int32, [[SLOT]] : $*Builtin.Int32 - %c = get_async_continuation_addr [throws] 
$Builtin.Int32, %a : $*Builtin.Int32 + // CHECK-NEXT: [[CONT:%.*]] = get_async_continuation_addr [throws] Builtin.Int32, [[SLOT]] : $*Builtin.Int32 + %c = get_async_continuation_addr [throws] Builtin.Int32, %a : $*Builtin.Int32 // CHECK-NEXT: // function_ref // CHECK-NEXT: function_ref %f = function_ref @not_async_test : $@convention(thin) () -> () // CHECK-NEXT: apply apply %f() : $@convention(thin) () -> () - // CHECK-NEXT: await_async_continuation [[CONT]] : $UnsafeThrowingContinuation, resume [[RESUME:bb[0-9]+]], error [[ERROR:bb[0-9]+]] - await_async_continuation %c : $UnsafeThrowingContinuation, resume bb1, error bb2 + // CHECK-NEXT: await_async_continuation [[CONT]] : $Builtin.RawUnsafeContinuation, resume [[RESUME:bb[0-9]+]], error [[ERROR:bb[0-9]+]] + await_async_continuation %c : $Builtin.RawUnsafeContinuation, resume bb1, error bb2 // CHECK-NEXT: {{^ *$}} // CHECK-NEXT: [[RESUME]]: bb1: diff --git a/test/SIL/Serialization/async.sil b/test/SIL/Serialization/async.sil index e512f43645612..49d6bc0c11aed 100644 --- a/test/SIL/Serialization/async.sil +++ b/test/SIL/Serialization/async.sil @@ -21,10 +21,10 @@ entry: sil [serialized] @async_continuation : $@async () -> () { // CHECK-NEXT: bb entry: - // CHECK-NEXT: [[CONT:%.*]] = get_async_continuation $Builtin.Int32 - %c = get_async_continuation $Builtin.Int32 - // CHECK-NEXT: await_async_continuation [[CONT]] : $UnsafeContinuation, resume [[RESUME:bb[0-9]+]] - await_async_continuation %c : $UnsafeContinuation, resume bb1 + // CHECK-NEXT: [[CONT:%.*]] = get_async_continuation Builtin.Int32 + %c = get_async_continuation Builtin.Int32 + // CHECK-NEXT: await_async_continuation [[CONT]] : $Builtin.RawUnsafeContinuation, resume [[RESUME:bb[0-9]+]] + await_async_continuation %c : $Builtin.RawUnsafeContinuation, resume bb1 // CHECK-NEXT: {{^ *$}} // CHECK-NEXT: [[RESUME]]([[RVALUE:%.*]] : $Builtin.Int32): bb1(%r : $Builtin.Int32): @@ -35,10 +35,10 @@ bb1(%r : $Builtin.Int32): sil [serialized] 
@async_continuation_throws : $@async () -> () { // CHECK-NEXT: bb entry: - // CHECK-NEXT: [[CONT:%.*]] = get_async_continuation [throws] $Builtin.Int32 - %c = get_async_continuation [throws] $Builtin.Int32 - // CHECK-NEXT: await_async_continuation [[CONT]] : $UnsafeThrowingContinuation, resume [[RESUME:bb[0-9]+]], error [[ERROR:bb[0-9]+]] - await_async_continuation %c : $UnsafeThrowingContinuation, resume bb1, error bb2 + // CHECK-NEXT: [[CONT:%.*]] = get_async_continuation [throws] Builtin.Int32 + %c = get_async_continuation [throws] Builtin.Int32 + // CHECK-NEXT: await_async_continuation [[CONT]] : $Builtin.RawUnsafeContinuation, resume [[RESUME:bb[0-9]+]], error [[ERROR:bb[0-9]+]] + await_async_continuation %c : $Builtin.RawUnsafeContinuation, resume bb1, error bb2 // CHECK-NEXT: {{^ *$}} // CHECK-NEXT: [[ERROR]]([[EVALUE:%.*]] : $Error): bb2(%e : $Error): @@ -60,10 +60,10 @@ sil [serialized] @async_continuation_addr : $@async () -> () { entry: // CHECK: [[SLOT:%.*]] = alloc_stack %a = alloc_stack $Builtin.Int32 - // CHECK-NEXT: [[CONT:%.*]] = get_async_continuation_addr $Builtin.Int32, [[SLOT]] : $*Builtin.Int32 - %c = get_async_continuation_addr $Builtin.Int32, %a : $*Builtin.Int32 - // CHECK-NEXT: await_async_continuation [[CONT]] : $UnsafeContinuation, resume [[RESUME:bb[0-9]+]] - await_async_continuation %c : $UnsafeContinuation, resume bb1 + // CHECK-NEXT: [[CONT:%.*]] = get_async_continuation_addr Builtin.Int32, [[SLOT]] : $*Builtin.Int32 + %c = get_async_continuation_addr Builtin.Int32, %a : $*Builtin.Int32 + // CHECK-NEXT: await_async_continuation [[CONT]] : $Builtin.RawUnsafeContinuation, resume [[RESUME:bb[0-9]+]] + await_async_continuation %c : $Builtin.RawUnsafeContinuation, resume bb1 // CHECK-NEXT: {{^ *$}} // CHECK-NEXT: [[RESUME]]: bb1: @@ -77,10 +77,10 @@ sil [serialized] @async_continuation_throws_addr : $@async () -> () { entry: // CHECK: [[SLOT:%.*]] = alloc_stack %a = alloc_stack $Builtin.Int32 - // CHECK-NEXT: [[CONT:%.*]] = 
get_async_continuation_addr [throws] $Builtin.Int32, [[SLOT]] : $*Builtin.Int32 - %c = get_async_continuation_addr [throws] $Builtin.Int32, %a : $*Builtin.Int32 - // CHECK-NEXT: await_async_continuation [[CONT]] : $UnsafeThrowingContinuation, resume [[RESUME:bb[0-9]+]], error [[ERROR:bb[0-9]+]] - await_async_continuation %c : $UnsafeThrowingContinuation, resume bb1, error bb2 + // CHECK-NEXT: [[CONT:%.*]] = get_async_continuation_addr [throws] Builtin.Int32, [[SLOT]] : $*Builtin.Int32 + %c = get_async_continuation_addr [throws] Builtin.Int32, %a : $*Builtin.Int32 + // CHECK-NEXT: await_async_continuation [[CONT]] : $Builtin.RawUnsafeContinuation, resume [[RESUME:bb[0-9]+]], error [[ERROR:bb[0-9]+]] + await_async_continuation %c : $Builtin.RawUnsafeContinuation, resume bb1, error bb2 // CHECK-NEXT: {{^ *$}} // CHECK-NEXT: [[ERROR]]([[EVALUE:%.*]] : $Error): bb2(%e : $Error): diff --git a/test/SILGen/async_builtins.swift b/test/SILGen/async_builtins.swift index 0df74f6ea3faf..be4b674031301 100644 --- a/test/SILGen/async_builtins.swift +++ b/test/SILGen/async_builtins.swift @@ -38,3 +38,83 @@ public struct X { _ = Builtin.createAsyncTaskFuture(0, nil, closure) } } + +// CHECK-LABEL: sil [ossa] @$s4test26usesWithUnsafeContinuationyyYF : $@convention(thin) @async () -> () { +public func usesWithUnsafeContinuation() async { + // trivial resume type + let _: Int = await Builtin.withUnsafeContinuation { c in } + + // CHECK: [[FN:%.*]] = function_ref @$s4test26usesWithUnsafeContinuationyyYFyBcXEfU_ : $@convention(thin) (Builtin.RawUnsafeContinuation) -> () + // CHECK: [[TMP:%.*]] = convert_function [[FN]] : $@convention(thin) (Builtin.RawUnsafeContinuation) -> () to $@convention(thin) @noescape (Builtin.RawUnsafeContinuation) -> () + // CHECK: [[CLOSURE:%.*]] = thin_to_thick_function [[TMP]] + // CHECK: [[BOX:%.*]] = alloc_stack $Int + // CHECK: [[CC:%.*]] = get_async_continuation_addr Int, [[BOX]] : $*Int + // CHECK: apply [[CLOSURE]]([[CC]]) : $@noescape 
@callee_guaranteed (Builtin.RawUnsafeContinuation) -> () + // CHECK: await_async_continuation [[CC]] : $Builtin.RawUnsafeContinuation, resume bb1 + + // CHECK: bb1: + // CHECK: [[RESULT:%.*]] = load [trivial] [[BOX]] : $*Int + // CHECK: dealloc_stack [[BOX]] + + // loadable resume type + let _: String = await Builtin.withUnsafeContinuation { c in } + + // CHECK: [[FN:%.*]] = function_ref @$s4test26usesWithUnsafeContinuationyyYFyBcXEfU0_ : $@convention(thin) (Builtin.RawUnsafeContinuation) -> () + // CHECK: [[TMP:%.*]] = convert_function [[FN]] : $@convention(thin) (Builtin.RawUnsafeContinuation) -> () to $@convention(thin) @noescape (Builtin.RawUnsafeContinuation) -> () + // CHECK: [[CLOSURE:%.*]] = thin_to_thick_function [[TMP]] + // CHECK: [[BOX:%.*]] = alloc_stack $String + // CHECK: [[CC:%.*]] = get_async_continuation_addr String, [[BOX]] : $*String + // CHECK: apply [[CLOSURE]]([[CC]]) : $@noescape @callee_guaranteed (Builtin.RawUnsafeContinuation) -> () + // CHECK: await_async_continuation [[CC]] : $Builtin.RawUnsafeContinuation, resume bb2 + + // CHECK: bb2: + // CHECK: [[RESULT:%.*]] = load [take] [[BOX]] : $*String + // CHECK: destroy_value [[RESULT]] + // CHECK: dealloc_stack [[BOX]] + + // address-only resume type + let _: Any = await Builtin.withUnsafeContinuation { c in } + + // CHECK: [[FN:%.*]] = function_ref @$s4test26usesWithUnsafeContinuationyyYFyBcXEfU1_ : $@convention(thin) (Builtin.RawUnsafeContinuation) -> () + // CHECK: [[TMP:%.*]] = convert_function [[FN]] : $@convention(thin) (Builtin.RawUnsafeContinuation) -> () to $@convention(thin) @noescape (Builtin.RawUnsafeContinuation) -> () + // CHECK: [[CLOSURE:%.*]] = thin_to_thick_function [[TMP]] + // CHECK: [[BOX:%.*]] = alloc_stack $Any + // CHECK: [[CC:%.*]] = get_async_continuation_addr Any, [[BOX]] : $*Any + // CHECK: apply [[CLOSURE]]([[CC]]) : $@noescape @callee_guaranteed (Builtin.RawUnsafeContinuation) -> () + // CHECK: await_async_continuation [[CC]] : $Builtin.RawUnsafeContinuation, 
resume bb3 + + // CHECK: bb3: + // CHECK: [[COPY:%.*]] = alloc_stack $Any + // CHECK: copy_addr [take] [[BOX]] to [initialization] [[COPY]] + // CHECK: destroy_addr [[COPY]] + // CHECK: dealloc_stack [[COPY]] + // CHECK: dealloc_stack [[BOX]] +} + +// CHECK-LABEL: sil [ossa] @$s4test34usesWithUnsafeThrowingContinuationyyYKF : $@convention(thin) @async () -> @error Error { +public func usesWithUnsafeThrowingContinuation() async throws { + let _: Int = await try Builtin.withUnsafeThrowingContinuation { c in } + + // CHECK: [[FN:%.*]] = function_ref @$s4test34usesWithUnsafeThrowingContinuationyyYKFyBcXEfU_ : $@convention(thin) (Builtin.RawUnsafeContinuation) -> () + // CHECK: [[TMP:%.*]] = convert_function [[FN]] : $@convention(thin) (Builtin.RawUnsafeContinuation) -> () to $@convention(thin) @noescape (Builtin.RawUnsafeContinuation) -> () + // CHECK: [[CLOSURE:%.*]] = thin_to_thick_function [[TMP]] + // CHECK: [[BOX:%.*]] = alloc_stack $Int + // CHECK: [[CC:%.*]] = get_async_continuation_addr [throws] Int, [[BOX]] : $*Int + // CHECK: apply [[CLOSURE]]([[CC]]) : $@noescape @callee_guaranteed (Builtin.RawUnsafeContinuation) -> () + // CHECK: await_async_continuation [[CC]] : $Builtin.RawUnsafeContinuation, resume bb1, error bb2 + + // CHECK: bb1: + // CHECK: [[RESULT:%.*]] = load [trivial] [[BOX]] : $*Int + // CHECK: dealloc_stack [[BOX]] + + // CHECK: bb2([[ERROR:%.*]] : @owned $Error): + // CHECK: builtin "willThrow"([[ERROR]] : $Error) : $() + // CHECK: dealloc_stack [[BOX]] + // CHECK: throw [[ERROR]] +} + +// Make sure we do the right thing when the closure value is non-trivial, +// because it has captures and was formed by a partial_apply. 
+public func usesWithUnsafeContinuationCaptures(fn: (Builtin.RawUnsafeContinuation) -> ()) async throws { + let _: Int = await Builtin.withUnsafeContinuation { c in fn(c) } +} diff --git a/test/SILGen/async_conversion.swift b/test/SILGen/async_conversion.swift new file mode 100644 index 0000000000000..ecd98fdf44dfb --- /dev/null +++ b/test/SILGen/async_conversion.swift @@ -0,0 +1,23 @@ +// RUN: %target-swift-frontend -emit-silgen %s -module-name test -swift-version 5 -enable-experimental-concurrency | %FileCheck %s +// REQUIRES: concurrency + +func f(_: Int, _: String) -> String? { nil } + +// CHECK-LABEL: sil hidden [ossa] @$s4testAAyyF : $@convention(thin) () -> () { +func test() { + // CHECK: [[F:%.*]] = function_ref @$s4test1fySSSgSi_SStF : $@convention(thin) (Int, @guaranteed String) -> @owned Optional + // CHECK: [[THICK_F:%.*]] = thin_to_thick_function [[F]] : $@convention(thin) (Int, @guaranteed String) -> @owned Optional to $@callee_guaranteed (Int, @guaranteed String) -> @owned Optional + // CHECK: [[THUNK:%.*]] = function_ref @$sSiS2SSgIegygo_SiSSAAIegHygo_TR : $@convention(thin) @async (Int, @guaranteed String, @guaranteed @callee_guaranteed (Int, @guaranteed String) -> @owned Optional) -> @owned Optional + // CHECK: partial_apply [callee_guaranteed] [[THUNK]]([[THICK_F]]) : $@convention(thin) @async (Int, @guaranteed String, @guaranteed @callee_guaranteed (Int, @guaranteed String) -> @owned Optional) -> @owned Optional + let _: (Int, String) async -> String? = f +} + +protocol P { + func f(_: Int, _: String) async -> String? +} + +struct X: P { + // CHECK-LABEL: sil private [transparent] [thunk] [ossa] @$s4test1XVAA1PA2aDP1fySSSgSi_SStYFTW : $@convention(witness_method: P) @async (Int, @guaranteed String, @in_guaranteed X) -> @owned Optional + // CHECK: function_ref @$s4test1XV1fySSSgSi_SStF : $@convention(method) (Int, @guaranteed String, X) -> @owned Optional + func f(_: Int, _: String) -> String? 
{ nil } +} diff --git a/test/SILGen/async_handler.swift b/test/SILGen/async_handler.swift new file mode 100644 index 0000000000000..136782e0ceb22 --- /dev/null +++ b/test/SILGen/async_handler.swift @@ -0,0 +1,59 @@ +// RUN: %target-swift-frontend -emit-silgen %s -module-name test -swift-version 5 -enable-experimental-concurrency | %FileCheck %s +// REQUIRES: concurrency + +func take(_ t: T) async { + print(t) +} + +// CHECK-LABEL: sil [ossa] @$s4test13simpleHandleryySiF : $@convention(thin) (Int) -> () { +// CHECK: [[BODYFN:%[0-9]+]] = function_ref @$s4test13simpleHandleryySiYF : $@convention(thin) @async (Int) -> () +// CHECK: [[FN:%[0-9]+]] = partial_apply [callee_guaranteed] [[BODYFN]](%0) : $@convention(thin) @async (Int) -> () +// CHECK: [[INTRINSIC:%[0-9]+]] = function_ref @$s12_Concurrency16_runAsyncHandler9operationyyyYc_tF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () +// CHECK: {{.*}} = apply [[INTRINSIC]]([[FN]]) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () +// CHECK: destroy_value [[FN]] : $@async @callee_guaranteed () -> () +// CHECK: } // end sil function '$s4test13simpleHandleryySiF' +@asyncHandler +public func simpleHandler(_ i: Int) { + await take(i) +} + +// CHECK-LABEL: sil [ossa] @$s4test20nonTrivialArgHandleryySSF : $@convention(thin) (@guaranteed String) -> () { +// CHECK: [[COPY:%[0-9]+]] = copy_value %0 : $String +// CHECK: [[BODYFN:%[0-9]+]] = function_ref @$s4test20nonTrivialArgHandleryySSYF : $@convention(thin) @async (@guaranteed String) -> () +// CHECK: [[FN:%[0-9]+]] = partial_apply [callee_guaranteed] [[BODYFN]]([[COPY]]) : $@convention(thin) @async (@guaranteed String) -> () +// CHECK: [[INTRINSIC:%[0-9]+]] = function_ref @$s12_Concurrency16_runAsyncHandler9operationyyyYc_tF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () +// CHECK: {{.*}} = apply [[INTRINSIC]]([[FN]]) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () 
+// CHECK: destroy_value [[FN]] : $@async @callee_guaranteed () -> () +// CHECK: } // end sil function '$s4test20nonTrivialArgHandleryySSF' +@asyncHandler +public func nonTrivialArgHandler(_ s: String) { + await take(s) +} + +// CHECK-LABEL: sil [ossa] @$s4test14genericHandleryyxlF : $@convention(thin) (@in_guaranteed T) -> () { +// CHECK: [[TMP:%[0-9]+]] = alloc_stack $T +// CHECK: copy_addr %0 to [initialization] [[TMP]] : $*T +// CHECK: [[BODYFN:%[0-9]+]] = function_ref @$s4test14genericHandleryyxYlF : $@convention(thin) @async <τ_0_0> (@in_guaranteed τ_0_0) -> () +// CHECK: [[FN:%[0-9]+]] = partial_apply [callee_guaranteed] [[BODYFN]]([[TMP]]) : $@convention(thin) @async <τ_0_0> (@in_guaranteed τ_0_0) -> () +// CHECK: [[INTRINSIC:%[0-9]+]] = function_ref @$s12_Concurrency16_runAsyncHandler9operationyyyYc_tF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () +// CHECK: {{.*}} = apply [[INTRINSIC]]([[FN]]) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () +// CHECK: destroy_value [[FN]] : $@async @callee_guaranteed () -> () +// CHECK: } // end sil function '$s4test14genericHandleryyxlF' +@asyncHandler +public func genericHandler(_ t: T) { + await take(t) +} + +public struct Mystruct { + // CHECK-LABEL: sil [ossa] @$s4test8MystructV13memberHandleryySiF : $@convention(method) (Int, Mystruct) -> () { + // CHECK: [[BODYFN:%[0-9]+]] = function_ref @$s4test8MystructV13memberHandleryySiYF : $@convention(method) @async (Int, Mystruct) -> () + // CHECK: [[FN:%[0-9]+]] = partial_apply [callee_guaranteed] [[BODYFN]](%0, %1) : $@convention(method) @async (Int, Mystruct) -> () + // CHECK: [[INTRINSIC:%[0-9]+]] = function_ref @$s12_Concurrency16_runAsyncHandler9operationyyyYc_tF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + // CHECK: {{.*}} = apply [[INTRINSIC]]([[FN]]) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + // CHECK: destroy_value [[FN]] : $@async 
@callee_guaranteed () -> () + // CHECK: } // end sil function '$s4test8MystructV13memberHandleryySiF' + @asyncHandler + public func memberHandler(_ i: Int) { + await take(i) + } +} diff --git a/test/SILGen/async_let.swift b/test/SILGen/async_let.swift new file mode 100644 index 0000000000000..8008119c75967 --- /dev/null +++ b/test/SILGen/async_let.swift @@ -0,0 +1,82 @@ +// RUN: %target-swift-frontend -emit-silgen %s -module-name test -swift-version 5 -enable-experimental-concurrency -parse-stdlib -sil-verify-all | %FileCheck %s +// REQUIRES: concurrency + +import Swift +import _Concurrency + +func getInt() async -> Int { 0 } +func getString() async -> String { "" } +func getStringThrowingly() async throws -> String { "" } +func getIntAndString() async -> (Int, String) { (5, "hello") } + +enum SomeError: Error { + case boom +} + +// CHECK-LABEL: sil hidden [ossa] @$s4test0A11AsyncLetIntSiyYF : $@convention(thin) @async () -> Int +func testAsyncLetInt() async -> Int { + // CHECK: [[I:%.*]] = mark_uninitialized [var] %0 + // CHECK: [[CLOSURE:%.*]] = function_ref @$s4test0A11AsyncLetIntSiyYFSiyYcfu_ : $@convention(thin) @async () -> Int + // CHECK: [[THICK_CLOSURE:%.*]] = thin_to_thick_function [[CLOSURE]] : $@convention(thin) @async () -> Int to $@async @callee_guaranteed () -> Int + // CHECK: [[REABSTRACT_THUNK:%.*]] = function_ref @$sSiIegHd_Sis5Error_pIegHrzo_TR : $@convention(thin) @async (@guaranteed @async @callee_guaranteed () -> Int) -> (@out Int, @error Error) + // CHECK: [[REABSTRACT_CLOSURE:%.*]] = partial_apply [callee_guaranteed] [[REABSTRACT_THUNK]]([[THICK_CLOSURE]]) : $@convention(thin) @async (@guaranteed @async @callee_guaranteed () -> Int) -> (@out Int, @error Error) + // CHECK: [[CLOSURE_ARG:%.*]] = convert_function [[REABSTRACT_CLOSURE]] : $@async @callee_guaranteed () -> (@out Int, @error Error) to $@async @callee_guaranteed @substituted <τ_0_0> () -> (@out τ_0_0, @error Error) for + // CHECK: [[RUN_CHILD_TASK:%.*]] = function_ref 
@$s12_Concurrency13_runChildTask9operationBoxyYKc_tYlF : $@convention(thin) @async <τ_0_0> (@guaranteed @async @callee_guaranteed @substituted <τ_0_0> () -> (@out τ_0_0, @error Error) for <τ_0_0>) -> @owned Builtin.NativeObject + // CHECK: [[CHILD_TASK:%.*]] = apply [[RUN_CHILD_TASK]]([[CLOSURE_ARG]]) : $@convention(thin) @async <τ_0_0> (@guaranteed @async @callee_guaranteed @substituted <τ_0_0> () -> (@out τ_0_0, @error Error) for <τ_0_0>) -> @owned Builtin.NativeObject + async let i = await getInt() + + // CHECK: [[FUTURE_GET:%.*]] = function_ref @$s12_Concurrency14_taskFutureGetyxBoYlF : $@convention(thin) @async <τ_0_0> (@guaranteed Builtin.NativeObject) -> @out τ_0_0 + // CHECK: [[INT_RESULT:%.*]] = alloc_stack $Int + // CHECK: apply [[FUTURE_GET]]([[INT_RESULT]], [[CHILD_TASK]]) : $@convention(thin) @async <τ_0_0> (@guaranteed Builtin.NativeObject) -> @out τ_0_0 + // CHECK: [[INT_RESULT_VALUE:%.*]] = load [trivial] [[INT_RESULT]] : $*Int + // CHECK: assign [[INT_RESULT_VALUE]] to [[I]] : $*Int + return await i + + // CHECK: [[BORROW_CHILD_TASK:%.*]] = begin_borrow [[CHILD_TASK]] : $Builtin.NativeObject + // CHECK-NEXT: builtin "cancelAsyncTask"([[BORROW_CHILD_TASK]] : $Builtin.NativeObject) : $() + // CHECK-NEXT: end_borrow [[BORROW_CHILD_TASK]] : $Builtin.NativeObject + + // CHECK: destroy_value [[CHILD_TASK]] : $Builtin.NativeObject +} + +func testAsyncLetWithThrows(cond: Bool) async throws -> String { + async let i = await getInt() + async let s = await getString() + + if cond { + throw SomeError.boom + } + + return await s +} + +// CHECK-LABEL: sil hidden [ossa] @$s4test0A14AsyncLetThrowsSSyYKF : $@convention(thin) @async () -> (@owned String, @error Error) { +func testAsyncLetThrows() async throws -> String { + async let s = await try getStringThrowingly() + + // CHECK: [[RUN_CHILD_TASK:%.*]] = function_ref @$s12_Concurrency22_taskFutureGetThrowingyxBoYKlF : $@convention(thin) @async <τ_0_0> (@guaranteed Builtin.NativeObject) -> (@out τ_0_0, @error 
Error) + // CHECK: try_apply [[RUN_CHILD_TASK]] + return await try s +} + +// CHECK-LABEL: sil hidden [ossa] @$s4test0A14DecomposeAwait4condSiSb_tYF : $@convention(thin) @async (Bool) -> Int { +func testDecomposeAwait(cond: Bool) async -> Int { + // CHECK: [[I_VAR:%.*]] = alloc_stack $Int, let, name "i" + // CHECK: [[I:%.*]] = mark_uninitialized [var] [[I_VAR]] : $*Int + // CHECK: [[S_VAR:%.*]] = alloc_stack $String, let, name "s" + // CHECK: [[S:%.*]] = mark_uninitialized [var] [[S_VAR]] : $*String + async let (i, s) = await getIntAndString() + + if cond { + // CHECK: [[FUTURE_GET:%.*]] = function_ref @$s12_Concurrency14_taskFutureGetyxBoYlF : $@convention(thin) @async <τ_0_0> (@guaranteed Builtin.NativeObject) -> @out τ_0_0 + // CHECK: [[TUPLE_RESULT:%.*]] = alloc_stack $(Int, String) + // CHECK: apply [[FUTURE_GET]]<(Int, String)>([[TUPLE_RESULT]], {{%.*}}) : $@convention(thin) @async <τ_0_0> (@guaranteed Builtin.NativeObject) -> @out τ_0_0 + // CHECK: [[TUPLE_RESULT_VAL:%.*]] = load [take] [[TUPLE_RESULT]] : $*(Int, String) + // CHECK: ([[FIRST_VAL:%.*]], [[SECOND_VAL:%.*]]) = destructure_tuple [[TUPLE_RESULT_VAL]] : $(Int, String) + // CHECK: assign [[FIRST_VAL]] to [[I]] : $*Int + // CHECK: assign [[SECOND_VAL]] to [[S]] : $*String + return await Int(s)! 
+ } + + return await i +} diff --git a/test/SILGen/hop_to_executor.swift b/test/SILGen/hop_to_executor.swift index 8b9427481c188..e30478159bebe 100644 --- a/test/SILGen/hop_to_executor.swift +++ b/test/SILGen/hop_to_executor.swift @@ -1,4 +1,4 @@ -// RUN: %target-swift-frontend -emit-silgen %s -module-name test -swift-version 5 -enable-experimental-concurrency | %FileCheck %s +// RUN: %target-swift-frontend -emit-silgen %s -module-name test -swift-version 5 -enable-experimental-concurrency | %FileCheck --enable-var-scope %s // REQUIRES: concurrency @@ -34,7 +34,7 @@ actor class MyActor { // CHECK: } // end sil function '$s4test7MyActorC0A13AsyncFunctionyyYKF' func testAsyncFunction() async throws { await callee(p) - try await throwingCallee(p) + await try throwingCallee(p) } // CHECK-LABEL: sil hidden [ossa] @$s4test7MyActorC0A22ConsumingAsyncFunctionyyYF : $@convention(method) @async (@owned MyActor) -> () { @@ -118,3 +118,120 @@ struct GenericGlobalActorWithGetter { func testGenericGlobalActorWithGetter() async { } + +actor class RedActorImpl { + // CHECK-LABEL: sil hidden [ossa] @$s4test12RedActorImplC5helloyySiF : $@convention(method) (Int, @guaranteed RedActorImpl) -> () { + // CHECK-NOT: hop_to_executor + // CHECK: } // end sil function '$s4test12RedActorImplC5helloyySiF' + func hello(_ x : Int) {} +} + +actor class BlueActorImpl { +// CHECK-LABEL: sil hidden [ossa] @$s4test13BlueActorImplC4poke6personyAA03RedcD0C_tYF : $@convention(method) @async (@guaranteed RedActorImpl, @guaranteed BlueActorImpl) -> () { +// CHECK: bb0([[RED:%[0-9]+]] : @guaranteed $RedActorImpl, [[BLUE:%[0-9]+]] : @guaranteed $BlueActorImpl): +// CHECK: hop_to_executor [[BLUE]] : $BlueActorImpl +// CHECK-NOT: hop_to_executor +// CHECK: [[INTARG:%[0-9]+]] = apply {{%[0-9]+}}({{%[0-9]+}}, {{%[0-9]+}}) : $@convention(method) (Builtin.IntLiteral, @thin Int.Type) -> Int +// CHECK-NOT: hop_to_executor +// CHECK: [[METH:%[0-9]+]] = class_method [[RED]] : $RedActorImpl, #RedActorImpl.hello : 
(RedActorImpl) -> (Int) -> (), $@convention(method) (Int, @guaranteed RedActorImpl) -> () +// CHECK: hop_to_executor [[RED]] : $RedActorImpl +// CHECK-NEXT: {{%[0-9]+}} = apply [[METH]]([[INTARG]], [[RED]]) : $@convention(method) (Int, @guaranteed RedActorImpl) -> () +// CHECK-NEXT: hop_to_executor [[BLUE]] : $BlueActorImpl +// CHECK-NOT: hop_to_executor +// CHECK: } // end sil function '$s4test13BlueActorImplC4poke6personyAA03RedcD0C_tYF' + func poke(person red : RedActorImpl) async { + await red.hello(42) + } + +// CHECK-LABEL: sil hidden [ossa] @$s4test13BlueActorImplC14createAndGreetyyYF : $@convention(method) @async (@guaranteed BlueActorImpl) -> () { +// CHECK: bb0([[BLUE:%[0-9]+]] : @guaranteed $BlueActorImpl): +// CHECK: hop_to_executor [[BLUE]] : $BlueActorImpl +// CHECK: [[RED:%[0-9]+]] = apply {{%[0-9]+}}({{%[0-9]+}}) : $@convention(method) (@thick RedActorImpl.Type) -> @owned RedActorImpl +// CHECK: [[REDBORROW:%[0-9]+]] = begin_borrow [[RED]] : $RedActorImpl +// CHECK: [[INTARG:%[0-9]+]] = apply {{%[0-9]+}}({{%[0-9]+}}, {{%[0-9]+}}) : $@convention(method) (Builtin.IntLiteral, @thin Int.Type) -> Int +// CHECK: [[METH:%[0-9]+]] = class_method [[REDBORROW]] : $RedActorImpl, #RedActorImpl.hello : (RedActorImpl) -> (Int) -> (), $@convention(method) (Int, @guaranteed RedActorImpl) -> () +// CHECK: hop_to_executor [[REDBORROW]] : $RedActorImpl +// CHECK-NEXT: = apply [[METH]]([[INTARG]], [[REDBORROW]]) : $@convention(method) (Int, @guaranteed RedActorImpl) -> () +// CHECK-NEXT: hop_to_executor [[BLUE]] : $BlueActorImpl +// CHECK: end_borrow [[REDBORROW]] : $RedActorImpl +// CHECK: destroy_value [[RED]] : $RedActorImpl +// CHECK: } // end sil function '$s4test13BlueActorImplC14createAndGreetyyYF' + func createAndGreet() async { + let red = RedActorImpl() // <- key difference from `poke` is local construction of the actor + await red.hello(42) + } +} + +@globalActor +struct RedActor { + static var shared: RedActorImpl { RedActorImpl() } +} + +@globalActor 
+struct BlueActor { + static var shared: BlueActorImpl { BlueActorImpl() } +} + +// CHECK-LABEL: sil hidden [ossa] @$s4test5redFnyySiF : $@convention(thin) (Int) -> () { +// CHECK-NOT: hop_to_executor +// CHECK: } // end sil function '$s4test5redFnyySiF' +@RedActor func redFn(_ x : Int) {} + +// CHECK-LABEL: sil hidden [ossa] @$s4test6blueFnyyYF : $@convention(thin) @async () -> () { + // ---- switch to blue actor, since we're an async function ---- +// CHECK: [[MT:%[0-9]+]] = metatype $@thin BlueActor.Type +// CHECK: [[F:%[0-9]+]] = function_ref @$s4test9BlueActorV6sharedAA0bC4ImplCvgZ : $@convention(method) (@thin BlueActor.Type) -> @owned BlueActorImpl +// CHECK: [[B:%[0-9]+]] = apply [[F]]([[MT]]) : $@convention(method) (@thin BlueActor.Type) -> @owned BlueActorImpl +// CHECK: [[BLUEEXE:%[0-9]+]] = begin_borrow [[B]] : $BlueActorImpl +// CHECK: hop_to_executor [[BLUEEXE]] : $BlueActorImpl + // ---- evaluate the argument to redFn ---- +// CHECK: [[LIT:%[0-9]+]] = integer_literal $Builtin.IntLiteral, 100 +// CHECK: [[INTMT:%[0-9]+]] = metatype $@thin Int.Type +// CHECK: [[CTOR:%[0-9]+]] = function_ref @$sSi22_builtinIntegerLiteralSiBI_tcfC : $@convention(method) (Builtin.IntLiteral, @thin Int.Type) -> Int +// CHECK: [[ARG:%[0-9]+]] = apply [[CTOR]]([[LIT]], [[INTMT]]) : $@convention(method) (Builtin.IntLiteral, @thin Int.Type) -> Int + // ---- prepare to invoke redFn ---- +// CHECK: [[CALLEE:%[0-9]+]] = function_ref @$s4test5redFnyySiF : $@convention(thin) (Int) -> () + // ---- obtain and hop to RedActor's executor ---- +// CHECK: [[REDMT:%[0-9]+]] = metatype $@thin RedActor.Type +// CHECK: [[GETTER:%[0-9]+]] = function_ref @$s4test8RedActorV6sharedAA0bC4ImplCvgZ : $@convention(method) (@thin RedActor.Type) -> @owned RedActorImpl +// CHECK: [[R:%[0-9]+]] = apply [[GETTER]]([[REDMT]]) : $@convention(method) (@thin RedActor.Type) -> @owned RedActorImpl +// CHECK: [[REDEXE:%[0-9]+]] = begin_borrow [[R]] : $RedActorImpl +// CHECK: hop_to_executor [[REDEXE]] : 
$RedActorImpl + // ---- now invoke redFn, hop back to Blue, and clean-up ---- +// CHECK-NEXT: {{%[0-9]+}} = apply [[CALLEE]]([[ARG]]) : $@convention(thin) (Int) -> () +// CHECK-NEXT: hop_to_executor [[BLUEEXE]] : $BlueActorImpl +// CHECK: end_borrow [[REDEXE]] : $RedActorImpl +// CHECK: destroy_value [[R]] : $RedActorImpl +// CHECK: end_borrow [[BLUEEXE]] : $BlueActorImpl +// CHECK: destroy_value [[B]] : $BlueActorImpl +// CHECK-NOT: hop_to_executor +// CHECK: } // end sil function '$s4test6blueFnyyYF' +@BlueActor func blueFn() async { + await redFn(100) +} + +// CHECK-LABEL: sil hidden [ossa] @$s4test20unspecifiedAsyncFuncyyYF : $@convention(thin) @async () -> () { +// CHECK-NOT: hop_to_executor +// CHECK: [[BORROW:%[0-9]+]] = begin_borrow {{%[0-9]+}} : $RedActorImpl +// CHECK-NEXT: hop_to_executor [[BORROW]] : $RedActorImpl +// CHECK-NEXT: {{%[0-9]+}} = apply {{%[0-9]+}}({{%[0-9]+}}) : $@convention(thin) (Int) -> () +// CHECK-NEXT: end_borrow [[BORROW]] : $RedActorImpl +// CHECK-NOT: hop_to_executor +// CHECK: } // end sil function '$s4test20unspecifiedAsyncFuncyyYF' +func unspecifiedAsyncFunc() async { + await redFn(200) +} + +// CHECK-LABEL: sil hidden [ossa] @$s4test27anotherUnspecifiedAsyncFuncyyAA12RedActorImplCYF : $@convention(thin) @async (@guaranteed RedActorImpl) -> () { +// CHECK: bb0([[RED:%[0-9]+]] : @guaranteed $RedActorImpl): +// CHECK-NOT: hop_to_executor +// CHECK: [[INTARG:%[0-9]+]] = apply {{%[0-9]+}}({{%[0-9]+}}, {{%[0-9]+}}) : $@convention(method) (Builtin.IntLiteral, @thin Int.Type) -> Int +// CHECK-NOT: hop_to_executor +// CHECK: [[METH:%[0-9]+]] = class_method [[RED]] : $RedActorImpl, #RedActorImpl.hello : (RedActorImpl) -> (Int) -> (), $@convention(method) (Int, @guaranteed RedActorImpl) -> () +// CHECK-NEXT: hop_to_executor [[RED]] : $RedActorImpl +// CHECK-NEXT: {{%[0-9]+}} = apply [[METH]]([[INTARG]], [[RED]]) : $@convention(method) (Int, @guaranteed RedActorImpl) -> () +// CHECK-NOT: hop_to_executor +// CHECK: } // end sil function 
'$s4test27anotherUnspecifiedAsyncFuncyyAA12RedActorImplCYF' +func anotherUnspecifiedAsyncFunc(_ red : RedActorImpl) async { + await red.hello(12); +} \ No newline at end of file diff --git a/test/SILGen/objc_async.swift b/test/SILGen/objc_async.swift index c783ff6fd3fa3..f8c5c5615d194 100644 --- a/test/SILGen/objc_async.swift +++ b/test/SILGen/objc_async.swift @@ -8,10 +8,11 @@ import ObjCConcurrency func testSlowServer(slowServer: SlowServer) async throws { // CHECK: [[RESUME_BUF:%.*]] = alloc_stack $Int // CHECK: [[METHOD:%.*]] = objc_method {{.*}} $@convention(objc_method) (NSString, @convention(block) (Int) -> (), SlowServer) -> () - // CHECK: [[CONT:%.*]] = get_async_continuation_addr $Int, [[RESUME_BUF]] + // CHECK: [[CONT:%.*]] = get_async_continuation_addr Int, [[RESUME_BUF]] + // CHECK: [[WRAPPED:%.*]] = struct $UnsafeContinuation ([[CONT]] : $Builtin.RawUnsafeContinuation) // CHECK: [[BLOCK_STORAGE:%.*]] = alloc_stack $@block_storage UnsafeContinuation // CHECK: [[CONT_SLOT:%.*]] = project_block_storage [[BLOCK_STORAGE]] - // CHECK: store [[CONT]] to [trivial] [[CONT_SLOT]] + // CHECK: store [[WRAPPED]] to [trivial] [[CONT_SLOT]] // CHECK: [[BLOCK_IMPL:%.*]] = function_ref @[[INT_COMPLETION_BLOCK:.*]] : $@convention(c) (@inout_aliasable @block_storage UnsafeContinuation, Int) -> () // CHECK: [[BLOCK:%.*]] = init_block_storage_header [[BLOCK_STORAGE]] {{.*}}, invoke [[BLOCK_IMPL]] // CHECK: apply [[METHOD]]({{.*}}, [[BLOCK]], %0) @@ -23,10 +24,11 @@ func testSlowServer(slowServer: SlowServer) async throws { // CHECK: [[RESUME_BUF:%.*]] = alloc_stack $String // CHECK: [[METHOD:%.*]] = objc_method {{.*}} $@convention(objc_method) (@convention(block) (Optional, Optional) -> (), SlowServer) -> () - // CHECK: [[CONT:%.*]] = get_async_continuation_addr [throws] $String, [[RESUME_BUF]] + // CHECK: [[CONT:%.*]] = get_async_continuation_addr [throws] String, [[RESUME_BUF]] + // CHECK: [[WRAPPED:%.*]] = struct $UnsafeThrowingContinuation ([[CONT]] : 
$Builtin.RawUnsafeContinuation) // CHECK: [[BLOCK_STORAGE:%.*]] = alloc_stack $@block_storage UnsafeThrowingContinuation // CHECK: [[CONT_SLOT:%.*]] = project_block_storage [[BLOCK_STORAGE]] - // CHECK: store [[CONT]] to [trivial] [[CONT_SLOT]] + // CHECK: store [[WRAPPED]] to [trivial] [[CONT_SLOT]] // CHECK: [[BLOCK_IMPL:%.*]] = function_ref @[[STRING_COMPLETION_THROW_BLOCK:.*]] : $@convention(c) (@inout_aliasable @block_storage UnsafeThrowingContinuation, Optional, Optional) -> () // CHECK: [[BLOCK:%.*]] = init_block_storage_header [[BLOCK_STORAGE]] {{.*}}, invoke [[BLOCK_IMPL]] // CHECK: apply [[METHOD]]([[BLOCK]], %0) @@ -35,16 +37,16 @@ func testSlowServer(slowServer: SlowServer) async throws { // CHECK: [[RESULT:%.*]] = load [take] [[RESUME_BUF]] // CHECK: destroy_value [[RESULT]] // CHECK: dealloc_stack [[RESUME_BUF]] - let _: String = try await slowServer.findAnswer() + let _: String = await try slowServer.findAnswer() // CHECK: objc_method {{.*}} $@convention(objc_method) (NSString, @convention(block) () -> (), SlowServer) -> () // CHECK: [[BLOCK_IMPL:%.*]] = function_ref @[[VOID_COMPLETION_BLOCK:.*]] : $@convention(c) (@inout_aliasable @block_storage UnsafeContinuation<()>) -> () await slowServer.serverRestart("somewhere") // CHECK: [[BLOCK_IMPL:%.*]] = function_ref @[[NSSTRING_INT_THROW_COMPLETION_BLOCK:.*]] : $@convention(c) (@inout_aliasable @block_storage UnsafeThrowingContinuation<(String, Int)>, Optional, Int, Optional) -> () - let (_, _): (String, Int) = try await slowServer.findMultipleAnswers() + let (_, _): (String, Int) = await try slowServer.findMultipleAnswers() - let (_, _): (Bool, Bool) = try await slowServer.findDifferentlyFlavoredBooleans() + let (_, _): (Bool, Bool) = await try slowServer.findDifferentlyFlavoredBooleans() // CHECK: [[ERROR]]([[ERROR_VALUE:%.*]] : @owned $Error): // CHECK: dealloc_stack [[RESUME_BUF]] diff --git a/test/SILGen/objc_async_from_swift.swift b/test/SILGen/objc_async_from_swift.swift index 
c842b463dfc2a..029af70858f67 100644 --- a/test/SILGen/objc_async_from_swift.swift +++ b/test/SILGen/objc_async_from_swift.swift @@ -7,6 +7,7 @@ import ObjCConcurrency @objc protocol SlowServing { func requestInt() async -> Int func requestString() async -> String + func tryRequestString() async throws -> String func requestIntAndString() async -> (Int, String) func tryRequestIntAndString() async throws -> (Int, String) } @@ -17,20 +18,40 @@ func testSlowServing(p: SlowServing) async throws { let _: Int = await p.requestInt() // CHECK: objc_method {{.*}} $@convention(objc_method) <τ_0_0 where τ_0_0 : SlowServing> (@convention(block) (NSString) -> (), τ_0_0) -> () let _: String = await p.requestString() + // CHECK: objc_method {{.*}} $@convention(objc_method) <τ_0_0 where τ_0_0 : SlowServing> (@convention(block) (Optional, Optional) -> (), τ_0_0) -> () + let _: String = await try p.tryRequestString() // CHECK: objc_method {{.*}} $@convention(objc_method) <τ_0_0 where τ_0_0 : SlowServing> (@convention(block) (Int, NSString) -> (), τ_0_0) -> () let _: (Int, String) = await p.requestIntAndString() // CHECK: objc_method {{.*}} $@convention(objc_method) <τ_0_0 where τ_0_0 : SlowServing> (@convention(block) (Int, Optional, Optional) -> (), τ_0_0) -> () - let _: (Int, String) = try await p.tryRequestIntAndString() + let _: (Int, String) = await try p.tryRequestIntAndString() } -/* class SlowSwiftServer: NSObject, SlowServing { + // CHECK-LABEL: sil {{.*}} @${{.*}}10requestInt{{.*}}To : + // CHECK: [[BLOCK_COPY:%.*]] = copy_value %0 + // CHECK: [[SELF:%.*]] = copy_value %1 + // CHECK: [[CLOSURE_REF:%.*]] = function_ref [[CLOSURE_IMP:@\$.*10requestInt.*U_To]] : + // CHECK: [[CLOSURE:%.*]] = partial_apply [callee_guaranteed] [[CLOSURE_REF]]([[BLOCK_COPY]], [[SELF]]) + // CHECK: [[RUN_TASK:%.*]] = function_ref @${{.*}}29_runTaskForBridgedAsyncMethod + // CHECK: apply [[RUN_TASK]]([[CLOSURE]]) + // CHECK: sil {{.*}} [[CLOSURE_IMP]] + // CHECK: [[NATIVE_RESULT:%.*]] = 
apply{{.*}}@async + // CHECK: apply %0([[NATIVE_RESULT]]) func requestInt() async -> Int { return 0 } func requestString() async -> String { return "" } + // CHECK-LABEL: sil {{.*}} @${{.*}}16tryRequestString{{.*}}U_To : + // CHECK: try_apply{{.*}}@async{{.*}}, normal [[NORMAL:bb[0-9]+]], error [[ERROR:bb[0-9]+]] + // CHECK: [[NORMAL]]([[NATIVE_RESULT:%.*]] : @owned $String): + // CHECK: [[NIL_ERROR:%.*]] = enum $Optional, #Optional.none + // CHECK: apply %0({{%.*}}, [[NIL_ERROR]]) + // CHECK: [[ERROR]]([[NATIVE_RESULT:%.*]] : @owned $Error): + // CHECK: [[NIL_NSSTRING:%.*]] = enum $Optional, #Optional.none + // CHECK: apply %0([[NIL_NSSTRING]], {{%.*}}) + func tryRequestString() async throws -> String { return "" } func requestIntAndString() async -> (Int, String) { return (0, "") } func tryRequestIntAndString() async throws -> (Int, String) { return (0, "") } } -*/ + protocol NativelySlowServing { func doSomethingSlow(_: String) async -> Int diff --git a/test/SILGen/specialize_attr.swift b/test/SILGen/specialize_attr.swift index dfc55cd4894ac..f38933c3e335c 100644 --- a/test/SILGen/specialize_attr.swift +++ b/test/SILGen/specialize_attr.swift @@ -1,23 +1,23 @@ // Test .swiftmodule with library-evolution // RUN: %empty-directory(%t) -// RUN: %target-swift-frontend -module-name A -emit-module-path %t/A.swiftmodule -enable-library-evolution -swift-version 5 %S/Inputs/specialize_attr_module.swift -// RUN: %target-swift-frontend -I %t -module-name B -emit-module-path %t/B.swiftmodule -enable-library-evolution -swift-version 5 %S/Inputs/specialize_attr_module2.swift -// RUN: %target-swift-emit-silgen -I %t -module-name specialize_attr -emit-verbose-sil %s -swift-version 5 | %FileCheck %s -// RUN: %target-swift-emit-sil -I %t -sil-verify-all -O -module-name specialize_attr -emit-verbose-sil %s | %FileCheck -check-prefix=CHECK-OPT -check-prefix=CHECK-OPT-EVO %s +// RUN: %target-swift-frontend -enable-experimental-prespecialization -module-name A -emit-module-path 
%t/A.swiftmodule -enable-library-evolution -swift-version 5 %S/Inputs/specialize_attr_module.swift +// RUN: %target-swift-frontend -enable-experimental-prespecialization -I %t -module-name B -emit-module-path %t/B.swiftmodule -enable-library-evolution -swift-version 5 %S/Inputs/specialize_attr_module2.swift +// RUN: %target-swift-emit-silgen -enable-experimental-prespecialization -I %t -module-name specialize_attr -emit-verbose-sil %s -swift-version 5 | %FileCheck %s +// RUN: %target-swift-emit-sil -enable-experimental-prespecialization -I %t -sil-verify-all -O -module-name specialize_attr -emit-verbose-sil %s | %FileCheck -check-prefix=CHECK-OPT -check-prefix=CHECK-OPT-EVO %s // Test .swiftinterface // RUN: %empty-directory(%t) -// RUN: %target-swift-frontend -emit-module -o /dev/null -module-name A -emit-module-interface-path %t/A.swiftinterface -enable-library-evolution -swift-version 5 %S/Inputs/specialize_attr_module.swift -// RUN: %target-swift-frontend -emit-module -o /dev/null -I %t -module-name B -emit-module-interface-path %t/B.swiftinterface -enable-library-evolution -swift-version 5 %S/Inputs/specialize_attr_module2.swift -// RUN: %target-swift-emit-silgen -I %t -module-name specialize_attr -emit-verbose-sil %s -swift-version 5 | %FileCheck %s -// RUN: %target-swift-emit-sil -I %t -sil-verify-all -O -module-name specialize_attr -emit-verbose-sil %s | %FileCheck -check-prefix=CHECK-OPT -check-prefix=CHECK-OPT-EVO %s +// RUN: %target-swift-frontend -enable-experimental-prespecialization -emit-module -o /dev/null -module-name A -emit-module-interface-path %t/A.swiftinterface -enable-library-evolution -swift-version 5 %S/Inputs/specialize_attr_module.swift +// RUN: %target-swift-frontend -enable-experimental-prespecialization -emit-module -o /dev/null -I %t -module-name B -emit-module-interface-path %t/B.swiftinterface -enable-library-evolution -swift-version 5 %S/Inputs/specialize_attr_module2.swift +// RUN: %target-swift-emit-silgen 
-enable-experimental-prespecialization -I %t -module-name specialize_attr -emit-verbose-sil %s -swift-version 5 | %FileCheck %s +// RUN: %target-swift-emit-sil -enable-experimental-prespecialization -I %t -sil-verify-all -O -module-name specialize_attr -emit-verbose-sil %s | %FileCheck -check-prefix=CHECK-OPT -check-prefix=CHECK-OPT-EVO %s // Test .swiftmodule without library-evolution // RUN: %empty-directory(%t) -// RUN: %target-swift-frontend -module-name A -emit-module-path %t/A.swiftmodule -swift-version 5 %S/Inputs/specialize_attr_module.swift -// RUN: %target-swift-frontend -I %t -module-name B -emit-module-path %t/B.swiftmodule -swift-version 5 %S/Inputs/specialize_attr_module2.swift -// RUN: %target-swift-emit-silgen -I %t -module-name specialize_attr -emit-verbose-sil %s -swift-version 5 | %FileCheck %s -// RUN: %target-swift-emit-sil -I %t -sil-verify-all -O -module-name specialize_attr -emit-verbose-sil %s | %FileCheck -check-prefix=CHECK-OPT -check-prefix=CHECK-OPT-NOEVO %s +// RUN: %target-swift-frontend -enable-experimental-prespecialization -module-name A -emit-module-path %t/A.swiftmodule -swift-version 5 %S/Inputs/specialize_attr_module.swift +// RUN: %target-swift-frontend -enable-experimental-prespecialization -I %t -module-name B -emit-module-path %t/B.swiftmodule -swift-version 5 %S/Inputs/specialize_attr_module2.swift +// RUN: %target-swift-emit-silgen -enable-experimental-prespecialization -I %t -module-name specialize_attr -emit-verbose-sil %s -swift-version 5 | %FileCheck %s +// RUN: %target-swift-emit-sil -enable-experimental-prespecialization -I %t -sil-verify-all -O -module-name specialize_attr -emit-verbose-sil %s | %FileCheck -check-prefix=CHECK-OPT -check-prefix=CHECK-OPT-NOEVO %s import A import B diff --git a/test/SILGen/synthesized_conformance_actor.swift b/test/SILGen/synthesized_conformance_actor.swift index 7abfd2c0fcd8f..c9eac2757037f 100644 --- a/test/SILGen/synthesized_conformance_actor.swift +++ 
b/test/SILGen/synthesized_conformance_actor.swift @@ -24,24 +24,12 @@ func buildIt() { } // A1.enqueue(partialTask:) -// CHECK-LABEL: sil [ossa] @$s29synthesized_conformance_actor2A1C7enqueue11partialTasky12_Concurrency012PartialAsyncG0V_tF : $@convention(method) (@in_guaranteed PartialAsyncTask, @guaranteed A1) -> () { -// CHECK: bb0([[PARTIAL_TASK:%.*]] : $*PartialAsyncTask, [[SELF:%.*]] : @guaranteed $A1): +// CHECK-LABEL: sil [ossa] @$s29synthesized_conformance_actor2A1C7enqueue11partialTasky12_Concurrency012PartialAsyncG0V_tF : $@convention(method) (PartialAsyncTask, @guaranteed A1) -> () { +// CHECK: bb0([[PARTIAL_TASK:%.*]] : $PartialAsyncTask, [[SELF:%.*]] : @guaranteed $A1): // CHECK: [[SELF_COPY:%.*]] = copy_value [[SELF]] : $A1 // CHECK-NEXT: [[SELF_ANY_OBJECT:%.*]] = init_existential_ref [[SELF_COPY]] : $A1 : $A1, $AnyObject -// CHECK-NEXT: [[PROPERTY_REF:%.*]] = ref_element_addr [[SELF]] : $A1, #A1.$__actor_storage -// FIXME: Need to eliminate this exclusivity check. -// CHECK-NEXT: [[DYNAMIC_ACCESS:%.*]] = begin_access [modify] [dynamic] [[PROPERTY_REF]] : $*_NativeActorQueue -// CHECK: [[ENQUEUE_FN:%.*]] = function_ref @$s12_Concurrency36_defaultActorQueueEnqueuePartialTask5actor5queue07partialG0yyXl_AA07_NativecD0VzAA0f5AsyncG0VtF : $@convention(thin) (@guaranteed AnyObject, @inout _NativeActorQueue, @in_guaranteed PartialAsyncTask) -> () -// CHECK-NEXT: apply [[ENQUEUE_FN]]([[SELF_ANY_OBJECT]], [[DYNAMIC_ACCESS]], [[PARTIAL_TASK]]) : $@convention(thin) (@guaranteed AnyObject, @inout _NativeActorQueue, @in_guaranteed PartialAsyncTask) -> () -// CHECK-NEXT: end_access [[DYNAMIC_ACCESS]] : $*_NativeActorQueue - -// variable initialization expression of A1.$__actor_storage -// CHECK-LABEL: sil [transparent] [ossa] @$s29synthesized_conformance_actor2A1C03$__C8_storage33{{.*}}12_Concurrency17_NativeActorQueueVvpfi : $@convention(thin) () -> @out _NativeActorQueue { -// CHECK: bb0([[PROPERTY:%.*]] : $*_NativeActorQueue): -// CHECK-NEXT: [[META:%.*]] = 
metatype $@thick A1.Type -// CHECK-NEXT: [[ERASED_META:%.*]] = init_existential_metatype [[META]] : $@thick A1.Type, $@thick AnyObject.Type -// CHECK: [[INIT_FN:%.*]] = function_ref @$s12_Concurrency24_defaultActorQueueCreateyAA07_NativecD0VyXlXpF : $@convention(thin) (@thick AnyObject.Type) -> @out _NativeActorQueue -// CHECK-NEXT: = apply [[INIT_FN]]([[PROPERTY]], [[ERASED_META]]) : $@convention(thin) (@thick AnyObject.Type) -> @out _NativeActorQueue +// CHECK: [[ENQUEUE_FN:%.*]] = function_ref @swift_defaultActor_enqueue : $@convention(thin) (PartialAsyncTask, @guaranteed AnyObject) -> () +// CHECK-NEXT: apply [[ENQUEUE_FN]]([[PARTIAL_TASK]], [[SELF_ANY_OBJECT]]) : $@convention(thin) (PartialAsyncTask, @guaranteed AnyObject) -> () // Ensure that enqueue(partialTask:) is the first slot in the vtable. // CHECK-LABEL: sil_vtable [serialized] A1 { diff --git a/test/SILGen/unsafevalue.swift b/test/SILGen/unsafevalue.swift index 1062b728810b6..b6e5f081ddbd2 100644 --- a/test/SILGen/unsafevalue.swift +++ b/test/SILGen/unsafevalue.swift @@ -45,7 +45,9 @@ public struct UnsafeValue { // CANONICAL-LABEL: sil [transparent] [serialized] @$s11unsafevalue11UnsafeValueV14unsafelyAssignACyxGxh_tcfC : $@convention(method) (@guaranteed Element, @thin UnsafeValue.Type) -> UnsafeValue { // CANONICAL: bb0([[INPUT_ELEMENT:%.*]] : $Element, // CANONICAL-NEXT: debug_value + // CANONICAL-NEXT: strong_retain [[INPUT_ELEMENT]] // CANONICAL-NEXT: [[UNMANAGED_ELEMENT:%.*]] = ref_to_unmanaged [[INPUT_ELEMENT]] + // CANONICAL-NEXT: strong_release [[INPUT_ELEMENT]] // CANONICAL-NEXT: [[RESULT:%.*]] = struct $UnsafeValue ([[UNMANAGED_ELEMENT]] : $@sil_unmanaged Element) // CANONICAL-NEXT: return [[RESULT]] // CANONICAL: } // end sil function '$s11unsafevalue11UnsafeValueV14unsafelyAssignACyxGxh_tcfC' diff --git a/test/SILOptimizer/Inputs/specialization_and_resilience_module.swift b/test/SILOptimizer/Inputs/specialization_and_resilience_module.swift new file mode 100644 index 
0000000000000..0added27c0646 --- /dev/null +++ b/test/SILOptimizer/Inputs/specialization_and_resilience_module.swift @@ -0,0 +1,23 @@ +public struct Mystruct { + var x: Int + + public init(_ x: Int) { self.x = x } +} + +@inline(never) +@inlinable +public func testParam(_ t: T) { + print(t) +} + +@inline(never) +@inlinable +public func testReturn(_ a: [T]) -> T { + return a[0] +} + +public func otherFunc() { + testParam(Mystruct(27)) + print(testReturn([Mystruct(28)])) +} + diff --git a/test/SILOptimizer/allocbox_to_stack_ownership.sil b/test/SILOptimizer/allocbox_to_stack_ownership.sil index c8112ffd51d1e..c32afdda6cac2 100644 --- a/test/SILOptimizer/allocbox_to_stack_ownership.sil +++ b/test/SILOptimizer/allocbox_to_stack_ownership.sil @@ -680,6 +680,11 @@ bb0(%0 : $Int, %1 : @owned $<τ_0_0 where τ_0_0 : Count> { var S<τ_0_0> } ) %6 = function_ref @get : $@convention(method) <τ_0_0 where τ_0_0 : Count> (@in S<τ_0_0>) -> Int %7 = apply %6(%4) : $@convention(method) <τ_0_0 where τ_0_0 : Count> (@in S<τ_0_0>) -> Int %8 = apply %3(%0, %7) : $@convention(thin) (Int, Int) -> Bool + %9 = copy_value %1 : $<τ_0_0 where τ_0_0 : Count> { var S<τ_0_0> } + %10 = project_box %9 : $<τ_0_0 where τ_0_0 : Count> { var S<τ_0_0> } , 0 + copy_addr %10 to [initialization] %4 : $*S + destroy_value %9 : $<τ_0_0 where τ_0_0 : Count> { var S<τ_0_0> } + destroy_addr %4 : $*S dealloc_stack %4 : $*S destroy_value %1 : $<τ_0_0 where τ_0_0 : Count> { var S<τ_0_0> } // CHECK: return diff --git a/test/SILOptimizer/capturepromotion-wrong-lexicalscope.swift b/test/SILOptimizer/capturepromotion-wrong-lexicalscope.swift index af237eac4c4bd..9cdeef807279d 100644 --- a/test/SILOptimizer/capturepromotion-wrong-lexicalscope.swift +++ b/test/SILOptimizer/capturepromotion-wrong-lexicalscope.swift @@ -19,12 +19,12 @@ // CHECK: destroy_value %7 : ${ var Int }, loc {{.*}}:33:11, scope 3 // CHECK: %13 = partial_apply [callee_guaranteed] %10(%11) : $@convention(thin) (Int) -> Int, loc {{.*}}:33:11, scope 3 // 
CHECK: debug_value %13 : $@callee_guaranteed () -> Int, let, name "f", loc {{.*}}:33:7, scope 3 -// CHECK: %15 = begin_borrow %13 : $@callee_guaranteed () -> Int, loc {{.*}}:34:10, scope 3 -// CHECK: %16 = copy_value %15 : $@callee_guaranteed () -> Int, loc {{.*}}:34:10, scope 3 -// CHECK: end_borrow %15 : $@callee_guaranteed () -> Int +// There used to be a begin_borrow here. We leave an emptyline here to preserve line numbers. +// CHECK: %15 = copy_value %13 : $@callee_guaranteed () -> Int, loc {{.*}}:34:10, scope 3 +// There used to be an end_borrow here. We leave an emptyline here to preserve line numbers. // CHECK: destroy_value %13 : $@callee_guaranteed () -> Int, loc {{.*}}:35:1, scope 3 // CHECK: destroy_value %0 : ${ var Int }, loc {{.*}}:35:1, scope 3 -// CHECK: return %16 : $@callee_guaranteed () -> Int, loc {{.*}}:34:3, scope 3 +// CHECK: return %15 : $@callee_guaranteed () -> Int, loc {{.*}}:34:3, scope 3 // CHECK: } diff --git a/test/SILOptimizer/constantprop-wrongscope.swift b/test/SILOptimizer/constantprop-wrongscope.swift index 92961fc96625e..12f9d62ba7709 100644 --- a/test/SILOptimizer/constantprop-wrongscope.swift +++ b/test/SILOptimizer/constantprop-wrongscope.swift @@ -13,7 +13,7 @@ // instructions surrounding it. 
// CHECK: destroy_addr %7 : $*Any, loc {{.*}}:22:19, scope 2 -// CHECK: dealloc_stack %13 : $*Optional, loc {{.*}}:22:23, scope 2 +// CHECK: dealloc_stack %12 : $*Optional, loc {{.*}}:22:23, scope 2 // CHECK: dealloc_stack %7 : $*Any, loc {{.*}}:22:23, scope 2 // CHECK: dealloc_stack %6 : $*A, loc {{.*}}:22:7, scope 2 diff --git a/test/SILOptimizer/definite_init_failable_initializers.swift b/test/SILOptimizer/definite_init_failable_initializers.swift index 2b451bb83313e..6aa36614b9a76 100644 --- a/test/SILOptimizer/definite_init_failable_initializers.swift +++ b/test/SILOptimizer/definite_init_failable_initializers.swift @@ -86,10 +86,10 @@ struct FailableStruct { return nil } -// CHECK-LABEL: sil hidden @$s35definite_init_failable_initializers14FailableStructV46failAfterWholeObjectInitializationByAssignmentACSgyt_tcfC +// CHECK-LABEL: sil hidden @$s35definite_init_failable_initializers14FailableStructV46failAfterWholeObjectInitializationByAssignmentACSgyt_tcfC : // CHECK: bb0 // CHECK: [[SELF_BOX:%.*]] = alloc_stack $FailableStruct -// CHECK: [[CANARY]] = apply +// CHECK: [[CANARY:%.*]] = apply // CHECK-NEXT: [[WRITE:%.*]] = begin_access [modify] [static] [[SELF_BOX]] : $*FailableStruct // CHECK-NEXT: store [[CANARY]] to [[WRITE]] // CHECK-NEXT: end_access [[WRITE]] : $*FailableStruct @@ -566,12 +566,11 @@ struct ThrowStruct { self = ThrowStruct(noFail: ()) } -// CHECK-LABEL: sil hidden @$s35definite_init_failable_initializers11ThrowStructV25failDuringSelfReplacementACSi_tKcfC +// CHECK-LABEL: sil hidden @$s35definite_init_failable_initializers11ThrowStructV25failDuringSelfReplacementACSi_tKcfC : // CHECK: bb0(%0 : $Int, %1 : $@thin ThrowStruct.Type): // CHECK-NEXT: [[SELF_BOX:%.*]] = alloc_stack $ThrowStruct -// CHECK: [[SELF_TYPE:%.*]] = metatype $@thin ThrowStruct.Type // CHECK: [[INIT_FN:%.*]] = function_ref @$s35definite_init_failable_initializers11ThrowStructV4failACyt_tKcfC -// CHECK-NEXT: try_apply [[INIT_FN]]([[SELF_TYPE]]) +// CHECK-NEXT: try_apply 
[[INIT_FN]](%1) // CHECK: bb1([[NEW_SELF:%.*]] : $ThrowStruct): // CHECK-NEXT: [[WRITE:%.*]] = begin_access [modify] [static] [[SELF_BOX]] : $*ThrowStruct // CHECK-NEXT: retain_value [[NEW_SELF]] @@ -590,9 +589,8 @@ struct ThrowStruct { // CHECK-LABEL: sil hidden @$s35definite_init_failable_initializers11ThrowStructV24failAfterSelfReplacementACSi_tKcfC // CHECK: bb0(%0 : $Int, %1 : $@thin ThrowStruct.Type): // CHECK-NEXT: [[SELF_BOX:%.*]] = alloc_stack $ThrowStruct -// CHECK: [[SELF_TYPE:%.*]] = metatype $@thin ThrowStruct.Type // CHECK: [[INIT_FN:%.*]] = function_ref @$s35definite_init_failable_initializers11ThrowStructV6noFailACyt_tcfC -// CHECK-NEXT: [[NEW_SELF:%.*]] = apply [[INIT_FN]]([[SELF_TYPE]]) +// CHECK-NEXT: [[NEW_SELF:%.*]] = apply [[INIT_FN]](%1) // CHECK-NEXT: [[WRITE:%.*]] = begin_access [modify] [static] [[SELF_BOX]] : $*ThrowStruct // CHECK-NEXT: retain_value [[NEW_SELF]] // CHECK-NEXT: store [[NEW_SELF]] to [[WRITE]] diff --git a/test/SILOptimizer/definite_init_value_types.swift b/test/SILOptimizer/definite_init_value_types.swift index dc5538d43930f..52fb36b567d28 100644 --- a/test/SILOptimizer/definite_init_value_types.swift +++ b/test/SILOptimizer/definite_init_value_types.swift @@ -35,7 +35,6 @@ enum ValueEnum { // CHECK: [[BOOL:%.*]] = struct_extract %0 : $Bool, #Bool._value // CHECK-NEXT: cond_br [[BOOL]], bb1, bb2 // CHECK: bb1: - // CHECK-NEXT: [[METATYPE:%.*]] = metatype $@thin ValueEnum.Type // CHECK-NEXT: [[NEW_SELF:%.*]] = enum $ValueEnum, #ValueEnum.b!enumelt // CHECK-NEXT: [[SELF_ACCESS:%.*]] = begin_access [modify] [static] [[SELF_BOX]] // CHECK-NEXT: [[NEW_STATE:%.*]] = integer_literal $Builtin.Int1, -1 @@ -46,7 +45,6 @@ enum ValueEnum { // CHECK: bb2: // CHECK-NEXT: br bb3 // CHECK: bb3: - // CHECK-NEXT: [[METATYPE:%.*]] = metatype $@thin ValueEnum.Type // CHECK-NEXT: [[NEW_SELF:%.*]] = enum $ValueEnum, #ValueEnum.c!enumelt // CHECK-NEXT: [[SELF_ACCESS:%.*]] = begin_access [modify] [static] [[SELF_BOX]] // CHECK-NEXT: 
[[STATE_VALUE:%.*]] = load [[STATE]] diff --git a/test/SILOptimizer/eager_specialize.sil b/test/SILOptimizer/eager_specialize.sil index 4cf6e094999b3..dfd32399a9f35 100644 --- a/test/SILOptimizer/eager_specialize.sil +++ b/test/SILOptimizer/eager_specialize.sil @@ -1,6 +1,6 @@ -// RUN: %target-sil-opt -enable-sil-verify-all -eager-specializer %s | %FileCheck %s -// RUN: %target-sil-opt -enable-sil-verify-all -eager-specializer %s -o %t.sil && %target-swift-frontend -module-name=eager_specialize -emit-ir %t.sil | %FileCheck --check-prefix=CHECK-IRGEN --check-prefix=CHECK-IRGEN-%target-cpu %s -// RUN: %target-sil-opt -enable-sil-verify-all -eager-specializer -sil-inline-generics=true -inline %s | %FileCheck --check-prefix=CHECK-EAGER-SPECIALIZE-AND-GENERICS-INLINE %s +// RUN: %target-sil-opt -enable-experimental-prespecialization -enable-sil-verify-all -eager-specializer %s | %FileCheck %s +// RUN: %target-sil-opt -enable-experimental-prespecialization -enable-sil-verify-all -eager-specializer %s -o %t.sil && %target-swift-frontend -enable-experimental-prespecialization -module-name=eager_specialize -emit-ir %t.sil | %FileCheck --check-prefix=CHECK-IRGEN --check-prefix=CHECK-IRGEN-%target-cpu %s +// RUN: %target-sil-opt -enable-experimental-prespecialization -enable-sil-verify-all -eager-specializer -sil-inline-generics=true -inline %s | %FileCheck --check-prefix=CHECK-EAGER-SPECIALIZE-AND-GENERICS-INLINE %s sil_stage canonical diff --git a/test/SILOptimizer/eager_specialize_ossa.sil b/test/SILOptimizer/eager_specialize_ossa.sil index 4f93864d845b1..c2e2b4b86bd82 100644 --- a/test/SILOptimizer/eager_specialize_ossa.sil +++ b/test/SILOptimizer/eager_specialize_ossa.sil @@ -1,6 +1,6 @@ -// RUN: %target-sil-opt -enable-sil-verify-all -eager-specializer %s | %FileCheck %s -// RUN: %target-sil-opt -enable-sil-verify-all -eager-specializer %s -o %t.sil && %target-swift-frontend -module-name=eager_specialize -emit-ir %t.sil | %FileCheck --check-prefix=CHECK-IRGEN 
--check-prefix=CHECK-IRGEN-%target-cpu %s -// RUN: %target-sil-opt -enable-sil-verify-all -eager-specializer -sil-inline-generics=true -inline %s | %FileCheck --check-prefix=CHECK-EAGER-SPECIALIZE-AND-GENERICS-INLINE %s +// RUN: %target-sil-opt -enable-experimental-prespecialization -enable-sil-verify-all -eager-specializer %s | %FileCheck %s +// RUN: %target-sil-opt -enable-experimental-prespecialization -enable-sil-verify-all -eager-specializer %s -o %t.sil && %target-swift-frontend -enable-experimental-prespecialization -module-name=eager_specialize -emit-ir %t.sil | %FileCheck --check-prefix=CHECK-IRGEN --check-prefix=CHECK-IRGEN-%target-cpu %s +// RUN: %target-sil-opt -enable-experimental-prespecialization -enable-sil-verify-all -eager-specializer -sil-inline-generics=true -inline %s | %FileCheck --check-prefix=CHECK-EAGER-SPECIALIZE-AND-GENERICS-INLINE %s // rdar://problem/65373647 // UNSUPPORTED: CPU=arm64e diff --git a/test/SILOptimizer/mandatory_combine_canon.sil b/test/SILOptimizer/mandatory_combine_canon.sil new file mode 100644 index 0000000000000..5994101e9a498 --- /dev/null +++ b/test/SILOptimizer/mandatory_combine_canon.sil @@ -0,0 +1,106 @@ +// RUN: %target-sil-opt -enable-sil-verify-all -mandatory-combine -sil-mandatory-combine-enable-canon-and-simple-dce %s | %FileCheck %s + +sil_stage canonical + +import Builtin + +// Trivial declarations + +struct MyInt { + var value: Builtin.Int64 +} + +// Generic declarations + +protocol Addable { + static var an: Self { get } +} + +// Class declarations + +class Klass { + init() + deinit +} + +class SubKlass : Klass {} + +sil @use_klass_unowned : $@convention(thin) (Klass) -> () + +// Existential declarations + +protocol Proto { + static var an: Proto { get } +} + +// Trivial support + +sil @first_of_three_ints : $@convention(thin) (MyInt, MyInt, MyInt) -> MyInt + +sil @constant_zero : $@convention(thin) () -> MyInt + +sil @identity_int : $@convention(thin) (MyInt) -> MyInt + +// Generic support + +sil 
@first_of_three_addables : $@convention(thin) (@in_guaranteed A, @guaranteed <τ_0_0 where τ_0_0 : Addable> { var τ_0_0 } , @guaranteed <τ_0_0 where τ_0_0 : Addable> { var τ_0_0 } ) -> @ +out A + +// Class support + +sil [exact_self_class] @klass_alloc_init : $@convention(method) (@thick Klass.Type) -> @owned Klass + +// Klass.init() +sil @klass_init : $@convention(method) (@owned Klass) -> @owned Klass +// Klass.deinit +sil @klass_deinit : $@convention(method) (@guaranteed Klass) -> @owned Builtin.NativeObject + +// Klass.__deallocating_deinit +sil @klass_dealloc_deinit : $@convention(method) (@owned Klass) -> () + +sil_vtable Klass { + #Klass.init!allocator: (Klass.Type) -> () -> Klass : @klass_alloc_init + #Klass.deinit!deallocator: @klass_dealloc_deinit +} + +sil @first_of_three_klasses : $@convention(thin) (@guaranteed Klass, @guaranteed Klass, @guaranteed Klass) -> @owned Klass + +sil @use_klass_guaranteed : $@convention(thin) (@guaranteed Klass) -> () + +// Existential support + +sil @first_of_three_protos : $@convention(thin) (@in_guaranteed Proto, @guaranteed { var Proto }, @guaranteed { var Proto }) -> @out Proto + +sil @get_proto : $@convention(thin) () -> @out Proto + +// Mixed support + +sil @proto_from_proto_and_myint : $@convention(thin) (@in_guaranteed Proto, MyInt) -> @out Proto + +sil @myint_from_myint_and_proto : $@convention(thin) (MyInt, @guaranteed { var Proto }) -> MyInt + +sil @myint_from_proto_and_myint : $@convention(thin) (@guaranteed { var Proto }, MyInt) -> MyInt + +// Enum support + +enum FakeOptional { +case none +case some(T) +} + +sil @use_fakeoptional_klass_guaranteed : $@convention(thin) (@guaranteed FakeOptional) -> () + +/////////// +// Tests // +/////////// + +// CHECK-LABEL: sil [ossa] @testUnneededDestroyOfForwardingInst : $@convention(thin) (@owned Klass) -> () { +// CHECK-NOT: unchecked_ref_cast +// CHECK: } // end sil function 'testUnneededDestroyOfForwardingInst' +sil [ossa] @testUnneededDestroyOfForwardingInst : 
$@convention(thin) (@owned Klass) -> () { +bb0(%0 : @owned $Klass): + %1 = unchecked_ref_cast %0 : $Klass to $Builtin.NativeObject + destroy_value %1 : $Builtin.NativeObject + %9999 = tuple() + return %9999 : $() +} \ No newline at end of file diff --git a/test/SILOptimizer/mandatory_combiner.sil b/test/SILOptimizer/mandatory_combiner.sil index 3a150310f9712..487cdeda26307 100644 --- a/test/SILOptimizer/mandatory_combiner.sil +++ b/test/SILOptimizer/mandatory_combiner.sil @@ -75,6 +75,12 @@ sil @myint_from_myint_and_proto : $@convention(thin) (MyInt, @guaranteed { var P sil @myint_from_proto_and_myint : $@convention(thin) (@guaranteed { var Proto }, MyInt) -> MyInt +// Optional support +enum FakeOptional { +case none +case some(T) +} + /////////// // Tests // /////////// diff --git a/test/SILOptimizer/mandatory_combiner_opt.sil b/test/SILOptimizer/mandatory_combiner_opt.sil new file mode 100644 index 0000000000000..5f09e2674d652 --- /dev/null +++ b/test/SILOptimizer/mandatory_combiner_opt.sil @@ -0,0 +1,108 @@ +// RUN: %target-sil-opt -mandatory-combine -sil-mandatory-combine-enable-canon-and-simple-dce %s | %FileCheck %s + +// Tests for when the mandatory combiner is running with optimizations +// enabled. Only put tests here for functionality that only occurs when the +// Mandatory Combiner runs in between the diagnostics/perf pipeline at -O, +// -Osize. 
+ +sil_stage canonical + +import Builtin + +// Trivial declarations + +struct MyInt { + var value: Builtin.Int64 +} + +// Generic declarations + +protocol Addable { + static var an: Self { get } +} + +// Class declarations + +class Klass { + init() + deinit +} + +// Existential declarations + +protocol Proto { + static var an: Proto { get } +} + +// Trivial support + +sil @first_of_three_ints : $@convention(thin) (MyInt, MyInt, MyInt) -> MyInt + +sil @constant_zero : $@convention(thin) () -> MyInt + +sil @identity_int : $@convention(thin) (MyInt) -> MyInt + +// Generic support + +sil @first_of_three_addables : $@convention(thin) (@in_guaranteed A, @guaranteed <τ_0_0 where τ_0_0 : Addable> { var τ_0_0 } , @guaranteed <τ_0_0 where τ_0_0 : Addable> { var τ_0_0 } ) -> @ +out A + +// Class support + +sil [exact_self_class] @klass_alloc_init : $@convention(method) (@thick Klass.Type) -> @owned Klass + +// Klass.init() +sil @klass_init : $@convention(method) (@owned Klass) -> @owned Klass +// Klass.deinit +sil @klass_deinit : $@convention(method) (@guaranteed Klass) -> @owned Builtin.NativeObject + +// Klass.__deallocating_deinit +sil @klass_dealloc_deinit : $@convention(method) (@owned Klass) -> () + +sil_vtable Klass { + #Klass.init!allocator: (Klass.Type) -> () -> Klass : @klass_alloc_init + #Klass.deinit!deallocator: @klass_dealloc_deinit +} + +sil @first_of_three_klasses : $@convention(thin) (@guaranteed Klass, @guaranteed Klass, @guaranteed Klass) -> @owned Klass + +// Existential support + +sil @first_of_three_protos : $@convention(thin) (@in_guaranteed Proto, @guaranteed { var Proto }, @guaranteed { var Proto }) -> @out Proto + +sil @get_proto : $@convention(thin) () -> @out Proto + +// Mixed support + +sil @proto_from_proto_and_myint : $@convention(thin) (@in_guaranteed Proto, MyInt) -> @out Proto + +sil @myint_from_myint_and_proto : $@convention(thin) (MyInt, @guaranteed { var Proto }) -> MyInt + +sil @myint_from_proto_and_myint : $@convention(thin) (@guaranteed 
{ var Proto }, MyInt) -> MyInt + +// Optional support +enum FakeOptional { +case none +case some(T) +} + +/////////// +// Tests // +/////////// + + +// CHECK-LABEL: sil [ossa] @eliminate_simple_arc_traffic : $@convention(thin) (@guaranteed Klass) -> () { +// CHECK-NOT: copy_value +// CHECK-NOT: destroy_value +// CHECK-NOT: enum +// CHECK-NOT: end_borrow +// CHECK: } // end sil function 'eliminate_simple_arc_traffic' +sil [ossa] @eliminate_simple_arc_traffic : $@convention(thin) (@guaranteed Klass) -> () { +bb0(%0 : @guaranteed $Klass): + %1 = copy_value %0 : $Klass + destroy_value %1 : $Klass + %2 = enum $FakeOptional, #FakeOptional.none!enumelt + end_borrow %2 : $FakeOptional + %9999 = tuple() + return %9999 : $() +} diff --git a/test/SILOptimizer/mem2reg_resilient.sil b/test/SILOptimizer/mem2reg_resilient.sil index a86a4575ef59f..6df88cc0b7b4d 100644 --- a/test/SILOptimizer/mem2reg_resilient.sil +++ b/test/SILOptimizer/mem2reg_resilient.sil @@ -8,13 +8,15 @@ public struct ResilientStruct { var x: AnyObject } -// CHECK-LABEL: sil @mem2reg_debug_value_addr +// CHECK-LABEL: sil @mem2reg_debug_value_addr : // CHECK: bb0(%0 : $*ResilientStruct): -// CHECK-NEXT: %1 = load %0 : $*ResilientStruct -// CHECK-NEXT: debug_value %1 : $ResilientStruct -// CHECK-NEXT: %3 = tuple () -// CHECK-NEXT: return %3 : $() - +// CHECK-NEXT: %1 = load %0 +// CHECK-NEXT: retain_value %1 +// CHECK-NEXT: debug_value %1 +// CHECK-NEXT: release_value %1 +// CHECK-NEXT: tuple () +// CHECK-NEXT: return {{%.*}} : $() +// CHECK: } // end sil function 'mem2reg_debug_value_addr' sil @mem2reg_debug_value_addr : $@convention(thin) (@in_guaranteed ResilientStruct) -> () { bb0(%0 : $*ResilientStruct): %1 = alloc_stack $ResilientStruct diff --git a/test/SILOptimizer/optionset.swift b/test/SILOptimizer/optionset.swift index 4890c8bb5ca5a..4dc97f3f39dd6 100644 --- a/test/SILOptimizer/optionset.swift +++ b/test/SILOptimizer/optionset.swift @@ -23,7 +23,7 @@ let globalTestOptions: TestOptions = [.first, 
.second, .third, .fourth] // CHECK-NEXT: builtin // CHECK-NEXT: integer_literal {{.*}}, 15 // CHECK-NEXT: struct $Int -// CHECK-NEXT: struct $TestOptions +// CHECK: struct $TestOptions // CHECK-NEXT: return public func returnTestOptions() -> TestOptions { return [.first, .second, .third, .fourth] @@ -32,8 +32,8 @@ public func returnTestOptions() -> TestOptions { // CHECK-LABEL: sil @{{.*}}returnEmptyTestOptions{{.*}} // CHECK-NEXT: bb0: // CHECK-NEXT: integer_literal {{.*}}, 0 -// CHECK-NEXT: builtin "onFastPath"() : $() // CHECK-NEXT: struct $Int +// CHECK: builtin "onFastPath"() : $() // CHECK-NEXT: struct $TestOptions // CHECK-NEXT: return public func returnEmptyTestOptions() -> TestOptions { diff --git a/test/SILOptimizer/ossa_rauw_tests.sil b/test/SILOptimizer/ossa_rauw_tests.sil new file mode 100644 index 0000000000000..d1ad2212f99ba --- /dev/null +++ b/test/SILOptimizer/ossa_rauw_tests.sil @@ -0,0 +1,671 @@ +// RUN: %target-sil-opt -enable-sil-verify-all -mandatory-combine -sil-mandatory-combine-enable-canon-and-simple-dce -semantic-arc-opts %s | %FileCheck %s + +// Make sure that we can perform all of these RAUW without producing ARC traffic +// that semantic arc opts can't eliminate. 
+ +sil_stage canonical + +import Builtin + +// Trivial declarations + +struct MyInt { + var value: Builtin.Int64 +} + +// Generic declarations + +protocol Addable { + static var an: Self { get } +} + +// Class declarations + +class Klass { + init() + deinit +} + +class SubKlass : Klass {} + +sil @use_klass_unowned : $@convention(thin) (Klass) -> () + +// Existential declarations + +protocol Proto { + static var an: Proto { get } +} + +// Trivial support + +sil @first_of_three_ints : $@convention(thin) (MyInt, MyInt, MyInt) -> MyInt + +sil @constant_zero : $@convention(thin) () -> MyInt + +sil @identity_int : $@convention(thin) (MyInt) -> MyInt + +// Generic support + +sil @first_of_three_addables : $@convention(thin) (@in_guaranteed A, @guaranteed <τ_0_0 where τ_0_0 : Addable> { var τ_0_0 } , @guaranteed <τ_0_0 where τ_0_0 : Addable> { var τ_0_0 } ) -> @ +out A + +// Class support + +sil [exact_self_class] @klass_alloc_init : $@convention(method) (@thick Klass.Type) -> @owned Klass + +// Klass.init() +sil @klass_init : $@convention(method) (@owned Klass) -> @owned Klass +// Klass.deinit +sil @klass_deinit : $@convention(method) (@guaranteed Klass) -> @owned Builtin.NativeObject + +// Klass.__deallocating_deinit +sil @klass_dealloc_deinit : $@convention(method) (@owned Klass) -> () + +sil_vtable Klass { + #Klass.init!allocator: (Klass.Type) -> () -> Klass : @klass_alloc_init + #Klass.deinit!deallocator: @klass_dealloc_deinit +} + +sil @first_of_three_klasses : $@convention(thin) (@guaranteed Klass, @guaranteed Klass, @guaranteed Klass) -> @owned Klass + +sil @use_klass_guaranteed : $@convention(thin) (@guaranteed Klass) -> () + +// Existential support + +sil @first_of_three_protos : $@convention(thin) (@in_guaranteed Proto, @guaranteed { var Proto }, @guaranteed { var Proto }) -> @out Proto + +sil @get_proto : $@convention(thin) () -> @out Proto + +// Mixed support + +sil @proto_from_proto_and_myint : $@convention(thin) (@in_guaranteed Proto, MyInt) -> @out Proto + 
+sil @myint_from_myint_and_proto : $@convention(thin) (MyInt, @guaranteed { var Proto }) -> MyInt + +sil @myint_from_proto_and_myint : $@convention(thin) (@guaranteed { var Proto }, MyInt) -> MyInt + +// Enum support + +enum FakeOptional { +case none +case some(T) +} + +sil @use_fakeoptional_klass_guaranteed : $@convention(thin) (@guaranteed FakeOptional) -> () + +/////////// +// Tests // +/////////// + +//===--- +// None Tests +// + +// CHECK-LABEL: sil [ossa] @none_to_none_rauw : $@convention(thin) (MyInt) -> MyInt { +// CHECK: bb0 +// CHECK: return %0 +// CHECK: } // end sil function 'none_to_none_rauw' +sil [ossa] @none_to_none_rauw : $@convention(thin) (MyInt) -> MyInt { +bb0(%0 : $MyInt): + %1 = unchecked_bitwise_cast %0 : $MyInt to $Builtin.Int64 + %2 = unchecked_bitwise_cast %1 : $Builtin.Int64 to $MyInt + return %2 : $MyInt +} + +// We do not support replacing .none with non-trivial ownerships. This can only +// occur with enum cases without payloads or with trivial payload. That requires +// more infrastructure than we have currently. + +//===--- +// Owned Tests +// + +// CHECK-LABEL: sil [ossa] @owned_to_owned_rauw : $@convention(thin) (@owned Klass) -> @owned Klass { +// CHECK: bb0( +// CHECK-NEXT: return +// CHECK: } // end sil function 'owned_to_owned_rauw' +sil [ossa] @owned_to_owned_rauw : $@convention(thin) (@owned Klass) -> @owned Klass { +bb0(%0 : @owned $Klass): + %1 = unchecked_ref_cast %0 : $Klass to $SubKlass + %2 = upcast %1 : $SubKlass to $Klass + return %2 : $Klass +} + +// We get ARC traffic here today since we do not get rid of PhiArguments kept +// alive only by destroys/end_borrows. We will eventually though. 
+// +// CHECK-LABEL: sil [ossa] @owned_to_owned_consuming : $@convention(thin) (@owned FakeOptional) -> () { +// CHECK: copy_value +// CHECK-NOT: enum $FakeOptional, #FakeOptional.some!enumelt +// CHECK: } // end sil function 'owned_to_owned_consuming' +sil [ossa] @owned_to_owned_consuming : $@convention(thin) (@owned FakeOptional) -> () { +bb0(%0 : @owned $FakeOptional): + switch_enum %0 : $FakeOptional, case #FakeOptional.some: bb1, case #FakeOptional.none: bb2 + +bb1(%0a : @owned $Klass): + %1 = enum $FakeOptional, #FakeOptional.some!enumelt, %0a : $Klass + br bb3(%1 : $FakeOptional) + +bb2: + %3 = enum $FakeOptional, #FakeOptional.none!enumelt + br bb3(%3 : $FakeOptional) + +bb3(%4 : @owned $FakeOptional): + destroy_value %4 : $FakeOptional + %9999 = tuple() + return %9999 : $() +} + +//===--- +// Unowned Tests +// + +// CHECK-LABEL: sil [ossa] @unowned_to_owned_rauw : $@convention(thin) (@owned Klass) -> @owned Klass { +// CHECK: bb0( +// CHECK-NEXT: return +// CHECK: } // end sil function 'unowned_to_owned_rauw' +sil [ossa] @unowned_to_owned_rauw : $@convention(thin) (@owned Klass) -> @owned Klass { +bb0(%0 : @owned $Klass): + %1 = unchecked_bitwise_cast %0 : $Klass to $SubKlass + %2 = unchecked_bitwise_cast %1 : $SubKlass to $Klass + %3 = copy_value %2 : $Klass + destroy_value %0 : $Klass + return %3 : $Klass +} + +// CHECK-LABEL: sil [ossa] @unowned_to_owned_rauw_loop : $@convention(thin) (@owned Klass) -> @owned FakeOptional { +// CHECK: bb0([[ARG:%.*]] : @owned $Klass): +// CHECK-NOT: unchecked_bitwise_cast +// CHECK-NOT: copy_value +// CHECK-NOT: destroy_value +// +// CHECK: bb2: +// CHECK-NEXT: [[COPY:%.*]] = copy_value [[ARG]] +// CHECK-NEXT: cond_br undef, bb3, bb4 +// +// CHECK: bb3: +// CHECK-NEXT: destroy_value [[COPY]] +// CHECK-NEXT: br bb2 +// +// CHECK: bb4: +// CHECK-NEXT: [[ENUM_SOME_RESULT:%.*]] = enum $FakeOptional, #FakeOptional.some!enumelt, [[COPY]] +// CHECK-NEXT: br bb6([[ENUM_SOME_RESULT]] : $FakeOptional) +// +// CHECK: bb5: +// 
CHECK-NEXT: [[ENUM_NONE_RESULT:%.*]] = enum $FakeOptional, #FakeOptional.none!enumelt +// CHECK-NEXT: br bb6([[ENUM_NONE_RESULT]] : +// +// CHECK: bb6([[RESULT:%.*]] : @owned $FakeOptional): +// CHECK-NEXT: destroy_value [[ARG]] +// CHECK-NEXT: return [[RESULT]] +// CHECK: } // end sil function 'unowned_to_owned_rauw_loop' +sil [ossa] @unowned_to_owned_rauw_loop : $@convention(thin) (@owned Klass) -> @owned FakeOptional { +bb0(%0 : @owned $Klass): + cond_br undef, bbLoopPreHeader, bbEarlyExit + +bbLoopPreHeader: + br bbLoopHeader + +bbLoopHeader: + %1 = unchecked_bitwise_cast %0 : $Klass to $SubKlass + %2 = unchecked_bitwise_cast %1 : $SubKlass to $Klass + %3 = copy_value %2 : $Klass + cond_br undef, bbBackEdge, bbExitingBlock + +bbBackEdge: + destroy_value %3 : $Klass + br bbLoopHeader + +bbExitingBlock: + %4 = enum $FakeOptional, #FakeOptional.some!enumelt, %3 : $Klass + br bbExitBlock(%4 : $FakeOptional) + +bbEarlyExit: + %5 = enum $FakeOptional, #FakeOptional.none!enumelt + br bbExitBlock(%5 : $FakeOptional) + +bbExitBlock(%result : @owned $FakeOptional): + destroy_value %0 : $Klass + return %result : $FakeOptional +} + +// CHECK-LABEL: sil [ossa] @unowned_to_guaranteed_rauw : $@convention(thin) (@guaranteed Klass) -> @owned Klass { +// CHECK: bb0( +// CHECK-NEXT: copy_value +// CHECK-NEXT: return +// CHECK: } // end sil function 'unowned_to_guaranteed_rauw' +sil [ossa] @unowned_to_guaranteed_rauw : $@convention(thin) (@guaranteed Klass) -> @owned Klass { +bb0(%0 : @guaranteed $Klass): + %1 = unchecked_bitwise_cast %0 : $Klass to $SubKlass + %2 = unchecked_bitwise_cast %1 : $SubKlass to $Klass + %3 = copy_value %2 : $Klass + return %3 : $Klass +} + +// CHECK-LABEL: sil [ossa] @unowned_to_guaranteed_rauw_loop : $@convention(thin) (@guaranteed Klass) -> @owned FakeOptional { +// CHECK: bb0([[ARG:%.*]] : @guaranteed $Klass): +// CHECK-NOT: unchecked_bitwise_cast +// CHECK-NOT: copy_value +// CHECK-NOT: destroy_value +// +// CHECK: bb2: +// CHECK-NEXT: [[COPY:%.*]] 
= copy_value [[ARG]] +// CHECK-NEXT: cond_br undef, bb3, bb4 +// +// CHECK: bb3: +// CHECK-NEXT: destroy_value [[COPY]] +// CHECK-NEXT: br bb2 +// +// CHECK: bb4: +// CHECK-NEXT: [[ENUM_SOME_RESULT:%.*]] = enum $FakeOptional, #FakeOptional.some!enumelt, [[COPY]] +// CHECK-NEXT: br bb6([[ENUM_SOME_RESULT]] : $FakeOptional) +// +// CHECK: bb5: +// CHECK-NEXT: [[ENUM_NONE_RESULT:%.*]] = enum $FakeOptional, #FakeOptional.none!enumelt +// CHECK-NEXT: br bb6([[ENUM_NONE_RESULT]] : +// +// CHECK: bb6([[RESULT:%.*]] : @owned $FakeOptional): +// CHECK-NEXT: return [[RESULT]] +// CHECK: } // end sil function 'unowned_to_guaranteed_rauw_loop' +sil [ossa] @unowned_to_guaranteed_rauw_loop : $@convention(thin) (@guaranteed Klass) -> @owned FakeOptional { +bb0(%0 : @guaranteed $Klass): + cond_br undef, bbLoopPreHeader, bbEarlyExit + +bbLoopPreHeader: + br bbLoopHeader + +bbLoopHeader: + %1 = unchecked_bitwise_cast %0 : $Klass to $SubKlass + %2 = unchecked_bitwise_cast %1 : $SubKlass to $Klass + %3 = copy_value %2 : $Klass + cond_br undef, bbBackEdge, bbExitingBlock + +bbBackEdge: + destroy_value %3 : $Klass + br bbLoopHeader + +bbExitingBlock: + %4 = enum $FakeOptional, #FakeOptional.some!enumelt, %3 : $Klass + br bbExitBlock(%4 : $FakeOptional) + +bbEarlyExit: + %5 = enum $FakeOptional, #FakeOptional.none!enumelt + br bbExitBlock(%5 : $FakeOptional) + +bbExitBlock(%result : @owned $FakeOptional): + return %result : $FakeOptional +} + +// CHECK-LABEL: sil [ossa] @unowned_to_guaranteed_rauw_2 : $@convention(thin) (@guaranteed Klass) -> (Klass, Klass) { +// CHECK: bb0( +// CHECK-NEXT: unchecked_ownership_conversion +// CHECK-NEXT: tuple +// CHECK-NEXT: return +// CHECK: } // end sil function 'unowned_to_guaranteed_rauw_2' +sil [ossa] @unowned_to_guaranteed_rauw_2 : $@convention(thin) (@guaranteed Klass) -> (Klass, Klass) { +bb0(%0 : @guaranteed $Klass): + %1 = unchecked_bitwise_cast %0 : $Klass to $SubKlass + %2 = unchecked_bitwise_cast %1 : $SubKlass to $Klass + %3 = 
tuple(%2 : $Klass, %2 : $Klass) + return %3 : $(Klass, Klass) +} + +// CHECK-LABEL: sil [ossa] @unowned_to_guaranteed_rauw_2_loop : $@convention(thin) (@guaranteed Klass) -> @owned FakeOptional<(Klass, Klass)> { +// CHECK: bb0([[ARG:%.*]] : @guaranteed $Klass): +// CHECK-NOT: unchecked_bitwise_cast +// CHECK-NOT: copy_value +// CHECK-NOT: destroy_value +// +// CHECK: bb2: +// CHECK-NEXT: [[ARG_CONVERT:%.*]] = unchecked_ownership_conversion [[ARG]] +// CHECK-NEXT: [[TUP:%.*]] = tuple ([[ARG_CONVERT]] : $Klass, [[ARG_CONVERT]] : $Klass) +// CHECK-NEXT: [[COPY:%.*]] = copy_value [[TUP]] +// CHECK-NEXT: cond_br undef, bb3, bb4 +// +// CHECK: bb3: +// CHECK-NEXT: destroy_value [[COPY]] +// CHECK-NEXT: br bb2 +// +// CHECK: bb4: +// CHECK-NEXT: [[ENUM_SOME_RESULT:%.*]] = enum $FakeOptional<{{.*}}>, #FakeOptional.some!enumelt, [[COPY]] +// CHECK-NEXT: br bb6([[ENUM_SOME_RESULT]] : $FakeOptional<{{.*}}>) +// +// CHECK: bb5: +// CHECK-NEXT: [[ENUM_NONE_RESULT:%.*]] = enum $FakeOptional<{{.*}}>, #FakeOptional.none!enumelt +// CHECK-NEXT: br bb6([[ENUM_NONE_RESULT]] : +// +// CHECK: bb6([[RESULT:%.*]] : @owned $FakeOptional<{{.*}}>): +// CHECK-NEXT: return [[RESULT]] +// CHECK: } // end sil function 'unowned_to_guaranteed_rauw_2_loop' +sil [ossa] @unowned_to_guaranteed_rauw_2_loop : $@convention(thin) (@guaranteed Klass) -> @owned FakeOptional<(Klass, Klass)> { +bb0(%0 : @guaranteed $Klass): + cond_br undef, bbLoopPreHeader, bbEarlyExit + +bbLoopPreHeader: + br bbLoopHeader + +bbLoopHeader: + %1 = unchecked_bitwise_cast %0 : $Klass to $SubKlass + %2 = unchecked_bitwise_cast %1 : $SubKlass to $Klass + %3 = tuple(%2 : $Klass, %2 : $Klass) + %4 = copy_value %3 : $(Klass, Klass) + cond_br undef, bbBackEdge, bbExitingBlock + +bbBackEdge: + destroy_value %4 : $(Klass, Klass) + br bbLoopHeader + +bbExitingBlock: + %5 = enum $FakeOptional<(Klass, Klass)>, #FakeOptional.some!enumelt, %4 : $(Klass, Klass) + br bbExitBlock(%5 : $FakeOptional<(Klass, Klass)>) + +bbEarlyExit: + %6 = enum 
$FakeOptional<(Klass, Klass)>, #FakeOptional.none!enumelt + br bbExitBlock(%6 : $FakeOptional<(Klass, Klass)>) + +bbExitBlock(%result : @owned $FakeOptional<(Klass, Klass)>): + return %result : $FakeOptional<(Klass, Klass)> +} + +// CHECK-LABEL: sil [ossa] @unowned_to_guaranteed_rauw_3 : $@convention(thin) (@guaranteed Klass) -> Klass { +// CHECK: bb0( +// CHECK-NEXT: unchecked_ownership_conversion +// CHECK-NEXT: return +// CHECK: } // end sil function 'unowned_to_guaranteed_rauw_3' +sil [ossa] @unowned_to_guaranteed_rauw_3 : $@convention(thin) (@guaranteed Klass) -> Klass { +bb0(%0 : @guaranteed $Klass): + %1 = unchecked_bitwise_cast %0 : $Klass to $SubKlass + %2 = unchecked_bitwise_cast %1 : $SubKlass to $Klass + return %2 : $Klass +} + +//===--- +// Guaranteed Tests +// + +// CHECK-LABEL: sil [ossa] @guaranteed_to_guaranteed : $@convention(thin) (@guaranteed Klass) -> () { +// CHECK-NOT: unchecked_ref_cast +// CHECK: } // end sil function 'guaranteed_to_guaranteed' +sil [ossa] @guaranteed_to_guaranteed : $@convention(thin) (@guaranteed Klass) -> () { +bb0(%0 : @guaranteed $Klass): + %1 = unchecked_ref_cast %0 : $Klass to $SubKlass + %2 = unchecked_ref_cast %1 : $SubKlass to $Klass + %f = function_ref @use_klass_guaranteed : $@convention(thin) (@guaranteed Klass) -> () + apply %f(%2) : $@convention(thin) (@guaranteed Klass) -> () + %9999 = tuple() + return %9999 : $() +} + +// We should have no ARC traffic despite having a loop here. 
+// +// CHECK-LABEL: sil [ossa] @guaranteed_to_guaranteed_loop : $@convention(thin) (@guaranteed Klass) -> () { +// CHECK-NOT: unchecked_ref_cast +// CHECK-NOT: copy_value +// CHECK: } // end sil function 'guaranteed_to_guaranteed_loop' +sil [ossa] @guaranteed_to_guaranteed_loop : $@convention(thin) (@guaranteed Klass) -> () { +bb0(%0 : @guaranteed $Klass): + cond_br undef, bb1, bbSkipLoop + +bb1: + br bb1a + +bb1a: + br bb2 + +bb2: + %1 = unchecked_ref_cast %0 : $Klass to $SubKlass + %2 = unchecked_ref_cast %1 : $SubKlass to $Klass + %f = function_ref @use_klass_guaranteed : $@convention(thin) (@guaranteed Klass) -> () + apply %f(%2) : $@convention(thin) (@guaranteed Klass) -> () + cond_br undef, bbBackEdge, bbExitingBlock + +bbBackEdge: + br bb1a + +bbSkipLoop: + br bbExit + +bbExitingBlock: + br bbExit + +bbExit: + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @guaranteed_to_unowned : $@convention(thin) (@guaranteed Klass) -> () { +// CHECK-NOT: unchecked_ref_cast +// CHECK: } // end sil function 'guaranteed_to_unowned' +sil [ossa] @guaranteed_to_unowned : $@convention(thin) (@guaranteed Klass) -> () { +bb0(%0 : @guaranteed $Klass): + %1 = unchecked_ref_cast %0 : $Klass to $SubKlass + %2 = unchecked_ref_cast %1 : $SubKlass to $Klass + %f = function_ref @use_klass_unowned : $@convention(thin) (Klass) -> () + apply %f(%2) : $@convention(thin) (Klass) -> () + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @guaranteed_to_unowned_loop : $@convention(thin) (@guaranteed Klass) -> () { +// CHECK-NOT: unchecked_ref_cast +// CHECK-NOT: copy_value +// CHECK: } // end sil function 'guaranteed_to_unowned_loop' +sil [ossa] @guaranteed_to_unowned_loop : $@convention(thin) (@guaranteed Klass) -> () { +bb0(%0 : @guaranteed $Klass): + cond_br undef, bb1, bbSkipLoop + +bb1: + br bb1a + +bb1a: + br bb2 + +bb2: + %1 = unchecked_ref_cast %0 : $Klass to $SubKlass + %2 = unchecked_ref_cast %1 : $SubKlass to $Klass + %f = function_ref 
@use_klass_unowned : $@convention(thin) (Klass) -> () + apply %f(%2) : $@convention(thin) (Klass) -> () + cond_br undef, bbBackEdge, bbExitingBlock + +bbBackEdge: + br bb1a + +bbSkipLoop: + br bbExit + +bbExitingBlock: + br bbExit + +bbExit: + %9999 = tuple() + return %9999 : $() +} + +// Lifetime extend borrow to %3 and insert a copy. +// +// We should have no copies in this function when we are done. +// +// Just make sure we eliminated the FakeOptional.some. +// +// CHECK-LABEL: sil [ossa] @guaranteed_to_owned_consuming : $@convention(thin) (@guaranteed FakeOptional) -> () { +// CHECK-NOT: enum $FakeOptional, #FakeOptional.some!enumelt +// CHECK-NOT: copy_value +// CHECK-NOT: destroy_value +// CHECK: } // end sil function 'guaranteed_to_owned_consuming' +sil [ossa] @guaranteed_to_owned_consuming : $@convention(thin) (@guaranteed FakeOptional) -> () { +bb0(%0 : @guaranteed $FakeOptional): + switch_enum %0 : $FakeOptional, case #FakeOptional.some: bb1, case #FakeOptional.none: bb2 + +bb1(%0a : @guaranteed $Klass): + %1 = enum $FakeOptional, #FakeOptional.some!enumelt, %0a : $Klass + %2 = copy_value %1 : $FakeOptional + br bb3(%2 : $FakeOptional) + +bb2: + %3 = enum $FakeOptional, #FakeOptional.none!enumelt + br bb3(%3 : $FakeOptional) + +bb3(%4 : @owned $FakeOptional): + destroy_value %4 : $FakeOptional + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @guaranteed_to_owned_consuming_loop : $@convention(thin) (@guaranteed FakeOptional) -> () { +// CHECK-NOT: enum $FakeOptional, #FakeOptional.some!enumelt +// CHECK-NOT: copy_value +// CHECK-NOT: destroy_value +// CHECK: } // end sil function 'guaranteed_to_owned_consuming_loop' +sil [ossa] @guaranteed_to_owned_consuming_loop : $@convention(thin) (@guaranteed FakeOptional) -> () { +bb0(%0 : @guaranteed $FakeOptional): + cond_br undef, bbPreLoopHeader, bbEarlyExit + +bbPreLoopHeader: + br bbLoopHeader + +bbLoopHeader: + switch_enum %0 : $FakeOptional, case #FakeOptional.some: bb1, case 
#FakeOptional.none: bb2 + +bb1(%0a : @guaranteed $Klass): + %1 = enum $FakeOptional, #FakeOptional.some!enumelt, %0a : $Klass + %2 = copy_value %1 : $FakeOptional + br bb3(%2 : $FakeOptional) + +bb2: + %3 = enum $FakeOptional, #FakeOptional.none!enumelt + br bb3(%3 : $FakeOptional) + +bb3(%4 : @owned $FakeOptional): + destroy_value %4 : $FakeOptional + cond_br undef, bbBackEdge, bbLoopExitingBlock + +bbBackEdge: + br bbLoopHeader + +bbLoopExitingBlock: + br bbExit + +bbEarlyExit: + br bbExit + +bbExit: + %9999 = tuple() + return %9999 : $() +} + +// For the normal check, make sure we performed the optimization. In this case +// it means eliminating the enum instruction in bb1. +// +// Then after cleaning up show that we do not have any copy_value or +// begin_borrows left. +// +// CHECK-LABEL: sil [ossa] @guaranteed_to_guaranteed_consuming : $@convention(thin) (@guaranteed FakeOptional) -> () { +// CHECK-NOT: copy_value +// CHECK: begin_borrow +// CHECK-NOT: enum $FakeOptional, #FakeOptional.some!enumelt +// CHECK: } // end sil function 'guaranteed_to_guaranteed_consuming' +// +// We eliminate all begin borrows from this example with semantic-arc. +sil [ossa] @guaranteed_to_guaranteed_consuming : $@convention(thin) (@guaranteed FakeOptional) -> () { +bb0(%0 : @guaranteed $FakeOptional): + switch_enum %0 : $FakeOptional, case #FakeOptional.some: bb1, case #FakeOptional.none: bb2 + +bb1(%0a : @guaranteed $Klass): + %1 = enum $FakeOptional, #FakeOptional.some!enumelt, %0a : $Klass + %2 = begin_borrow %1 : $FakeOptional + br bb3(%2 : $FakeOptional) + +bb2: + %3 = enum $FakeOptional, #FakeOptional.none!enumelt + br bb3(%3 : $FakeOptional) + +bb3(%4 : @guaranteed $FakeOptional): + end_borrow %4 : $FakeOptional + %9999 = tuple() + return %9999 : $() +} + +// Make sure we performed the optimization. 
+// +// CHECK-LABEL: sil [ossa] @guaranteed_to_guaranteed_non_consuming_deadend : $@convention(thin) (@guaranteed FakeOptional) -> () { +// CHECK-NOT: copy_value +// CHECK-NOT: begin_borrow +// CHECK-NOT: enum $FakeOptional, #FakeOptional.some!enumelt +// CHECK: } // end sil function 'guaranteed_to_guaranteed_non_consuming_deadend' +sil [ossa] @guaranteed_to_guaranteed_non_consuming_deadend : $@convention(thin) (@guaranteed FakeOptional) -> () { +bb0(%0 : @guaranteed $FakeOptional): + switch_enum %0 : $FakeOptional, case #FakeOptional.some: bb1, case #FakeOptional.none: bb2 + +bb1(%0a : @guaranteed $Klass): + // We are going to replace %1 with %0. + %1 = enum $FakeOptional, #FakeOptional.some!enumelt, %0a : $Klass + %f2 = function_ref @use_fakeoptional_klass_guaranteed : $@convention(thin) (@guaranteed FakeOptional) -> () + apply %f2(%1) : $@convention(thin) (@guaranteed FakeOptional) -> () + unreachable + +bb2: + %3 = enum $FakeOptional, #FakeOptional.none!enumelt + %f = function_ref @use_fakeoptional_klass_guaranteed : $@convention(thin) (@guaranteed FakeOptional) -> () + apply %f(%3) : $@convention(thin) (@guaranteed FakeOptional) -> () + unreachable +} + +// In this example we are replacing %1 with %0 inserting fix up copies. Make +// sure that we do not end up with any arc traffic! +// +// Make sure we actually eliminated the FakeOptional.some in bb1. We are +// replacing it with %0. 
+// +// CHECK-LABEL: sil [ossa] @guaranteed_to_guaranteed_nonconsuming_2 : $@convention(thin) (@guaranteed FakeOptional) -> () { +// CHECK-NOT: copy_value +// CHECK-NOT: enum $FakeOptional, #FakeOptional.some!enumelt +// CHECK-NOT: begin_borrow +// CHECK: } // end sil function 'guaranteed_to_guaranteed_nonconsuming_2' +sil [ossa] @guaranteed_to_guaranteed_nonconsuming_2 : $@convention(thin) (@guaranteed FakeOptional) -> () { +bb0(%0 : @guaranteed $FakeOptional): + switch_enum %0 : $FakeOptional, case #FakeOptional.some: bb1, case #FakeOptional.none: bb2 + +bb1(%0a : @guaranteed $Klass): + %1 = enum $FakeOptional, #FakeOptional.some!enumelt, %0a : $Klass + %f2 = function_ref @use_fakeoptional_klass_guaranteed : $@convention(thin) (@guaranteed FakeOptional) -> () + apply %f2(%1) : $@convention(thin) (@guaranteed FakeOptional) -> () + br bb3 + +bb2: + %3 = enum $FakeOptional, #FakeOptional.none!enumelt + %f = function_ref @use_fakeoptional_klass_guaranteed : $@convention(thin) (@guaranteed FakeOptional) -> () + apply %f(%3) : $@convention(thin) (@guaranteed FakeOptional) -> () + br bb3 + +bb3: + %9999 = tuple() + return %9999 : $() +} + +// We do eliminate the copy_value here, but we do not eliminate the begin_borrow +// since we do not in semantic arc opts eliminate args kept alive just by +// borrows. 
+// +// CHECK-LABEL: sil [ossa] @guaranteed_copy_rauw_owned : $@convention(thin) (@guaranteed FakeOptional) -> () { +// CHECK-NOT: copy_value +// CHECK: begin_borrow +// CHECK-NOT: copy_value +// CHECK: } // end sil function 'guaranteed_copy_rauw_owned' +sil [ossa] @guaranteed_copy_rauw_owned : $@convention(thin) (@guaranteed FakeOptional) -> () { +bb0(%0 : @guaranteed $FakeOptional): + %0c = copy_value %0 : $FakeOptional + switch_enum %0c : $FakeOptional, case #FakeOptional.some: bb1, case #FakeOptional.none: bb2 + +bb1(%0a : @owned $Klass): + %1 = enum $FakeOptional, #FakeOptional.some!enumelt, %0a : $Klass + br bb3(%1 : $FakeOptional) + +bb2: + %3 = enum $FakeOptional, #FakeOptional.none!enumelt + br bb3(%3 : $FakeOptional) + +bb3(%4 : @owned $FakeOptional): + destroy_value %4 : $FakeOptional + %9999 = tuple() + return %9999 : $() +} diff --git a/test/SILOptimizer/ownership_model_eliminator.sil b/test/SILOptimizer/ownership_model_eliminator.sil index 5a119d1d5278e..b53278f7319d8 100644 --- a/test/SILOptimizer/ownership_model_eliminator.sil +++ b/test/SILOptimizer/ownership_model_eliminator.sil @@ -14,6 +14,11 @@ case some(R) class C {} +struct PairOfInt { + var lhs : Builtin.Int32 + var rhs : Builtin.Int32 +} + // CHECK-LABEL: sil @load : $@convention(thin) (@in Builtin.NativeObject, @in Builtin.Int32) -> () { // CHECK: bb0([[ARG1:%[0-9]+]] : $*Builtin.NativeObject, [[ARG2:%[0-9]+]] : $*Builtin.Int32): // CHECK: [[LOAD2:%[0-9]+]] = load [[ARG1]] : $*Builtin.NativeObject @@ -345,11 +350,11 @@ bb0(%0a : $Builtin.Int32, %0b : $Builtin.Int32): // Just make sure that we do not crash on this function. 
// -// CHECK-LABEL: sil @lower_unchecked_value_cast_to_unchecked_bitwise_cast : $@convention(thin) (Builtin.Int32) -> Builtin.Int32 { +// CHECK-LABEL: sil @lower_unchecked_value_cast_to_unchecked_bitwise_cast : $@convention(thin) (PairOfInt) -> Builtin.Int64 { // CHECK: unchecked_bitwise_cast // CHECK: } // end sil function 'lower_unchecked_value_cast_to_unchecked_bitwise_cast' -sil [ossa] @lower_unchecked_value_cast_to_unchecked_bitwise_cast : $@convention(thin) (Builtin.Int32) -> Builtin.Int32 { -bb0(%0a : $Builtin.Int32): - %0b = unchecked_value_cast %0a : $Builtin.Int32 to $Builtin.Int32 - return %0b : $Builtin.Int32 +sil [ossa] @lower_unchecked_value_cast_to_unchecked_bitwise_cast : $@convention(thin) (PairOfInt) -> Builtin.Int64 { +bb0(%0a : $PairOfInt): + %0b = unchecked_value_cast %0a : $PairOfInt to $Builtin.Int64 + return %0b : $Builtin.Int64 } diff --git a/test/SILOptimizer/ownership_model_eliminator_resilience.sil b/test/SILOptimizer/ownership_model_eliminator_resilience.sil index da14a267ec0f0..fb3f1abf9fb23 100644 --- a/test/SILOptimizer/ownership_model_eliminator_resilience.sil +++ b/test/SILOptimizer/ownership_model_eliminator_resilience.sil @@ -1,4 +1,3 @@ - // RUN: %target-sil-opt -ownership-model-eliminator -enable-library-evolution %s | %FileCheck %s // copy_value and destroy_value operations are lowered away, except for @@ -36,7 +35,9 @@ bb0(%0 : @guaranteed $Saddle): // CHECK: bb0(%0 : $Saddle): // CHECK: strong_retain %0 : $Saddle // CHECK: %2 = enum $Animal, #Animal.horse!enumelt, %0 : $Saddle -// CHECK: release_value %2 : $Animal -// CHECK: %4 = tuple () -// CHECK: return %4 : $() +// CHECK: retain_value %2 +// CHECK: release_value %2 +// CHECK: release_value %2 +// CHECK: %6 = tuple () +// CHECK: return %6 : $() // CHECK: } diff --git a/test/SILOptimizer/pre_specialize.swift b/test/SILOptimizer/pre_specialize.swift index 32eadcf33d85a..4bcefac2f28cc 100644 --- a/test/SILOptimizer/pre_specialize.swift +++ 
b/test/SILOptimizer/pre_specialize.swift @@ -1,20 +1,20 @@ // RUN: %empty-directory(%t) -// RUN: %target-swift-frontend -emit-module-path %t/pre_specialized_module.swiftmodule %S/Inputs/pre_specialized_module.swift -// RUN: %target-swift-frontend -I %t -O -emit-sil %s | %FileCheck %s --check-prefix=OPT -// RUN: %target-swift-frontend -I %t -Onone -emit-sil %s | %FileCheck %s --check-prefix=NONE +// RUN: %target-swift-frontend -enable-experimental-prespecialization -emit-module-path %t/pre_specialized_module.swiftmodule %S/Inputs/pre_specialized_module.swift +// RUN: %target-swift-frontend -enable-experimental-prespecialization -I %t -O -emit-sil %s | %FileCheck %s --check-prefix=OPT +// RUN: %target-swift-frontend -enable-experimental-prespecialization -I %t -Onone -emit-sil %s | %FileCheck %s --check-prefix=NONE // RUN: %empty-directory(%t) -// RUN: %target-swift-frontend -O -emit-module-path %t/pre_specialized_module.swiftmodule %S/Inputs/pre_specialized_module.swift -// RUN: %target-swift-frontend -I %t -O -emit-sil %s | %FileCheck %s --check-prefix=OPT +// RUN: %target-swift-frontend -enable-experimental-prespecialization -O -emit-module-path %t/pre_specialized_module.swiftmodule %S/Inputs/pre_specialized_module.swift +// RUN: %target-swift-frontend -enable-experimental-prespecialization -I %t -O -emit-sil %s | %FileCheck %s --check-prefix=OPT // RUN: %empty-directory(%t) -// RUN: %target-swift-frontend -O -enable-library-evolution -emit-module-path %t/pre_specialized_module.swiftmodule %S/Inputs/pre_specialized_module.swift -// RUN: %target-swift-frontend -I %t -O -emit-sil %s | %FileCheck %s --check-prefix=OPT +// RUN: %target-swift-frontend -enable-experimental-prespecialization -O -enable-library-evolution -emit-module-path %t/pre_specialized_module.swiftmodule %S/Inputs/pre_specialized_module.swift +// RUN: %target-swift-frontend -enable-experimental-prespecialization -I %t -O -emit-sil %s | %FileCheck %s --check-prefix=OPT // RUN: %empty-directory(%t) -// 
RUN: %target-swift-frontend -O -swift-version 5 -enable-library-evolution -emit-module -o /dev/null -emit-module-interface-path %t/pre_specialized_module.swiftinterface %S/Inputs/pre_specialized_module.swift -// RUN: %target-swift-frontend -I %t -O -emit-sil %s | %FileCheck %s --check-prefix=OPT +// RUN: %target-swift-frontend -enable-experimental-prespecialization -O -swift-version 5 -enable-library-evolution -emit-module -o /dev/null -emit-module-interface-path %t/pre_specialized_module.swiftinterface %S/Inputs/pre_specialized_module.swift +// RUN: %target-swift-frontend -enable-experimental-prespecialization -I %t -O -emit-sil %s | %FileCheck %s --check-prefix=OPT import pre_specialized_module diff --git a/test/SILOptimizer/semantic-arc-opt-unchecked-ownership-conversion.sil b/test/SILOptimizer/semantic-arc-opt-unchecked-ownership-conversion.sil new file mode 100644 index 0000000000000..476cb7c92a14c --- /dev/null +++ b/test/SILOptimizer/semantic-arc-opt-unchecked-ownership-conversion.sil @@ -0,0 +1,248 @@ +// RUN: %target-sil-opt -module-name Swift -enable-sil-verify-all -semantic-arc-opts -sil-semantic-arc-peepholes-ownership-conversion-elim -sil-semantic-arc-peepholes-lifetime-joining %s | %FileCheck %s + +sil_stage canonical + +import Builtin + +////////////////// +// Declarations // +////////////////// + +typealias AnyObject = Builtin.AnyObject + +enum MyNever {} +enum FakeOptional { +case none +case some(T) +} + +sil [ossa] @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () +sil [ossa] @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () +sil [ossa] @unowned_user : $@convention(thin) (Builtin.NativeObject) -> () +sil [ossa] @get_owned_obj : $@convention(thin) () -> @owned Builtin.NativeObject +sil [ossa] @unreachable_guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> MyNever +sil [ossa] @inout_user : $@convention(thin) (@inout FakeOptional) -> () +sil [ossa] @get_native_object : 
$@convention(thin) () -> @owned Builtin.NativeObject + +struct NativeObjectPair { + var obj1 : Builtin.NativeObject + var obj2 : Builtin.NativeObject +} + +sil [ossa] @get_object_pair : $@convention(thin) () -> @owned NativeObjectPair + +struct FakeOptionalNativeObjectPairPair { + var pair1 : FakeOptional + var pair2 : FakeOptional +} +sil [ossa] @inout_user2 : $@convention(thin) (@inout FakeOptionalNativeObjectPairPair) -> () + +sil [ossa] @get_nativeobject_pair : $@convention(thin) () -> @owned NativeObjectPair +sil [ossa] @consume_nativeobject_pair : $@convention(thin) (@owned NativeObjectPair) -> () + +protocol MyFakeAnyObject : Klass { + func myFakeMethod() +} + +final class Klass { + var base: Klass + let baseLet: Klass +} + +extension Klass : MyFakeAnyObject { + func myFakeMethod() +} +sil [ossa] @guaranteed_klass_user : $@convention(thin) (@guaranteed Klass) -> () +sil [ossa] @unowned_klass_user : $@convention(thin) (Klass) -> () +sil [ossa] @guaranteed_fakeoptional_klass_user : $@convention(thin) (@guaranteed FakeOptional) -> () +sil [ossa] @guaranteed_fakeoptional_classlet_user : $@convention(thin) (@guaranteed FakeOptional) -> () + +struct MyInt { + var value: Builtin.Int32 +} + +struct StructWithDataAndOwner { + var data : Builtin.Int32 + var owner : Klass +} + +struct StructMemberTest { + var c : Klass + var s : StructWithDataAndOwner + var t : (Builtin.Int32, StructWithDataAndOwner) +} + +class ClassLet { + @_hasStorage let aLet: Klass + @_hasStorage var aVar: Klass + @_hasStorage let aLetTuple: (Klass, Klass) + @_hasStorage let anOptionalLet: FakeOptional + + @_hasStorage let anotherLet: ClassLet +} + +class SubclassLet: ClassLet {} + +sil_global [let] @a_let_global : $Klass +sil_global @a_var_global : $Klass + +enum EnumWithIndirectCase { +case first +indirect case second(Builtin.NativeObject) +} + +struct StructWithEnumWithIndirectCaseField { + var i: Builtin.Int23 + var field : EnumWithIndirectCase +} + +sil [ossa] @get_fakeoptional_nativeobject : 
$@convention(thin) () -> @owned FakeOptional + +struct NativeObjectWrapper { + var innerWrapper : Builtin.NativeObject +} + +sil @owned_user_object_pair : $@convention(thin) (@owned NativeObjectPair) -> () + +/////////// +// Tests // +/////////// + +// CHECK-LABEL: sil [ossa] @guaranteed_to_unowned_positive_1 : $@convention(thin) (@owned Klass) -> () { +// CHECK-NOT: unchecked_ownership_conversion +// CHECK: } // end sil function 'guaranteed_to_unowned_positive_1' +sil [ossa] @guaranteed_to_unowned_positive_1 : $@convention(thin) (@owned Klass) -> () { +bb0(%0 : @owned $Klass): + %1 = begin_borrow %0 : $Klass + %2 = unchecked_ownership_conversion %1 : $Klass, @guaranteed to @unowned + %func = function_ref @unowned_klass_user : $@convention(thin) (Klass) -> () + apply %func(%2) : $@convention(thin) (Klass) -> () + end_borrow %1 : $Klass + destroy_value %0 : $Klass + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @guaranteed_to_unowned_unreachable : $@convention(thin) (@owned Klass) -> () { +// CHECK: begin_borrow +// CHECK-NOT: unchecked_ownership_conversion +// CHECK: } // end sil function 'guaranteed_to_unowned_unreachable' +sil [ossa] @guaranteed_to_unowned_unreachable : $@convention(thin) (@owned Klass) -> () { +bb0(%0 : @owned $Klass): + %1 = begin_borrow %0 : $Klass + %2 = unchecked_ownership_conversion %1 : $Klass, @guaranteed to @unowned + %func = function_ref @unowned_klass_user : $@convention(thin) (Klass) -> () + apply %func(%2) : $@convention(thin) (Klass) -> () + unreachable +} + +// CHECK-LABEL: sil [ossa] @guaranteed_to_unowned_unreachable_2 : $@convention(thin) (@owned Klass) -> () { +// CHECK-NOT: unchecked_ownership_conversion +// CHECK: } // end sil function 'guaranteed_to_unowned_unreachable_2' +sil [ossa] @guaranteed_to_unowned_unreachable_2 : $@convention(thin) (@owned Klass) -> () { +bb0(%0 : @owned $Klass): + %1 = begin_borrow %0 : $Klass + %2 = unchecked_ownership_conversion %1 : $Klass, @guaranteed to @unowned + 
cond_br undef, bb1, bb2 + +bb1: + %func = function_ref @unowned_klass_user : $@convention(thin) (Klass) -> () + apply %func(%2) : $@convention(thin) (Klass) -> () + end_borrow %1 : $Klass + br bb3 + +bb2: + unreachable + +bb3: + destroy_value %0 : $Klass + %9999 = tuple() + return %9999 : $() +} + +// Borrow scope is too small, we fail. +// +// CHECK-LABEL: sil [ossa] @guaranteed_to_unowned_negative_1 : $@convention(thin) (@owned Klass) -> () { +// CHECK: unchecked_ownership_conversion +// CHECK: } // end sil function 'guaranteed_to_unowned_negative_1' +sil [ossa] @guaranteed_to_unowned_negative_1 : $@convention(thin) (@owned Klass) -> () { +bb0(%0 : @owned $Klass): + %1 = begin_borrow %0 : $Klass + %2 = unchecked_ownership_conversion %1 : $Klass, @guaranteed to @unowned + end_borrow %1 : $Klass + %func = function_ref @unowned_klass_user : $@convention(thin) (Klass) -> () + apply %func(%2) : $@convention(thin) (Klass) -> () + destroy_value %0 : $Klass + %9999 = tuple() + return %9999 : $() +} + +///////////////// +// Owned Tests // +///////////////// + +// CHECK-LABEL: sil [ossa] @owned_to_unowned_positive_1 : $@convention(thin) (@guaranteed Klass) -> () { +// CHECK-NOT: unchecked_ownership_conversion +// CHECK: } // end sil function 'owned_to_unowned_positive_1' +sil [ossa] @owned_to_unowned_positive_1 : $@convention(thin) (@guaranteed Klass) -> () { +bb0(%0 : @guaranteed $Klass): + %1 = copy_value %0 : $Klass + %2 = unchecked_ownership_conversion %1 : $Klass, @owned to @unowned + %func = function_ref @unowned_klass_user : $@convention(thin) (Klass) -> () + apply %func(%2) : $@convention(thin) (Klass) -> () + destroy_value %1 : $Klass + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @owned_to_unowned_unreachable : $@convention(thin) (@guaranteed Klass) -> () { +// CHECK: copy_value +// CHECK-NOT: unchecked_ownership_conversion +// CHECK: } // end sil function 'owned_to_unowned_unreachable' +sil [ossa] @owned_to_unowned_unreachable : 
$@convention(thin) (@guaranteed Klass) -> () { +bb0(%0 : @guaranteed $Klass): + %1 = copy_value %0 : $Klass + %2 = unchecked_ownership_conversion %1 : $Klass, @owned to @unowned + %func = function_ref @unowned_klass_user : $@convention(thin) (Klass) -> () + apply %func(%2) : $@convention(thin) (Klass) -> () + unreachable +} + +// CHECK-LABEL: sil [ossa] @owned_to_unowned_unreachable_2 : $@convention(thin) (@guaranteed Klass) -> () { +// CHECK-NOT: unchecked_ownership_conversion +// CHECK: } // end sil function 'owned_to_unowned_unreachable_2' +sil [ossa] @owned_to_unowned_unreachable_2 : $@convention(thin) (@guaranteed Klass) -> () { +bb0(%0 : @guaranteed $Klass): + %1 = copy_value %0 : $Klass + %2 = unchecked_ownership_conversion %1 : $Klass, @owned to @unowned + cond_br undef, bb1, bb2 + +bb1: + %func = function_ref @unowned_klass_user : $@convention(thin) (Klass) -> () + apply %func(%2) : $@convention(thin) (Klass) -> () + destroy_value %1 : $Klass + br bb3 + +bb2: + unreachable + +bb3: + %9999 = tuple() + return %9999 : $() +} + +// Borrow scope is too small, we fail. 
+// +// CHECK-LABEL: sil [ossa] @owned_to_unowned_negative_1 : $@convention(thin) (@guaranteed Klass) -> () { +// CHECK: unchecked_ownership_conversion +// CHECK: } // end sil function 'owned_to_unowned_negative_1' +sil [ossa] @owned_to_unowned_negative_1 : $@convention(thin) (@guaranteed Klass) -> () { +bb0(%0 : @guaranteed $Klass): + %1 = copy_value %0 : $Klass + %2 = unchecked_ownership_conversion %1 : $Klass, @owned to @unowned + destroy_value %1 : $Klass + %func = function_ref @unowned_klass_user : $@convention(thin) (Klass) -> () + apply %func(%2) : $@convention(thin) (Klass) -> () + %9999 = tuple() + return %9999 : $() +} diff --git a/test/SILOptimizer/semantic-arc-opts-lifetime-joining.sil b/test/SILOptimizer/semantic-arc-opts-lifetime-joining.sil new file mode 100644 index 0000000000000..dc09e017af2fb --- /dev/null +++ b/test/SILOptimizer/semantic-arc-opts-lifetime-joining.sil @@ -0,0 +1,885 @@ +// RUN: %target-sil-opt -module-name Swift -enable-sil-verify-all -semantic-arc-opts -sil-semantic-arc-peepholes-lifetime-joining %s | %FileCheck %s +// REQUIRES: swift_stdlib_asserts + +// NOTE: Some of our tests here depend on borrow elimination /not/ running! +// Please do not add it to clean up the IR like we did in +// semanticarcopts-loadcopy-to-loadborrow! 
+ +sil_stage canonical + +import Builtin + +////////////////// +// Declarations // +////////////////// + +typealias AnyObject = Builtin.AnyObject + +enum MyNever {} +enum FakeOptional<T> { +case none +case some(T) +} + +sil [ossa] @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () +sil [ossa] @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () +sil [ossa] @get_owned_obj : $@convention(thin) () -> @owned Builtin.NativeObject +sil [ossa] @unreachable_guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> MyNever +sil [ossa] @inout_user : $@convention(thin) (@inout FakeOptional<Builtin.NativeObject>) -> () +sil [ossa] @get_native_object : $@convention(thin) () -> @owned Builtin.NativeObject + +struct NativeObjectPair { + var obj1 : Builtin.NativeObject + var obj2 : Builtin.NativeObject +} + +sil [ossa] @get_object_pair : $@convention(thin) () -> @owned NativeObjectPair + +struct FakeOptionalNativeObjectPairPair { + var pair1 : FakeOptional<NativeObjectPair> + var pair2 : FakeOptional<NativeObjectPair> +} +sil [ossa] @inout_user2 : $@convention(thin) (@inout FakeOptionalNativeObjectPairPair) -> () + +sil [ossa] @get_nativeobject_pair : $@convention(thin) () -> @owned NativeObjectPair +sil [ossa] @consume_nativeobject_pair : $@convention(thin) (@owned NativeObjectPair) -> () + +protocol MyFakeAnyObject : Klass { + func myFakeMethod() +} + +final class Klass { + var base: Klass + let baseLet: Klass +} + +extension Klass : MyFakeAnyObject { + func myFakeMethod() +} +sil [ossa] @guaranteed_klass_user : $@convention(thin) (@guaranteed Klass) -> () +sil [ossa] @guaranteed_fakeoptional_klass_user : $@convention(thin) (@guaranteed FakeOptional<Klass>) -> () +sil [ossa] @guaranteed_fakeoptional_classlet_user : $@convention(thin) (@guaranteed FakeOptional<ClassLet>) -> () + +struct MyInt { + var value: Builtin.Int32 +} + +struct StructWithDataAndOwner { + var data : Builtin.Int32 + var owner : Klass +} + +struct StructMemberTest { + var c : Klass + var s : StructWithDataAndOwner + var t : 
(Builtin.Int32, StructWithDataAndOwner) +} + +class ClassLet { + @_hasStorage let aLet: Klass + @_hasStorage var aVar: Klass + @_hasStorage let aLetTuple: (Klass, Klass) + @_hasStorage let anOptionalLet: FakeOptional<Klass> + + @_hasStorage let anotherLet: ClassLet +} + +class SubclassLet: ClassLet {} + +sil_global [let] @a_let_global : $Klass +sil_global @a_var_global : $Klass + +enum EnumWithIndirectCase { +case first +indirect case second(Builtin.NativeObject) +} + +struct StructWithEnumWithIndirectCaseField { + var i: Builtin.Int23 + var field : EnumWithIndirectCase +} + +sil [ossa] @get_fakeoptional_nativeobject : $@convention(thin) () -> @owned FakeOptional<Builtin.NativeObject> + +struct NativeObjectWrapper { + var innerWrapper : Builtin.NativeObject +} + +sil @owned_user_object_pair : $@convention(thin) (@owned NativeObjectPair) -> () + +/////////// +// Tests // +/////////// + +// CHECK-LABEL: sil [ossa] @join_liveranges_in_same_block_1 : $@convention(thin) (@owned Builtin.NativeObject) -> () { +// CHECK-NOT: copy_value +// CHECK: } // end sil function 'join_liveranges_in_same_block_1' +sil [ossa] @join_liveranges_in_same_block_1 : $@convention(thin) (@owned Builtin.NativeObject) -> () { +bb0(%0 : @owned $Builtin.NativeObject): + %1 = copy_value %0 : $Builtin.NativeObject + destroy_value %0 : $Builtin.NativeObject + destroy_value %1 : $Builtin.NativeObject + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @join_liveranges_in_same_block_2 : $@convention(thin) (@owned Builtin.NativeObject) -> @owned Builtin.NativeObject { +// CHECK-NOT: copy_value +// CHECK: } // end sil function 'join_liveranges_in_same_block_2' +sil [ossa] @join_liveranges_in_same_block_2 : $@convention(thin) (@owned Builtin.NativeObject) -> @owned Builtin.NativeObject { +bb0(%0 : @owned $Builtin.NativeObject): + %1 = copy_value %0 : $Builtin.NativeObject + destroy_value %0 : $Builtin.NativeObject + return %1 : $Builtin.NativeObject +} + +// CHECK-LABEL: sil [ossa] @join_liveranges_in_same_block_3 
: $@convention(thin) (@owned Builtin.NativeObject) -> () { +// CHECK-NOT: copy_value +// CHECK: } // end sil function 'join_liveranges_in_same_block_3' +sil [ossa] @join_liveranges_in_same_block_3 : $@convention(thin) (@owned Builtin.NativeObject) -> () { +bb0(%0 : @owned $Builtin.NativeObject): + %1 = copy_value %0 : $Builtin.NativeObject + br bb1 + +bb1: + destroy_value %0 : $Builtin.NativeObject + %f = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () + apply %f(%1) : $@convention(thin) (@owned Builtin.NativeObject) -> () + br bb2 + +bb2: + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @join_liveranges_in_same_block_4 : $@convention(thin) (@owned Builtin.NativeObject) -> () { +// CHECK-NOT: copy_value +// CHECK: } // end sil function 'join_liveranges_in_same_block_4' +sil [ossa] @join_liveranges_in_same_block_4 : $@convention(thin) (@owned Builtin.NativeObject) -> () { +bb0(%0 : @owned $Builtin.NativeObject): + %1 = copy_value %0 : $Builtin.NativeObject + br bb1 + +bb1: + destroy_value %0 : $Builtin.NativeObject + %f = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + apply %f(%1) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + %f2 = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () + apply %f2(%1) : $@convention(thin) (@owned Builtin.NativeObject) -> () + br bb2 + +bb2: + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @donot_join_liveranges_in_same_block_1 : $@convention(thin) (@owned Builtin.NativeObject) -> () { +// CHECK: copy_value +// CHECK: } // end sil function 'donot_join_liveranges_in_same_block_1' +sil [ossa] @donot_join_liveranges_in_same_block_1 : $@convention(thin) (@owned Builtin.NativeObject) -> () { +bb0(%0 : @owned $Builtin.NativeObject): + %1 = copy_value %0 : $Builtin.NativeObject + br bb1 + +bb1: + %f = function_ref @owned_user : $@convention(thin) (@owned 
Builtin.NativeObject) -> () + apply %f(%1) : $@convention(thin) (@owned Builtin.NativeObject) -> () + destroy_value %0 : $Builtin.NativeObject + %9999 = tuple() + return %9999 : $() +} + +// Forwarding case. +// +// CHECK-LABEL: sil [ossa] @donot_join_liveranges_in_same_block_2 : $@convention(thin) (@owned Builtin.NativeObject) -> () { +// CHECK-NOT: copy_value +// CHECK: } // end sil function 'donot_join_liveranges_in_same_block_2' +sil [ossa] @donot_join_liveranges_in_same_block_2 : $@convention(thin) (@owned Builtin.NativeObject) -> () { +bb0(%0 : @owned $Builtin.NativeObject): + %1 = copy_value %0 : $Builtin.NativeObject + br bb1 + +bb1: + destroy_value %0 : $Builtin.NativeObject + %2 = unchecked_ref_cast %1 : $Builtin.NativeObject to $Builtin.NativeObject + %f = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () + apply %f(%2) : $@convention(thin) (@owned Builtin.NativeObject) -> () + %9999 = tuple() + return %9999 : $() +} + +// Forwarding case. We need LiveRanges for this. +// +// CHECK-LABEL: sil [ossa] @donot_join_liveranges_in_same_block_3 : $@convention(thin) (@owned Builtin.NativeObject) -> () { +// CHECK: copy_value +// CHECK: } // end sil function 'donot_join_liveranges_in_same_block_3' +sil [ossa] @donot_join_liveranges_in_same_block_3 : $@convention(thin) (@owned Builtin.NativeObject) -> () { +bb0(%0 : @owned $Builtin.NativeObject): + %1 = copy_value %0 : $Builtin.NativeObject + br bb1 + +bb1: + %2 = unchecked_ref_cast %0 : $Builtin.NativeObject to $Builtin.NativeObject + destroy_value %2 : $Builtin.NativeObject + %f = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () + apply %f(%1) : $@convention(thin) (@owned Builtin.NativeObject) -> () + %9999 = tuple() + return %9999 : $() +} + +// Now test cases where we find our consumer is in the return block or is a +// return itself. 
+// +// CHECK-LABEL: sil [ossa] @join_liveranges_not_same_block_with_consuming_return : $@convention(thin) (@owned Builtin.NativeObject) -> @owned Builtin.NativeObject { +// CHECK-NOT: copy_value +// CHECK: } // end sil function 'join_liveranges_not_same_block_with_consuming_return' +sil [ossa] @join_liveranges_not_same_block_with_consuming_return : $@convention(thin) (@owned Builtin.NativeObject) -> @owned Builtin.NativeObject { +bb0(%0 : @owned $Builtin.NativeObject): + %1 = copy_value %0 : $Builtin.NativeObject + br bb1 + +bb1: + destroy_value %0 : $Builtin.NativeObject + br bb2 + +bb2: + return %1 : $Builtin.NativeObject +} + +// CHECK-LABEL: sil [ossa] @join_liveranges_not_same_block_consumed_in_return_block : $@convention(thin) (@owned Builtin.NativeObject) -> () { +// CHECK-NOT: copy_value +// CHECK: } // end sil function 'join_liveranges_not_same_block_consumed_in_return_block' +sil [ossa] @join_liveranges_not_same_block_consumed_in_return_block : $@convention(thin) (@owned Builtin.NativeObject) -> () { +bb0(%0 : @owned $Builtin.NativeObject): + %1 = copy_value %0 : $Builtin.NativeObject + br bb1 + +bb1: + destroy_value %0 : $Builtin.NativeObject + br bb2 + +bb2: + %f = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () + apply %f(%1) : $@convention(thin) (@owned Builtin.NativeObject) -> () + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @donot_join_liveranges_not_same_block_1 : $@convention(thin) (@owned Builtin.NativeObject) -> () { +// CHECK: copy_value +// CHECK: } // end sil function 'donot_join_liveranges_not_same_block_1' +sil [ossa] @donot_join_liveranges_not_same_block_1 : $@convention(thin) (@owned Builtin.NativeObject) -> () { +bb0(%0 : @owned $Builtin.NativeObject): + %1 = copy_value %0 : $Builtin.NativeObject + br bb1 + +bb1: + destroy_value %0 : $Builtin.NativeObject + br bb2 + +bb2: + %f = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () + apply %f(%1) : 
$@convention(thin) (@owned Builtin.NativeObject) -> () + br bb3 + +bb3: + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @donot_join_liveranges_not_same_block_2 : $@convention(thin) (@owned Builtin.NativeObject) -> () { +// CHECK: copy_value +// CHECK: } // end sil function 'donot_join_liveranges_not_same_block_2' +sil [ossa] @donot_join_liveranges_not_same_block_2 : $@convention(thin) (@owned Builtin.NativeObject) -> () { +bb0(%0 : @owned $Builtin.NativeObject): + %1 = copy_value %0 : $Builtin.NativeObject + br bb1 + +bb1: + %f = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () + apply %f(%1) : $@convention(thin) (@owned Builtin.NativeObject) -> () + br bb2 + +bb2: + destroy_value %0 : $Builtin.NativeObject + br bb3 + +bb3: + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @join_liverange_multiple_destroy_value : $@convention(thin) () -> () { +// CHECK-NOT: copy_value +// CHECK-NOT: destroy_value +// CHECK: } // end sil function 'join_liverange_multiple_destroy_value' +sil [ossa] @join_liverange_multiple_destroy_value : $@convention(thin) () -> () { +bb0: + %consumingUse = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () + %constructingUse = function_ref @get_owned_obj : $@convention(thin) () -> @owned Builtin.NativeObject + %obj = apply %constructingUse() : $@convention(thin) () -> @owned Builtin.NativeObject + cond_br undef, bb1, bb2 + +bb1: + %obj2 = copy_value %obj : $Builtin.NativeObject + destroy_value %obj : $Builtin.NativeObject + cond_br undef, bb1a, bb1b + +bb1a: + apply %consumingUse(%obj2) : $@convention(thin) (@owned Builtin.NativeObject) -> () + br bb3 + +bb1b: + apply %consumingUse(%obj2) : $@convention(thin) (@owned Builtin.NativeObject) -> () + br bb3 + +bb2: + apply %consumingUse(%obj) : $@convention(thin) (@owned Builtin.NativeObject) -> () + br bb4 + +bb3: + br bb4 + +bb4: + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: 
sil [ossa] @join_liverange_forwarding_chain_1 : $@convention(thin) () -> () { +// CHECK-NOT: copy_value +// CHECK-NOT: destroy_value +// CHECK: } // end sil function 'join_liverange_forwarding_chain_1' +sil [ossa] @join_liverange_forwarding_chain_1 : $@convention(thin) () -> () { +bb0: + %consumingUse = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () + %constructingUse = function_ref @get_owned_obj : $@convention(thin) () -> @owned Builtin.NativeObject + %obj = apply %constructingUse() : $@convention(thin) () -> @owned Builtin.NativeObject + + %obj2 = copy_value %obj : $Builtin.NativeObject + destroy_value %obj : $Builtin.NativeObject + %obj3 = unchecked_ref_cast %obj2 : $Builtin.NativeObject to $Builtin.NativeObject + apply %consumingUse(%obj3) : $@convention(thin) (@owned Builtin.NativeObject) -> () + + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @join_liverange_multiple_destroy_value_forwarding_chain_1 : $@convention(thin) () -> () { +// CHECK-NOT: copy_value +// CHECK-NOT: destroy_value +// CHECK: } // end sil function 'join_liverange_multiple_destroy_value_forwarding_chain_1' +sil [ossa] @join_liverange_multiple_destroy_value_forwarding_chain_1 : $@convention(thin) () -> () { +bb0: + %consumingUse = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () + %constructingUse = function_ref @get_owned_obj : $@convention(thin) () -> @owned Builtin.NativeObject + %obj = apply %constructingUse() : $@convention(thin) () -> @owned Builtin.NativeObject + cond_br undef, bb1, bb2 + +bb1: + %obj2 = copy_value %obj : $Builtin.NativeObject + destroy_value %obj : $Builtin.NativeObject + %obj3 = unchecked_ref_cast %obj2 : $Builtin.NativeObject to $Builtin.NativeObject + cond_br undef, bb1a, bb1b + +bb1a: + apply %consumingUse(%obj3) : $@convention(thin) (@owned Builtin.NativeObject) -> () + br bb3 + +bb1b: + apply %consumingUse(%obj3) : $@convention(thin) (@owned Builtin.NativeObject) -> () 
+ br bb3 + +bb2: + apply %consumingUse(%obj) : $@convention(thin) (@owned Builtin.NativeObject) -> () + br bb4 + +bb3: + br bb4 + +bb4: + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @join_liverange_multiple_destroy_value_forwarding_chain_2 : $@convention(thin) () -> () { +// CHECK-NOT: copy_value +// CHECK-NOT: destroy_value +// CHECK: } // end sil function 'join_liverange_multiple_destroy_value_forwarding_chain_2' +sil [ossa] @join_liverange_multiple_destroy_value_forwarding_chain_2 : $@convention(thin) () -> () { +bb0: + %consumingUse = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () + %constructingUse = function_ref @get_owned_obj : $@convention(thin) () -> @owned Builtin.NativeObject + %obj = apply %constructingUse() : $@convention(thin) () -> @owned Builtin.NativeObject + cond_br undef, bb1, bb2 + +bb1: + %obj2 = copy_value %obj : $Builtin.NativeObject + %obj3 = unchecked_ref_cast %obj2 : $Builtin.NativeObject to $Builtin.NativeObject + destroy_value %obj : $Builtin.NativeObject + cond_br undef, bb1a, bb1b + +bb1a: + apply %consumingUse(%obj3) : $@convention(thin) (@owned Builtin.NativeObject) -> () + br bb3 + +bb1b: + apply %consumingUse(%obj3) : $@convention(thin) (@owned Builtin.NativeObject) -> () + br bb3 + +bb2: + apply %consumingUse(%obj) : $@convention(thin) (@owned Builtin.NativeObject) -> () + br bb4 + +bb3: + br bb4 + +bb4: + %9999 = tuple() + return %9999 : $() +} + +// We do not support destructures and other multiple value instructions yet... 
+// +// CHECK-LABEL: sil [ossa] @join_liverange_multiple_destroy_value_forwarding_chain_3 : $@convention(thin) () -> () { +// CHECK: copy_value +// CHECK: destroy_value +// CHECK: } // end sil function 'join_liverange_multiple_destroy_value_forwarding_chain_3' +sil [ossa] @join_liverange_multiple_destroy_value_forwarding_chain_3 : $@convention(thin) () -> () { +bb0: + %consumingUse = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () + %constructingUse = function_ref @get_owned_obj : $@convention(thin) () -> @owned Builtin.NativeObject + %obj = apply %constructingUse() : $@convention(thin) () -> @owned Builtin.NativeObject + cond_br undef, bb1, bb2 + +bb1: + %obj2 = copy_value %obj : $Builtin.NativeObject + %obj2a = struct $NativeObjectWrapper(%obj2 : $Builtin.NativeObject) + (%obj3) = destructure_struct %obj2a : $NativeObjectWrapper + destroy_value %obj : $Builtin.NativeObject + cond_br undef, bb1a, bb1b + +bb1a: + apply %consumingUse(%obj3) : $@convention(thin) (@owned Builtin.NativeObject) -> () + br bb3 + +bb1b: + apply %consumingUse(%obj3) : $@convention(thin) (@owned Builtin.NativeObject) -> () + br bb3 + +bb2: + apply %consumingUse(%obj) : $@convention(thin) (@owned Builtin.NativeObject) -> () + br bb4 + +bb3: + br bb4 + +bb4: + %9999 = tuple() + return %9999 : $() +} + +// This case succeeds since our borrow scope does not overlap with our +// forwarding region. 
+// +// CHECK-LABEL: sil [ossa] @join_live_range_with_borrowscopes_succeed_1 : $@convention(thin) () -> () { +// CHECK-NOT: copy_value +// CHECK-NOT: destroy_value +// CHECK: } // end sil function 'join_live_range_with_borrowscopes_succeed_1' +sil [ossa] @join_live_range_with_borrowscopes_succeed_1 : $@convention(thin) () -> () { +bb0: + %consumingUse = function_ref @owned_user_object_pair : $@convention(thin) (@owned NativeObjectPair) -> () + %constructingUse = function_ref @get_object_pair : $@convention(thin) () -> @owned NativeObjectPair + %obj = apply %constructingUse() : $@convention(thin) () -> @owned NativeObjectPair + %borrowedObj = begin_borrow %obj : $NativeObjectPair + %obj1 = struct_extract %borrowedObj : $NativeObjectPair, #NativeObjectPair.obj1 + %guaranteedUse = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + apply %guaranteedUse(%obj1) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + end_borrow %borrowedObj : $NativeObjectPair + %2 = copy_value %obj : $NativeObjectPair + br bb1 + +bb1: + destroy_value %obj : $NativeObjectPair + apply %consumingUse(%2) : $@convention(thin) (@owned NativeObjectPair) -> () + br bb2 + +bb2: + %9999 = tuple() + return %9999 : $() +} + +// In this case, we validate that we can perform the optimization with +// forwarding insts. 
+// +// CHECK-LABEL: sil [ossa] @join_live_range_with_borrowscopes_succeed_2 : $@convention(thin) () -> () { +// CHECK-NOT: copy_value +// CHECK: } // end sil function 'join_live_range_with_borrowscopes_succeed_2' +sil [ossa] @join_live_range_with_borrowscopes_succeed_2 : $@convention(thin) () -> () { +bb0: + %obj = alloc_ref $Klass + %borrowedObj = begin_borrow %obj : $Klass + end_borrow %borrowedObj : $Klass + %2 = copy_value %obj : $Klass + br bb1 + +bb1: + destroy_value %obj : $Klass + %3 = unchecked_ref_cast %2 : $Klass to $Builtin.NativeObject + %consumingUse = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () + apply %consumingUse(%3) : $@convention(thin) (@owned Builtin.NativeObject) -> () + br bb2 + +bb2: + %9999 = tuple() + return %9999 : $() +} + +// Here we succeed even though %2's lifetime ends at the unchecked_ref_cast, we +// are able to know that +// +// CHECK-LABEL: sil [ossa] @join_live_range_with_borrowscopes_succeed_3 : $@convention(thin) () -> () { +// CHECK-NOT: copy_value +// CHECK: } // end sil function 'join_live_range_with_borrowscopes_succeed_3' +sil [ossa] @join_live_range_with_borrowscopes_succeed_3 : $@convention(thin) () -> () { +bb0: + %obj = alloc_ref $Klass + %borrowedObj = begin_borrow %obj : $Klass + end_borrow %borrowedObj : $Klass + %2 = copy_value %obj : $Klass + br bb1 + +bb1: + %3 = unchecked_ref_cast %2 : $Klass to $Builtin.NativeObject + destroy_value %obj : $Klass + %consumingUse = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () + apply %consumingUse(%3) : $@convention(thin) (@owned Builtin.NativeObject) -> () + br bb2 + +bb2: + %9999 = tuple() + return %9999 : $() +} + +// In this case we fail since we don't want to have to deal with splitting the +// scope of %borrowedObj at %3. 
+// +// CHECK-LABEL: sil [ossa] @join_live_range_with_borrowscopes_fail_1 : $@convention(thin) () -> () { +// CHECK: copy_value +// CHECK: } // end sil function 'join_live_range_with_borrowscopes_fail_1' +sil [ossa] @join_live_range_with_borrowscopes_fail_1 : $@convention(thin) () -> () { +bb0: + %obj = alloc_ref $Klass + %borrowedObj = begin_borrow %obj : $Klass + %2 = copy_value %obj : $Klass + br bb1 + +bb1: + %3 = unchecked_ref_cast %2 : $Klass to $Builtin.NativeObject + end_borrow %borrowedObj : $Klass + destroy_value %obj : $Klass + %consumingUse = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () + apply %consumingUse(%3) : $@convention(thin) (@owned Builtin.NativeObject) -> () + br bb2 + +bb2: + %9999 = tuple() + return %9999 : $() +} + +// This case succeeds since our borrow scope does not overlap with our +// forwarding region. +sil [ossa] @join_live_range_with_borrowscopes_multipledestroys_succeed_1 : $@convention(thin) () -> () { +bb0: + %consumingUse = function_ref @owned_user_object_pair : $@convention(thin) (@owned NativeObjectPair) -> () + %constructingUse = function_ref @get_object_pair : $@convention(thin) () -> @owned NativeObjectPair + %obj = apply %constructingUse() : $@convention(thin) () -> @owned NativeObjectPair + %borrowedObj = begin_borrow %obj : $NativeObjectPair + %obj1 = struct_extract %borrowedObj : $NativeObjectPair, #NativeObjectPair.obj1 + %guaranteedUse = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + apply %guaranteedUse(%obj1) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + end_borrow %borrowedObj : $NativeObjectPair + cond_br undef, bb1, bb2 + +bb1: + %2 = copy_value %obj : $NativeObjectPair + destroy_value %obj : $NativeObjectPair + apply %consumingUse(%2) : $@convention(thin) (@owned NativeObjectPair) -> () + br bb3 + +bb2: + destroy_value %obj : $NativeObjectPair + br bb3 + +bb3: + %9999 = tuple() + return %9999 : $() +} + +// In 
this case, we validate that we can perform the optimization with +// forwarding insts. +// +// CHECK-LABEL: sil [ossa] @join_live_range_with_borrowscopes_multipledestroys_succeed_2 : $@convention(thin) () -> () { +// CHECK-NOT: copy_value +// CHECK: } // end sil function 'join_live_range_with_borrowscopes_multipledestroys_succeed_2' +sil [ossa] @join_live_range_with_borrowscopes_multipledestroys_succeed_2 : $@convention(thin) () -> () { +bb0: + %obj = alloc_ref $Klass + %borrowedObj = begin_borrow %obj : $Klass + // We cheat and use end_borrow so we don't optimize this borrow. + end_borrow %borrowedObj : $Klass + cond_br undef, bb1, bb2 + +bb1: + %2 = copy_value %obj : $Klass + destroy_value %obj : $Klass + %3 = unchecked_ref_cast %2 : $Klass to $Builtin.NativeObject + %consumingUse = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () + apply %consumingUse(%3) : $@convention(thin) (@owned Builtin.NativeObject) -> () + br bb3 + +bb2: + destroy_value %obj : $Klass + br bb3 + +bb3: + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @join_live_range_with_borrowscopes_multipledestroys_succeed_3 : $@convention(thin) () -> () { +// CHECK-NOT: copy_value +// CHECK: } // end sil function 'join_live_range_with_borrowscopes_multipledestroys_succeed_3' +sil [ossa] @join_live_range_with_borrowscopes_multipledestroys_succeed_3 : $@convention(thin) () -> () { +bb0: + %obj = alloc_ref $Klass + %borrowedObj = begin_borrow %obj : $Klass + cond_br undef, bb1, bb2 + +bb1: + end_borrow %borrowedObj : $Klass + %2 = copy_value %obj : $Klass + %3 = unchecked_ref_cast %2 : $Klass to $Builtin.NativeObject + destroy_value %obj : $Klass + %consumingUse = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () + apply %consumingUse(%3) : $@convention(thin) (@owned Builtin.NativeObject) -> () + br bb3 + +bb2: + end_borrow %borrowedObj : $Klass + destroy_value %obj : $Klass + br bb3 + +bb3: + %9999 = tuple() + return 
%9999 : $() +} + +// CHECK-LABEL: sil [ossa] @join_live_range_with_borrowscopes_multipledestroys_succeed_4 : $@convention(thin) () -> () { +// CHECK-NOT: copy_value +// CHECK: } // end sil function 'join_live_range_with_borrowscopes_multipledestroys_succeed_4' +sil [ossa] @join_live_range_with_borrowscopes_multipledestroys_succeed_4 : $@convention(thin) () -> () { +bb0: + %obj = alloc_ref $Klass + %borrowedObj = begin_borrow %obj : $Klass + cond_br undef, bb1, bb2 + +bb1: + %2 = copy_value %obj : $Klass + end_borrow %borrowedObj : $Klass + %3 = unchecked_ref_cast %2 : $Klass to $Builtin.NativeObject + destroy_value %obj : $Klass + %consumingUse = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () + apply %consumingUse(%3) : $@convention(thin) (@owned Builtin.NativeObject) -> () + br bb3 + +bb2: + end_borrow %borrowedObj : $Klass + destroy_value %obj : $Klass + br bb3 + +bb3: + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @join_live_range_with_borrowscopes_multipledestroys_succeed_5 : $@convention(thin) () -> () { +// CHECK-NOT: copy_value +// CHECK: } // end sil function 'join_live_range_with_borrowscopes_multipledestroys_succeed_5' +sil [ossa] @join_live_range_with_borrowscopes_multipledestroys_succeed_5 : $@convention(thin) () -> () { +bb0: + %obj = alloc_ref $Klass + cond_br undef, bb1, bb2 + +bb1: + %2 = copy_value %obj : $Klass + %3 = unchecked_ref_cast %2 : $Klass to $Builtin.NativeObject + destroy_value %obj : $Klass + %4 = unchecked_ref_cast %3 : $Builtin.NativeObject to $Klass + %5 = unchecked_ref_cast %4 : $Klass to $Builtin.NativeObject + %consumingUse = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () + apply %consumingUse(%5) : $@convention(thin) (@owned Builtin.NativeObject) -> () + br bb3 + +bb2: + destroy_value %obj : $Klass + br bb3 + +bb3: + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] 
@join_live_range_with_borrowscopes_multipledestroys_succeed_6 : $@convention(thin) () -> () { +// CHECK-NOT: copy_value +// CHECK: } // end sil function 'join_live_range_with_borrowscopes_multipledestroys_succeed_6' +sil [ossa] @join_live_range_with_borrowscopes_multipledestroys_succeed_6 : $@convention(thin) () -> () { +bb0: + %obj = alloc_ref $Klass + cond_br undef, bb1, bb2 + +bb1: + %2 = copy_value %obj : $Klass + %3 = unchecked_ref_cast %2 : $Klass to $Builtin.NativeObject + destroy_value %obj : $Klass + br bb1a + +bb1a: + %consumingUse = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () + apply %consumingUse(%3) : $@convention(thin) (@owned Builtin.NativeObject) -> () + br bb3 + +bb2: + destroy_value %obj : $Klass + br bb3 + +bb3: + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @join_live_range_with_borrowscopes_multipledestroys_succeed_7 : $@convention(thin) () -> () { +// CHECK-NOT: copy_value +// CHECK: } // end sil function 'join_live_range_with_borrowscopes_multipledestroys_succeed_7' +sil [ossa] @join_live_range_with_borrowscopes_multipledestroys_succeed_7 : $@convention(thin) () -> () { +bb0: + %obj = alloc_ref $Klass + cond_br undef, bb1, bb2 + +bb1: + %2 = copy_value %obj : $Klass + %3 = unchecked_ref_cast %2 : $Klass to $Builtin.NativeObject + destroy_value %obj : $Klass + cond_br undef, bb1a, bb1b + +bb1a: + %consumingUse = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () + apply %consumingUse(%3) : $@convention(thin) (@owned Builtin.NativeObject) -> () + br bb1c + +bb1b: + destroy_value %3 : $Builtin.NativeObject + br bb1c + +bb1c: + br bb3 + +bb2: + destroy_value %obj : $Klass + br bb3 + +bb3: + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @join_live_range_with_borrowscopes_multipledestroys_fail_2 : $@convention(thin) () -> () { +// CHECK: copy_value +// CHECK: } // end sil function 
'join_live_range_with_borrowscopes_multipledestroys_fail_2' +sil [ossa] @join_live_range_with_borrowscopes_multipledestroys_fail_2 : $@convention(thin) () -> () { +bb0: + %obj = alloc_ref $Klass + cond_br undef, bb1, bb2 + +bb1: + %2 = copy_value %obj : $Klass + %3 = unchecked_ref_cast %2 : $Klass to $Builtin.NativeObject + %consumingUse = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () + apply %consumingUse(%3) : $@convention(thin) (@owned Builtin.NativeObject) -> () + destroy_value %obj : $Klass + br bb3 + +bb2: + destroy_value %obj : $Klass + br bb3 + +bb3: + %9999 = tuple() + return %9999 : $() +} + +// In this case we fail since we don't want to have to deal with splitting the +// scope of %borrowedObj at %3. +// +// CHECK-LABEL: sil [ossa] @join_live_range_with_borrowscopes_multipledestroys_fail_1 : $@convention(thin) () -> () { +// CHECK: copy_value +// CHECK: } // end sil function 'join_live_range_with_borrowscopes_multipledestroys_fail_1' +sil [ossa] @join_live_range_with_borrowscopes_multipledestroys_fail_1 : $@convention(thin) () -> () { +bb0: + %obj = alloc_ref $Klass + %borrowedObj = begin_borrow %obj : $Klass + cond_br undef, bb1, bb2 + +bb1: + %2 = copy_value %obj : $Klass + %3 = unchecked_ref_cast %2 : $Klass to $Builtin.NativeObject + end_borrow %borrowedObj : $Klass + destroy_value %obj : $Klass + %consumingUse = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () + apply %consumingUse(%3) : $@convention(thin) (@owned Builtin.NativeObject) -> () + br bb3 + +bb2: + end_borrow %borrowedObj : $Klass + destroy_value %obj : $Klass + br bb3 + +bb3: + %9999 = tuple() + return %9999 : $() +} + +// Make sure we leave only one copy in bb2 and no destroys +// +// CHECK-LABEL: sil [ossa] @join_test_with_forwarding_inst : $@convention(thin) () -> @owned FakeOptional { +// CHECK: bb2: +// CHECK: copy_value +// CHECK-NOT: destroy_value +// CHECK-NOT: copy_value +// CHECK: br bb3( +// CHECK: } // end 
sil function 'join_test_with_forwarding_inst' +sil [ossa] @join_test_with_forwarding_inst : $@convention(thin) () -> @owned FakeOptional { +bb0: + %allocStack = alloc_stack $Builtin.NativeObject + %0 = function_ref @get_fakeoptional_nativeobject : $@convention(thin) () -> @owned FakeOptional + %1 = apply %0() : $@convention(thin) () -> @owned FakeOptional + cond_br undef, bb1, bb2 + +bb1: + destroy_value %1 : $FakeOptional + %2 = enum $FakeOptional, #FakeOptional.none!enumelt + br bb3(%2 : $FakeOptional) + +bb2: + %3 = unchecked_enum_data %1 : $FakeOptional, #FakeOptional.some!enumelt + %4 = copy_value %3 : $Builtin.NativeObject + store %3 to [init] %allocStack : $*Builtin.NativeObject + %4c = copy_value %4 : $Builtin.NativeObject + destroy_value %4 : $Builtin.NativeObject + %5 = enum $FakeOptional, #FakeOptional.some!enumelt, %4c : $Builtin.NativeObject + destroy_addr %allocStack : $*Builtin.NativeObject + br bb3(%5 : $FakeOptional) + +bb3(%result : @owned $FakeOptional): + dealloc_stack %allocStack : $*Builtin.NativeObject + return %result : $FakeOptional +} diff --git a/test/SILOptimizer/semantic-arc-opts-loadcopy-to-loadborrow.sil b/test/SILOptimizer/semantic-arc-opts-loadcopy-to-loadborrow.sil new file mode 100644 index 0000000000000..1f5e57cac17bd --- /dev/null +++ b/test/SILOptimizer/semantic-arc-opts-loadcopy-to-loadborrow.sil @@ -0,0 +1,1452 @@ +// RUN: %target-sil-opt -module-name Swift -enable-sil-verify-all -semantic-arc-opts -sil-semantic-arc-peepholes-loadcopy-to-loadborrow -sil-semantic-arc-peepholes-redundant-borrowscope-elim %s | %FileCheck %s + +// NOTE: After we run load [copy] -> load_borrow, we run one additional round of +// borrow scope elimination since borrow scope elimination cleans up the IR a +// little bit. 
+ +sil_stage canonical + +import Builtin + +////////////////// +// Declarations // +////////////////// + +typealias AnyObject = Builtin.AnyObject + +enum MyNever {} +enum FakeOptional { +case none +case some(T) +} + +sil @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () +sil @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () +sil @get_owned_obj : $@convention(thin) () -> @owned Builtin.NativeObject +sil @unreachable_guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> MyNever +sil @inout_user : $@convention(thin) (@inout FakeOptional) -> () +sil @get_native_object : $@convention(thin) () -> @owned Builtin.NativeObject + +struct NativeObjectPair { + var obj1 : Builtin.NativeObject + var obj2 : Builtin.NativeObject +} + +sil @get_object_pair : $@convention(thin) () -> @owned NativeObjectPair + +struct FakeOptionalNativeObjectPairPair { + var pair1 : FakeOptional + var pair2 : FakeOptional +} +sil @inout_user2 : $@convention(thin) (@inout FakeOptionalNativeObjectPairPair) -> () + +sil @get_nativeobject_pair : $@convention(thin) () -> @owned NativeObjectPair +sil @consume_nativeobject_pair : $@convention(thin) (@owned NativeObjectPair) -> () + +protocol MyFakeAnyObject : Klass { + func myFakeMethod() +} + +final class Klass { + var base: Klass + let baseLet: Klass +} + +extension Klass : MyFakeAnyObject { + func myFakeMethod() +} +sil @guaranteed_klass_user : $@convention(thin) (@guaranteed Klass) -> () +sil @guaranteed_fakeoptional_klass_user : $@convention(thin) (@guaranteed FakeOptional) -> () +sil @guaranteed_fakeoptional_classlet_user : $@convention(thin) (@guaranteed FakeOptional) -> () + +struct MyInt { + var value: Builtin.Int32 +} + +struct StructWithDataAndOwner { + var data : Builtin.Int32 + var owner : Klass +} + +struct StructMemberTest { + var c : Klass + var s : StructWithDataAndOwner + var t : (Builtin.Int32, StructWithDataAndOwner) +} + +class ClassLet { + @_hasStorage let aLet: Klass 
+ @_hasStorage var aVar: Klass + @_hasStorage let aLetTuple: (Klass, Klass) + @_hasStorage let anOptionalLet: FakeOptional + + @_hasStorage let anotherLet: ClassLet +} + +class SubclassLet: ClassLet {} + +sil_global [let] @a_let_global : $Klass +sil_global @a_var_global : $Klass + +enum EnumWithIndirectCase { +case first +indirect case second(Builtin.NativeObject) +} + +struct StructWithEnumWithIndirectCaseField { + var i: Builtin.Int23 + var field : EnumWithIndirectCase +} + +sil @get_fakeoptional_nativeobject : $@convention(thin) () -> @owned FakeOptional +sil @black_hole : $@convention(thin) (@guaranteed Klass) -> () + +/////////// +// Tests // +/////////// + +// Simple in_guaranteed argument load_copy. +// CHECK-LABEL: sil [ossa] @load_copy_from_in_guaranteed : $@convention(thin) (@in_guaranteed Builtin.NativeObject) -> () { +// CHECK: bb0([[ARG:%.*]] : +// CHECK: load_borrow +// CHECK: load_borrow +// CHECK: load [copy] +// CHECK: } // end sil function 'load_copy_from_in_guaranteed' +sil [ossa] @load_copy_from_in_guaranteed : $@convention(thin) (@in_guaranteed Builtin.NativeObject) -> () { +bb0(%0 : $*Builtin.NativeObject): + %g = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + // Simple same bb. + %1 = load [copy] %0 : $*Builtin.NativeObject + apply %g(%1) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + destroy_value %1 : $Builtin.NativeObject + + // Diamond. + %2 = load [copy] %0 : $*Builtin.NativeObject + cond_br undef, bb1, bb2 + +bb1: + apply %g(%2) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + destroy_value %2 : $Builtin.NativeObject + br bb3 + +bb2: + destroy_value %2 : $Builtin.NativeObject + br bb3 + +bb3: + // Consuming use blocks. 
+ %3 = load [copy] %0 : $*Builtin.NativeObject + %4 = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () + apply %4(%3) : $@convention(thin) (@owned Builtin.NativeObject) -> () + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @dont_copy_let_properties_with_guaranteed_base : +// CHECK: ref_element_addr +// CHECK-NEXT: load_borrow +// CHECK-NEXT: apply +// CHECK-NEXT: end_borrow +// CHECK-NEXT: return +// CHECK: } // end sil function 'dont_copy_let_properties_with_guaranteed_base' +sil [ossa] @dont_copy_let_properties_with_guaranteed_base : $@convention(thin) (@guaranteed ClassLet) -> () { +bb0(%x : @guaranteed $ClassLet): + %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () + + %p = ref_element_addr %x : $ClassLet, #ClassLet.aLet + %v = load [copy] %p : $*Klass + %b = begin_borrow %v : $Klass + apply %f(%b) : $@convention(thin) (@guaranteed Klass) -> () + end_borrow %b : $Klass + destroy_value %v : $Klass + + return undef : $() +} + +// CHECK-LABEL: sil [ossa] @dont_copy_let_properties_with_guaranteed_base_and_forwarding_uses : +// CHECK: ref_element_addr +// CHECK-NEXT: load_borrow +// CHECK-NEXT: unchecked_ref_cast +// CHECK-NEXT: apply +// CHECK-NEXT: end_borrow +// CHECK-NEXT: return +// CHECK: } // end sil function 'dont_copy_let_properties_with_guaranteed_base_and_forwarding_uses' +sil [ossa] @dont_copy_let_properties_with_guaranteed_base_and_forwarding_uses : $@convention(thin) (@guaranteed ClassLet) -> () { +bb0(%x : @guaranteed $ClassLet): + %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () + + %p = ref_element_addr %x : $ClassLet, #ClassLet.aLet + %v = load [copy] %p : $*Klass + %c = unchecked_ref_cast %v : $Klass to $Klass + %b = begin_borrow %c : $Klass + apply %f(%b) : $@convention(thin) (@guaranteed Klass) -> () + end_borrow %b : $Klass + destroy_value %c : $Klass + return undef : $() +} + +// CHECK-LABEL: sil [ossa] 
@dont_copy_let_properties_with_guaranteed_upcast_base : +// CHECK: ref_element_addr +// CHECK-NEXT: load_borrow +// CHECK-NEXT: apply +// CHECK-NEXT: end_borrow +// CHECK-NEXT: return +// CHECK: } // end sil function 'dont_copy_let_properties_with_guaranteed_upcast_base' +sil [ossa] @dont_copy_let_properties_with_guaranteed_upcast_base : $@convention(thin) (@guaranteed SubclassLet) -> () { +bb0(%x : @guaranteed $SubclassLet): + %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () + + %u = upcast %x : $SubclassLet to $ClassLet + %p = ref_element_addr %u : $ClassLet, #ClassLet.aLet + %v = load [copy] %p : $*Klass + %b = begin_borrow %v : $Klass + apply %f(%b) : $@convention(thin) (@guaranteed Klass) -> () + end_borrow %b : $Klass + destroy_value %v : $Klass + + return undef : $() +} + +// CHECK-LABEL: sil [ossa] @dont_copy_let_global : +// CHECK: global_addr +// CHECK-NEXT: load_borrow +// CHECK-NEXT: apply +// CHECK-NEXT: end_borrow +// CHECK-NEXT: return +// CHECK-NEXT: } // end sil function 'dont_copy_let_global' +sil [ossa] @dont_copy_let_global : $@convention(thin) () -> () { +bb0: + %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () + + %p = global_addr @a_let_global : $*Klass + %v = load [copy] %p : $*Klass + %b = begin_borrow %v : $Klass + apply %f(%b) : $@convention(thin) (@guaranteed Klass) -> () + end_borrow %b : $Klass + destroy_value %v : $Klass + + return undef : $() +} + +// CHECK-LABEL: sil [ossa] @dont_copy_let_properties_with_guaranteed_base_structural +// CHECK: ref_element_addr +// CHECK-NEXT: tuple_element_addr +// CHECK-NEXT: load_borrow +// CHECK-NEXT: apply +// CHECK-NEXT: end_borrow +// CHECK-NEXT: return +sil [ossa] @dont_copy_let_properties_with_guaranteed_base_structural : $@convention(thin) (@guaranteed ClassLet) -> () { +bb0(%x : @guaranteed $ClassLet): + %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () + + %p = ref_element_addr %x : $ClassLet, 
#ClassLet.aLetTuple + %q = tuple_element_addr %p : $*(Klass, Klass), 1 + %v = load [copy] %q : $*Klass + %b = begin_borrow %v : $Klass + apply %f(%b) : $@convention(thin) (@guaranteed Klass) -> () + end_borrow %b : $Klass + destroy_value %v : $Klass + + return undef : $() +} + +// CHECK-LABEL: sil [ossa] @do_copy_var_properties_with_guaranteed_base +// CHECK: ref_element_addr +// CHECK-NEXT: load [copy] +// CHECK-NEXT: apply +// CHECK-NEXT: destroy +// CHECK-NEXT: return +sil [ossa] @do_copy_var_properties_with_guaranteed_base : $@convention(thin) (@guaranteed ClassLet) -> () { +bb0(%x : @guaranteed $ClassLet): + %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () + + %p = ref_element_addr %x : $ClassLet, #ClassLet.aVar + %v = load [copy] %p : $*Klass + apply %f(%v) : $@convention(thin) (@guaranteed Klass) -> () + destroy_value %v : $Klass + + return undef : $() +} + +// CHECK-LABEL: sil [ossa] @do_copy_var_global +// CHECK: global_addr +// CHECK-NEXT: load [copy] +// CHECK-NEXT: apply +// CHECK-NEXT: destroy +// CHECK-NEXT: return +sil [ossa] @do_copy_var_global : $@convention(thin) () -> () { +bb0: + %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () + + %p = global_addr @a_var_global : $*Klass + %v = load [copy] %p : $*Klass + apply %f(%v) : $@convention(thin) (@guaranteed Klass) -> () + destroy_value %v : $Klass + + return undef : $() +} + +// CHECK-LABEL: sil [ossa] @dont_copy_let_properties_with_borrowed_base_that_dominates +// CHECK: [[OUTER:%.*]] = begin_borrow +// CHECK-NEXT: ref_element_addr +// CHECK-NEXT: [[INNER:%.*]] = load_borrow +// CHECK-NEXT: apply +// CHECK-NEXT: end_borrow [[INNER]] +// CHECK-NEXT: end_borrow [[OUTER]] +// CHECK-NEXT: destroy_value +sil [ossa] @dont_copy_let_properties_with_borrowed_base_that_dominates : $@convention(thin) (@owned ClassLet) -> () { +bb0(%x : @owned $ClassLet): + %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () + + %a = begin_borrow %x 
: $ClassLet + %p = ref_element_addr %a : $ClassLet, #ClassLet.aLet + %v = load [copy] %p : $*Klass + apply %f(%v) : $@convention(thin) (@guaranteed Klass) -> () + destroy_value %v : $Klass + + end_borrow %a : $ClassLet + destroy_value %x : $ClassLet + + return undef : $() +} + +// CHECK-LABEL: sil [ossa] @dont_copy_let_properties_with_borrowed_base_that_dominates_projtestcase : +// CHECK: load_borrow +// CHECK: } // end sil function 'dont_copy_let_properties_with_borrowed_base_that_dominates_projtestcase' +sil [ossa] @dont_copy_let_properties_with_borrowed_base_that_dominates_projtestcase : $@convention(thin) (@owned ClassLet) -> () { +bb0(%x : @owned $ClassLet): + %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () + + %a = begin_borrow %x : $ClassLet + %p = ref_element_addr %a : $ClassLet, #ClassLet.aLetTuple + %v = load [copy] %p : $*(Klass, Klass) + (%v1, %v2) = destructure_tuple %v : $(Klass, Klass) + apply %f(%v1) : $@convention(thin) (@guaranteed Klass) -> () + apply %f(%v2) : $@convention(thin) (@guaranteed Klass) -> () + destroy_value %v1 : $Klass + destroy_value %v2 : $Klass + end_borrow %a : $ClassLet + destroy_value %x : $ClassLet + + return undef : $() +} + +// CHECK-LABEL: sil [ossa] @dont_copy_let_properties_with_borrowed_base_that_dominates_projtestcase_2 : +// CHECK: load_borrow +// CHECK: } // end sil function 'dont_copy_let_properties_with_borrowed_base_that_dominates_projtestcase_2' +sil [ossa] @dont_copy_let_properties_with_borrowed_base_that_dominates_projtestcase_2 : $@convention(thin) (@owned ClassLet) -> () { +bb0(%x : @owned $ClassLet): + %f = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + + %a = begin_borrow %x : $ClassLet + %p = ref_element_addr %a : $ClassLet, #ClassLet.aLet + %v = load [copy] %p : $*Klass + %v_cast = unchecked_ref_cast %v : $Klass to $Builtin.NativeObject + apply %f(%v_cast) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + 
destroy_value %v_cast : $Builtin.NativeObject + end_borrow %a : $ClassLet + destroy_value %x : $ClassLet + + return undef : $() +} + +// CHECK-LABEL: sil [ossa] @dont_copy_let_properties_with_multi_borrowed_base_that_dominates +// CHECK: [[OUTER:%.*]] = begin_borrow +// CHECK-NEXT: ref_element_addr +// CHECK-NEXT: [[INNER:%.*]] = load_borrow +// CHECK-NEXT: apply +// CHECK-NEXT: end_borrow [[INNER]] +// CHECK-NEXT: end_borrow [[OUTER]] +// CHECK-NEXT: [[OUTER:%.*]] = begin_borrow +// CHECK-NEXT: ref_element_addr +// CHECK-NEXT: [[INNER:%.*]] = load_borrow +// CHECK-NEXT: apply +// CHECK-NEXT: end_borrow [[INNER]] +// CHECK-NEXT: end_borrow [[OUTER]] +// CHECK-NEXT: destroy_value +sil [ossa] @dont_copy_let_properties_with_multi_borrowed_base_that_dominates : $@convention(thin) (@owned ClassLet) -> () { +bb0(%x : @owned $ClassLet): + %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () + + %a = begin_borrow %x : $ClassLet + %p = ref_element_addr %a : $ClassLet, #ClassLet.aLet + %v = load [copy] %p : $*Klass + apply %f(%v) : $@convention(thin) (@guaranteed Klass) -> () + destroy_value %v : $Klass + end_borrow %a : $ClassLet + + %b = begin_borrow %x : $ClassLet + %q = ref_element_addr %b : $ClassLet, #ClassLet.aLet + %w = load [copy] %q : $*Klass + %b2 = begin_borrow %w : $Klass + apply %f(%b2) : $@convention(thin) (@guaranteed Klass) -> () + end_borrow %b2 : $Klass + destroy_value %w : $Klass + end_borrow %b : $ClassLet + + destroy_value %x : $ClassLet + + return undef : $() +} + +// CHECK-LABEL: sil [ossa] @do_copy_let_properties_with_borrowed_base_that_does_not_dominate +// CHECK: begin_borrow +// CHECK-NEXT: ref_element_addr +// CHECK-NEXT: load [copy] +// CHECK-NEXT: apply +// CHECK-NEXT: end_borrow +// CHECK-NEXT: destroy_value +// CHECK-NEXT: apply +// CHECK-NEXT: destroy_value +sil [ossa] @do_copy_let_properties_with_borrowed_base_that_does_not_dominate : $@convention(thin) (@owned ClassLet) -> () { +bb0(%x : @owned $ClassLet): + %f = 
function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () + + %a = begin_borrow %x : $ClassLet + %p = ref_element_addr %a : $ClassLet, #ClassLet.aLet + %v = load [copy] %p : $*Klass + %b = begin_borrow %v : $Klass + apply %f(%b) : $@convention(thin) (@guaranteed Klass) -> () + + // End the lifetime of the base object first... + end_borrow %a : $ClassLet + destroy_value %x : $ClassLet + + // ...then end the lifetime of the copy. + apply %f(%b) : $@convention(thin) (@guaranteed Klass) -> () + + end_borrow %b : $Klass + destroy_value %v : $Klass + + return undef : $() +} + +// CHECK-LABEL: sil [ossa] @do_or_dont_copy_let_properties_with_multi_borrowed_base_when_it_dominates_2 : +// CHECK: [[OUTER:%.*]] = begin_borrow +// CHECK-NEXT: ref_element_addr +// CHECK-NEXT: [[INNER:%.*]] = load_borrow +// CHECK-NEXT: apply +// CHECK-NEXT: end_borrow [[INNER]] +// CHECK-NEXT: end_borrow [[OUTER]] +// CHECK-NEXT: begin_borrow +// CHECK-NEXT: ref_element_addr +// CHECK-NEXT: load [copy] +// CHECK-NEXT: end_borrow +// CHECK-NEXT: destroy_value +// CHECK-NEXT: // function_ref +// CHECK-NEXT: function_ref +// CHECK-NEXT: enum +// CHECK-NEXT: apply +// CHECK-NEXT: destroy_value +// CHECK: } // end sil function 'do_or_dont_copy_let_properties_with_multi_borrowed_base_when_it_dominates_2' +sil [ossa] @do_or_dont_copy_let_properties_with_multi_borrowed_base_when_it_dominates_2 : $@convention(thin) (@owned ClassLet) -> () { +bb0(%x : @owned $ClassLet): + %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () + + %a = begin_borrow %x : $ClassLet + %p = ref_element_addr %a : $ClassLet, #ClassLet.aLet + %v = load [copy] %p : $*Klass + %c = begin_borrow %v : $Klass + apply %f(%c) : $@convention(thin) (@guaranteed Klass) -> () + end_borrow %c : $Klass + destroy_value %v : $Klass + end_borrow %a : $ClassLet + + %b = begin_borrow %x : $ClassLet + %q = ref_element_addr %b : $ClassLet, #ClassLet.aLet + %w = load [copy] %q : $*Klass + + // End the lifetime of 
the base object first... + end_borrow %b : $ClassLet + destroy_value %x : $ClassLet + + // ...then end the lifetime of the copy. + %f2 = function_ref @guaranteed_fakeoptional_klass_user : $@convention(thin) (@guaranteed FakeOptional) -> () + %w2 = enum $FakeOptional, #FakeOptional.some!enumelt, %w : $Klass + apply %f2(%w2) : $@convention(thin) (@guaranteed FakeOptional) -> () + + destroy_value %w2 : $FakeOptional + + return undef : $() +} + +// Make sure that we put the end_borrow on the load_borrow, not LHS or RHS. +// +// CHECK-LABEL: sil [ossa] @destructure_load_copy_to_load_borrow : $@convention(thin) (@guaranteed ClassLet) -> () { +// CHECK: bb0([[ARG:%.*]] : +// CHECK: [[INTERIOR_POINTER:%.*]] = ref_element_addr [[ARG]] +// CHECK: [[BORROWED_VAL:%.*]] = load_borrow [[INTERIOR_POINTER]] +// CHECK: ([[LHS:%.*]], [[RHS:%.*]]) = destructure_tuple [[BORROWED_VAL]] +// CHECK: apply {{%.*}}([[LHS]]) +// CHECK: apply {{%.*}}([[RHS]]) +// CHECK: end_borrow [[BORROWED_VAL]] +// CHECK: } // end sil function 'destructure_load_copy_to_load_borrow' +sil [ossa] @destructure_load_copy_to_load_borrow : $@convention(thin) (@guaranteed ClassLet) -> () { +bb0(%0 : @guaranteed $ClassLet): + %1 = ref_element_addr %0 : $ClassLet, #ClassLet.aLetTuple + %2 = load [copy] %1 : $*(Klass, Klass) + (%3, %4) = destructure_tuple %2 : $(Klass, Klass) + %5 = function_ref @guaranteed_klass_user : $@convention(thin) (@guaranteed Klass) -> () + %6 = apply %5(%3) : $@convention(thin) (@guaranteed Klass) -> () + %7 = apply %5(%4) : $@convention(thin) (@guaranteed Klass) -> () + destroy_value %3 : $Klass + destroy_value %4 : $Klass + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @single_init_allocstack : $@convention(thin) (@owned Klass) -> () { +// CHECK-NOT: load [copy] +// CHECK: } // end sil function 'single_init_allocstack' +sil [ossa] @single_init_allocstack : $@convention(thin) (@owned Klass) -> () { +bb0(%0 : @owned $Klass): + %1 = alloc_stack $Klass + store %0 to 
[init] %1 : $*Klass + %2 = load [copy] %1 : $*Klass + + %3 = function_ref @guaranteed_klass_user : $@convention(thin) (@guaranteed Klass) -> () + apply %3(%2) : $@convention(thin) (@guaranteed Klass) -> () + + destroy_value %2 : $Klass + destroy_addr %1 : $*Klass + dealloc_stack %1 : $*Klass + + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @multiple_init_allocstack : $@convention(thin) (@owned Klass) -> () { +// CHECK: load [copy] +// CHECK: } // end sil function 'multiple_init_allocstack' +sil [ossa] @multiple_init_allocstack : $@convention(thin) (@owned Klass) -> () { +bb0(%0 : @owned $Klass): + %0a = copy_value %0 : $Klass + %1 = alloc_stack $Klass + store %0 to [init] %1 : $*Klass + %2 = load [copy] %1 : $*Klass + + %3 = function_ref @guaranteed_klass_user : $@convention(thin) (@guaranteed Klass) -> () + apply %3(%2) : $@convention(thin) (@guaranteed Klass) -> () + + destroy_value %2 : $Klass + destroy_addr %1 : $*Klass + + store %0a to [init] %1 : $*Klass + destroy_addr %1 : $*Klass + dealloc_stack %1 : $*Klass + + %9999 = tuple() + return %9999 : $() +} + +// We could support this, but for now we are keeping things simple. If we do add +// support, this test will need to be updated. +// +// CHECK-LABEL: sil [ossa] @single_init_wrongblock : $@convention(thin) (@owned Klass) -> () { +// CHECK: load [copy] +// CHECK: } // end sil function 'single_init_wrongblock' +sil [ossa] @single_init_wrongblock : $@convention(thin) (@owned Klass) -> () { +bb0(%0 : @owned $Klass): + %1 = alloc_stack $Klass + br bb1 + +bb1: + store %0 to [init] %1 : $*Klass + %2 = load [copy] %1 : $*Klass + + %3 = function_ref @guaranteed_klass_user : $@convention(thin) (@guaranteed Klass) -> () + apply %3(%2) : $@convention(thin) (@guaranteed Klass) -> () + + destroy_value %2 : $Klass + destroy_addr %1 : $*Klass + dealloc_stack %1 : $*Klass + + %9999 = tuple() + return %9999 : $() +} + +// We could support this, but for now we are keeping things simple. 
If we do add +// support, this test will need to be updated. +// +// CHECK-LABEL: sil [ossa] @single_init_loadtake : $@convention(thin) (@owned Klass) -> () { +// CHECK: load [copy] +// CHECK: } // end sil function 'single_init_loadtake' +sil [ossa] @single_init_loadtake : $@convention(thin) (@owned Klass) -> () { +bb0(%0 : @owned $Klass): + %1 = alloc_stack $Klass + store %0 to [init] %1 : $*Klass + %2 = load [copy] %1 : $*Klass + + %3 = function_ref @guaranteed_klass_user : $@convention(thin) (@guaranteed Klass) -> () + apply %3(%2) : $@convention(thin) (@guaranteed Klass) -> () + + destroy_value %2 : $Klass + + %4 = load [take] %1 : $*Klass + destroy_value %4 : $Klass + dealloc_stack %1 : $*Klass + + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @inout_argument_never_written_to_1 : $@convention(thin) (@inout NativeObjectPair) -> () { +// CHECK-NOT: load [copy] +// CHECK: load_borrow +// CHECK-NOT: load [copy] +// CHECK: } // end sil function 'inout_argument_never_written_to_1' +sil [ossa] @inout_argument_never_written_to_1 : $@convention(thin) (@inout NativeObjectPair) -> () { +bb0(%0 : $*NativeObjectPair): + %2 = load [copy] %0 : $*NativeObjectPair + (%3, %4) = destructure_struct %2 : $NativeObjectPair + + %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + apply %5(%4) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + + destroy_value %3 : $Builtin.NativeObject + destroy_value %4 : $Builtin.NativeObject + + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @inout_argument_never_written_to_2 : $@convention(thin) (@inout NativeObjectPair) -> () { +// CHECK-NOT: load [copy] +// CHECK: load_borrow +// CHECK-NOT: load [copy] +// CHECK: } // end sil function 'inout_argument_never_written_to_2' +sil [ossa] @inout_argument_never_written_to_2 : $@convention(thin) (@inout NativeObjectPair) -> 
() { +bb0(%0 : $*NativeObjectPair): + %2 = struct_element_addr %0 : $*NativeObjectPair, #NativeObjectPair.obj1 + %3 = load [copy] %2 : $*Builtin.NativeObject + %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + destroy_value %3 : $Builtin.NativeObject + %9999 = tuple() + return %9999 : $() +} + +// We can handle this since the store is outside of our region. +// +// CHECK-LABEL: sil [ossa] @inout_argument_never_written_to_3 : $@convention(thin) (@inout NativeObjectPair, @owned Builtin.NativeObject) -> () { +// CHECK: load_borrow +// CHECK: } // end sil function 'inout_argument_never_written_to_3' +sil [ossa] @inout_argument_never_written_to_3 : $@convention(thin) (@inout NativeObjectPair, @owned Builtin.NativeObject) -> () { +bb0(%0 : $*NativeObjectPair, %1 : @owned $Builtin.NativeObject): + %2 = struct_element_addr %0 : $*NativeObjectPair, #NativeObjectPair.obj1 + %3 = load [copy] %2 : $*Builtin.NativeObject + %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + destroy_value %3 : $Builtin.NativeObject + store %1 to [assign] %2 : $*Builtin.NativeObject + %9999 = tuple() + return %9999 : $() +} + +// We cannot handle this since the store is inside of our region. 
+// +// CHECK-LABEL: sil [ossa] @inout_argument_never_written_to_4 : $@convention(thin) (@inout NativeObjectPair, @owned Builtin.NativeObject) -> () { +// CHECK: load [copy] +// CHECK: } // end sil function 'inout_argument_never_written_to_4' +sil [ossa] @inout_argument_never_written_to_4 : $@convention(thin) (@inout NativeObjectPair, @owned Builtin.NativeObject) -> () { +bb0(%0 : $*NativeObjectPair, %1 : @owned $Builtin.NativeObject): + %2 = struct_element_addr %0 : $*NativeObjectPair, #NativeObjectPair.obj1 + %3 = load [copy] %2 : $*Builtin.NativeObject + %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + store %1 to [assign] %2 : $*Builtin.NativeObject + destroy_value %3 : $Builtin.NativeObject + %9999 = tuple() + return %9999 : $() +} + +// We cannot handle this since the store is inside of our region. +// +// CHECK-LABEL: sil [ossa] @inout_argument_never_written_to_4a : $@convention(thin) (@inout NativeObjectPair, @owned Builtin.NativeObject) -> () { +// CHECK: load [copy] +// CHECK: } // end sil function 'inout_argument_never_written_to_4a' +sil [ossa] @inout_argument_never_written_to_4a : $@convention(thin) (@inout NativeObjectPair, @owned Builtin.NativeObject) -> () { +bb0(%0 : $*NativeObjectPair, %1 : @owned $Builtin.NativeObject): + %2 = struct_element_addr %0 : $*NativeObjectPair, #NativeObjectPair.obj1 + %3 = load [copy] %2 : $*Builtin.NativeObject + store %1 to [assign] %2 : $*Builtin.NativeObject + %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + destroy_value %3 : $Builtin.NativeObject + %9999 = tuple() + return %9999 : $() +} + +// We can handle this since the store is outside of our region. 
+// +// CHECK-LABEL: sil [ossa] @inout_argument_never_written_to_5 : $@convention(thin) (@inout NativeObjectPair, @owned Builtin.NativeObject) -> () { +// CHECK: load_borrow +// CHECK: } // end sil function 'inout_argument_never_written_to_5' +sil [ossa] @inout_argument_never_written_to_5 : $@convention(thin) (@inout NativeObjectPair, @owned Builtin.NativeObject) -> () { +bb0(%0 : $*NativeObjectPair, %1 : @owned $Builtin.NativeObject): + %2 = struct_element_addr %0 : $*NativeObjectPair, #NativeObjectPair.obj1 + store %1 to [assign] %2 : $*Builtin.NativeObject + %3 = load [copy] %2 : $*Builtin.NativeObject + %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + destroy_value %3 : $Builtin.NativeObject + %9999 = tuple() + return %9999 : $() +} + +// We can handle this since the store is outside of our region. +// +// CHECK-LABEL: sil [ossa] @inout_argument_never_written_to_6 : $@convention(thin) (@inout NativeObjectPair, @owned Builtin.NativeObject) -> () { +// CHECK: load_borrow +// CHECK: } // end sil function 'inout_argument_never_written_to_6' +sil [ossa] @inout_argument_never_written_to_6 : $@convention(thin) (@inout NativeObjectPair, @owned Builtin.NativeObject) -> () { +bb0(%0 : $*NativeObjectPair, %1 : @owned $Builtin.NativeObject): + %2 = struct_element_addr %0 : $*NativeObjectPair, #NativeObjectPair.obj1 + store %1 to [assign] %2 : $*Builtin.NativeObject + %3 = load [copy] %2 : $*Builtin.NativeObject + cond_br undef, bb1, bb2 + +bb1: + %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + br bb3 + +bb2: + br bb3 + +bb3: + destroy_value %3 : $Builtin.NativeObject + %9999 = tuple() + return %9999 : $() +} + +// We can handle this since the store is outside of our region. 
+// +// CHECK-LABEL: sil [ossa] @inout_argument_never_written_to_6a : $@convention(thin) (@inout NativeObjectPair, @owned Builtin.NativeObject) -> () { +// CHECK: load_borrow +// CHECK: } // end sil function 'inout_argument_never_written_to_6a' +sil [ossa] @inout_argument_never_written_to_6a : $@convention(thin) (@inout NativeObjectPair, @owned Builtin.NativeObject) -> () { +bb0(%0 : $*NativeObjectPair, %1 : @owned $Builtin.NativeObject): + %2 = struct_element_addr %0 : $*NativeObjectPair, #NativeObjectPair.obj1 + br bb0a + +bb0a: + store %1 to [assign] %2 : $*Builtin.NativeObject + %3 = load [copy] %2 : $*Builtin.NativeObject + cond_br undef, bb1, bb2 + +bb1: + %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + br bb3 + +bb2: + br bb3 + +bb3: + destroy_value %3 : $Builtin.NativeObject + %9999 = tuple() + return %9999 : $() +} + +// We can handle this since the store is outside of our region. 
+// +// CHECK-LABEL: sil [ossa] @inout_argument_never_written_to_6b : $@convention(thin) (@inout NativeObjectPair, @owned Builtin.NativeObject) -> () { +// CHECK: load_borrow +// CHECK: } // end sil function 'inout_argument_never_written_to_6b' +sil [ossa] @inout_argument_never_written_to_6b : $@convention(thin) (@inout NativeObjectPair, @owned Builtin.NativeObject) -> () { +bb0(%0 : $*NativeObjectPair, %1 : @owned $Builtin.NativeObject): + %2 = struct_element_addr %0 : $*NativeObjectPair, #NativeObjectPair.obj1 + store %1 to [assign] %2 : $*Builtin.NativeObject + br bb0a + +bb0a: + %3 = load [copy] %2 : $*Builtin.NativeObject + cond_br undef, bb1, bb2 + +bb1: + %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + br bb3 + +bb2: + br bb3 + +bb3: + destroy_value %3 : $Builtin.NativeObject + %9999 = tuple() + return %9999 : $() +} + +// We can handle this since the store is outside of our region. 
+// +// CHECK-LABEL: sil [ossa] @inout_argument_never_written_to_6c : $@convention(thin) (@inout NativeObjectPair, @owned Builtin.NativeObject) -> () { +// CHECK: load_borrow +// CHECK: } // end sil function 'inout_argument_never_written_to_6c' +sil [ossa] @inout_argument_never_written_to_6c : $@convention(thin) (@inout NativeObjectPair, @owned Builtin.NativeObject) -> () { +bb0(%0 : $*NativeObjectPair, %1 : @owned $Builtin.NativeObject): + %2 = struct_element_addr %0 : $*NativeObjectPair, #NativeObjectPair.obj1 + %1a = copy_value %1 : $Builtin.NativeObject + store %1 to [assign] %2 : $*Builtin.NativeObject + br bb0a + +bb0a: + %3 = load [copy] %2 : $*Builtin.NativeObject + cond_br undef, bb1, bb2 + +bb1: + %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + br bb3 + +bb2: + br bb3 + +bb3: + destroy_value %3 : $Builtin.NativeObject + store %1a to [assign] %2 : $*Builtin.NativeObject + %9999 = tuple() + return %9999 : $() +} + +// This case, we can not optimize since the write scope is created around our entire load [copy]. 
+// +// CHECK-LABEL: sil [ossa] @inout_argument_never_written_to_begin_access_1 : $@convention(thin) (@inout NativeObjectPair, @guaranteed Builtin.NativeObject) -> () { +// CHECK: load [copy] +// CHECK: } // end sil function 'inout_argument_never_written_to_begin_access_1' +sil [ossa] @inout_argument_never_written_to_begin_access_1 : $@convention(thin) (@inout NativeObjectPair, @guaranteed Builtin.NativeObject) -> () { +bb0(%0 : $*NativeObjectPair, %1 : @guaranteed $Builtin.NativeObject): + %2 = struct_element_addr %0 : $*NativeObjectPair, #NativeObjectPair.obj1 + %2a = begin_access [modify] [static] %2 : $*Builtin.NativeObject + %3 = load [copy] %2 : $*Builtin.NativeObject + %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + destroy_value %3 : $Builtin.NativeObject + end_access %2a : $*Builtin.NativeObject + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @inout_argument_never_written_to_begin_access_1a : $@convention(thin) (@inout NativeObjectPair, @guaranteed Builtin.NativeObject) -> () { +// CHECK: load [copy] +// CHECK: } // end sil function 'inout_argument_never_written_to_begin_access_1a' +sil [ossa] @inout_argument_never_written_to_begin_access_1a : $@convention(thin) (@inout NativeObjectPair, @guaranteed Builtin.NativeObject) -> () { +bb0(%0 : $*NativeObjectPair, %1 : @guaranteed $Builtin.NativeObject): + %2 = struct_element_addr %0 : $*NativeObjectPair, #NativeObjectPair.obj1 + %2a = begin_access [modify] [static] %2 : $*Builtin.NativeObject + %3 = load [copy] %2 : $*Builtin.NativeObject + %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + end_access %2a : $*Builtin.NativeObject + destroy_value %3 : $Builtin.NativeObject + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil 
[ossa] @inout_argument_never_written_to_begin_access_1b : $@convention(thin) (@inout NativeObjectPair, @guaranteed Builtin.NativeObject) -> () { +// CHECK: load [copy] +// CHECK: } // end sil function 'inout_argument_never_written_to_begin_access_1b' +sil [ossa] @inout_argument_never_written_to_begin_access_1b : $@convention(thin) (@inout NativeObjectPair, @guaranteed Builtin.NativeObject) -> () { +bb0(%0 : $*NativeObjectPair, %1 : @guaranteed $Builtin.NativeObject): + %2 = struct_element_addr %0 : $*NativeObjectPair, #NativeObjectPair.obj1 + %3 = load [copy] %2 : $*Builtin.NativeObject + %2a = begin_access [modify] [static] %2 : $*Builtin.NativeObject + %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + end_access %2a : $*Builtin.NativeObject + destroy_value %3 : $Builtin.NativeObject + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @inout_argument_never_written_to_begin_access_1c : $@convention(thin) (@inout NativeObjectPair, @guaranteed Builtin.NativeObject) -> () { +// CHECK: load [copy] +// CHECK: } // end sil function 'inout_argument_never_written_to_begin_access_1c' +sil [ossa] @inout_argument_never_written_to_begin_access_1c : $@convention(thin) (@inout NativeObjectPair, @guaranteed Builtin.NativeObject) -> () { +bb0(%0 : $*NativeObjectPair, %1 : @guaranteed $Builtin.NativeObject): + %2 = struct_element_addr %0 : $*NativeObjectPair, #NativeObjectPair.obj1 + %3 = load [copy] %2 : $*Builtin.NativeObject + %2a = begin_access [modify] [static] %2 : $*Builtin.NativeObject + %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + destroy_value %3 : $Builtin.NativeObject + end_access %2a : $*Builtin.NativeObject + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] 
@inout_argument_never_written_to_begin_access_2a : $@convention(thin) (@inout NativeObjectPair, @guaranteed Builtin.NativeObject) -> () { +// CHECK: load [copy] +// CHECK: } // end sil function 'inout_argument_never_written_to_begin_access_2a' +sil [ossa] @inout_argument_never_written_to_begin_access_2a : $@convention(thin) (@inout NativeObjectPair, @guaranteed Builtin.NativeObject) -> () { +bb0(%0 : $*NativeObjectPair, %1 : @guaranteed $Builtin.NativeObject): + %2 = struct_element_addr %0 : $*NativeObjectPair, #NativeObjectPair.obj1 + %3 = load [copy] %2 : $*Builtin.NativeObject + cond_br undef, bb1, bb2 + +bb1: + %2a = begin_access [modify] [static] %2 : $*Builtin.NativeObject + %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + destroy_value %3 : $Builtin.NativeObject + end_access %2a : $*Builtin.NativeObject + br bb3 + +bb2: + destroy_value %3 : $Builtin.NativeObject + br bb3 + +bb3: + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @inout_argument_never_written_to_begin_access_2b : $@convention(thin) (@inout NativeObjectPair, @guaranteed Builtin.NativeObject) -> () { +// CHECK: load [copy] +// CHECK: } // end sil function 'inout_argument_never_written_to_begin_access_2b' +sil [ossa] @inout_argument_never_written_to_begin_access_2b : $@convention(thin) (@inout NativeObjectPair, @guaranteed Builtin.NativeObject) -> () { +bb0(%0 : $*NativeObjectPair, %1 : @guaranteed $Builtin.NativeObject): + %2 = struct_element_addr %0 : $*NativeObjectPair, #NativeObjectPair.obj1 + %3 = load [copy] %2 : $*Builtin.NativeObject + br bb0a + +bb0a: + %2a = begin_access [modify] [static] %2 : $*Builtin.NativeObject + cond_br undef, bb1, bb2 + +bb1: + %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + destroy_value %3 
: $Builtin.NativeObject + br bb3 + +bb2: + destroy_value %3 : $Builtin.NativeObject + br bb3 + +bb3: + end_access %2a : $*Builtin.NativeObject + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @inout_argument_never_written_to_begin_access_2c : $@convention(thin) (@inout NativeObjectPair, @guaranteed Builtin.NativeObject) -> () { +// CHECK: load [copy] +// CHECK: } // end sil function 'inout_argument_never_written_to_begin_access_2c' +sil [ossa] @inout_argument_never_written_to_begin_access_2c : $@convention(thin) (@inout NativeObjectPair, @guaranteed Builtin.NativeObject) -> () { +bb0(%0 : $*NativeObjectPair, %1 : @guaranteed $Builtin.NativeObject): + %2 = struct_element_addr %0 : $*NativeObjectPair, #NativeObjectPair.obj1 + %2a = begin_access [modify] [static] %2 : $*Builtin.NativeObject + br bb0a + +bb0a: + %3 = load [copy] %2 : $*Builtin.NativeObject + cond_br undef, bb1, bb2 + +bb1: + %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + destroy_value %3 : $Builtin.NativeObject + br bb3 + +bb2: + destroy_value %3 : $Builtin.NativeObject + br bb3 + +bb3: + end_access %2a : $*Builtin.NativeObject + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @switch_enum_test_loadcopy_no_default : $@convention(thin) (@owned FakeOptional) -> () { +// CHECK-NOT: load [copy] +// CHECK: load_borrow +// CHECK-NOT: load [copy] +// CHECK: } // end sil function 'switch_enum_test_loadcopy_no_default' +sil [ossa] @switch_enum_test_loadcopy_no_default : $@convention(thin) (@owned FakeOptional) -> () { +bb0(%0 : @owned $FakeOptional): + %0a = alloc_stack $FakeOptional + store %0 to [init] %0a : $*FakeOptional + %1 = load [copy] %0a : $*FakeOptional + switch_enum %1 : $FakeOptional, case #FakeOptional.some!enumelt: bb1, case #FakeOptional.none!enumelt: bb2 + +bb1(%2 : @owned $Builtin.NativeObject): + destroy_value %2 : 
$Builtin.NativeObject + br bb3 + +bb2: + br bb3 + +bb3: + destroy_addr %0a : $*FakeOptional + dealloc_stack %0a : $*FakeOptional + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @switch_enum_test_loadcopy_with_default : $@convention(thin) (@owned FakeOptional) -> () { +// CHECK-NOT: load [copy] +// CHECK: load_borrow +// CHECK-NOT: load [copy] +// CHECK: } // end sil function 'switch_enum_test_loadcopy_with_default' +sil [ossa] @switch_enum_test_loadcopy_with_default : $@convention(thin) (@owned FakeOptional) -> () { +bb0(%0 : @owned $FakeOptional): + %0a = alloc_stack $FakeOptional + store %0 to [init] %0a : $*FakeOptional + %1 = load [copy] %0a : $*FakeOptional + switch_enum %1 : $FakeOptional, case #FakeOptional.some!enumelt: bb1, default bb2 + +bb1(%2 : @owned $Builtin.NativeObject): + destroy_value %2 : $Builtin.NativeObject + br bb3 + +bb2(%3 : @owned $FakeOptional): + destroy_value %3 : $FakeOptional + br bb3 + +bb3: + destroy_addr %0a : $*FakeOptional + dealloc_stack %0a : $*FakeOptional + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @switch_enum_test_loadcopy_with_leaked_enum_case : $@convention(thin) (@owned FakeOptional) -> () { +// CHECK-NOT: load [copy] +// CHECK: load_borrow +// CHECK-NOT: load [copy] +// CHECK: } // end sil function 'switch_enum_test_loadcopy_with_leaked_enum_case' +sil [ossa] @switch_enum_test_loadcopy_with_leaked_enum_case : $@convention(thin) (@owned FakeOptional) -> () { +bb0(%0 : @owned $FakeOptional): + %0a = alloc_stack $FakeOptional + store %0 to [init] %0a : $*FakeOptional + %1 = load [copy] %0a : $*FakeOptional + switch_enum %1 : $FakeOptional, case #FakeOptional.some!enumelt: bb1, case #FakeOptional.none!enumelt: bb2 + +bb1(%2 : @owned $Builtin.NativeObject): + unreachable + +bb2: + br bb3 + +bb3: + destroy_addr %0a : $*FakeOptional + dealloc_stack %0a : $*FakeOptional + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] 
@convert_load_copy_to_load_borrow_despite_switch_enum_functionarg : $@convention(thin) (@guaranteed FakeOptional) -> () { +// CHECK-NOT: load [copy] +// CHECK: load_borrow +// CHECK-NOT: load [copy] +// CHECK: } // end sil function 'convert_load_copy_to_load_borrow_despite_switch_enum_functionarg' +sil [ossa] @convert_load_copy_to_load_borrow_despite_switch_enum_functionarg : $@convention(thin) (@guaranteed FakeOptional) -> () { +bb0(%0 : @guaranteed $FakeOptional): + %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () + switch_enum %0 : $FakeOptional, case #FakeOptional.some!enumelt: bb1, case #FakeOptional.none!enumelt: bb2 + +bb1(%1 : @guaranteed $ClassLet): + %2 = ref_element_addr %1 : $ClassLet, #ClassLet.aLet + %3 = load [copy] %2 : $*Klass + apply %f(%3) : $@convention(thin) (@guaranteed Klass) -> () + destroy_value %3 : $Klass + br bb3 + +bb2: + br bb3 + +bb3: + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @convert_load_copy_to_load_borrow_despite_switch_enum_beginborrow : $@convention(thin) (@owned FakeOptional) -> () { +// CHECK-NOT: load [copy] +// CHECK: load_borrow +// CHECK-NOT: load [copy] +// CHECK: } // end sil function 'convert_load_copy_to_load_borrow_despite_switch_enum_beginborrow' +sil [ossa] @convert_load_copy_to_load_borrow_despite_switch_enum_beginborrow : $@convention(thin) (@owned FakeOptional) -> () { +bb0(%0 : @owned $FakeOptional): + %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () + %0a = begin_borrow %0 : $FakeOptional + switch_enum %0a : $FakeOptional, case #FakeOptional.some!enumelt: bb1, case #FakeOptional.none!enumelt: bb2 + +bb1(%1 : @guaranteed $ClassLet): + %2 = ref_element_addr %1 : $ClassLet, #ClassLet.aLet + %3 = load [copy] %2 : $*Klass + apply %f(%3) : $@convention(thin) (@guaranteed Klass) -> () + destroy_value %3 : $Klass + br bb3 + +bb2: + br bb3 + +bb3: + end_borrow %0a : $FakeOptional + destroy_value %0 : $FakeOptional + %9999 = tuple() + 
return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @convert_load_copy_to_load_borrow_despite_switch_enum_loadborrow : $@convention(thin) (@in_guaranteed FakeOptional) -> () { +// CHECK-NOT: load [copy] +// CHECK: load_borrow +// CHECK-NOT: load [copy] +// CHECK: load_borrow +// CHECK-NOT: load [copy] +// CHECK: } // end sil function 'convert_load_copy_to_load_borrow_despite_switch_enum_loadborrow' +sil [ossa] @convert_load_copy_to_load_borrow_despite_switch_enum_loadborrow : $@convention(thin) (@in_guaranteed FakeOptional) -> () { +bb0(%0 : $*FakeOptional): + %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () + %0a = load_borrow %0 : $*FakeOptional + switch_enum %0a : $FakeOptional, case #FakeOptional.some!enumelt: bb1, case #FakeOptional.none!enumelt: bb2 + +bb1(%1 : @guaranteed $ClassLet): + %2 = ref_element_addr %1 : $ClassLet, #ClassLet.aLet + %3 = load [copy] %2 : $*Klass + apply %f(%3) : $@convention(thin) (@guaranteed Klass) -> () + destroy_value %3 : $Klass + br bb3 + +bb2: + br bb3 + +bb3: + end_borrow %0a : $FakeOptional + %9999 = tuple() + return %9999 : $() +} + +// TODO: We can support this in a little bit once the rest of SemanticARCOpts is +// guaranteed to be safe with guaranteed phis. 
+// +// CHECK-LABEL: sil [ossa] @convert_load_copy_to_load_borrow_despite_switch_enum_guaranteedphi_1 : $@convention(thin) (@owned FakeOptional) -> () { +// CHECK: load [copy] +// CHECK: } // end sil function 'convert_load_copy_to_load_borrow_despite_switch_enum_guaranteedphi_1' +sil [ossa] @convert_load_copy_to_load_borrow_despite_switch_enum_guaranteedphi_1 : $@convention(thin) (@owned FakeOptional) -> () { +bb0(%0 : @owned $FakeOptional): + %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () + cond_br undef, bb0a, bb0b + +bb0a: + %0a = begin_borrow %0 : $FakeOptional + br bb0c(%0a : $FakeOptional) + +bb0b: + %0b = begin_borrow %0 : $FakeOptional + br bb0c(%0b : $FakeOptional) + +bb0c(%0c : @guaranteed $FakeOptional): + switch_enum %0c : $FakeOptional, case #FakeOptional.some!enumelt: bb1, case #FakeOptional.none!enumelt: bb2 + +bb1(%1 : @guaranteed $ClassLet): + %2 = ref_element_addr %1 : $ClassLet, #ClassLet.aLet + %3 = load [copy] %2 : $*Klass + apply %f(%3) : $@convention(thin) (@guaranteed Klass) -> () + destroy_value %3 : $Klass + br bb3 + +bb2: + br bb3 + +bb3: + end_borrow %0c : $FakeOptional + destroy_value %0 : $FakeOptional + %9999 = tuple() + return %9999 : $() +} + +// Make sure that if begin_borrow has a consuming end scope use, we can still +// eliminate load [copy]. 
+// +// CHECK-LABEL: sil [ossa] @convert_load_copy_to_load_borrow_despite_switch_enum_guaranteedphi_2 : $@convention(thin) (@owned FakeOptional) -> () { +// CHECK-NOT: load [copy] +// CHECK: load_borrow +// CHECK-NOT: load [copy] +// CHECK: } // end sil function 'convert_load_copy_to_load_borrow_despite_switch_enum_guaranteedphi_2' +sil [ossa] @convert_load_copy_to_load_borrow_despite_switch_enum_guaranteedphi_2 : $@convention(thin) (@owned FakeOptional) -> () { +bb0(%0 : @owned $FakeOptional): + %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () + cond_br undef, bb1, bb2 + +bb1: + %0a = begin_borrow %0 : $FakeOptional + br bb3(%0a : $FakeOptional) + +bb2: + %0b = begin_borrow %0 : $FakeOptional + %0b2 = unchecked_enum_data %0b : $FakeOptional, #FakeOptional.some!enumelt + %2 = ref_element_addr %0b2 : $ClassLet, #ClassLet.aLet + %3 = load [copy] %2 : $*Klass + apply %f(%3) : $@convention(thin) (@guaranteed Klass) -> () + destroy_value %3 : $Klass + br bb3(%0b : $FakeOptional) + +bb3(%0c : @guaranteed $FakeOptional): + %f2 = function_ref @guaranteed_fakeoptional_classlet_user : $@convention(thin) (@guaranteed FakeOptional) -> () + apply %f2(%0c) : $@convention(thin) (@guaranteed FakeOptional) -> () + end_borrow %0c : $FakeOptional + destroy_value %0 : $FakeOptional + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @loadcopy_to_loadborrow_from_read_access : $@convention(thin) (@guaranteed ClassLet) -> () { +// CHECK-NOT: load [copy] +// CHECK: load_borrow +// CHECK-NOT: load [copy] +// CHECK: } // end sil function 'loadcopy_to_loadborrow_from_read_access' +sil [ossa] @loadcopy_to_loadborrow_from_read_access : $@convention(thin) (@guaranteed ClassLet) -> () { +bb0(%0 : @guaranteed $ClassLet): + %1 = ref_element_addr %0 : $ClassLet, #ClassLet.aVar + %2 = begin_access [read] [dynamic] %1 : $*Klass + %3 = load [copy] %2 : $*Klass + %f = function_ref @guaranteed_klass_user : $@convention(thin) (@guaranteed Klass) -> () + 
apply %f(%3) : $@convention(thin) (@guaranteed Klass) -> () + destroy_value %3 : $Klass + end_access %2 : $*Klass + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @loadcopy_to_loadborrow_from_mut_access_without_writes : $@convention(thin) (@guaranteed ClassLet) -> () { +// CHECK-NOT: load [copy] +// CHECK: load_borrow +// CHECK-NOT: load [copy] +// CHECK: } // end sil function 'loadcopy_to_loadborrow_from_mut_access_without_writes' +sil [ossa] @loadcopy_to_loadborrow_from_mut_access_without_writes : $@convention(thin) (@guaranteed ClassLet) -> () { +bb0(%0 : @guaranteed $ClassLet): + %1 = ref_element_addr %0 : $ClassLet, #ClassLet.aVar + %2 = begin_access [modify] [dynamic] %1 : $*Klass + %3 = load [copy] %2 : $*Klass + %f = function_ref @guaranteed_klass_user : $@convention(thin) (@guaranteed Klass) -> () + apply %f(%3) : $@convention(thin) (@guaranteed Klass) -> () + destroy_value %3 : $Klass + end_access %2 : $*Klass + %9999 = tuple() + return %9999 : $() +} + +// We can with time handle this case by proving that the destroy_addr is after +// the destroy_value. 
+// +// CHECK-LABEL: sil [ossa] @loadcopy_to_loadborrow_from_mut_access_with_writes : $@convention(thin) (@guaranteed ClassLet) -> () { +// CHECK-NOT: load_borrow +// CHECK: load [copy] +// CHECK-NOT: load_borrow +// CHECK: } // end sil function 'loadcopy_to_loadborrow_from_mut_access_with_writes' +sil [ossa] @loadcopy_to_loadborrow_from_mut_access_with_writes : $@convention(thin) (@guaranteed ClassLet) -> () { +bb0(%0 : @guaranteed $ClassLet): + %1 = ref_element_addr %0 : $ClassLet, #ClassLet.aVar + %2 = begin_access [modify] [dynamic] %1 : $*Klass + %3 = load [copy] %2 : $*Klass + %f = function_ref @guaranteed_klass_user : $@convention(thin) (@guaranteed Klass) -> () + apply %f(%3) : $@convention(thin) (@guaranteed Klass) -> () + destroy_value %3 : $Klass + destroy_addr %2 : $*Klass + end_access %2 : $*Klass + %9999 = tuple() + return %9999 : $() +} + +// We will never be able to handle this unless we can hoist the copy before the +// destroy_addr. Once we have begin_borrows around all interior_pointers, we can +// handle this version. 
+// +// CHECK-LABEL: sil [ossa] @loadcopy_to_loadborrow_from_mut_access_with_writes_2 : $@convention(thin) (@guaranteed ClassLet) -> () { +// CHECK-NOT: load_borrow +// CHECK: load [copy] +// CHECK-NOT: load_borrow +// CHECK: } // end sil function 'loadcopy_to_loadborrow_from_mut_access_with_writes_2' +sil [ossa] @loadcopy_to_loadborrow_from_mut_access_with_writes_2 : $@convention(thin) (@guaranteed ClassLet) -> () { +bb0(%0 : @guaranteed $ClassLet): + %1 = ref_element_addr %0 : $ClassLet, #ClassLet.aVar + %2 = begin_access [modify] [dynamic] %1 : $*Klass + %3 = load [copy] %2 : $*Klass + %f = function_ref @guaranteed_klass_user : $@convention(thin) (@guaranteed Klass) -> () + apply %f(%3) : $@convention(thin) (@guaranteed Klass) -> () + destroy_addr %2 : $*Klass + destroy_value %3 : $Klass + end_access %2 : $*Klass + %9999 = tuple() + return %9999 : $() +} + +// We will never be able to handle this since we can't hoist the destroy_value +// before the guaranteed_klass_user. +// +// CHECK-LABEL: sil [ossa] @loadcopy_to_loadborrow_from_mut_access_with_writes_3 : $@convention(thin) (@guaranteed ClassLet) -> () { +// CHECK-NOT: load_borrow +// CHECK: load [copy] +// CHECK-NOT: load_borrow +// CHECK: } // end sil function 'loadcopy_to_loadborrow_from_mut_access_with_writes_3' +sil [ossa] @loadcopy_to_loadborrow_from_mut_access_with_writes_3 : $@convention(thin) (@guaranteed ClassLet) -> () { +bb0(%0 : @guaranteed $ClassLet): + %1 = ref_element_addr %0 : $ClassLet, #ClassLet.aVar + %2 = begin_access [modify] [dynamic] %1 : $*Klass + %3 = load [copy] %2 : $*Klass + destroy_addr %2 : $*Klass + %f = function_ref @guaranteed_klass_user : $@convention(thin) (@guaranteed Klass) -> () + apply %f(%3) : $@convention(thin) (@guaranteed Klass) -> () + destroy_value %3 : $Klass + end_access %2 : $*Klass + %9999 = tuple() + return %9999 : $() +} + +// We will never be able to handle this since the end_access is before the use +// of %3, so we can not form a long enough load_borrow. 
+// +// CHECK-LABEL: sil [ossa] @loadcopy_to_loadborrow_from_mut_access_with_writes_4 : $@convention(thin) (@guaranteed ClassLet) -> () { +// CHECK-NOT: load_borrow +// CHECK: load [copy] +// CHECK-NOT: load_borrow +// CHECK: } // end sil function 'loadcopy_to_loadborrow_from_mut_access_with_writes_4' +sil [ossa] @loadcopy_to_loadborrow_from_mut_access_with_writes_4 : $@convention(thin) (@guaranteed ClassLet) -> () { +bb0(%0 : @guaranteed $ClassLet): + %1 = ref_element_addr %0 : $ClassLet, #ClassLet.aVar + %2 = begin_access [modify] [dynamic] %1 : $*Klass + %3 = load [copy] %2 : $*Klass + end_access %2 : $*Klass + %f = function_ref @guaranteed_klass_user : $@convention(thin) (@guaranteed Klass) -> () + apply %f(%3) : $@convention(thin) (@guaranteed Klass) -> () + destroy_value %3 : $Klass + %9999 = tuple() + return %9999 : $() +} + +// Make sure that we do not promote the load [copy] to a load_borrow since it +// has a use outside of the access scope. +// +// CHECK-LABEL: sil [ossa] @deadEndBlockDoNotPromote : $@convention(method) (@guaranteed ClassLet) -> () { +// CHECK: load_borrow +// CHECK: load [copy] +// CHECK: } // end sil function 'deadEndBlockDoNotPromote' +sil [ossa] @deadEndBlockDoNotPromote : $@convention(method) (@guaranteed ClassLet) -> () { +bb0(%0 : @guaranteed $ClassLet): + %4 = ref_element_addr %0 : $ClassLet, #ClassLet.anotherLet + %5 = load [copy] %4 : $*ClassLet + %6 = begin_borrow %5 : $ClassLet + %7 = ref_element_addr %6 : $ClassLet, #ClassLet.anOptionalLet + %8 = begin_access [read] [dynamic] %7 : $*FakeOptional + %9 = load [copy] %8 : $*FakeOptional + end_access %8 : $*FakeOptional + end_borrow %6 : $ClassLet + destroy_value %5 : $ClassLet + switch_enum %9 : $FakeOptional, case #FakeOptional.none!enumelt: bb1, case #FakeOptional.some!enumelt: bb2 + +bb1: + %107 = tuple () + return %107 : $() + +bb2(%39 : @owned $Klass): + unreachable +} + +// CHECK-LABEL: sil [ossa] @destructure_with_differing_lifetimes_inout_1 : $@convention(thin) (@inout 
FakeOptionalNativeObjectPairPair) -> () { +// CHECK-NOT: load_borrow +// CHECK: } // end sil function 'destructure_with_differing_lifetimes_inout_1' +sil [ossa] @destructure_with_differing_lifetimes_inout_1 : $@convention(thin) (@inout FakeOptionalNativeObjectPairPair) -> () { +bb0(%0 : $*FakeOptionalNativeObjectPairPair): + %0a = struct_element_addr %0 : $*FakeOptionalNativeObjectPairPair, #FakeOptionalNativeObjectPairPair.pair1 + %1 = load [copy] %0a : $*FakeOptional + switch_enum %1 : $FakeOptional, case #FakeOptional.some!enumelt: bb1, default bb2 + +bb2(%2 : @owned $FakeOptional): + destroy_value %2 : $FakeOptional + br bbEnd + +bb1(%3 : @owned $NativeObjectPair): + (%3a, %3b) = destructure_struct %3 : $NativeObjectPair + cond_br undef, bb1a, bb1b + +bb1a: + destroy_value %3a : $Builtin.NativeObject + destroy_value %3b : $Builtin.NativeObject + br bbEnd + +bb1b: + destroy_value %3a : $Builtin.NativeObject + %f = function_ref @inout_user2 : $@convention(thin) (@inout FakeOptionalNativeObjectPairPair) -> () + apply %f(%0) : $@convention(thin) (@inout FakeOptionalNativeObjectPairPair) -> () + destroy_value %3b : $Builtin.NativeObject + br bbEnd + +bbEnd: + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @destructure_with_differing_lifetimes_inout_2 : $@convention(thin) (@inout FakeOptionalNativeObjectPairPair) -> () { +// CHECK-NOT: load_borrow +// CHECK: } // end sil function 'destructure_with_differing_lifetimes_inout_2' +sil [ossa] @destructure_with_differing_lifetimes_inout_2 : $@convention(thin) (@inout FakeOptionalNativeObjectPairPair) -> () { +bb0(%0 : $*FakeOptionalNativeObjectPairPair): + %0a = struct_element_addr %0 : $*FakeOptionalNativeObjectPairPair, #FakeOptionalNativeObjectPairPair.pair1 + %1 = load [copy] %0a : $*FakeOptional + switch_enum %1 : $FakeOptional, case #FakeOptional.some!enumelt: bb1, default bb2 + +bb2(%2 : @owned $FakeOptional): + destroy_value %2 : $FakeOptional + br bbEnd + +bb1(%3 : @owned $NativeObjectPair): 
+ (%3a, %3b) = destructure_struct %3 : $NativeObjectPair + cond_br undef, bb1a, bb1b + +bb1a: + destroy_value %3a : $Builtin.NativeObject + br bb1ab + +bb1ab: + destroy_value %3b : $Builtin.NativeObject + br bbEnd + +bb1b: + destroy_value %3a : $Builtin.NativeObject + %f = function_ref @inout_user2 : $@convention(thin) (@inout FakeOptionalNativeObjectPairPair) -> () + apply %f(%0) : $@convention(thin) (@inout FakeOptionalNativeObjectPairPair) -> () + cond_br undef, bb1ba, bb1bb + +bb1ba: + br bb1baEnd + +bb1bb: + br bb1baEnd + +bb1baEnd: + destroy_value %3b : $Builtin.NativeObject + br bbEnd + +bbEnd: + %9999 = tuple() + return %9999 : $() +} + +// Just make sure that we do not crash on this code and convert the 2nd load +// [copy] to a load_borrow. +// +// CHECK-LABEL: sil [ossa] @inproper_dead_end_block_crasher_test : $@convention(thin) (Builtin.RawPointer) -> () { +// CHECK: load_borrow +// CHECK: load_borrow +// CHECK: } // end sil function 'inproper_dead_end_block_crasher_test' +sil [ossa] @inproper_dead_end_block_crasher_test : $@convention(thin) (Builtin.RawPointer) -> () { +bb0(%0 : $Builtin.RawPointer): + %1 = pointer_to_address %0 : $Builtin.RawPointer to [strict] $*Klass + %2 = load_borrow %1 : $*Klass + %3 = ref_element_addr %2 : $Klass, #Klass.baseLet + %4 = load [copy] %3 : $*Klass + %f = function_ref @guaranteed_klass_user : $@convention(thin) (@guaranteed Klass) -> () + apply %f(%4) : $@convention(thin) (@guaranteed Klass) -> () + destroy_value %4 : $Klass + cond_br undef, bb1, bb2 + +bb1: + unreachable + +bb2: + unreachable +} diff --git a/test/SILOptimizer/semantic-arc-opts-redundantcopyopts.sil b/test/SILOptimizer/semantic-arc-opts-redundantcopyopts.sil new file mode 100644 index 0000000000000..275aecb8599a9 --- /dev/null +++ b/test/SILOptimizer/semantic-arc-opts-redundantcopyopts.sil @@ -0,0 +1,188 @@ +// RUN: %target-sil-opt -module-name Swift -enable-sil-verify-all -semantic-arc-opts -sil-semantic-arc-peepholes-redundant-copyvalue-elim %s | 
%FileCheck %s +// REQUIRES: swift_stdlib_asserts + +// NOTE: Some of our tests here depend on borrow elimination /not/ running! +// Please do not add it to clean up the IR like we did in +// semanticarcopts-loadcopy-to-loadborrow! + +sil_stage canonical + +import Builtin + +////////////////// +// Declarations // +////////////////// + +typealias AnyObject = Builtin.AnyObject + +enum MyNever {} +enum FakeOptional { +case none +case some(T) +} + +sil [ossa] @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () +sil [ossa] @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () +sil [ossa] @get_owned_obj : $@convention(thin) () -> @owned Builtin.NativeObject +sil [ossa] @unreachable_guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> MyNever +sil [ossa] @inout_user : $@convention(thin) (@inout FakeOptional) -> () +sil [ossa] @get_native_object : $@convention(thin) () -> @owned Builtin.NativeObject + +struct NativeObjectPair { + var obj1 : Builtin.NativeObject + var obj2 : Builtin.NativeObject +} + +sil [ossa] @get_object_pair : $@convention(thin) () -> @owned NativeObjectPair + +struct FakeOptionalNativeObjectPairPair { + var pair1 : FakeOptional + var pair2 : FakeOptional +} +sil [ossa] @inout_user2 : $@convention(thin) (@inout FakeOptionalNativeObjectPairPair) -> () + +sil [ossa] @get_nativeobject_pair : $@convention(thin) () -> @owned NativeObjectPair +sil [ossa] @consume_nativeobject_pair : $@convention(thin) (@owned NativeObjectPair) -> () + +protocol MyFakeAnyObject : Klass { + func myFakeMethod() +} + +final class Klass { + var base: Klass + let baseLet: Klass +} + +extension Klass : MyFakeAnyObject { + func myFakeMethod() +} +sil [ossa] @guaranteed_klass_user : $@convention(thin) (@guaranteed Klass) -> () +sil [ossa] @guaranteed_fakeoptional_klass_user : $@convention(thin) (@guaranteed FakeOptional) -> () +sil [ossa] @guaranteed_fakeoptional_classlet_user : $@convention(thin) (@guaranteed 
FakeOptional) -> () + +struct MyInt { + var value: Builtin.Int32 +} + +struct StructWithDataAndOwner { + var data : Builtin.Int32 + var owner : Klass +} + +struct StructMemberTest { + var c : Klass + var s : StructWithDataAndOwner + var t : (Builtin.Int32, StructWithDataAndOwner) +} + +class ClassLet { + @_hasStorage let aLet: Klass + @_hasStorage var aVar: Klass + @_hasStorage let aLetTuple: (Klass, Klass) + @_hasStorage let anOptionalLet: FakeOptional + + @_hasStorage let anotherLet: ClassLet +} + +class SubclassLet: ClassLet {} + +sil_global [let] @a_let_global : $Klass +sil_global @a_var_global : $Klass + +enum EnumWithIndirectCase { +case first +indirect case second(Builtin.NativeObject) +} + +struct StructWithEnumWithIndirectCaseField { + var i: Builtin.Int23 + var field : EnumWithIndirectCase +} + +sil [ossa] @get_fakeoptional_nativeobject : $@convention(thin) () -> @owned FakeOptional + +struct NativeObjectWrapper { + var innerWrapper : Builtin.NativeObject +} + +sil @owned_user_object_pair : $@convention(thin) (@owned NativeObjectPair) -> () + +/////////// +// Tests // +/////////// + +// Make sure we do not eliminate copies where only the destroy_value is outside +// of the lifetime of the parent value, but a begin_borrow extends the lifetime +// of the value. This is an optimization that can only be performed via lifetime +// joining. 
+// +// CHECK-LABEL: sil [ossa] @simple_recursive_copy_case_destroying_use_out_of_lifetime : $@convention(thin) () -> () { +// CHECK: copy_value +// CHECK: } // end sil function 'simple_recursive_copy_case_destroying_use_out_of_lifetime' +sil [ossa] @simple_recursive_copy_case_destroying_use_out_of_lifetime : $@convention(thin) () -> () { +bb0: + %f = function_ref @get_object_pair : $@convention(thin) () -> @owned NativeObjectPair + %pair = apply %f() : $@convention(thin) () -> @owned NativeObjectPair + %pairBorrow = begin_borrow %pair : $NativeObjectPair + %3 = struct_extract %pairBorrow : $NativeObjectPair, #NativeObjectPair.obj1 + %gUserFun = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + apply %gUserFun(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + end_borrow %pairBorrow : $NativeObjectPair + cond_br undef, bb1, bb2 + +bb1: + %1 = copy_value %pair : $NativeObjectPair + %2 = begin_borrow %1 : $NativeObjectPair + destroy_value %pair : $NativeObjectPair + %3a = struct_extract %2 : $NativeObjectPair, #NativeObjectPair.obj1 + apply %gUserFun(%3a) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + end_borrow %2 : $NativeObjectPair + destroy_value %1 : $NativeObjectPair + br bb3 + +bb2: + destroy_value %pair : $NativeObjectPair + br bb3 + +bb3: + %9999 = tuple() + return %9999 : $() +} + +// Second version of the test that consumes the pair in case we make the +// lifetime joining smart enough to handle the original case. 
+// +// CHECK-LABEL: sil [ossa] @simple_recursive_copy_case_destroying_use_out_of_lifetime_2 : $@convention(thin) () -> () { +// CHECK: copy_value +// CHECK: } // end sil function 'simple_recursive_copy_case_destroying_use_out_of_lifetime_2' +sil [ossa] @simple_recursive_copy_case_destroying_use_out_of_lifetime_2 : $@convention(thin) () -> () { +bb0: + %f = function_ref @get_object_pair : $@convention(thin) () -> @owned NativeObjectPair + %pair = apply %f() : $@convention(thin) () -> @owned NativeObjectPair + %pairBorrow = begin_borrow %pair : $NativeObjectPair + %3 = struct_extract %pairBorrow : $NativeObjectPair, #NativeObjectPair.obj1 + %gUserFun = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + apply %gUserFun(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + end_borrow %pairBorrow : $NativeObjectPair + cond_br undef, bb1, bb2 + +bb1: + %1 = copy_value %pair : $NativeObjectPair + %2 = begin_borrow %1 : $NativeObjectPair + destroy_value %pair : $NativeObjectPair + %3a = struct_extract %2 : $NativeObjectPair, #NativeObjectPair.obj1 + apply %gUserFun(%3a) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + end_borrow %2 : $NativeObjectPair + destroy_value %1 : $NativeObjectPair + br bb3 + +bb2: + %consumePair = function_ref @consume_nativeobject_pair : $@convention(thin) (@owned NativeObjectPair) -> () + apply %consumePair(%pair) : $@convention(thin) (@owned NativeObjectPair) -> () + br bb3 + +bb3: + %9999 = tuple() + return %9999 : $() +} + diff --git a/test/SILOptimizer/semantic-arc-opts.sil b/test/SILOptimizer/semantic-arc-opts.sil index 97a7872731e10..7ad2424d2fcff 100644 --- a/test/SILOptimizer/semantic-arc-opts.sil +++ b/test/SILOptimizer/semantic-arc-opts.sil @@ -318,43 +318,6 @@ bb3: return %9999 : $() } -// Simple in_guaranteed argument load_copy. 
-// CHECK-LABEL: sil [ossa] @load_copy_from_in_guaranteed : $@convention(thin) (@in_guaranteed Builtin.NativeObject) -> () { -// CHECK: bb0([[ARG:%.*]] : -// CHECK: load_borrow -// CHECK: load_borrow -// CHECK: load [copy] -// CHECK: } // end sil function 'load_copy_from_in_guaranteed' -sil [ossa] @load_copy_from_in_guaranteed : $@convention(thin) (@in_guaranteed Builtin.NativeObject) -> () { -bb0(%0 : $*Builtin.NativeObject): - %g = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - // Simple same bb. - %1 = load [copy] %0 : $*Builtin.NativeObject - apply %g(%1) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - destroy_value %1 : $Builtin.NativeObject - - // Diamond. - %2 = load [copy] %0 : $*Builtin.NativeObject - cond_br undef, bb1, bb2 - -bb1: - apply %g(%2) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - destroy_value %2 : $Builtin.NativeObject - br bb3 - -bb2: - destroy_value %2 : $Builtin.NativeObject - br bb3 - -bb3: - // Consuming use blocks. 
- %3 = load [copy] %0 : $*Builtin.NativeObject - %4 = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () - apply %4(%3) : $@convention(thin) (@owned Builtin.NativeObject) -> () - %9999 = tuple() - return %9999 : $() -} - // CHECK-LABEL: sil [ossa] @destructure_test : $@convention(thin) (@guaranteed StructMemberTest) -> Builtin.Int32 { // CHECK: bb0([[ARG:%.*]] : @guaranteed $StructMemberTest): // CHECK: [[EXT:%.*]] = struct_extract [[ARG]] @@ -448,339 +411,6 @@ bb0(%0 : @guaranteed $(Klass, MyInt)): sil [ossa] @black_hole : $@convention(thin) (@guaranteed Klass) -> () -// CHECK-LABEL: sil [ossa] @dont_copy_let_properties_with_guaranteed_base -// CHECK: ref_element_addr -// CHECK-NEXT: load_borrow -// CHECK-NEXT: apply -// CHECK-NEXT: end_borrow -// CHECK-NEXT: return -sil [ossa] @dont_copy_let_properties_with_guaranteed_base : $@convention(thin) (@guaranteed ClassLet) -> () { -bb0(%x : @guaranteed $ClassLet): - %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () - - %p = ref_element_addr %x : $ClassLet, #ClassLet.aLet - %v = load [copy] %p : $*Klass - %b = begin_borrow %v : $Klass - apply %f(%b) : $@convention(thin) (@guaranteed Klass) -> () - end_borrow %b : $Klass - destroy_value %v : $Klass - - return undef : $() -} - -// CHECK-LABEL: sil [ossa] @dont_copy_let_properties_with_guaranteed_base_and_forwarding_uses : -// CHECK: ref_element_addr -// CHECK-NEXT: load_borrow -// CHECK-NEXT: unchecked_ref_cast -// CHECK-NEXT: apply -// CHECK-NEXT: end_borrow -// CHECK-NEXT: return -// CHECK: } // end sil function 'dont_copy_let_properties_with_guaranteed_base_and_forwarding_uses' -sil [ossa] @dont_copy_let_properties_with_guaranteed_base_and_forwarding_uses : $@convention(thin) (@guaranteed ClassLet) -> () { -bb0(%x : @guaranteed $ClassLet): - %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () - - %p = ref_element_addr %x : $ClassLet, #ClassLet.aLet - %v = load [copy] %p : $*Klass - %c = 
unchecked_ref_cast %v : $Klass to $Klass - %b = begin_borrow %c : $Klass - apply %f(%b) : $@convention(thin) (@guaranteed Klass) -> () - end_borrow %b : $Klass - destroy_value %c : $Klass - return undef : $() -} - -// CHECK-LABEL: sil [ossa] @dont_copy_let_properties_with_guaranteed_upcast_base -// CHECK: ref_element_addr -// CHECK-NEXT: load_borrow -// CHECK-NEXT: apply -// CHECK-NEXT: end_borrow -// CHECK-NEXT: return -sil [ossa] @dont_copy_let_properties_with_guaranteed_upcast_base : $@convention(thin) (@guaranteed SubclassLet) -> () { -bb0(%x : @guaranteed $SubclassLet): - %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () - - %u = upcast %x : $SubclassLet to $ClassLet - %p = ref_element_addr %u : $ClassLet, #ClassLet.aLet - %v = load [copy] %p : $*Klass - %b = begin_borrow %v : $Klass - apply %f(%b) : $@convention(thin) (@guaranteed Klass) -> () - end_borrow %b : $Klass - destroy_value %v : $Klass - - return undef : $() -} - -// CHECK-LABEL: sil [ossa] @dont_copy_let_global -// CHECK: global_addr -// CHECK-NEXT: load_borrow -// CHECK-NEXT: apply -// CHECK-NEXT: end_borrow -// CHECK-NEXT: return -sil [ossa] @dont_copy_let_global : $@convention(thin) () -> () { -bb0: - %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () - - %p = global_addr @a_let_global : $*Klass - %v = load [copy] %p : $*Klass - %b = begin_borrow %v : $Klass - apply %f(%b) : $@convention(thin) (@guaranteed Klass) -> () - end_borrow %b : $Klass - destroy_value %v : $Klass - - return undef : $() -} - -// CHECK-LABEL: sil [ossa] @dont_copy_let_properties_with_guaranteed_base_structural -// CHECK: ref_element_addr -// CHECK-NEXT: tuple_element_addr -// CHECK-NEXT: load_borrow -// CHECK-NEXT: apply -// CHECK-NEXT: end_borrow -// CHECK-NEXT: return -sil [ossa] @dont_copy_let_properties_with_guaranteed_base_structural : $@convention(thin) (@guaranteed ClassLet) -> () { -bb0(%x : @guaranteed $ClassLet): - %f = function_ref @black_hole : 
$@convention(thin) (@guaranteed Klass) -> () - - %p = ref_element_addr %x : $ClassLet, #ClassLet.aLetTuple - %q = tuple_element_addr %p : $*(Klass, Klass), 1 - %v = load [copy] %q : $*Klass - %b = begin_borrow %v : $Klass - apply %f(%b) : $@convention(thin) (@guaranteed Klass) -> () - end_borrow %b : $Klass - destroy_value %v : $Klass - - return undef : $() -} - -// CHECK-LABEL: sil [ossa] @do_copy_var_properties_with_guaranteed_base -// CHECK: ref_element_addr -// CHECK-NEXT: load [copy] -// CHECK-NEXT: apply -// CHECK-NEXT: destroy -// CHECK-NEXT: return -sil [ossa] @do_copy_var_properties_with_guaranteed_base : $@convention(thin) (@guaranteed ClassLet) -> () { -bb0(%x : @guaranteed $ClassLet): - %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () - - %p = ref_element_addr %x : $ClassLet, #ClassLet.aVar - %v = load [copy] %p : $*Klass - %b = begin_borrow %v : $Klass - apply %f(%b) : $@convention(thin) (@guaranteed Klass) -> () - end_borrow %b : $Klass - destroy_value %v : $Klass - - return undef : $() -} - -// CHECK-LABEL: sil [ossa] @do_copy_var_global -// CHECK: global_addr -// CHECK-NEXT: load [copy] -// CHECK-NEXT: apply -// CHECK-NEXT: destroy -// CHECK-NEXT: return -sil [ossa] @do_copy_var_global : $@convention(thin) () -> () { -bb0: - %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () - - %p = global_addr @a_var_global : $*Klass - %v = load [copy] %p : $*Klass - %b = begin_borrow %v : $Klass - apply %f(%b) : $@convention(thin) (@guaranteed Klass) -> () - end_borrow %b : $Klass - destroy_value %v : $Klass - - return undef : $() -} - -// CHECK-LABEL: sil [ossa] @dont_copy_let_properties_with_borrowed_base_that_dominates -// CHECK: [[OUTER:%.*]] = begin_borrow -// CHECK-NEXT: ref_element_addr -// CHECK-NEXT: [[INNER:%.*]] = load_borrow -// CHECK-NEXT: apply -// CHECK-NEXT: end_borrow [[INNER]] -// CHECK-NEXT: end_borrow [[OUTER]] -// CHECK-NEXT: destroy_value -sil [ossa] 
@dont_copy_let_properties_with_borrowed_base_that_dominates : $@convention(thin) (@owned ClassLet) -> () { -bb0(%x : @owned $ClassLet): - %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () - - %a = begin_borrow %x : $ClassLet - %p = ref_element_addr %a : $ClassLet, #ClassLet.aLet - %v = load [copy] %p : $*Klass - %b = begin_borrow %v : $Klass - apply %f(%b) : $@convention(thin) (@guaranteed Klass) -> () - end_borrow %b : $Klass - destroy_value %v : $Klass - - end_borrow %a : $ClassLet - destroy_value %x : $ClassLet - - return undef : $() -} - -// CHECK-LABEL: sil [ossa] @dont_copy_let_properties_with_borrowed_base_that_dominates_projtestcase : -// CHECK: load_borrow -// CHECK: } // end sil function 'dont_copy_let_properties_with_borrowed_base_that_dominates_projtestcase' -sil [ossa] @dont_copy_let_properties_with_borrowed_base_that_dominates_projtestcase : $@convention(thin) (@owned ClassLet) -> () { -bb0(%x : @owned $ClassLet): - %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () - - %a = begin_borrow %x : $ClassLet - %p = ref_element_addr %a : $ClassLet, #ClassLet.aLetTuple - %v = load [copy] %p : $*(Klass, Klass) - (%v1, %v2) = destructure_tuple %v : $(Klass, Klass) - apply %f(%v1) : $@convention(thin) (@guaranteed Klass) -> () - apply %f(%v2) : $@convention(thin) (@guaranteed Klass) -> () - destroy_value %v1 : $Klass - destroy_value %v2 : $Klass - end_borrow %a : $ClassLet - destroy_value %x : $ClassLet - - return undef : $() -} - -// CHECK-LABEL: sil [ossa] @dont_copy_let_properties_with_borrowed_base_that_dominates_projtestcase_2 : -// CHECK: load_borrow -// CHECK: } // end sil function 'dont_copy_let_properties_with_borrowed_base_that_dominates_projtestcase_2' -sil [ossa] @dont_copy_let_properties_with_borrowed_base_that_dominates_projtestcase_2 : $@convention(thin) (@owned ClassLet) -> () { -bb0(%x : @owned $ClassLet): - %f = function_ref @guaranteed_user : $@convention(thin) (@guaranteed 
Builtin.NativeObject) -> () - - %a = begin_borrow %x : $ClassLet - %p = ref_element_addr %a : $ClassLet, #ClassLet.aLet - %v = load [copy] %p : $*Klass - %v_cast = unchecked_ref_cast %v : $Klass to $Builtin.NativeObject - apply %f(%v_cast) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - destroy_value %v_cast : $Builtin.NativeObject - end_borrow %a : $ClassLet - destroy_value %x : $ClassLet - - return undef : $() -} - -// CHECK-LABEL: sil [ossa] @dont_copy_let_properties_with_multi_borrowed_base_that_dominates -// CHECK: [[OUTER:%.*]] = begin_borrow -// CHECK-NEXT: ref_element_addr -// CHECK-NEXT: [[INNER:%.*]] = load_borrow -// CHECK-NEXT: apply -// CHECK-NEXT: end_borrow [[INNER]] -// CHECK-NEXT: end_borrow [[OUTER]] -// CHECK-NEXT: [[OUTER:%.*]] = begin_borrow -// CHECK-NEXT: ref_element_addr -// CHECK-NEXT: [[INNER:%.*]] = load_borrow -// CHECK-NEXT: apply -// CHECK-NEXT: end_borrow [[INNER]] -// CHECK-NEXT: end_borrow [[OUTER]] -// CHECK-NEXT: destroy_value -sil [ossa] @dont_copy_let_properties_with_multi_borrowed_base_that_dominates : $@convention(thin) (@owned ClassLet) -> () { -bb0(%x : @owned $ClassLet): - %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () - - %a = begin_borrow %x : $ClassLet - %p = ref_element_addr %a : $ClassLet, #ClassLet.aLet - %v = load [copy] %p : $*Klass - %c = begin_borrow %v : $Klass - apply %f(%c) : $@convention(thin) (@guaranteed Klass) -> () - end_borrow %c : $Klass - destroy_value %v : $Klass - end_borrow %a : $ClassLet - - %b = begin_borrow %x : $ClassLet - %q = ref_element_addr %b : $ClassLet, #ClassLet.aLet - %w = load [copy] %q : $*Klass - %d = begin_borrow %w : $Klass - apply %f(%d) : $@convention(thin) (@guaranteed Klass) -> () - end_borrow %d : $Klass - destroy_value %w : $Klass - end_borrow %b : $ClassLet - - destroy_value %x : $ClassLet - - return undef : $() -} - -// CHECK-LABEL: sil [ossa] @do_copy_let_properties_with_borrowed_base_that_does_not_dominate -// CHECK: 
begin_borrow -// CHECK-NEXT: ref_element_addr -// CHECK-NEXT: load [copy] -// CHECK-NEXT: apply -// CHECK-NEXT: end_borrow -// CHECK-NEXT: destroy_value -// CHECK-NEXT: apply -// CHECK-NEXT: destroy_value -sil [ossa] @do_copy_let_properties_with_borrowed_base_that_does_not_dominate : $@convention(thin) (@owned ClassLet) -> () { -bb0(%x : @owned $ClassLet): - %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () - - %a = begin_borrow %x : $ClassLet - %p = ref_element_addr %a : $ClassLet, #ClassLet.aLet - %v = load [copy] %p : $*Klass - %b = begin_borrow %v : $Klass - apply %f(%b) : $@convention(thin) (@guaranteed Klass) -> () - - // End the lifetime of the base object first... - end_borrow %a : $ClassLet - destroy_value %x : $ClassLet - - // ...then end the lifetime of the copy. - apply %f(%b) : $@convention(thin) (@guaranteed Klass) -> () - - end_borrow %b : $Klass - destroy_value %v : $Klass - - return undef : $() -} - -// CHECK-LABEL: sil [ossa] @do_or_dont_copy_let_properties_with_multi_borrowed_base_when_it_dominates_2 : -// CHECK: [[OUTER:%.*]] = begin_borrow -// CHECK-NEXT: ref_element_addr -// CHECK-NEXT: [[INNER:%.*]] = load_borrow -// CHECK-NEXT: apply -// CHECK-NEXT: end_borrow [[INNER]] -// CHECK-NEXT: end_borrow [[OUTER]] -// CHECK-NEXT: begin_borrow -// CHECK-NEXT: ref_element_addr -// CHECK-NEXT: load [copy] -// CHECK-NEXT: end_borrow -// CHECK-NEXT: destroy_value -// CHECK-NEXT: // function_ref -// CHECK-NEXT: function_ref -// CHECK-NEXT: enum -// CHECK-NEXT: apply -// CHECK-NEXT: destroy_value -// CHECK: } // end sil function 'do_or_dont_copy_let_properties_with_multi_borrowed_base_when_it_dominates_2' -sil [ossa] @do_or_dont_copy_let_properties_with_multi_borrowed_base_when_it_dominates_2 : $@convention(thin) (@owned ClassLet) -> () { -bb0(%x : @owned $ClassLet): - %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () - - %a = begin_borrow %x : $ClassLet - %p = ref_element_addr %a : $ClassLet, 
#ClassLet.aLet - %v = load [copy] %p : $*Klass - %c = begin_borrow %v : $Klass - apply %f(%c) : $@convention(thin) (@guaranteed Klass) -> () - end_borrow %c : $Klass - destroy_value %v : $Klass - end_borrow %a : $ClassLet - - %b = begin_borrow %x : $ClassLet - %q = ref_element_addr %b : $ClassLet, #ClassLet.aLet - %w = load [copy] %q : $*Klass - - // End the lifetime of the base object first... - end_borrow %b : $ClassLet - destroy_value %x : $ClassLet - - // ...then end the lifetime of the copy. - %f2 = function_ref @guaranteed_fakeoptional_klass_user : $@convention(thin) (@guaranteed FakeOptional) -> () - %w2 = enum $FakeOptional, #FakeOptional.some!enumelt, %w : $Klass - apply %f2(%w2) : $@convention(thin) (@guaranteed FakeOptional) -> () - - destroy_value %w2 : $FakeOptional - - return undef : $() -} - // Make sure that we properly eliminate all ref count ops except for the destroy // for the @owned argument. The recursion happens since we can not eliminate the // begin_borrow without eliminating the struct_extract (which we do after we @@ -915,977 +545,97 @@ bb0: %4 = copy_value %3 : $Builtin.NativeObject end_borrow %2 : $NativeObjectPair %func = function_ref @unreachable_guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> MyNever - apply %func(%4) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> MyNever - unreachable -} - -// Make sure that since we have a guaranteed argument and do not need to reason -// about end_borrows, we handle this. 
-// -// CHECK-LABEL: sil [ossa] @guaranteed_arg_used_by_postdominating_no_return_function : $@convention(thin) (@guaranteed NativeObjectPair) -> MyNever { -// CHECK-NOT: copy_value -// CHECK: } // end sil function 'guaranteed_arg_used_by_postdominating_no_return_function' -sil [ossa] @guaranteed_arg_used_by_postdominating_no_return_function : $@convention(thin) (@guaranteed NativeObjectPair) -> MyNever { -bb0(%0 : @guaranteed $NativeObjectPair): - %3 = struct_extract %0 : $NativeObjectPair, #NativeObjectPair.obj1 - %4 = copy_value %3 : $Builtin.NativeObject - %func = function_ref @unreachable_guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> MyNever - apply %func(%4) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> MyNever - unreachable -} - - -// Make sure that since our borrow introducer is a begin_borrow, we do not -// eliminate the copy. -// -// CHECK-LABEL: sil [ossa] @borrowed_val_used_by_postdominating_no_return_function : $@convention(thin) (@owned NativeObjectPair) -> MyNever { -// CHECK: copy_value -// CHECK: } // end sil function 'borrowed_val_used_by_postdominating_no_return_function' -sil [ossa] @borrowed_val_used_by_postdominating_no_return_function : $@convention(thin) (@owned NativeObjectPair) -> MyNever { -bb0(%0 : @owned $NativeObjectPair): - %1 = begin_borrow %0 : $NativeObjectPair - %2 = struct_extract %1 : $NativeObjectPair, #NativeObjectPair.obj1 - %3 = copy_value %2 : $Builtin.NativeObject - %func = function_ref @unreachable_guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> MyNever - apply %func(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> MyNever - unreachable -} - -// Just make sure that we do not crash on this. We should be able to eliminate -// everything here. 
-// -// CHECK-LABEL: sil [ossa] @copy_value_with_debug_user : $@convention(thin) (@guaranteed NativeObjectPair) -> () { -// CHECK: bb0 -// CHECK-NEXT: tuple -// CHECK-NEXT: return -// CHECK-NEXT: } // end sil function 'copy_value_with_debug_user' -sil [ossa] @copy_value_with_debug_user : $@convention(thin) (@guaranteed NativeObjectPair) -> () { -bb0(%0 : @guaranteed $NativeObjectPair): - %1 = struct_extract %0 : $NativeObjectPair, #NativeObjectPair.obj1 - %2 = copy_value %1 : $Builtin.NativeObject - debug_value %2 : $Builtin.NativeObject, let, name "myField" - destroy_value %2 : $Builtin.NativeObject - %9999 = tuple() - return %9999 : $() -} - -// Just make sure we do not crash here. -// -// CHECK-LABEL: sil [ossa] @do_not_insert_end_borrow_given_deadend : $@convention(thin) (@guaranteed ClassLet) -> () { -// CHECK: copy_value -// CHECK: } // end sil function 'do_not_insert_end_borrow_given_deadend' -sil [ossa] @do_not_insert_end_borrow_given_deadend : $@convention(thin) (@guaranteed ClassLet) -> () { -bb0(%x : @guaranteed $ClassLet): - %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () - %p = ref_element_addr %x : $ClassLet, #ClassLet.aLet - %v = load_borrow %p : $*Klass - %c = copy_value %v : $Klass - end_borrow %v : $Klass - apply %f(%c) : $@convention(thin) (@guaranteed Klass) -> () - cond_br undef, bb1, bb2 - -bb1: - destroy_value %c : $Klass - br bb3 - -bb2: - destroy_value %c : $Klass - br bb3 - -bb3: - unreachable -} - -// Make sure that we put the end_borrow on the load_borrow, not LHS or RHS. 
-// -// CHECK-LABEL: sil [ossa] @destructure_load_copy_to_load_borrow : $@convention(thin) (@guaranteed ClassLet) -> () { -// CHECK: bb0([[ARG:%.*]] : -// CHECK: [[INTERIOR_POINTER:%.*]] = ref_element_addr [[ARG]] -// CHECK: [[BORROWED_VAL:%.*]] = load_borrow [[INTERIOR_POINTER]] -// CHECK: ([[LHS:%.*]], [[RHS:%.*]]) = destructure_tuple [[BORROWED_VAL]] -// CHECK: apply {{%.*}}([[LHS]]) -// CHECK: apply {{%.*}}([[RHS]]) -// CHECK: end_borrow [[BORROWED_VAL]] -// CHECK: } // end sil function 'destructure_load_copy_to_load_borrow' -sil [ossa] @destructure_load_copy_to_load_borrow : $@convention(thin) (@guaranteed ClassLet) -> () { -bb0(%0 : @guaranteed $ClassLet): - %1 = ref_element_addr %0 : $ClassLet, #ClassLet.aLetTuple - %2 = load [copy] %1 : $*(Klass, Klass) - (%3, %4) = destructure_tuple %2 : $(Klass, Klass) - %5 = function_ref @guaranteed_klass_user : $@convention(thin) (@guaranteed Klass) -> () - %6 = apply %5(%3) : $@convention(thin) (@guaranteed Klass) -> () - %7 = apply %5(%4) : $@convention(thin) (@guaranteed Klass) -> () - destroy_value %3 : $Klass - destroy_value %4 : $Klass - %9999 = tuple() - return %9999 : $() -} - -// CHECK-LABEL: sil [ossa] @single_init_allocstack : $@convention(thin) (@owned Klass) -> () { -// CHECK-NOT: load [copy] -// CHECK: } // end sil function 'single_init_allocstack' -sil [ossa] @single_init_allocstack : $@convention(thin) (@owned Klass) -> () { -bb0(%0 : @owned $Klass): - %1 = alloc_stack $Klass - store %0 to [init] %1 : $*Klass - %2 = load [copy] %1 : $*Klass - - %3 = function_ref @guaranteed_klass_user : $@convention(thin) (@guaranteed Klass) -> () - apply %3(%2) : $@convention(thin) (@guaranteed Klass) -> () - - destroy_value %2 : $Klass - destroy_addr %1 : $*Klass - dealloc_stack %1 : $*Klass - - %9999 = tuple() - return %9999 : $() -} - -// CHECK-LABEL: sil [ossa] @multiple_init_allocstack : $@convention(thin) (@owned Klass) -> () { -// CHECK: load [copy] -// CHECK: } // end sil function 'multiple_init_allocstack' -sil 
[ossa] @multiple_init_allocstack : $@convention(thin) (@owned Klass) -> () { -bb0(%0 : @owned $Klass): - %0a = copy_value %0 : $Klass - %1 = alloc_stack $Klass - store %0 to [init] %1 : $*Klass - %2 = load [copy] %1 : $*Klass - - %3 = function_ref @guaranteed_klass_user : $@convention(thin) (@guaranteed Klass) -> () - apply %3(%2) : $@convention(thin) (@guaranteed Klass) -> () - - destroy_value %2 : $Klass - destroy_addr %1 : $*Klass - - store %0a to [init] %1 : $*Klass - destroy_addr %1 : $*Klass - dealloc_stack %1 : $*Klass - - %9999 = tuple() - return %9999 : $() -} - -// We could support this, but for now we are keeping things simple. If we do add -// support, this test will need to be updated. -// -// CHECK-LABEL: sil [ossa] @single_init_wrongblock : $@convention(thin) (@owned Klass) -> () { -// CHECK: load [copy] -// CHECK: } // end sil function 'single_init_wrongblock' -sil [ossa] @single_init_wrongblock : $@convention(thin) (@owned Klass) -> () { -bb0(%0 : @owned $Klass): - %1 = alloc_stack $Klass - br bb1 - -bb1: - store %0 to [init] %1 : $*Klass - %2 = load [copy] %1 : $*Klass - - %3 = function_ref @guaranteed_klass_user : $@convention(thin) (@guaranteed Klass) -> () - apply %3(%2) : $@convention(thin) (@guaranteed Klass) -> () - - destroy_value %2 : $Klass - destroy_addr %1 : $*Klass - dealloc_stack %1 : $*Klass - - %9999 = tuple() - return %9999 : $() -} - -// We could support this, but for now we are keeping things simple. If we do add -// support, this test will need to be updated. 
-// -// CHECK-LABEL: sil [ossa] @single_init_loadtake : $@convention(thin) (@owned Klass) -> () { -// CHECK: load [copy] -// CHECK: } // end sil function 'single_init_loadtake' -sil [ossa] @single_init_loadtake : $@convention(thin) (@owned Klass) -> () { -bb0(%0 : @owned $Klass): - %1 = alloc_stack $Klass - store %0 to [init] %1 : $*Klass - %2 = load [copy] %1 : $*Klass - - %3 = function_ref @guaranteed_klass_user : $@convention(thin) (@guaranteed Klass) -> () - apply %3(%2) : $@convention(thin) (@guaranteed Klass) -> () - - destroy_value %2 : $Klass - - %4 = load [take] %1 : $*Klass - destroy_value %4 : $Klass - dealloc_stack %1 : $*Klass - - %9999 = tuple() - return %9999 : $() -} - -// CHECK-LABEL: sil [ossa] @inout_argument_never_written_to_1 : $@convention(thin) (@inout NativeObjectPair) -> () { -// CHECK-NOT: load [copy] -// CHECK: load_borrow -// CHECK-NOT: load [copy] -// CHECK: } // end sil function 'inout_argument_never_written_to_1' -sil [ossa] @inout_argument_never_written_to_1 : $@convention(thin) (@inout NativeObjectPair) -> () { -bb0(%0 : $*NativeObjectPair): - %2 = load [copy] %0 : $*NativeObjectPair - (%3, %4) = destructure_struct %2 : $NativeObjectPair - - %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - apply %5(%4) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - - destroy_value %3 : $Builtin.NativeObject - destroy_value %4 : $Builtin.NativeObject - - %9999 = tuple() - return %9999 : $() -} - -// CHECK-LABEL: sil [ossa] @inout_argument_never_written_to_2 : $@convention(thin) (@inout NativeObjectPair) -> () { -// CHECK-NOT: load [copy] -// CHECK: load_borrow -// CHECK-NOT: load [copy] -// CHECK: } // end sil function 'inout_argument_never_written_to_2' -sil [ossa] @inout_argument_never_written_to_2 : $@convention(thin) (@inout NativeObjectPair) -> () { -bb0(%0 : $*NativeObjectPair): - %2 = 
struct_element_addr %0 : $*NativeObjectPair, #NativeObjectPair.obj1 - %3 = load [copy] %2 : $*Builtin.NativeObject - %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - destroy_value %3 : $Builtin.NativeObject - %9999 = tuple() - return %9999 : $() -} - -// We can handle this since the store is outside of our region. -// -// CHECK-LABEL: sil [ossa] @inout_argument_never_written_to_3 : $@convention(thin) (@inout NativeObjectPair, @owned Builtin.NativeObject) -> () { -// CHECK: load_borrow -// CHECK: } // end sil function 'inout_argument_never_written_to_3' -sil [ossa] @inout_argument_never_written_to_3 : $@convention(thin) (@inout NativeObjectPair, @owned Builtin.NativeObject) -> () { -bb0(%0 : $*NativeObjectPair, %1 : @owned $Builtin.NativeObject): - %2 = struct_element_addr %0 : $*NativeObjectPair, #NativeObjectPair.obj1 - %3 = load [copy] %2 : $*Builtin.NativeObject - %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - destroy_value %3 : $Builtin.NativeObject - store %1 to [assign] %2 : $*Builtin.NativeObject - %9999 = tuple() - return %9999 : $() -} - -// We cannot handle this since the store is inside of our region. 
-// -// CHECK-LABEL: sil [ossa] @inout_argument_never_written_to_4 : $@convention(thin) (@inout NativeObjectPair, @owned Builtin.NativeObject) -> () { -// CHECK: load [copy] -// CHECK: } // end sil function 'inout_argument_never_written_to_4' -sil [ossa] @inout_argument_never_written_to_4 : $@convention(thin) (@inout NativeObjectPair, @owned Builtin.NativeObject) -> () { -bb0(%0 : $*NativeObjectPair, %1 : @owned $Builtin.NativeObject): - %2 = struct_element_addr %0 : $*NativeObjectPair, #NativeObjectPair.obj1 - %3 = load [copy] %2 : $*Builtin.NativeObject - %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - store %1 to [assign] %2 : $*Builtin.NativeObject - destroy_value %3 : $Builtin.NativeObject - %9999 = tuple() - return %9999 : $() -} - -// We cannot handle this since the store is inside of our region. -// -// CHECK-LABEL: sil [ossa] @inout_argument_never_written_to_4a : $@convention(thin) (@inout NativeObjectPair, @owned Builtin.NativeObject) -> () { -// CHECK: load [copy] -// CHECK: } // end sil function 'inout_argument_never_written_to_4a' -sil [ossa] @inout_argument_never_written_to_4a : $@convention(thin) (@inout NativeObjectPair, @owned Builtin.NativeObject) -> () { -bb0(%0 : $*NativeObjectPair, %1 : @owned $Builtin.NativeObject): - %2 = struct_element_addr %0 : $*NativeObjectPair, #NativeObjectPair.obj1 - %3 = load [copy] %2 : $*Builtin.NativeObject - store %1 to [assign] %2 : $*Builtin.NativeObject - %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - destroy_value %3 : $Builtin.NativeObject - %9999 = tuple() - return %9999 : $() -} - -// We can handle this since the store is outside of our region. 
-// -// CHECK-LABEL: sil [ossa] @inout_argument_never_written_to_5 : $@convention(thin) (@inout NativeObjectPair, @owned Builtin.NativeObject) -> () { -// CHECK: load_borrow -// CHECK: } // end sil function 'inout_argument_never_written_to_5' -sil [ossa] @inout_argument_never_written_to_5 : $@convention(thin) (@inout NativeObjectPair, @owned Builtin.NativeObject) -> () { -bb0(%0 : $*NativeObjectPair, %1 : @owned $Builtin.NativeObject): - %2 = struct_element_addr %0 : $*NativeObjectPair, #NativeObjectPair.obj1 - store %1 to [assign] %2 : $*Builtin.NativeObject - %3 = load [copy] %2 : $*Builtin.NativeObject - %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - destroy_value %3 : $Builtin.NativeObject - %9999 = tuple() - return %9999 : $() -} - -// We can handle this since the store is outside of our region. -// -// CHECK-LABEL: sil [ossa] @inout_argument_never_written_to_6 : $@convention(thin) (@inout NativeObjectPair, @owned Builtin.NativeObject) -> () { -// CHECK: load_borrow -// CHECK: } // end sil function 'inout_argument_never_written_to_6' -sil [ossa] @inout_argument_never_written_to_6 : $@convention(thin) (@inout NativeObjectPair, @owned Builtin.NativeObject) -> () { -bb0(%0 : $*NativeObjectPair, %1 : @owned $Builtin.NativeObject): - %2 = struct_element_addr %0 : $*NativeObjectPair, #NativeObjectPair.obj1 - store %1 to [assign] %2 : $*Builtin.NativeObject - %3 = load [copy] %2 : $*Builtin.NativeObject - cond_br undef, bb1, bb2 - -bb1: - %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - br bb3 - -bb2: - br bb3 - -bb3: - destroy_value %3 : $Builtin.NativeObject - %9999 = tuple() - return %9999 : $() -} - -// We can handle this since the store is outside of our region. 
-// -// CHECK-LABEL: sil [ossa] @inout_argument_never_written_to_6a : $@convention(thin) (@inout NativeObjectPair, @owned Builtin.NativeObject) -> () { -// CHECK: load_borrow -// CHECK: } // end sil function 'inout_argument_never_written_to_6a' -sil [ossa] @inout_argument_never_written_to_6a : $@convention(thin) (@inout NativeObjectPair, @owned Builtin.NativeObject) -> () { -bb0(%0 : $*NativeObjectPair, %1 : @owned $Builtin.NativeObject): - %2 = struct_element_addr %0 : $*NativeObjectPair, #NativeObjectPair.obj1 - br bb0a - -bb0a: - store %1 to [assign] %2 : $*Builtin.NativeObject - %3 = load [copy] %2 : $*Builtin.NativeObject - cond_br undef, bb1, bb2 - -bb1: - %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - br bb3 - -bb2: - br bb3 - -bb3: - destroy_value %3 : $Builtin.NativeObject - %9999 = tuple() - return %9999 : $() -} - -// We can handle this since the store is outside of our region. 
-// -// CHECK-LABEL: sil [ossa] @inout_argument_never_written_to_6b : $@convention(thin) (@inout NativeObjectPair, @owned Builtin.NativeObject) -> () { -// CHECK: load_borrow -// CHECK: } // end sil function 'inout_argument_never_written_to_6b' -sil [ossa] @inout_argument_never_written_to_6b : $@convention(thin) (@inout NativeObjectPair, @owned Builtin.NativeObject) -> () { -bb0(%0 : $*NativeObjectPair, %1 : @owned $Builtin.NativeObject): - %2 = struct_element_addr %0 : $*NativeObjectPair, #NativeObjectPair.obj1 - store %1 to [assign] %2 : $*Builtin.NativeObject - br bb0a - -bb0a: - %3 = load [copy] %2 : $*Builtin.NativeObject - cond_br undef, bb1, bb2 - -bb1: - %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - br bb3 - -bb2: - br bb3 - -bb3: - destroy_value %3 : $Builtin.NativeObject - %9999 = tuple() - return %9999 : $() -} - -// We can handle this since the store is outside of our region. 
-// -// CHECK-LABEL: sil [ossa] @inout_argument_never_written_to_6c : $@convention(thin) (@inout NativeObjectPair, @owned Builtin.NativeObject) -> () { -// CHECK: load_borrow -// CHECK: } // end sil function 'inout_argument_never_written_to_6c' -sil [ossa] @inout_argument_never_written_to_6c : $@convention(thin) (@inout NativeObjectPair, @owned Builtin.NativeObject) -> () { -bb0(%0 : $*NativeObjectPair, %1 : @owned $Builtin.NativeObject): - %2 = struct_element_addr %0 : $*NativeObjectPair, #NativeObjectPair.obj1 - %1a = copy_value %1 : $Builtin.NativeObject - store %1 to [assign] %2 : $*Builtin.NativeObject - br bb0a - -bb0a: - %3 = load [copy] %2 : $*Builtin.NativeObject - cond_br undef, bb1, bb2 - -bb1: - %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - br bb3 - -bb2: - br bb3 - -bb3: - destroy_value %3 : $Builtin.NativeObject - store %1a to [assign] %2 : $*Builtin.NativeObject - %9999 = tuple() - return %9999 : $() -} - -// This case, we can not optimize since the write scope is created around our entire load [copy]. 
-// -// CHECK-LABEL: sil [ossa] @inout_argument_never_written_to_begin_access_1 : $@convention(thin) (@inout NativeObjectPair, @guaranteed Builtin.NativeObject) -> () { -// CHECK: load [copy] -// CHECK: } // end sil function 'inout_argument_never_written_to_begin_access_1' -sil [ossa] @inout_argument_never_written_to_begin_access_1 : $@convention(thin) (@inout NativeObjectPair, @guaranteed Builtin.NativeObject) -> () { -bb0(%0 : $*NativeObjectPair, %1 : @guaranteed $Builtin.NativeObject): - %2 = struct_element_addr %0 : $*NativeObjectPair, #NativeObjectPair.obj1 - %2a = begin_access [modify] [static] %2 : $*Builtin.NativeObject - %3 = load [copy] %2 : $*Builtin.NativeObject - %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - destroy_value %3 : $Builtin.NativeObject - end_access %2a : $*Builtin.NativeObject - %9999 = tuple() - return %9999 : $() -} - -// CHECK-LABEL: sil [ossa] @inout_argument_never_written_to_begin_access_1a : $@convention(thin) (@inout NativeObjectPair, @guaranteed Builtin.NativeObject) -> () { -// CHECK: load [copy] -// CHECK: } // end sil function 'inout_argument_never_written_to_begin_access_1a' -sil [ossa] @inout_argument_never_written_to_begin_access_1a : $@convention(thin) (@inout NativeObjectPair, @guaranteed Builtin.NativeObject) -> () { -bb0(%0 : $*NativeObjectPair, %1 : @guaranteed $Builtin.NativeObject): - %2 = struct_element_addr %0 : $*NativeObjectPair, #NativeObjectPair.obj1 - %2a = begin_access [modify] [static] %2 : $*Builtin.NativeObject - %3 = load [copy] %2 : $*Builtin.NativeObject - %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - end_access %2a : $*Builtin.NativeObject - destroy_value %3 : $Builtin.NativeObject - %9999 = tuple() - return %9999 : $() -} - -// CHECK-LABEL: sil 
[ossa] @inout_argument_never_written_to_begin_access_1b : $@convention(thin) (@inout NativeObjectPair, @guaranteed Builtin.NativeObject) -> () { -// CHECK: load [copy] -// CHECK: } // end sil function 'inout_argument_never_written_to_begin_access_1b' -sil [ossa] @inout_argument_never_written_to_begin_access_1b : $@convention(thin) (@inout NativeObjectPair, @guaranteed Builtin.NativeObject) -> () { -bb0(%0 : $*NativeObjectPair, %1 : @guaranteed $Builtin.NativeObject): - %2 = struct_element_addr %0 : $*NativeObjectPair, #NativeObjectPair.obj1 - %3 = load [copy] %2 : $*Builtin.NativeObject - %2a = begin_access [modify] [static] %2 : $*Builtin.NativeObject - %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - end_access %2a : $*Builtin.NativeObject - destroy_value %3 : $Builtin.NativeObject - %9999 = tuple() - return %9999 : $() -} - -// CHECK-LABEL: sil [ossa] @inout_argument_never_written_to_begin_access_1c : $@convention(thin) (@inout NativeObjectPair, @guaranteed Builtin.NativeObject) -> () { -// CHECK: load [copy] -// CHECK: } // end sil function 'inout_argument_never_written_to_begin_access_1c' -sil [ossa] @inout_argument_never_written_to_begin_access_1c : $@convention(thin) (@inout NativeObjectPair, @guaranteed Builtin.NativeObject) -> () { -bb0(%0 : $*NativeObjectPair, %1 : @guaranteed $Builtin.NativeObject): - %2 = struct_element_addr %0 : $*NativeObjectPair, #NativeObjectPair.obj1 - %3 = load [copy] %2 : $*Builtin.NativeObject - %2a = begin_access [modify] [static] %2 : $*Builtin.NativeObject - %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - destroy_value %3 : $Builtin.NativeObject - end_access %2a : $*Builtin.NativeObject - %9999 = tuple() - return %9999 : $() -} - -// CHECK-LABEL: sil [ossa] 
@inout_argument_never_written_to_begin_access_2a : $@convention(thin) (@inout NativeObjectPair, @guaranteed Builtin.NativeObject) -> () { -// CHECK: load [copy] -// CHECK: } // end sil function 'inout_argument_never_written_to_begin_access_2a' -sil [ossa] @inout_argument_never_written_to_begin_access_2a : $@convention(thin) (@inout NativeObjectPair, @guaranteed Builtin.NativeObject) -> () { -bb0(%0 : $*NativeObjectPair, %1 : @guaranteed $Builtin.NativeObject): - %2 = struct_element_addr %0 : $*NativeObjectPair, #NativeObjectPair.obj1 - %3 = load [copy] %2 : $*Builtin.NativeObject - cond_br undef, bb1, bb2 - -bb1: - %2a = begin_access [modify] [static] %2 : $*Builtin.NativeObject - %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - destroy_value %3 : $Builtin.NativeObject - end_access %2a : $*Builtin.NativeObject - br bb3 - -bb2: - destroy_value %3 : $Builtin.NativeObject - br bb3 - -bb3: - %9999 = tuple() - return %9999 : $() -} - -// CHECK-LABEL: sil [ossa] @inout_argument_never_written_to_begin_access_2b : $@convention(thin) (@inout NativeObjectPair, @guaranteed Builtin.NativeObject) -> () { -// CHECK: load [copy] -// CHECK: } // end sil function 'inout_argument_never_written_to_begin_access_2b' -sil [ossa] @inout_argument_never_written_to_begin_access_2b : $@convention(thin) (@inout NativeObjectPair, @guaranteed Builtin.NativeObject) -> () { -bb0(%0 : $*NativeObjectPair, %1 : @guaranteed $Builtin.NativeObject): - %2 = struct_element_addr %0 : $*NativeObjectPair, #NativeObjectPair.obj1 - %3 = load [copy] %2 : $*Builtin.NativeObject - br bb0a - -bb0a: - %2a = begin_access [modify] [static] %2 : $*Builtin.NativeObject - cond_br undef, bb1, bb2 - -bb1: - %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - destroy_value %3 
: $Builtin.NativeObject - br bb3 - -bb2: - destroy_value %3 : $Builtin.NativeObject - br bb3 - -bb3: - end_access %2a : $*Builtin.NativeObject - %9999 = tuple() - return %9999 : $() -} - -// CHECK-LABEL: sil [ossa] @inout_argument_never_written_to_begin_access_2c : $@convention(thin) (@inout NativeObjectPair, @guaranteed Builtin.NativeObject) -> () { -// CHECK: load [copy] -// CHECK: } // end sil function 'inout_argument_never_written_to_begin_access_2c' -sil [ossa] @inout_argument_never_written_to_begin_access_2c : $@convention(thin) (@inout NativeObjectPair, @guaranteed Builtin.NativeObject) -> () { -bb0(%0 : $*NativeObjectPair, %1 : @guaranteed $Builtin.NativeObject): - %2 = struct_element_addr %0 : $*NativeObjectPair, #NativeObjectPair.obj1 - %2a = begin_access [modify] [static] %2 : $*Builtin.NativeObject - br bb0a - -bb0a: - %3 = load [copy] %2 : $*Builtin.NativeObject - cond_br undef, bb1, bb2 - -bb1: - %5 = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - apply %5(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - destroy_value %3 : $Builtin.NativeObject - br bb3 - -bb2: - destroy_value %3 : $Builtin.NativeObject - br bb3 - -bb3: - end_access %2a : $*Builtin.NativeObject - %9999 = tuple() - return %9999 : $() -} - -// CHECK-LABEL: sil [ossa] @switch_enum_test_loadcopy_no_default : $@convention(thin) (@owned FakeOptional) -> () { -// CHECK-NOT: load [copy] -// CHECK: load_borrow -// CHECK-NOT: load [copy] -// CHECK: } // end sil function 'switch_enum_test_loadcopy_no_default' -sil [ossa] @switch_enum_test_loadcopy_no_default : $@convention(thin) (@owned FakeOptional) -> () { -bb0(%0 : @owned $FakeOptional): - %0a = alloc_stack $FakeOptional - store %0 to [init] %0a : $*FakeOptional - %1 = load [copy] %0a : $*FakeOptional - switch_enum %1 : $FakeOptional, case #FakeOptional.some!enumelt: bb1, case #FakeOptional.none!enumelt: bb2 - -bb1(%2 : @owned $Builtin.NativeObject): - destroy_value %2 : 
$Builtin.NativeObject - br bb3 - -bb2: - br bb3 - -bb3: - destroy_addr %0a : $*FakeOptional - dealloc_stack %0a : $*FakeOptional - %9999 = tuple() - return %9999 : $() -} - -// CHECK-LABEL: sil [ossa] @switch_enum_test_loadcopy_with_default : $@convention(thin) (@owned FakeOptional) -> () { -// CHECK-NOT: load [copy] -// CHECK: load_borrow -// CHECK-NOT: load [copy] -// CHECK: } // end sil function 'switch_enum_test_loadcopy_with_default' -sil [ossa] @switch_enum_test_loadcopy_with_default : $@convention(thin) (@owned FakeOptional) -> () { -bb0(%0 : @owned $FakeOptional): - %0a = alloc_stack $FakeOptional - store %0 to [init] %0a : $*FakeOptional - %1 = load [copy] %0a : $*FakeOptional - switch_enum %1 : $FakeOptional, case #FakeOptional.some!enumelt: bb1, default bb2 - -bb1(%2 : @owned $Builtin.NativeObject): - destroy_value %2 : $Builtin.NativeObject - br bb3 - -bb2(%3 : @owned $FakeOptional): - destroy_value %3 : $FakeOptional - br bb3 - -bb3: - destroy_addr %0a : $*FakeOptional - dealloc_stack %0a : $*FakeOptional - %9999 = tuple() - return %9999 : $() -} - -// CHECK-LABEL: sil [ossa] @switch_enum_test_loadcopy_with_leaked_enum_case : $@convention(thin) (@owned FakeOptional) -> () { -// CHECK-NOT: load [copy] -// CHECK: load_borrow -// CHECK-NOT: load [copy] -// CHECK: } // end sil function 'switch_enum_test_loadcopy_with_leaked_enum_case' -sil [ossa] @switch_enum_test_loadcopy_with_leaked_enum_case : $@convention(thin) (@owned FakeOptional) -> () { -bb0(%0 : @owned $FakeOptional): - %0a = alloc_stack $FakeOptional - store %0 to [init] %0a : $*FakeOptional - %1 = load [copy] %0a : $*FakeOptional - switch_enum %1 : $FakeOptional, case #FakeOptional.some!enumelt: bb1, case #FakeOptional.none!enumelt: bb2 - -bb1(%2 : @owned $Builtin.NativeObject): - unreachable - -bb2: - br bb3 - -bb3: - destroy_addr %0a : $*FakeOptional - dealloc_stack %0a : $*FakeOptional - %9999 = tuple() - return %9999 : $() -} - -// CHECK-LABEL: sil [ossa] 
@switch_enum_test_copyvalue_no_default : $@convention(thin) (@guaranteed FakeOptional) -> () { -// CHECK-NOT: copy_value -// CHECK: } // end sil function 'switch_enum_test_copyvalue_no_default' -sil [ossa] @switch_enum_test_copyvalue_no_default : $@convention(thin) (@guaranteed FakeOptional) -> () { -bb0(%0 : @guaranteed $FakeOptional): - %1 = copy_value %0 : $FakeOptional - switch_enum %1 : $FakeOptional, case #FakeOptional.some!enumelt: bb1, case #FakeOptional.none!enumelt: bb2 - -bb1(%2 : @owned $Builtin.NativeObject): - destroy_value %2 : $Builtin.NativeObject - br bb3 - -bb2: - br bb3 - -bb3: - %9999 = tuple() - return %9999 : $() -} - -// CHECK-LABEL: sil [ossa] @switch_enum_test_copyvalue_with_default : $@convention(thin) (@guaranteed FakeOptional) -> () { -// CHECK-NOT: copy_value -// CHECK: } // end sil function 'switch_enum_test_copyvalue_with_default' -sil [ossa] @switch_enum_test_copyvalue_with_default : $@convention(thin) (@guaranteed FakeOptional) -> () { -bb0(%0 : @guaranteed $FakeOptional): - %1 = copy_value %0 : $FakeOptional - switch_enum %1 : $FakeOptional, case #FakeOptional.some!enumelt: bb1, default bb2 - -bb1(%2 : @owned $Builtin.NativeObject): - destroy_value %2 : $Builtin.NativeObject - br bb3 - -bb2(%3 : @owned $FakeOptional): - destroy_value %3 : $FakeOptional - br bb3 - -bb3: - %9999 = tuple() - return %9999 : $() -} - -// CHECK-LABEL: sil [ossa] @switch_enum_test_copyvalue_with_default_and_extract : $@convention(thin) (@guaranteed FakeOptional) -> () { -// CHECK-NOT: copy_value -// CHECK: } // end sil function 'switch_enum_test_copyvalue_with_default_and_extract' -sil [ossa] @switch_enum_test_copyvalue_with_default_and_extract : $@convention(thin) (@guaranteed FakeOptional) -> () { -bb0(%0 : @guaranteed $FakeOptional): - %f = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - %1 = copy_value %0 : $FakeOptional - switch_enum %1 : $FakeOptional, case #FakeOptional.some!enumelt: bb1, default bb2 - 
-bb1(%2 : @owned $Builtin.NativeObject): - destroy_value %2 : $Builtin.NativeObject - br bb3 - -bb2(%3 : @owned $FakeOptional): - %3a = unchecked_enum_data %3 : $FakeOptional, #FakeOptional.some!enumelt - apply %f(%3a) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - destroy_value %3a : $Builtin.NativeObject - br bb3 - -bb3: - %9999 = tuple() - return %9999 : $() -} - -// TODO: We currently are unable to get rid of the begin_borrow. We should be -// able to with appropriate analysis. -// CHECK-LABEL: sil [ossa] @switch_enum_test_copyvalue_with_borrow : $@convention(thin) (@owned FakeOptional) -> () { -// CHECK-NOT: copy_value -// CHECK: } // end sil function 'switch_enum_test_copyvalue_with_borrow' -sil [ossa] @switch_enum_test_copyvalue_with_borrow : $@convention(thin) (@owned FakeOptional) -> () { -bb0(%0 : @owned $FakeOptional): - %f = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - %0a = begin_borrow %0 : $FakeOptional - %1 = copy_value %0a : $FakeOptional - switch_enum %1 : $FakeOptional, case #FakeOptional.some!enumelt: bb1, default bb2 - -bb1(%2 : @owned $Builtin.NativeObject): - destroy_value %2 : $Builtin.NativeObject - br bb3 - -bb2(%3 : @owned $FakeOptional): - %3a = unchecked_enum_data %3 : $FakeOptional, #FakeOptional.some!enumelt - apply %f(%3a) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - destroy_value %3a : $Builtin.NativeObject - br bb3 - -bb3: - end_borrow %0a : $FakeOptional - destroy_value %0 : $FakeOptional - %9999 = tuple() - return %9999 : $() -} - -// TODO: We can support this with time. 
-// -// CHECK-LABEL: sil [ossa] @do_eliminate_begin_borrow_consumed_by_guaranteed_phi : $@convention(thin) (@owned Builtin.NativeObject) -> () { -// CHECK: begin_borrow -// CHECK: } // end sil function 'do_eliminate_begin_borrow_consumed_by_guaranteed_phi' -sil [ossa] @do_eliminate_begin_borrow_consumed_by_guaranteed_phi : $@convention(thin) (@owned Builtin.NativeObject) -> () { -bb0(%0 : @owned $Builtin.NativeObject): - %1 = begin_borrow %0 : $Builtin.NativeObject - br bb1(%1 : $Builtin.NativeObject) - -bb1(%2 : @guaranteed $Builtin.NativeObject): - end_borrow %2 : $Builtin.NativeObject - destroy_value %0 : $Builtin.NativeObject - %9999 = tuple() - return %9999 : $() -} - -// CHECK-LABEL: sil [ossa] @join_simple_liveranges_in_same_block_1 : $@convention(thin) (@owned Builtin.NativeObject) -> () { -// CHECK-NOT: copy_value -// CHECK: } // end sil function 'join_simple_liveranges_in_same_block_1' -sil [ossa] @join_simple_liveranges_in_same_block_1 : $@convention(thin) (@owned Builtin.NativeObject) -> () { -bb0(%0 : @owned $Builtin.NativeObject): - %1 = copy_value %0 : $Builtin.NativeObject - destroy_value %0 : $Builtin.NativeObject - destroy_value %1 : $Builtin.NativeObject - %9999 = tuple() - return %9999 : $() -} - -// CHECK-LABEL: sil [ossa] @join_simple_liveranges_in_same_block_2 : $@convention(thin) (@owned Builtin.NativeObject) -> @owned Builtin.NativeObject { -// CHECK-NOT: copy_value -// CHECK: } // end sil function 'join_simple_liveranges_in_same_block_2' -sil [ossa] @join_simple_liveranges_in_same_block_2 : $@convention(thin) (@owned Builtin.NativeObject) -> @owned Builtin.NativeObject { -bb0(%0 : @owned $Builtin.NativeObject): - %1 = copy_value %0 : $Builtin.NativeObject - destroy_value %0 : $Builtin.NativeObject - return %1 : $Builtin.NativeObject -} - -// CHECK-LABEL: sil [ossa] @join_simple_liveranges_in_same_block_3 : $@convention(thin) (@owned Builtin.NativeObject) -> () { -// CHECK-NOT: copy_value -// CHECK: } // end sil function 
'join_simple_liveranges_in_same_block_3' -sil [ossa] @join_simple_liveranges_in_same_block_3 : $@convention(thin) (@owned Builtin.NativeObject) -> () { -bb0(%0 : @owned $Builtin.NativeObject): - %1 = copy_value %0 : $Builtin.NativeObject - br bb1 - -bb1: - destroy_value %0 : $Builtin.NativeObject - %f = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () - apply %f(%1) : $@convention(thin) (@owned Builtin.NativeObject) -> () - br bb2 - -bb2: - %9999 = tuple() - return %9999 : $() -} - -// CHECK-LABEL: sil [ossa] @join_simple_liveranges_in_same_block_4 : $@convention(thin) (@owned Builtin.NativeObject) -> () { -// CHECK-NOT: copy_value -// CHECK: } // end sil function 'join_simple_liveranges_in_same_block_4' -sil [ossa] @join_simple_liveranges_in_same_block_4 : $@convention(thin) (@owned Builtin.NativeObject) -> () { -bb0(%0 : @owned $Builtin.NativeObject): - %1 = copy_value %0 : $Builtin.NativeObject - br bb1 - -bb1: - destroy_value %0 : $Builtin.NativeObject - %f = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - apply %f(%1) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () - %f2 = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () - apply %f2(%1) : $@convention(thin) (@owned Builtin.NativeObject) -> () - br bb2 - -bb2: - %9999 = tuple() - return %9999 : $() -} - -// CHECK-LABEL: sil [ossa] @donot_join_simple_liveranges_in_same_block_1 : $@convention(thin) (@owned Builtin.NativeObject) -> () { -// CHECK: copy_value -// CHECK: } // end sil function 'donot_join_simple_liveranges_in_same_block_1' -sil [ossa] @donot_join_simple_liveranges_in_same_block_1 : $@convention(thin) (@owned Builtin.NativeObject) -> () { -bb0(%0 : @owned $Builtin.NativeObject): - %1 = copy_value %0 : $Builtin.NativeObject - br bb1 - -bb1: - %f = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () - apply %f(%1) : $@convention(thin) (@owned 
Builtin.NativeObject) -> () - destroy_value %0 : $Builtin.NativeObject - %9999 = tuple() - return %9999 : $() -} - -// CHECK-LABEL: sil [ossa] @donot_join_simple_liveranges_in_same_block_2 : $@convention(thin) (@owned Builtin.NativeObject) -> () { -// CHECK-NOT: copy_value -// CHECK: } // end sil function 'donot_join_simple_liveranges_in_same_block_2' -sil [ossa] @donot_join_simple_liveranges_in_same_block_2 : $@convention(thin) (@owned Builtin.NativeObject) -> () { -bb0(%0 : @owned $Builtin.NativeObject): - %1 = copy_value %0 : $Builtin.NativeObject - br bb1 - -bb1: - destroy_value %0 : $Builtin.NativeObject - %2 = unchecked_ref_cast %1 : $Builtin.NativeObject to $Builtin.NativeObject - %f = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () - apply %f(%2) : $@convention(thin) (@owned Builtin.NativeObject) -> () - %9999 = tuple() - return %9999 : $() -} - -// Forwarding case. We need LiveRanges for this. -// -// CHECK-LABEL: sil [ossa] @donot_join_simple_liveranges_in_same_block_3 : $@convention(thin) (@owned Builtin.NativeObject) -> () { -// CHECK: copy_value -// CHECK: } // end sil function 'donot_join_simple_liveranges_in_same_block_3' -sil [ossa] @donot_join_simple_liveranges_in_same_block_3 : $@convention(thin) (@owned Builtin.NativeObject) -> () { -bb0(%0 : @owned $Builtin.NativeObject): - %1 = copy_value %0 : $Builtin.NativeObject - br bb1 - -bb1: - %2 = unchecked_ref_cast %0 : $Builtin.NativeObject to $Builtin.NativeObject - destroy_value %2 : $Builtin.NativeObject - %f = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () - apply %f(%1) : $@convention(thin) (@owned Builtin.NativeObject) -> () - %9999 = tuple() - return %9999 : $() -} - -// Now test cases where we find our consumer is in the return block or is a -// return itself. 
-// -// CHECK-LABEL: sil [ossa] @join_simple_liveranges_not_same_block_with_consuming_return : $@convention(thin) (@owned Builtin.NativeObject) -> @owned Builtin.NativeObject { -// CHECK-NOT: copy_value -// CHECK: } // end sil function 'join_simple_liveranges_not_same_block_with_consuming_return' -sil [ossa] @join_simple_liveranges_not_same_block_with_consuming_return : $@convention(thin) (@owned Builtin.NativeObject) -> @owned Builtin.NativeObject { -bb0(%0 : @owned $Builtin.NativeObject): - %1 = copy_value %0 : $Builtin.NativeObject - br bb1 - -bb1: - destroy_value %0 : $Builtin.NativeObject - br bb2 - -bb2: - return %1 : $Builtin.NativeObject -} - -// CHECK-LABEL: sil [ossa] @join_simple_liveranges_not_same_block_consumed_in_return_block : $@convention(thin) (@owned Builtin.NativeObject) -> () { -// CHECK-NOT: copy_value -// CHECK: } // end sil function 'join_simple_liveranges_not_same_block_consumed_in_return_block' -sil [ossa] @join_simple_liveranges_not_same_block_consumed_in_return_block : $@convention(thin) (@owned Builtin.NativeObject) -> () { -bb0(%0 : @owned $Builtin.NativeObject): - %1 = copy_value %0 : $Builtin.NativeObject - br bb1 - -bb1: - destroy_value %0 : $Builtin.NativeObject - br bb2 - -bb2: - %f = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () - apply %f(%1) : $@convention(thin) (@owned Builtin.NativeObject) -> () - %9999 = tuple() - return %9999 : $() + apply %func(%4) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> MyNever + unreachable } -// CHECK-LABEL: sil [ossa] @donot_join_simple_liveranges_not_same_block_1 : $@convention(thin) (@owned Builtin.NativeObject) -> () { -// CHECK: copy_value -// CHECK: } // end sil function 'donot_join_simple_liveranges_not_same_block_1' -sil [ossa] @donot_join_simple_liveranges_not_same_block_1 : $@convention(thin) (@owned Builtin.NativeObject) -> () { -bb0(%0 : @owned $Builtin.NativeObject): - %1 = copy_value %0 : $Builtin.NativeObject - br bb1 +// Make sure 
that since we have a guaranteed argument and do not need to reason +// about end_borrows, we handle this. +// +// CHECK-LABEL: sil [ossa] @guaranteed_arg_used_by_postdominating_no_return_function : $@convention(thin) (@guaranteed NativeObjectPair) -> MyNever { +// CHECK-NOT: copy_value +// CHECK: } // end sil function 'guaranteed_arg_used_by_postdominating_no_return_function' +sil [ossa] @guaranteed_arg_used_by_postdominating_no_return_function : $@convention(thin) (@guaranteed NativeObjectPair) -> MyNever { +bb0(%0 : @guaranteed $NativeObjectPair): + %3 = struct_extract %0 : $NativeObjectPair, #NativeObjectPair.obj1 + %4 = copy_value %3 : $Builtin.NativeObject + %func = function_ref @unreachable_guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> MyNever + apply %func(%4) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> MyNever + unreachable +} -bb1: - destroy_value %0 : $Builtin.NativeObject - br bb2 -bb2: - %f = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () - apply %f(%1) : $@convention(thin) (@owned Builtin.NativeObject) -> () - br bb3 +// Make sure that since our borrow introducer is a begin_borrow, we do not +// eliminate the copy. 
+// +// CHECK-LABEL: sil [ossa] @borrowed_val_used_by_postdominating_no_return_function : $@convention(thin) (@owned NativeObjectPair) -> MyNever { +// CHECK: copy_value +// CHECK: } // end sil function 'borrowed_val_used_by_postdominating_no_return_function' +sil [ossa] @borrowed_val_used_by_postdominating_no_return_function : $@convention(thin) (@owned NativeObjectPair) -> MyNever { +bb0(%0 : @owned $NativeObjectPair): + %1 = begin_borrow %0 : $NativeObjectPair + %2 = struct_extract %1 : $NativeObjectPair, #NativeObjectPair.obj1 + %3 = copy_value %2 : $Builtin.NativeObject + %func = function_ref @unreachable_guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> MyNever + apply %func(%3) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> MyNever + unreachable +} -bb3: +// Just make sure that we do not crash on this. We should be able to eliminate +// everything here. +// +// CHECK-LABEL: sil [ossa] @copy_value_with_debug_user : $@convention(thin) (@guaranteed NativeObjectPair) -> () { +// CHECK: bb0 +// CHECK-NEXT: tuple +// CHECK-NEXT: return +// CHECK-NEXT: } // end sil function 'copy_value_with_debug_user' +sil [ossa] @copy_value_with_debug_user : $@convention(thin) (@guaranteed NativeObjectPair) -> () { +bb0(%0 : @guaranteed $NativeObjectPair): + %1 = struct_extract %0 : $NativeObjectPair, #NativeObjectPair.obj1 + %2 = copy_value %1 : $Builtin.NativeObject + debug_value %2 : $Builtin.NativeObject, let, name "myField" + destroy_value %2 : $Builtin.NativeObject %9999 = tuple() return %9999 : $() } -// CHECK-LABEL: sil [ossa] @donot_join_simple_liveranges_not_same_block_2 : $@convention(thin) (@owned Builtin.NativeObject) -> () { +// Just make sure we do not crash here. 
+// +// CHECK-LABEL: sil [ossa] @do_not_insert_end_borrow_given_deadend : $@convention(thin) (@guaranteed ClassLet) -> () { // CHECK: copy_value -// CHECK: } // end sil function 'donot_join_simple_liveranges_not_same_block_2' -sil [ossa] @donot_join_simple_liveranges_not_same_block_2 : $@convention(thin) (@owned Builtin.NativeObject) -> () { -bb0(%0 : @owned $Builtin.NativeObject): - %1 = copy_value %0 : $Builtin.NativeObject - br bb1 +// CHECK: } // end sil function 'do_not_insert_end_borrow_given_deadend' +sil [ossa] @do_not_insert_end_borrow_given_deadend : $@convention(thin) (@guaranteed ClassLet) -> () { +bb0(%x : @guaranteed $ClassLet): + %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () + %p = ref_element_addr %x : $ClassLet, #ClassLet.aLet + %v = load_borrow %p : $*Klass + %c = copy_value %v : $Klass + end_borrow %v : $Klass + apply %f(%c) : $@convention(thin) (@guaranteed Klass) -> () + cond_br undef, bb1, bb2 bb1: - %f = function_ref @owned_user : $@convention(thin) (@owned Builtin.NativeObject) -> () - apply %f(%1) : $@convention(thin) (@owned Builtin.NativeObject) -> () - br bb2 + destroy_value %c : $Klass + br bb3 bb2: - destroy_value %0 : $Builtin.NativeObject + destroy_value %c : $Klass br bb3 bb3: - %9999 = tuple() - return %9999 : $() + unreachable } -// CHECK-LABEL: sil [ossa] @convert_load_copy_to_load_borrow_despite_switch_enum_functionarg : $@convention(thin) (@guaranteed FakeOptional) -> () { -// CHECK-NOT: load [copy] -// CHECK: load_borrow -// CHECK-NOT: load [copy] -// CHECK: } // end sil function 'convert_load_copy_to_load_borrow_despite_switch_enum_functionarg' -sil [ossa] @convert_load_copy_to_load_borrow_despite_switch_enum_functionarg : $@convention(thin) (@guaranteed FakeOptional) -> () { -bb0(%0 : @guaranteed $FakeOptional): - %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () - switch_enum %0 : $FakeOptional, case #FakeOptional.some!enumelt: bb1, case #FakeOptional.none!enumelt: 
bb2 +// CHECK-LABEL: sil [ossa] @switch_enum_test_copyvalue_no_default : $@convention(thin) (@guaranteed FakeOptional) -> () { +// CHECK-NOT: copy_value +// CHECK: } // end sil function 'switch_enum_test_copyvalue_no_default' +sil [ossa] @switch_enum_test_copyvalue_no_default : $@convention(thin) (@guaranteed FakeOptional) -> () { +bb0(%0 : @guaranteed $FakeOptional): + %1 = copy_value %0 : $FakeOptional + switch_enum %1 : $FakeOptional, case #FakeOptional.some!enumelt: bb1, case #FakeOptional.none!enumelt: bb2 -bb1(%1 : @guaranteed $ClassLet): - %2 = ref_element_addr %1 : $ClassLet, #ClassLet.aLet - %3 = load [copy] %2 : $*Klass - apply %f(%3) : $@convention(thin) (@guaranteed Klass) -> () - destroy_value %3 : $Klass +bb1(%2 : @owned $Builtin.NativeObject): + destroy_value %2 : $Builtin.NativeObject br bb3 bb2: @@ -1896,257 +646,93 @@ bb3: return %9999 : $() } -// CHECK-LABEL: sil [ossa] @convert_load_copy_to_load_borrow_despite_switch_enum_beginborrow : $@convention(thin) (@owned FakeOptional) -> () { -// CHECK-NOT: load [copy] -// CHECK: load_borrow -// CHECK-NOT: load [copy] -// CHECK: } // end sil function 'convert_load_copy_to_load_borrow_despite_switch_enum_beginborrow' -sil [ossa] @convert_load_copy_to_load_borrow_despite_switch_enum_beginborrow : $@convention(thin) (@owned FakeOptional) -> () { -bb0(%0 : @owned $FakeOptional): - %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () - %0a = begin_borrow %0 : $FakeOptional - switch_enum %0a : $FakeOptional, case #FakeOptional.some!enumelt: bb1, case #FakeOptional.none!enumelt: bb2 - -bb1(%1 : @guaranteed $ClassLet): - %2 = ref_element_addr %1 : $ClassLet, #ClassLet.aLet - %3 = load [copy] %2 : $*Klass - apply %f(%3) : $@convention(thin) (@guaranteed Klass) -> () - destroy_value %3 : $Klass +// CHECK-LABEL: sil [ossa] @switch_enum_test_copyvalue_with_default : $@convention(thin) (@guaranteed FakeOptional) -> () { +// CHECK-NOT: copy_value +// CHECK: } // end sil function 
'switch_enum_test_copyvalue_with_default' +sil [ossa] @switch_enum_test_copyvalue_with_default : $@convention(thin) (@guaranteed FakeOptional) -> () { +bb0(%0 : @guaranteed $FakeOptional): + %1 = copy_value %0 : $FakeOptional + switch_enum %1 : $FakeOptional, case #FakeOptional.some!enumelt: bb1, default bb2 + +bb1(%2 : @owned $Builtin.NativeObject): + destroy_value %2 : $Builtin.NativeObject br bb3 -bb2: +bb2(%3 : @owned $FakeOptional): + destroy_value %3 : $FakeOptional br bb3 bb3: - end_borrow %0a : $FakeOptional - destroy_value %0 : $FakeOptional %9999 = tuple() return %9999 : $() } -// CHECK-LABEL: sil [ossa] @convert_load_copy_to_load_borrow_despite_switch_enum_loadborrow : $@convention(thin) (@in_guaranteed FakeOptional) -> () { -// CHECK-NOT: load [copy] -// CHECK: load_borrow -// CHECK-NOT: load [copy] -// CHECK: load_borrow -// CHECK-NOT: load [copy] -// CHECK: } // end sil function 'convert_load_copy_to_load_borrow_despite_switch_enum_loadborrow' -sil [ossa] @convert_load_copy_to_load_borrow_despite_switch_enum_loadborrow : $@convention(thin) (@in_guaranteed FakeOptional) -> () { -bb0(%0 : $*FakeOptional): - %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () - %0a = load_borrow %0 : $*FakeOptional - switch_enum %0a : $FakeOptional, case #FakeOptional.some!enumelt: bb1, case #FakeOptional.none!enumelt: bb2 - -bb1(%1 : @guaranteed $ClassLet): - %2 = ref_element_addr %1 : $ClassLet, #ClassLet.aLet - %3 = load [copy] %2 : $*Klass - apply %f(%3) : $@convention(thin) (@guaranteed Klass) -> () - destroy_value %3 : $Klass +// CHECK-LABEL: sil [ossa] @switch_enum_test_copyvalue_with_default_and_extract : $@convention(thin) (@guaranteed FakeOptional) -> () { +// CHECK-NOT: copy_value +// CHECK: } // end sil function 'switch_enum_test_copyvalue_with_default_and_extract' +sil [ossa] @switch_enum_test_copyvalue_with_default_and_extract : $@convention(thin) (@guaranteed FakeOptional) -> () { +bb0(%0 : @guaranteed $FakeOptional): + %f = 
function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + %1 = copy_value %0 : $FakeOptional + switch_enum %1 : $FakeOptional, case #FakeOptional.some!enumelt: bb1, default bb2 + +bb1(%2 : @owned $Builtin.NativeObject): + destroy_value %2 : $Builtin.NativeObject br bb3 -bb2: +bb2(%3 : @owned $FakeOptional): + %3a = unchecked_enum_data %3 : $FakeOptional, #FakeOptional.some!enumelt + apply %f(%3a) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + destroy_value %3a : $Builtin.NativeObject br bb3 bb3: - end_borrow %0a : $FakeOptional %9999 = tuple() return %9999 : $() } -// TODO: We can support this in a little bit once the rest of SemanticARCOpts is -// guaranteed to be safe with guaranteed phis. -// -// CHECK-LABEL: sil [ossa] @convert_load_copy_to_load_borrow_despite_switch_enum_guaranteedphi_1 : $@convention(thin) (@owned FakeOptional) -> () { -// CHECK: load [copy] -// CHECK: } // end sil function 'convert_load_copy_to_load_borrow_despite_switch_enum_guaranteedphi_1' -sil [ossa] @convert_load_copy_to_load_borrow_despite_switch_enum_guaranteedphi_1 : $@convention(thin) (@owned FakeOptional) -> () { -bb0(%0 : @owned $FakeOptional): - %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () - cond_br undef, bb0a, bb0b - -bb0a: - %0a = begin_borrow %0 : $FakeOptional - br bb0c(%0a : $FakeOptional) - -bb0b: - %0b = begin_borrow %0 : $FakeOptional - br bb0c(%0b : $FakeOptional) - -bb0c(%0c : @guaranteed $FakeOptional): - switch_enum %0c : $FakeOptional, case #FakeOptional.some!enumelt: bb1, case #FakeOptional.none!enumelt: bb2 +// TODO: We currently are unable to get rid of the begin_borrow. We should be +// able to with appropriate analysis. 
+// CHECK-LABEL: sil [ossa] @switch_enum_test_copyvalue_with_borrow : $@convention(thin) (@owned FakeOptional) -> () { +// CHECK-NOT: copy_value +// CHECK: } // end sil function 'switch_enum_test_copyvalue_with_borrow' +sil [ossa] @switch_enum_test_copyvalue_with_borrow : $@convention(thin) (@owned FakeOptional) -> () { +bb0(%0 : @owned $FakeOptional): + %f = function_ref @guaranteed_user : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + %0a = begin_borrow %0 : $FakeOptional + %1 = copy_value %0a : $FakeOptional + switch_enum %1 : $FakeOptional, case #FakeOptional.some!enumelt: bb1, default bb2 -bb1(%1 : @guaranteed $ClassLet): - %2 = ref_element_addr %1 : $ClassLet, #ClassLet.aLet - %3 = load [copy] %2 : $*Klass - apply %f(%3) : $@convention(thin) (@guaranteed Klass) -> () - destroy_value %3 : $Klass +bb1(%2 : @owned $Builtin.NativeObject): + destroy_value %2 : $Builtin.NativeObject br bb3 -bb2: +bb2(%3 : @owned $FakeOptional): + %3a = unchecked_enum_data %3 : $FakeOptional, #FakeOptional.some!enumelt + apply %f(%3a) : $@convention(thin) (@guaranteed Builtin.NativeObject) -> () + destroy_value %3a : $Builtin.NativeObject br bb3 bb3: - end_borrow %0c : $FakeOptional - destroy_value %0 : $FakeOptional - %9999 = tuple() - return %9999 : $() -} - -// Make sure that if begin_borrow has a consuming end scope use, we can still -// eliminate load [copy]. 
-// -// CHECK-LABEL: sil [ossa] @convert_load_copy_to_load_borrow_despite_switch_enum_guaranteedphi_2 : $@convention(thin) (@owned FakeOptional) -> () { -// CHECK-NOT: load [copy] -// CHECK: load_borrow -// CHECK-NOT: load [copy] -// CHECK: } // end sil function 'convert_load_copy_to_load_borrow_despite_switch_enum_guaranteedphi_2' -sil [ossa] @convert_load_copy_to_load_borrow_despite_switch_enum_guaranteedphi_2 : $@convention(thin) (@owned FakeOptional) -> () { -bb0(%0 : @owned $FakeOptional): - %f = function_ref @black_hole : $@convention(thin) (@guaranteed Klass) -> () - cond_br undef, bb1, bb2 - -bb1: - %0a = begin_borrow %0 : $FakeOptional - br bb3(%0a : $FakeOptional) - -bb2: - %0b = begin_borrow %0 : $FakeOptional - %0b2 = unchecked_enum_data %0b : $FakeOptional, #FakeOptional.some!enumelt - %2 = ref_element_addr %0b2 : $ClassLet, #ClassLet.aLet - %3 = load [copy] %2 : $*Klass - apply %f(%3) : $@convention(thin) (@guaranteed Klass) -> () - destroy_value %3 : $Klass - br bb3(%0b : $FakeOptional) - -bb3(%0c : @guaranteed $FakeOptional): - %f2 = function_ref @guaranteed_fakeoptional_classlet_user : $@convention(thin) (@guaranteed FakeOptional) -> () - apply %f2(%0c) : $@convention(thin) (@guaranteed FakeOptional) -> () - end_borrow %0c : $FakeOptional - destroy_value %0 : $FakeOptional - %9999 = tuple() - return %9999 : $() -} - -// CHECK-LABEL: sil [ossa] @loadcopy_to_loadborrow_from_read_access : $@convention(thin) (@guaranteed ClassLet) -> () { -// CHECK-NOT: load [copy] -// CHECK: load_borrow -// CHECK-NOT: load [copy] -// CHECK: } // end sil function 'loadcopy_to_loadborrow_from_read_access' -sil [ossa] @loadcopy_to_loadborrow_from_read_access : $@convention(thin) (@guaranteed ClassLet) -> () { -bb0(%0 : @guaranteed $ClassLet): - %1 = ref_element_addr %0 : $ClassLet, #ClassLet.aVar - %2 = begin_access [read] [dynamic] %1 : $*Klass - %3 = load [copy] %2 : $*Klass - %f = function_ref @guaranteed_klass_user : $@convention(thin) (@guaranteed Klass) -> () - 
apply %f(%3) : $@convention(thin) (@guaranteed Klass) -> () - destroy_value %3 : $Klass - end_access %2 : $*Klass - %9999 = tuple() - return %9999 : $() -} - -// CHECK-LABEL: sil [ossa] @loadcopy_to_loadborrow_from_mut_access_without_writes : $@convention(thin) (@guaranteed ClassLet) -> () { -// CHECK-NOT: load [copy] -// CHECK: load_borrow -// CHECK-NOT: load [copy] -// CHECK: } // end sil function 'loadcopy_to_loadborrow_from_mut_access_without_writes' -sil [ossa] @loadcopy_to_loadborrow_from_mut_access_without_writes : $@convention(thin) (@guaranteed ClassLet) -> () { -bb0(%0 : @guaranteed $ClassLet): - %1 = ref_element_addr %0 : $ClassLet, #ClassLet.aVar - %2 = begin_access [modify] [dynamic] %1 : $*Klass - %3 = load [copy] %2 : $*Klass - %f = function_ref @guaranteed_klass_user : $@convention(thin) (@guaranteed Klass) -> () - apply %f(%3) : $@convention(thin) (@guaranteed Klass) -> () - destroy_value %3 : $Klass - end_access %2 : $*Klass - %9999 = tuple() - return %9999 : $() -} - -// We can with time handle this case by proving that the destroy_addr is after -// the destroy_value. 
-// -// CHECK-LABEL: sil [ossa] @loadcopy_to_loadborrow_from_mut_access_with_writes : $@convention(thin) (@guaranteed ClassLet) -> () { -// CHECK-NOT: load_borrow -// CHECK: load [copy] -// CHECK-NOT: load_borrow -// CHECK: } // end sil function 'loadcopy_to_loadborrow_from_mut_access_with_writes' -sil [ossa] @loadcopy_to_loadborrow_from_mut_access_with_writes : $@convention(thin) (@guaranteed ClassLet) -> () { -bb0(%0 : @guaranteed $ClassLet): - %1 = ref_element_addr %0 : $ClassLet, #ClassLet.aVar - %2 = begin_access [modify] [dynamic] %1 : $*Klass - %3 = load [copy] %2 : $*Klass - %f = function_ref @guaranteed_klass_user : $@convention(thin) (@guaranteed Klass) -> () - apply %f(%3) : $@convention(thin) (@guaranteed Klass) -> () - destroy_value %3 : $Klass - destroy_addr %2 : $*Klass - end_access %2 : $*Klass - %9999 = tuple() - return %9999 : $() -} - -// We will never be able to handle this unless we can hoist the copy before the -// destroy_addr. Once we have begin_borrows around all interior_pointers, we can -// handle this version. 
-// -// CHECK-LABEL: sil [ossa] @loadcopy_to_loadborrow_from_mut_access_with_writes_2 : $@convention(thin) (@guaranteed ClassLet) -> () { -// CHECK-NOT: load_borrow -// CHECK: load [copy] -// CHECK-NOT: load_borrow -// CHECK: } // end sil function 'loadcopy_to_loadborrow_from_mut_access_with_writes_2' -sil [ossa] @loadcopy_to_loadborrow_from_mut_access_with_writes_2 : $@convention(thin) (@guaranteed ClassLet) -> () { -bb0(%0 : @guaranteed $ClassLet): - %1 = ref_element_addr %0 : $ClassLet, #ClassLet.aVar - %2 = begin_access [modify] [dynamic] %1 : $*Klass - %3 = load [copy] %2 : $*Klass - %f = function_ref @guaranteed_klass_user : $@convention(thin) (@guaranteed Klass) -> () - apply %f(%3) : $@convention(thin) (@guaranteed Klass) -> () - destroy_addr %2 : $*Klass - destroy_value %3 : $Klass - end_access %2 : $*Klass + end_borrow %0a : $FakeOptional + destroy_value %0 : $FakeOptional %9999 = tuple() return %9999 : $() } -// We will never be able to handle this since we can't hoist the destroy_value -// before the guaranteed_klass_user. +// TODO: We can support this with time. 
// -// CHECK-LABEL: sil [ossa] @loadcopy_to_loadborrow_from_mut_access_with_writes_3 : $@convention(thin) (@guaranteed ClassLet) -> () { -// CHECK-NOT: load_borrow -// CHECK: load [copy] -// CHECK-NOT: load_borrow -// CHECK: } // end sil function 'loadcopy_to_loadborrow_from_mut_access_with_writes_3' -sil [ossa] @loadcopy_to_loadborrow_from_mut_access_with_writes_3 : $@convention(thin) (@guaranteed ClassLet) -> () { -bb0(%0 : @guaranteed $ClassLet): - %1 = ref_element_addr %0 : $ClassLet, #ClassLet.aVar - %2 = begin_access [modify] [dynamic] %1 : $*Klass - %3 = load [copy] %2 : $*Klass - destroy_addr %2 : $*Klass - %f = function_ref @guaranteed_klass_user : $@convention(thin) (@guaranteed Klass) -> () - apply %f(%3) : $@convention(thin) (@guaranteed Klass) -> () - destroy_value %3 : $Klass - end_access %2 : $*Klass - %9999 = tuple() - return %9999 : $() -} +// CHECK-LABEL: sil [ossa] @do_eliminate_begin_borrow_consumed_by_guaranteed_phi : $@convention(thin) (@owned Builtin.NativeObject) -> () { +// CHECK: begin_borrow +// CHECK: } // end sil function 'do_eliminate_begin_borrow_consumed_by_guaranteed_phi' +sil [ossa] @do_eliminate_begin_borrow_consumed_by_guaranteed_phi : $@convention(thin) (@owned Builtin.NativeObject) -> () { +bb0(%0 : @owned $Builtin.NativeObject): + %1 = begin_borrow %0 : $Builtin.NativeObject + br bb1(%1 : $Builtin.NativeObject) -// We will never be able to handle this since the end_access is before the use -// of %3, so we can not form a long enough load_borrow. 
-// -// CHECK-LABEL: sil [ossa] @loadcopy_to_loadborrow_from_mut_access_with_writes_4 : $@convention(thin) (@guaranteed ClassLet) -> () { -// CHECK-NOT: load_borrow -// CHECK: load [copy] -// CHECK-NOT: load_borrow -// CHECK: } // end sil function 'loadcopy_to_loadborrow_from_mut_access_with_writes_4' -sil [ossa] @loadcopy_to_loadborrow_from_mut_access_with_writes_4 : $@convention(thin) (@guaranteed ClassLet) -> () { -bb0(%0 : @guaranteed $ClassLet): - %1 = ref_element_addr %0 : $ClassLet, #ClassLet.aVar - %2 = begin_access [modify] [dynamic] %1 : $*Klass - %3 = load [copy] %2 : $*Klass - end_access %2 : $*Klass - %f = function_ref @guaranteed_klass_user : $@convention(thin) (@guaranteed Klass) -> () - apply %f(%3) : $@convention(thin) (@guaranteed Klass) -> () - destroy_value %3 : $Klass +bb1(%2 : @guaranteed $Builtin.NativeObject): + end_borrow %2 : $Builtin.NativeObject + destroy_value %0 : $Builtin.NativeObject %9999 = tuple() return %9999 : $() } @@ -2565,34 +1151,6 @@ bb6: return %9999 : $() } -// Make sure that we do not promote the load [copy] to a load_borrow since it -// has a use outside of the access scope. 
-// -// CHECK-LABEL: sil [ossa] @deadEndBlockDoNotPromote : $@convention(method) (@guaranteed ClassLet) -> () { -// CHECK: load_borrow -// CHECK: load [copy] -// CHECK: } // end sil function 'deadEndBlockDoNotPromote' -sil [ossa] @deadEndBlockDoNotPromote : $@convention(method) (@guaranteed ClassLet) -> () { -bb0(%0 : @guaranteed $ClassLet): - %4 = ref_element_addr %0 : $ClassLet, #ClassLet.anotherLet - %5 = load [copy] %4 : $*ClassLet - %6 = begin_borrow %5 : $ClassLet - %7 = ref_element_addr %6 : $ClassLet, #ClassLet.anOptionalLet - %8 = begin_access [read] [dynamic] %7 : $*FakeOptional - %9 = load [copy] %8 : $*FakeOptional - end_access %8 : $*FakeOptional - end_borrow %6 : $ClassLet - destroy_value %5 : $ClassLet - switch_enum %9 : $FakeOptional, case #FakeOptional.none!enumelt: bb1, case #FakeOptional.some!enumelt: bb2 - -bb1: - %107 = tuple () - return %107 : $() - -bb2(%39 : @owned $Klass): - unreachable -} - // CHECK-LABEL: sil [ossa] @struct_with_multiple_nontrivial_operands : $@convention(thin) (@guaranteed Builtin.NativeObject, @guaranteed Builtin.NativeObject) -> () { // CHECK-NOT: copy_value // CHECK: } // end sil function 'struct_with_multiple_nontrivial_operands' @@ -2662,86 +1220,6 @@ bb6: br bb5 } -// CHECK-LABEL: sil [ossa] @destructure_with_differing_lifetimes_inout_1 : $@convention(thin) (@inout FakeOptionalNativeObjectPairPair) -> () { -// CHECK-NOT: load_borrow -// CHECK: } // end sil function 'destructure_with_differing_lifetimes_inout_1' -sil [ossa] @destructure_with_differing_lifetimes_inout_1 : $@convention(thin) (@inout FakeOptionalNativeObjectPairPair) -> () { -bb0(%0 : $*FakeOptionalNativeObjectPairPair): - %0a = struct_element_addr %0 : $*FakeOptionalNativeObjectPairPair, #FakeOptionalNativeObjectPairPair.pair1 - %1 = load [copy] %0a : $*FakeOptional - switch_enum %1 : $FakeOptional, case #FakeOptional.some!enumelt: bb1, default bb2 - -bb2(%2 : @owned $FakeOptional): - destroy_value %2 : $FakeOptional - br bbEnd - -bb1(%3 : @owned 
$NativeObjectPair): - (%3a, %3b) = destructure_struct %3 : $NativeObjectPair - cond_br undef, bb1a, bb1b - -bb1a: - destroy_value %3a : $Builtin.NativeObject - destroy_value %3b : $Builtin.NativeObject - br bbEnd - -bb1b: - destroy_value %3a : $Builtin.NativeObject - %f = function_ref @inout_user2 : $@convention(thin) (@inout FakeOptionalNativeObjectPairPair) -> () - apply %f(%0) : $@convention(thin) (@inout FakeOptionalNativeObjectPairPair) -> () - destroy_value %3b : $Builtin.NativeObject - br bbEnd - -bbEnd: - %9999 = tuple() - return %9999 : $() -} - -// CHECK-LABEL: sil [ossa] @destructure_with_differing_lifetimes_inout_2 : $@convention(thin) (@inout FakeOptionalNativeObjectPairPair) -> () { -// CHECK-NOT: load_borrow -// CHECK: } // end sil function 'destructure_with_differing_lifetimes_inout_2' -sil [ossa] @destructure_with_differing_lifetimes_inout_2 : $@convention(thin) (@inout FakeOptionalNativeObjectPairPair) -> () { -bb0(%0 : $*FakeOptionalNativeObjectPairPair): - %0a = struct_element_addr %0 : $*FakeOptionalNativeObjectPairPair, #FakeOptionalNativeObjectPairPair.pair1 - %1 = load [copy] %0a : $*FakeOptional - switch_enum %1 : $FakeOptional, case #FakeOptional.some!enumelt: bb1, default bb2 - -bb2(%2 : @owned $FakeOptional): - destroy_value %2 : $FakeOptional - br bbEnd - -bb1(%3 : @owned $NativeObjectPair): - (%3a, %3b) = destructure_struct %3 : $NativeObjectPair - cond_br undef, bb1a, bb1b - -bb1a: - destroy_value %3a : $Builtin.NativeObject - br bb1ab - -bb1ab: - destroy_value %3b : $Builtin.NativeObject - br bbEnd - -bb1b: - destroy_value %3a : $Builtin.NativeObject - %f = function_ref @inout_user2 : $@convention(thin) (@inout FakeOptionalNativeObjectPairPair) -> () - apply %f(%0) : $@convention(thin) (@inout FakeOptionalNativeObjectPairPair) -> () - cond_br undef, bb1ba, bb1bb - -bb1ba: - br bb1baEnd - -bb1bb: - br bb1baEnd - -bb1baEnd: - destroy_value %3b : $Builtin.NativeObject - br bbEnd - -bbEnd: - %9999 = tuple() - return %9999 : $() -} - // 
CHECK-LABEL: sil [ossa] @enum_with_indirect_case_projectbox_copyvalue_deadend : $@convention(thin) (@guaranteed StructWithEnumWithIndirectCaseField) -> () { // CHECK-NOT: copy_value // CHECK: } // end sil function 'enum_with_indirect_case_projectbox_copyvalue_deadend' @@ -2812,67 +1290,6 @@ bb2(%2 : @owned ${ var Builtin.NativeObject }): unreachable } -// Just make sure that we do not crash on this code and convert the 2nd load -// [copy] to a load_borrow. -// -// CHECK-LABEL: sil [ossa] @inproper_dead_end_block_crasher_test : $@convention(thin) (Builtin.RawPointer) -> () { -// CHECK: load_borrow -// CHECK: load_borrow -// CHECK: } // end sil function 'inproper_dead_end_block_crasher_test' -sil [ossa] @inproper_dead_end_block_crasher_test : $@convention(thin) (Builtin.RawPointer) -> () { -bb0(%0 : $Builtin.RawPointer): - %1 = pointer_to_address %0 : $Builtin.RawPointer to [strict] $*Klass - %2 = load_borrow %1 : $*Klass - %3 = ref_element_addr %2 : $Klass, #Klass.baseLet - %4 = load [copy] %3 : $*Klass - %f = function_ref @guaranteed_klass_user : $@convention(thin) (@guaranteed Klass) -> () - apply %f(%4) : $@convention(thin) (@guaranteed Klass) -> () - destroy_value %4 : $Klass - cond_br undef, bb1, bb2 - -bb1: - unreachable - -bb2: - unreachable -} - -// Make sure we leave only one copy in bb2 and no destroys -// -// CHECK-LABEL: sil [ossa] @join_test_with_forwarding_inst : $@convention(thin) () -> @owned FakeOptional { -// CHECK: bb2: -// CHECK: copy_value -// CHECK-NOT: destroy_value -// CHECK-NOT: copy_value -// CHECK: br bb3( -// CHECK: } // end sil function 'join_test_with_forwarding_inst' -sil [ossa] @join_test_with_forwarding_inst : $@convention(thin) () -> @owned FakeOptional { -bb0: - %allocStack = alloc_stack $Builtin.NativeObject - %0 = function_ref @get_fakeoptional_nativeobject : $@convention(thin) () -> @owned FakeOptional - %1 = apply %0() : $@convention(thin) () -> @owned FakeOptional - cond_br undef, bb1, bb2 - -bb1: - destroy_value %1 : 
$FakeOptional - %2 = enum $FakeOptional, #FakeOptional.none!enumelt - br bb3(%2 : $FakeOptional) - -bb2: - %3 = unchecked_enum_data %1 : $FakeOptional, #FakeOptional.some!enumelt - %4 = copy_value %3 : $Builtin.NativeObject - store %3 to [init] %allocStack : $*Builtin.NativeObject - %4c = copy_value %4 : $Builtin.NativeObject - destroy_value %4 : $Builtin.NativeObject - %5 = enum $FakeOptional, #FakeOptional.some!enumelt, %4c : $Builtin.NativeObject - destroy_addr %allocStack : $*Builtin.NativeObject - br bb3(%5 : $FakeOptional) - -bb3(%result : @owned $FakeOptional): - dealloc_stack %allocStack : $*Builtin.NativeObject - return %result : $FakeOptional -} - // CHECK-LABEL: sil [ossa] @simple_recursive_copy_case : $@convention(thin) () -> () { // CHECK-NOT: copy_value // CHECK: } // end sil function 'simple_recursive_copy_case' @@ -3003,12 +1420,12 @@ bb0: return %9999 : $() } -// Make sure we do not eliminate copies where only the destroy_value is outside -// of the lifetime of the parent value, but a begin_borrow extends the lifetime -// of the value. +// We can eliminate the ARC traffic here due to lifetime joining. There is a +// test in semantic-arc-opts-redundantcopyopts.sil that shows said pass does not +// optimize this pattern. // // CHECK-LABEL: sil [ossa] @simple_recursive_copy_case_destroying_use_out_of_lifetime : $@convention(thin) () -> () { -// CHECK: copy_value +// CHECK-NOT: copy_value // CHECK: } // end sil function 'simple_recursive_copy_case_destroying_use_out_of_lifetime' sil [ossa] @simple_recursive_copy_case_destroying_use_out_of_lifetime : $@convention(thin) () -> () { bb0: @@ -3040,11 +1457,10 @@ bb3: return %9999 : $() } -// Second version of the test that consumes the pair in case we make the -// lifetime joining smart enough to handle the original case. +// We handle this with lifetime joining. 
// // CHECK-LABEL: sil [ossa] @simple_recursive_copy_case_destroying_use_out_of_lifetime_2 : $@convention(thin) () -> () { -// CHECK: copy_value +// CHECK-NOT: copy_value // CHECK: } // end sil function 'simple_recursive_copy_case_destroying_use_out_of_lifetime_2' sil [ossa] @simple_recursive_copy_case_destroying_use_out_of_lifetime_2 : $@convention(thin) () -> () { bb0: diff --git a/test/SILOptimizer/silgen_cleanup.sil b/test/SILOptimizer/silgen_cleanup.sil index e51198847ea00..6ea530e327902 100644 --- a/test/SILOptimizer/silgen_cleanup.sil +++ b/test/SILOptimizer/silgen_cleanup.sil @@ -4,8 +4,27 @@ import Builtin sil_stage raw -import Swift -import SwiftShims +class Klass {} +class SubKlass : Klass {} + +sil @use_klass_guaranteed : $@convention(thin) (@guaranteed Klass) -> () +sil @use_klass_owned : $@convention(thin) (@owned Klass) -> () +sil @use_klass_unowned : $@convention(thin) (Klass) -> () + +enum FakeOptional { +case none +case some(T) +} + +sil @use_fakeoptional_klass_guaranteed : $@convention(thin) (@guaranteed FakeOptional) -> () + +struct Int { + var _value : Builtin.Int32 +} + +struct UInt8 { + var _value : Builtin.Int8 +} // CHECK-LABEL: sil [ossa] @struct_extract_load_to_load_struct_element_addr // CHECK: bb0([[IN:%[0-9]+]] : $*UInt8): @@ -91,30 +110,26 @@ bb0(%0 : $*(Builtin.Int8, Builtin.Int8)): struct X1 { @_hasStorage @_hasInitialValue let a: Int { get } - @_hasStorage @_hasInitialValue var obj1: AnyObject { get set } - @_hasStorage @_hasInitialValue var obj2: AnyObject { get set } - init(a: Int, obj1: AnyObject, obj2: AnyObject) + @_hasStorage @_hasInitialValue var obj1: Builtin.NativeObject { get set } + @_hasStorage @_hasInitialValue var obj2: Builtin.NativeObject { get set } + init(a: Int, obj1: Builtin.NativeObject, obj2: Builtin.NativeObject) } -// CHECK-LABEL: sil private [ossa] @testLoadNontrivial : $@convention(thin) (@inout_aliasable X1) -> (Int, @owned AnyObject, @owned AnyObject) { +// CHECK-LABEL: sil private [ossa] 
@testLoadNontrivial : $@convention(thin) (@inout_aliasable X1) -> (Int, @owned Builtin.NativeObject, @owned Builtin.NativeObject) { // CHECK-LABEL: bb0(%0 : $*X1): // CHECK: [[ACCESS:%.*]] = begin_access [read] [unknown] %0 : $*X1 // CHECK: [[AA:%.*]] = struct_element_addr [[ACCESS]] : $*X1, #X1.a // CHECK: load [trivial] [[AA]] : $*Int // CHECK: [[OA1:%.*]] = struct_element_addr [[ACCESS]] : $*X1, #X1.obj1 -// CHECK: [[OV1:%.*]] = load [copy] [[OA1]] : $*AnyObject +// CHECK: [[OV1:%.*]] = load [copy] [[OA1]] : $*Builtin.NativeObject // CHECK: [[OA2:%.*]] = struct_element_addr [[ACCESS]] : $*X1, #X1.obj2 -// CHECK: [[OV2:%.*]] = load [copy] [[OA2]] : $*AnyObject +// CHECK: [[OV2:%.*]] = load [copy] [[OA2]] : $*Builtin.NativeObject // CHECK: end_access [[ACCESS]] : $*X1 -// CHECK: [[B1:%.*]] = begin_borrow [[OV1]] : $AnyObject -// CHECK: copy_value [[B1]] : $AnyObject -// CHECK: end_borrow [[B1]] : $AnyObject -// CHECK: [[B2:%.*]] = begin_borrow [[OV2]] : $AnyObject -// CHECK: copy_value [[B2]] : $AnyObject -// CHECK: end_borrow [[B2]] : $AnyObject +// CHECK: copy_value [[OV1]] : $Builtin.NativeObject +// CHECK: copy_value [[OV2]] : $Builtin.NativeObject // CHECK: return // CHECK-LABEL: } // end sil function 'testLoadNontrivial' -sil private [ossa] @testLoadNontrivial : $@convention(thin) (@inout_aliasable X1) -> (Int, @owned AnyObject, @owned AnyObject) { +sil private [ossa] @testLoadNontrivial : $@convention(thin) (@inout_aliasable X1) -> (Int, @owned Builtin.NativeObject, @owned Builtin.NativeObject) { bb0(%0 : $*X1): %access = begin_access [read] [unknown] %0 : $*X1 %load = load [copy] %access : $*X1 @@ -126,44 +141,77 @@ bb0(%0 : $*X1): %borrow1 = begin_borrow %load : $X1 %o1 = struct_extract %borrow1 : $X1, #X1.obj1 - %copy1 = copy_value %o1 : $AnyObject + %copy1 = copy_value %o1 : $Builtin.NativeObject end_borrow %borrow1 : $X1 %borrow2 = begin_borrow %load : $X1 %o2 = struct_extract %borrow2 : $X1, #X1.obj2 - %copy2 = copy_value %o2 : $AnyObject + %copy2 = 
copy_value %o2 : $Builtin.NativeObject end_borrow %borrow2 : $X1 destroy_value %load : $X1 - %result = tuple (%a : $Int, %copy1 : $AnyObject, %copy2 : $AnyObject) - return %result : $(Int, AnyObject, AnyObject) + %result = tuple (%a : $Int, %copy1 : $Builtin.NativeObject, %copy2 : $Builtin.NativeObject) + return %result : $(Int, Builtin.NativeObject, Builtin.NativeObject) } +// CHECK-LABEL: sil private [ossa] @testLoadBorrowNontrivial : $@convention(thin) (@in_guaranteed X1) -> (Int, @owned Builtin.NativeObject, @owned Builtin.NativeObject) { +// CHECK: bb0([[ADDRESS:%.*]] : $*X1): +// CHECK: [[AA:%.*]] = struct_element_addr [[ADDRESS]] : $*X1, #X1.a +// CHECK: load [trivial] [[AA]] : $*Int +// CHECK: [[OA1:%.*]] = struct_element_addr [[ADDRESS]] : $*X1, #X1.obj1 +// CHECK: [[OV1:%.*]] = load_borrow [[OA1]] : $*Builtin.NativeObject +// CHECK: [[OA2:%.*]] = struct_element_addr [[ADDRESS]] : $*X1, #X1.obj2 +// CHECK: [[OV2:%.*]] = load_borrow [[OA2]] : $*Builtin.NativeObject +// CHECK: copy_value [[OV1]] : $Builtin.NativeObject +// CHECK: copy_value [[OV2]] : $Builtin.NativeObject +// CHECK: end_borrow [[OV1]] +// CHECK: end_borrow [[OV2]] +// CHECK: return +// CHECK-LABEL: } // end sil function 'testLoadBorrowNontrivial' +sil private [ossa] @testLoadBorrowNontrivial : $@convention(thin) (@in_guaranteed X1) -> (Int, @owned Builtin.NativeObject, @owned Builtin.NativeObject) { +bb0(%0 : $*X1): + %load = load_borrow %0 : $*X1 + + %a = struct_extract %load : $X1, #X1.a + + %o1 = struct_extract %load : $X1, #X1.obj1 + %copy1 = copy_value %o1 : $Builtin.NativeObject + + %o2 = struct_extract %load : $X1, #X1.obj2 + %copy2 = copy_value %o2 : $Builtin.NativeObject + + end_borrow %load : $X1 + + %result = tuple (%a : $Int, %copy1 : $Builtin.NativeObject, %copy2 : $Builtin.NativeObject) + return %result : $(Int, Builtin.NativeObject, Builtin.NativeObject) +} + + struct X2 { - @_hasStorage @_hasInitialValue var obj: AnyObject { get set } + @_hasStorage @_hasInitialValue var obj: 
Builtin.NativeObject { get set } } struct X3 { @_hasStorage @_hasInitialValue var x2: X2 { get set } } -// CHECK-LABEL: sil private [ossa] @testStoreNontrivial : $@convention(thin) (@inout X3, @guaranteed AnyObject) -> () { -// CHECK-LABEL: bb0(%0 : $*X3, %1 : @guaranteed $AnyObject): -// CHECK: [[CP:%.*]] = copy_value %1 : $AnyObject +// CHECK-LABEL: sil private [ossa] @testStoreNontrivial : $@convention(thin) (@inout X3, @guaranteed Builtin.NativeObject) -> () { +// CHECK: bb0(%0 : $*X3, %1 : @guaranteed $Builtin.NativeObject): +// CHECK: [[CP:%.*]] = copy_value %1 : $Builtin.NativeObject // CHECK: [[ACCESS:%.*]] = begin_access [modify] [unknown] %0 : $*X3 -// CHECK: [[X2:%.*]] = struct $X2 ([[CP]] : $AnyObject) +// CHECK: [[X2:%.*]] = struct $X2 ([[CP]] : $Builtin.NativeObject) // CHECK: [[X3:%.*]] = struct $X3 ([[X2]] : $X2) // CHECK: store [[X3]] to [assign] [[ACCESS]] : $*X3 // CHECK: end_access [[ACCESS]] : $*X3 -// CHECK-LABEL: } // end sil function 'testStoreNontrivial' -sil private [ossa] @testStoreNontrivial : $@convention(thin) (@inout X3, @guaranteed AnyObject) -> () { -bb0(%0 : $*X3, %1 : @guaranteed $AnyObject): - %4 = copy_value %1 : $AnyObject +// CHECK: } // end sil function 'testStoreNontrivial' +sil private [ossa] @testStoreNontrivial : $@convention(thin) (@inout X3, @guaranteed Builtin.NativeObject) -> () { +bb0(%0 : $*X3, %1 : @guaranteed $Builtin.NativeObject): + %4 = copy_value %1 : $Builtin.NativeObject %5 = begin_access [modify] [unknown] %0 : $*X3 %6 = struct_element_addr %5 : $*X3, #X3.x2 %7 = struct_element_addr %6 : $*X2, #X2.obj - store %4 to [assign] %7 : $*AnyObject + store %4 to [assign] %7 : $*Builtin.NativeObject end_access %5 : $*X3 %12 = tuple () return %12 : $() diff --git a/test/SILOptimizer/specialization_and_resilience.swift b/test/SILOptimizer/specialization_and_resilience.swift index 21949b5b2d9da..763f846de172e 100644 --- a/test/SILOptimizer/specialization_and_resilience.swift +++ 
b/test/SILOptimizer/specialization_and_resilience.swift @@ -1,19 +1,19 @@ -// RUN: %target-swift-frontend -parse-as-library -O -module-name=test %s -enable-library-evolution -emit-sil | %FileCheck %s +// RUN: %empty-directory(%t) +// RUN: %target-build-swift -wmo -O -enable-library-evolution %S/Inputs/specialization_and_resilience_module.swift -DMODULE -parse-as-library -emit-module -emit-module-path=%t/Test.swiftmodule -module-name=Test -c -o %t/module.o +// RUN: %target-build-swift -wmo -O %s -I%t -module-name=Main -c -o %t/main.o +// RUN: %target-build-swift %t/main.o %t/module.o -o %t/a.out +// RUN: %target-codesign %t/a.out +// RUN: %target-run %t/a.out | %FileCheck %s -public enum En { - case A - case B -} +// REQUIRES: executable_test -@inlinable -@inline(never) -func genfunc(_ t: T) -> T { - return t -} +import Test + +// CHECK: Mystruct(x: 100) +testParam(Mystruct(100)) +// CHECK: Mystruct(x: 101) +print(testReturn([Mystruct(101)])) +// CHECK: Mystruct(x: 27) +// CHECK: Mystruct(x: 28) +otherFunc() -// CHECK-LABEL: sil @$s4test11callGenFuncyyF : $@convention(thin) () -> () { -// CHECK: = function_ref @$s4test7genfuncyxxlFAA2EnO_Tg5 : $@convention(thin) (En) -> @out En -// CHECK: } // end sil function '$s4test11callGenFuncyyF' -public func callGenFunc() { - _ = genfunc(En.A) -} diff --git a/test/SILOptimizer/specialize_default_witness_ossa.sil b/test/SILOptimizer/specialize_default_witness_ossa.sil index beacfa832a0d8..17c75f7db99e7 100644 --- a/test/SILOptimizer/specialize_default_witness_ossa.sil +++ b/test/SILOptimizer/specialize_default_witness_ossa.sil @@ -1,4 +1,4 @@ -// RUN: %target-sil-opt -enable-sil-verify-all -sil-generic-specializer-enable-ownership -generic-specializer %s | %FileCheck %s +// RUN: %target-sil-opt -enable-sil-verify-all -generic-specializer %s | %FileCheck %s sil_stage canonical diff --git a/test/SILOptimizer/specialize_default_witness_resilience.sil b/test/SILOptimizer/specialize_default_witness_resilience.sil index 
e99cc3639da03..645270e8a29bd 100644 --- a/test/SILOptimizer/specialize_default_witness_resilience.sil +++ b/test/SILOptimizer/specialize_default_witness_resilience.sil @@ -15,11 +15,11 @@ public struct ConformingStruct : ResilientProtocol { public func defaultB() } -// CHECK-LABEL: sil shared @$s8defaultA4main16ConformingStructV_Tg5 +// CHECK-LABEL: sil shared @$s8defaultA4main16ConformingStructV_TB5 // CHECK: bb0(%0 : $ConformingStruct): // CHECK-NEXT: [[TMP:%.*]] = alloc_stack $ConformingStruct // CHECK-NEXT: store %0 to [[TMP]] : $*ConformingStruct -// CHECK: [[FN:%.*]] = function_ref @$s8defaultB4main16ConformingStructV_Tg5 +// CHECK: [[FN:%.*]] = function_ref @$s8defaultB4main16ConformingStructV_TB5 // CHECK-NEXT: [[LOAD:%.*]] = load [[TMP]] : $*ConformingStruct // CHECK-NEXT: [[RESULT:%.*]] = apply [[FN]]([[LOAD]]) // CHECK-NEXT: dealloc_stack [[TMP]] : $*ConformingStruct @@ -32,7 +32,7 @@ bb0(%0 : $*Self): return %result : $() } -// CHECK-LABEL: sil shared @$s8defaultB4main16ConformingStructV_Tg5 +// CHECK-LABEL: sil shared @$s8defaultB4main16ConformingStructV_TB5 // CHECK: bb0(%0 : $ConformingStruct): // CHECK-NEXT: [[TMP:%.*]] = alloc_stack $ConformingStruct // CHECK-NEXT: store %0 to [[TMP]] : $*ConformingStruct @@ -48,7 +48,7 @@ bb0(%0 : $*Self): // CHECK-LABEL: sil hidden @test_specialize_default_witness_method // CHECK: bb0(%0 : $*ConformingStruct): -// CHECK: [[FN:%.*]] = function_ref @$s8defaultA4main16ConformingStructV_Tg5 +// CHECK: [[FN:%.*]] = function_ref @$s8defaultA4main16ConformingStructV_TB5 // CHECK-NEXT: [[VALUE:%.*]] = load %0 : $*ConformingStruct // CHECK-NEXT: [[RESULT:%.*]] = apply [[FN]]([[VALUE]]) // CHECK-NEXT: return [[RESULT]] diff --git a/test/SILOptimizer/specialize_default_witness_resilience_ossa.sil b/test/SILOptimizer/specialize_default_witness_resilience_ossa.sil index 2de2e23b74d55..81f71d17613a8 100644 --- a/test/SILOptimizer/specialize_default_witness_resilience_ossa.sil +++ 
b/test/SILOptimizer/specialize_default_witness_resilience_ossa.sil @@ -1,4 +1,4 @@ -// RUN: %target-sil-opt -enable-library-evolution -enable-sil-verify-all -generic-specializer -sil-generic-specializer-enable-ownership %s | %FileCheck %s +// RUN: %target-sil-opt -enable-library-evolution -enable-sil-verify-all -generic-specializer %s | %FileCheck %s sil_stage canonical @@ -25,25 +25,25 @@ public struct ConformingNonTrivialStruct : ResilientProtocol { public func defaultB() } -// CHECK-LABEL: sil shared [ossa] @$s8defaultA4main16ConformingStructV_Tg5 +// CHECK-LABEL: sil shared [ossa] @$s8defaultA4main16ConformingStructV_TB5 // CHECK: bb0(%0 : $ConformingStruct): // CHECK-NEXT: [[TMP:%.*]] = alloc_stack $ConformingStruct // CHECK-NEXT: store %0 to [trivial] [[TMP]] : $*ConformingStruct -// CHECK: [[FN:%.*]] = function_ref @$s8defaultB4main16ConformingStructV_Tg5 +// CHECK: [[FN:%.*]] = function_ref @$s8defaultB4main16ConformingStructV_TB5 // CHECK-NEXT: [[LOAD:%.*]] = load [trivial] [[TMP]] : $*ConformingStruct // CHECK-NEXT: [[RESULT:%.*]] = apply [[FN]]([[LOAD]]) // CHECK-NEXT: dealloc_stack [[TMP]] : $*ConformingStruct -// CHECK } // end sil function 's8defaultA4main16ConformingStructV_Tg5' +// CHECK } // end sil function 's8defaultA4main16ConformingStructV_TB5' -// CHECK-LABEL: sil shared [ossa] @$s8defaultA4main26ConformingNonTrivialStructV_Tg5 +// CHECK-LABEL: sil shared [ossa] @$s8defaultA4main26ConformingNonTrivialStructV_TB5 // CHECK: bb0(%0 : @guaranteed $ConformingNonTrivialStruct): // CHECK-NEXT: [[TMP:%.*]] = alloc_stack $ConformingNonTrivialStruct // CHECK-NEXT: store_borrow %0 to [[TMP]] : $*ConformingNonTrivialStruct -// CHECK: [[FN:%.*]] = function_ref @$s8defaultB4main26ConformingNonTrivialStructV_Tg5 +// CHECK: [[FN:%.*]] = function_ref @$s8defaultB4main26ConformingNonTrivialStructV_TB5 // CHECK-NEXT: [[LOAD:%.*]] = load_borrow [[TMP]] : $*ConformingNonTrivialStruct // CHECK-NEXT: [[RESULT:%.*]] = apply [[FN]]([[LOAD]]) // CHECK: dealloc_stack 
[[TMP]] : $*ConformingNonTrivialStruct -// CHECK } // end sil function 's8defaultA4main16ConformingNonTrivialStructV_Tg5' +// CHECK } // end sil function 's8defaultA4main16ConformingNonTrivialStructV_TB5' sil [ossa] @defaultA : $@convention(witness_method: ResilientProtocol) (@in_guaranteed Self) -> () { bb0(%0 : $*Self): @@ -52,19 +52,19 @@ bb0(%0 : $*Self): return %result : $() } -// CHECK-LABEL: sil shared [ossa] @$s8defaultB4main16ConformingStructV_Tg5 : +// CHECK-LABEL: sil shared [ossa] @$s8defaultB4main16ConformingStructV_TB5 : // CHECK: bb0(%0 : $ConformingStruct): // CHECK-NEXT: [[TMP:%.*]] = alloc_stack $ConformingStruct // CHECK-NEXT: store %0 to [trivial] [[TMP]] : $*ConformingStruct // CHECK: dealloc_stack [[TMP]] : $*ConformingStruct -// CHECK: } // end sil function '$s8defaultB4main16ConformingStructV_Tg5' +// CHECK: } // end sil function '$s8defaultB4main16ConformingStructV_TB5' -// CHECK-LABEL: sil shared [ossa] @$s8defaultB4main26ConformingNonTrivialStructV_Tg5 : +// CHECK-LABEL: sil shared [ossa] @$s8defaultB4main26ConformingNonTrivialStructV_TB5 : // CHECK: bb0(%0 : @guaranteed $ConformingNonTrivialStruct): // CHECK-NEXT: [[TMP:%.*]] = alloc_stack $ConformingNonTrivialStruct // CHECK-NEXT: store_borrow %0 to [[TMP]] : $*ConformingNonTrivialStruct // CHECK: dealloc_stack [[TMP]] : $*ConformingNonTrivialStruct -// CHECK: } // end sil function '$s8defaultB4main26ConformingNonTrivialStructV_Tg5' +// CHECK: } // end sil function '$s8defaultB4main26ConformingNonTrivialStructV_TB5' sil [ossa] @defaultB : $@convention(witness_method: ResilientProtocol) (@in_guaranteed Self) -> () { bb0(%0 : $*Self): @@ -74,7 +74,7 @@ bb0(%0 : $*Self): // CHECK-LABEL: sil hidden [ossa] @test_specialize_default_witness_method // CHECK: bb0(%0 : $*ConformingStruct): -// CHECK: [[FN:%.*]] = function_ref @$s8defaultA4main16ConformingStructV_Tg5 +// CHECK: [[FN:%.*]] = function_ref @$s8defaultA4main16ConformingStructV_TB5 // CHECK-NEXT: [[VALUE:%.*]] = load [trivial] %0 : 
$*ConformingStruct // CHECK-NEXT: [[RESULT:%.*]] = apply [[FN]]([[VALUE]]) // CHECK-NEXT: return [[RESULT]] @@ -88,7 +88,7 @@ bb0(%0 : $*ConformingStruct): // CHECK-LABEL: sil hidden [ossa] @test_specialize_default_witness_method_nontrivial // CHECK: bb0(%0 : $*ConformingNonTrivialStruct): -// CHECK: [[FN:%.*]] = function_ref @$s8defaultA4main26ConformingNonTrivialStructV_Tg5 +// CHECK: [[FN:%.*]] = function_ref @$s8defaultA4main26ConformingNonTrivialStructV_TB5 // CHECK-NEXT: [[VALUE:%.*]] = load_borrow %0 : $*ConformingNonTrivialStruct // CHECK-NEXT: [[RESULT:%.*]] = apply [[FN]]([[VALUE]]) // CHECK: } // end sil function 'test_specialize_default_witness_method_nontrivial' diff --git a/test/SILOptimizer/specialize_inherited_ossa.sil b/test/SILOptimizer/specialize_inherited_ossa.sil index feafbd1446590..1b98ad4a03779 100644 --- a/test/SILOptimizer/specialize_inherited_ossa.sil +++ b/test/SILOptimizer/specialize_inherited_ossa.sil @@ -1,4 +1,4 @@ -// RUN: %target-sil-opt -enable-sil-verify-all -generic-specializer -module-name inherit -sil-generic-specializer-enable-ownership %s | %FileCheck %s +// RUN: %target-sil-opt -enable-sil-verify-all -generic-specializer -module-name inherit %s | %FileCheck %s import Builtin import Swift diff --git a/test/SILOptimizer/specialize_metatypes_with_nondefault_representation_ossa.sil b/test/SILOptimizer/specialize_metatypes_with_nondefault_representation_ossa.sil new file mode 100644 index 0000000000000..ac662bc2918b1 --- /dev/null +++ b/test/SILOptimizer/specialize_metatypes_with_nondefault_representation_ossa.sil @@ -0,0 +1,61 @@ +// RUN: %target-sil-opt -enable-sil-verify-all %s -generic-specializer | %FileCheck %s + +// REQUIRES: objc_interop + +// This test checks that we properly distinguish in between the specialized +// functions for the @thick, @thin, and @objc_metatype metatypes. 
+// +// This can occur if we do not properly mangle in the metatype representation +// into the name of functions and thus reuse the incorrect already specialized +// method instead of the new specialized method. + +sil_stage canonical + +import Builtin + +protocol AnyObject {} + +sil [noinline] @tmp : $@convention(thin) () -> (@out T) { +bb0(%0 : $*T): + %1 = tuple() + return %1 : $() +} + +// CHECK-LABEL: sil [ossa] @tmp2 : $@convention(thin) () -> () { +// CHECK: [[FUN1:%[0-9]+]] = function_ref @$s3tmp4main9AnyObject_pXmT_Tg5 : $@convention(thin) () -> @thick AnyObject.Type +// CHECK-NEXT: apply [[FUN1]] +// CHECK: [[FUN2:%[0-9]+]] = function_ref @$s3tmp4main9AnyObject_pXmo_Tg5 : $@convention(thin) () -> @objc_metatype AnyObject.Type +// CHECK-NEXT: apply [[FUN2]] +// CHECK: [[FUN3:%[0-9]+]] = function_ref @$s3tmpBi32_XMT_Tg5 : $@convention(thin) () -> @thick Builtin.Int32.Type +// CHECK-NEXT: apply [[FUN3]] +// CHECK: [[FUN4:%[0-9]+]] = function_ref @$s3tmpBi32_XMo_Tg5 : $@convention(thin) () -> @objc_metatype Builtin.Int32.Type +// CHECK-NEXT: apply [[FUN4]] +// CHECK: [[FUN5:%[0-9]+]] = function_ref @$s3tmpBi32_XMt_Tg5 : $@convention(thin) () -> @thin Builtin.Int32.Type +// CHECK-NEXT: apply [[FUN5]] +sil [ossa] @tmp2 : $@convention(thin) () -> () { +bb0: + %0 = function_ref @tmp : $@convention(thin) () -> (@out T) + %1 = alloc_box $<τ_0_0> { var τ_0_0 } <@thick AnyObject.Type> + %1a = project_box %1 : $<τ_0_0> { var τ_0_0 } <@thick AnyObject.Type>, 0 + %2 = alloc_box $<τ_0_0> { var τ_0_0 } <@objc_metatype AnyObject.Type> + %2a = project_box %2 : $<τ_0_0> { var τ_0_0 } <@objc_metatype AnyObject.Type>, 0 + %4 = apply %0<@thick AnyObject.Type>(%1a) : $@convention(thin) () -> (@out T) + %5 = apply %0<@objc_metatype AnyObject.Type>(%2a) : $@convention(thin) () -> (@out T) + + %6 = alloc_box $<τ_0_0> { var τ_0_0 } <@thick Builtin.Int32.Type> + %6a = project_box %6 : $<τ_0_0> { var τ_0_0 } <@thick Builtin.Int32.Type>, 0 + %7 = alloc_box $<τ_0_0> { var τ_0_0 } 
<@objc_metatype Builtin.Int32.Type> + %7a = project_box %7 : $<τ_0_0> { var τ_0_0 } <@objc_metatype Builtin.Int32.Type>, 0 + %8 = alloc_box $<τ_0_0> { var τ_0_0 } <@thin Builtin.Int32.Type> + %8a = project_box %8 : $<τ_0_0> { var τ_0_0 } <@thin Builtin.Int32.Type>, 0 + %9 = apply %0<@thick Builtin.Int32.Type>(%6a) : $@convention(thin) () -> (@out T) + %10 = apply %0<@objc_metatype Builtin.Int32.Type>(%7a) : $@convention(thin) () -> (@out T) + %11 = apply %0<@thin Builtin.Int32.Type>(%8a) : $@convention(thin) () -> (@out T) + destroy_value %8 : $<τ_0_0> { var τ_0_0 } <@thin Builtin.Int32.Type> + destroy_value %7 : $<τ_0_0> { var τ_0_0 } <@objc_metatype Builtin.Int32.Type> + destroy_value %6 : $<τ_0_0> { var τ_0_0 } <@thick Builtin.Int32.Type> + destroy_value %2 : $<τ_0_0> { var τ_0_0 } <@objc_metatype AnyObject.Type> + destroy_value %1 : $<τ_0_0> { var τ_0_0 } <@thick AnyObject.Type> + %9999 = tuple() + return %9999 : $() +} diff --git a/test/SILOptimizer/specialize_no_definition_ossa.sil b/test/SILOptimizer/specialize_no_definition_ossa.sil new file mode 100644 index 0000000000000..75cc7647163b9 --- /dev/null +++ b/test/SILOptimizer/specialize_no_definition_ossa.sil @@ -0,0 +1,39 @@ +// RUN: %target-sil-opt -enable-sil-verify-all -generic-specializer -save-optimization-record-path=%t.yaml -o /dev/null %/s +// RUN: %FileCheck %s < %t.yaml + +import Builtin +import Swift + +sil_stage canonical + +// CHECK: --- !Missed +// CHECK-NEXT: Pass: sil-generic-specializer +// CHECK-NEXT: Name: sil.NoDef +// CHECK-NEXT: DebugLoc: +// CHECK: File: {{.*}}/specialize_no_definition_ossa.sil +// CHECK: Line: 33 +// CHECK: Column: 8 +// CHECK-NEXT: Function: foo +// CHECK-NEXT: Args: +// CHECK-NEXT: - String: 'Unable to specialize generic function ' +// CHECK-NEXT: - Callee: '"bar"' +// CHECK-NEXT: DebugLoc: +// CHECK: File: {{.*}}/specialize_no_definition_ossa.sil +// CHECK: Line: 32 +// CHECK: Column: 21 +// CHECK-NEXT: - String: ' since definition is not visible' +// CHECK-NEXT: 
... +sil hidden [ossa] @foo : $@convention(thin) () -> () { +bb0: + %0 = integer_literal $Builtin.Int64, 2 + %1 = struct $Int64 (%0 : $Builtin.Int64) + %2 = alloc_stack $Int64 + store %1 to [trivial] %2 : $*Int64 + %4 = function_ref @bar : $@convention(thin) <τ_0_0> (@in τ_0_0) -> () + %5 = apply %4(%2) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> () + dealloc_stack %2 : $*Int64 + %7 = tuple () + return %7 : $() +} + +sil hidden_external @bar : $@convention(thin) <τ_0_0> (@in τ_0_0) -> () diff --git a/test/SILOptimizer/specialize_opaque_ossa.sil b/test/SILOptimizer/specialize_opaque_ossa.sil index b7eab611c04cd..447e5e1b576b5 100644 --- a/test/SILOptimizer/specialize_opaque_ossa.sil +++ b/test/SILOptimizer/specialize_opaque_ossa.sil @@ -1,4 +1,4 @@ -// RUN: %target-sil-opt -enable-sil-opaque-values -enable-sil-verify-all -generic-specializer -sil-generic-specializer-enable-ownership %s | %FileCheck %s +// RUN: %target-sil-opt -enable-sil-opaque-values -enable-sil-verify-all -generic-specializer %s | %FileCheck %s sil_stage canonical diff --git a/test/SILOptimizer/specialize_opaque_result_types_ossa.sil b/test/SILOptimizer/specialize_opaque_result_types_ossa.sil index 1ead1ab5274b6..538bbd635f780 100644 --- a/test/SILOptimizer/specialize_opaque_result_types_ossa.sil +++ b/test/SILOptimizer/specialize_opaque_result_types_ossa.sil @@ -1,6 +1,6 @@ // RUN: %empty-directory(%t) // RUN: %target-swift-frontend -disable-availability-checking %S/Inputs/opaque_result_types.swift -module-name External -emit-module -emit-module-path %t/External.swiftmodule -// RUN: %target-sil-opt -I %t -enable-sil-verify-all %s -generic-specializer -sil-generic-specializer-enable-ownership | %FileCheck %s +// RUN: %target-sil-opt -I %t -enable-sil-verify-all %s -generic-specializer | %FileCheck %s // REQUIRES: CPU=x86_64 diff --git a/test/SILOptimizer/specialize_ossa.sil b/test/SILOptimizer/specialize_ossa.sil index 45714dcc4f9f5..98f47589b2d5e 100644 --- a/test/SILOptimizer/specialize_ossa.sil 
+++ b/test/SILOptimizer/specialize_ossa.sil @@ -1,4 +1,4 @@ -// RUN: %target-sil-opt -enable-sil-verify-all -sil-partial-specialization -generic-specializer -sil-generic-specializer-enable-ownership %s | %FileCheck %s +// RUN: %target-sil-opt -enable-sil-verify-all -sil-partial-specialization -generic-specializer %s | %FileCheck %s sil_stage canonical diff --git a/test/SILOptimizer/specialize_reabstraction_ossa.sil b/test/SILOptimizer/specialize_reabstraction_ossa.sil index 916bc1797e5e0..f769c05049b74 100644 --- a/test/SILOptimizer/specialize_reabstraction_ossa.sil +++ b/test/SILOptimizer/specialize_reabstraction_ossa.sil @@ -1,4 +1,4 @@ -// RUN: %target-sil-opt -enable-sil-verify-all -generic-specializer -sil-generic-specializer-enable-ownership %s | %FileCheck %s +// RUN: %target-sil-opt -enable-sil-verify-all -generic-specializer %s | %FileCheck %s sil_stage canonical diff --git a/test/SILOptimizer/specialize_recursive_generics_ossa.sil b/test/SILOptimizer/specialize_recursive_generics_ossa.sil index 5bb75added371..e14d3b676fb7a 100644 --- a/test/SILOptimizer/specialize_recursive_generics_ossa.sil +++ b/test/SILOptimizer/specialize_recursive_generics_ossa.sil @@ -1,4 +1,4 @@ -// RUN: %target-sil-opt -enable-sil-verify-all %s -generic-specializer -cse -sil-generic-specializer-enable-ownership | %FileCheck %s +// RUN: %target-sil-opt -enable-sil-verify-all %s -generic-specializer -cse | %FileCheck %s // Check that SIL cloner can correctly handle specialization of recursive // functions with generic arguments. 
diff --git a/test/SILOptimizer/templvalueopt_ossa.sil b/test/SILOptimizer/templvalueopt_ossa.sil index e37a777091b80..23b0673650782 100644 --- a/test/SILOptimizer/templvalueopt_ossa.sil +++ b/test/SILOptimizer/templvalueopt_ossa.sil @@ -249,3 +249,36 @@ bb0(%0 : $*T, %1 : $*T): return %78 : $() } +class Child { } + +struct Parent { + var c : Child +} + +sil @gen_child : $@convention(thin) () -> @out Child + +// Check that alias analysis is invalidated correctly and that the pass does +// not produce invalid SIL (which would trigger a MemoryLifetime failure). +// +// CHECK-LABEL: sil [ossa] @invalidateAliasAnalysis : +// CHECK: copy_addr +// CHECK: } // end sil function 'invalidateAliasAnalysis' +sil [ossa] @invalidateAliasAnalysis : $@convention(thin) (@owned Child) -> () { +bb0(%0 :@owned $Child): + %2 = alloc_stack $Parent + %4 = alloc_stack $Child + store %0 to [init] %4 : $*Child + %7 = alloc_stack $Child + %func = function_ref @gen_child : $@convention(thin) () -> @out Child + %10 = apply %func(%7) : $@convention(thin)() -> @out Child + %11 = struct_element_addr %2 : $*Parent, #Parent.c + copy_addr [take] %7 to [initialization] %11 : $*Child + dealloc_stack %7 : $*Child + copy_addr [take] %4 to %11 : $*Child + %17 = tuple () + dealloc_stack %4 : $*Child + destroy_addr %2 : $*Parent + dealloc_stack %2 : $*Parent + %res = tuple () + return %res : $() +} diff --git a/test/SPI/client_use_multiple_module_specialize.swift b/test/SPI/client_use_multiple_module_specialize.swift index aea12a06527ba..be284d9c0d897 100644 --- a/test/SPI/client_use_multiple_module_specialize.swift +++ b/test/SPI/client_use_multiple_module_specialize.swift @@ -1,22 +1,22 @@ // RUN: %empty-directory(%t) -// RUN: %target-swift-frontend -emit-module -DLIB_A %s -module-name A -emit-module-path %t/A.swiftmodule -// RUN: %target-swift-frontend -emit-module -DLIB_B %s -module-name B -emit-module-path %t/B.swiftmodule -I %t -// RUN: %target-swift-frontend -module-name C -emit-sil -O -DLIB_C %s -I 
%t | %FileCheck %s -// RUN: %target-swift-frontend -module-name C -emit-sil -O -DLIB_C_NO_SPI %s -I %t | %FileCheck %s --check-prefix=NOSPI +// RUN: %target-swift-frontend -enable-experimental-prespecialization -emit-module -DLIB_A %s -module-name A -emit-module-path %t/A.swiftmodule +// RUN: %target-swift-frontend -enable-experimental-prespecialization -emit-module -DLIB_B %s -module-name B -emit-module-path %t/B.swiftmodule -I %t +// RUN: %target-swift-frontend -enable-experimental-prespecialization -module-name C -emit-sil -O -DLIB_C %s -I %t | %FileCheck %s +// RUN: %target-swift-frontend -enable-experimental-prespecialization -module-name C -emit-sil -O -DLIB_C_NO_SPI %s -I %t | %FileCheck %s --check-prefix=NOSPI // Test using the public swiftinterface // RUN: %empty-directory(%t) -// RUN: %target-swift-frontend -swift-version 5 -enable-library-evolution -emit-module -DLIB_A %s -module-name A -emit-module-path %t/A.swiftmodule -emit-module-interface-path %t/A.swiftinterface -// RUN: %target-swift-frontend -swift-version 5 -enable-library-evolution -emit-module -DLIB_B %s -module-name B -emit-module-path %t/B.swiftmodule -I %t -emit-module-interface-path %t/B.swiftinterface +// RUN: %target-swift-frontend -enable-experimental-prespecialization -swift-version 5 -enable-library-evolution -emit-module -DLIB_A %s -module-name A -emit-module-path %t/A.swiftmodule -emit-module-interface-path %t/A.swiftinterface +// RUN: %target-swift-frontend -enable-experimental-prespecialization -swift-version 5 -enable-library-evolution -emit-module -DLIB_B %s -module-name B -emit-module-path %t/B.swiftmodule -I %t -emit-module-interface-path %t/B.swiftinterface // RUN: rm %t/A.swiftmodule %t/B.swiftmodule -// RUN: %target-swift-frontend -module-name C -emit-sil -O -DLIB_C %s -I %t | %FileCheck %s --check-prefix=PUBLIC +// RUN: %target-swift-frontend -enable-experimental-prespecialization -module-name C -emit-sil -O -DLIB_C %s -I %t | %FileCheck %s --check-prefix=PUBLIC // Test 
using the private swiftinterface // RUN: %empty-directory(%t) -// RUN: %target-swift-frontend -swift-version 5 -enable-library-evolution -emit-module -DLIB_A %s -module-name A -emit-module-path %t/A.swiftmodule -emit-module-interface-path %t/A.swiftinterface -emit-private-module-interface-path %t/A.private.swiftinterface -// RUN: %target-swift-frontend -swift-version 5 -enable-library-evolution -emit-module -DLIB_B %s -module-name B -emit-module-path %t/B.swiftmodule -I %t -emit-module-interface-path %t/B.swiftinterface -emit-private-module-interface-path %t/B.private.swiftinterface +// RUN: %target-swift-frontend -enable-experimental-prespecialization -swift-version 5 -enable-library-evolution -emit-module -DLIB_A %s -module-name A -emit-module-path %t/A.swiftmodule -emit-module-interface-path %t/A.swiftinterface -emit-private-module-interface-path %t/A.private.swiftinterface +// RUN: %target-swift-frontend -enable-experimental-prespecialization -swift-version 5 -enable-library-evolution -emit-module -DLIB_B %s -module-name B -emit-module-path %t/B.swiftmodule -I %t -emit-module-interface-path %t/B.swiftinterface -emit-private-module-interface-path %t/B.private.swiftinterface // RUN: rm %t/A.swiftmodule %t/B.swiftmodule -// RUN: %target-swift-frontend -module-name C -emit-sil -O -DLIB_C %s -I %t | %FileCheck %s +// RUN: %target-swift-frontend -enable-experimental-prespecialization -module-name C -emit-sil -O -DLIB_C %s -I %t | %FileCheck %s #if LIB_A diff --git a/test/SPI/experimental_spi_imports_type_check.swift b/test/SPI/experimental_spi_imports_type_check.swift index be728e5c3aa27..577264b08caa7 100644 --- a/test/SPI/experimental_spi_imports_type_check.swift +++ b/test/SPI/experimental_spi_imports_type_check.swift @@ -16,11 +16,6 @@ // RUN: %target-typecheck-verify-swift -DCLIENT -I %t // RUN: %target-swift-frontend -typecheck %s -DCLIENT -DCLIENT_LOAD_CORE -I %t -/// Test with the public swiftinterface file, the SPI is unknown. 
-// RUN: rm %t/LibPublic.private.swiftinterface -// RUN: %target-typecheck-verify-swift -DCLIENT -I %t -// RUN: %target-typecheck-verify-swift -DCLIENT -DCLIENT_LOAD_CORE -I %t - #if LIB_CORE public struct CoreStruct { diff --git a/test/SPI/private_swiftinterface.swift b/test/SPI/private_swiftinterface.swift index 84322a572dd3f..2987873b94a18 100644 --- a/test/SPI/private_swiftinterface.swift +++ b/test/SPI/private_swiftinterface.swift @@ -2,8 +2,8 @@ /// one doesn't leak SPI decls and info. // RUN: %empty-directory(%t) -// RUN: %target-swift-frontend -emit-module %S/Inputs/spi_helper.swift -module-name SPIHelper -emit-module-path %t/SPIHelper.swiftmodule -swift-version 5 -enable-library-evolution -emit-module-interface-path %t/SPIHelper.swiftinterface -emit-private-module-interface-path %t/SPIHelper.private.swiftinterface -// RUN: %target-swift-frontend -emit-module %S/Inputs/ioi_helper.swift -module-name IOIHelper -emit-module-path %t/IOIHelper.swiftmodule -swift-version 5 -enable-library-evolution -emit-module-interface-path %t/IOIHelper.swiftinterface -emit-private-module-interface-path %t/IOIHelper.private.swiftinterface +// RUN: %target-swift-frontend -enable-experimental-prespecialization -emit-module %S/Inputs/spi_helper.swift -module-name SPIHelper -emit-module-path %t/SPIHelper.swiftmodule -swift-version 5 -enable-library-evolution -emit-module-interface-path %t/SPIHelper.swiftinterface -emit-private-module-interface-path %t/SPIHelper.private.swiftinterface +// RUN: %target-swift-frontend -enable-experimental-prespecialization -emit-module %S/Inputs/ioi_helper.swift -module-name IOIHelper -emit-module-path %t/IOIHelper.swiftmodule -swift-version 5 -enable-library-evolution -emit-module-interface-path %t/IOIHelper.swiftinterface -emit-private-module-interface-path %t/IOIHelper.private.swiftinterface /// Make sure that the public swiftinterface of spi_helper doesn't leak SPI. 
// RUN: %FileCheck -check-prefix=CHECK-HELPER %s < %t/SPIHelper.swiftinterface diff --git a/test/SPI/public_client.swift b/test/SPI/public_client.swift index cd05fcce02512..825c81fffae64 100644 --- a/test/SPI/public_client.swift +++ b/test/SPI/public_client.swift @@ -3,14 +3,14 @@ // RUN: %empty-directory(%t) /// Compile the SPI lib -// RUN: %target-swift-frontend -emit-module %S/Inputs/spi_helper.swift -module-name SPIHelper -emit-module-path %t/SPIHelper.swiftmodule -emit-module-interface-path %t/SPIHelper.swiftinterface -emit-private-module-interface-path %t/SPIHelper.private.swiftinterface -enable-library-evolution -swift-version 5 -parse-as-library +// RUN: %target-swift-frontend -enable-experimental-prespecialization -emit-module %S/Inputs/spi_helper.swift -module-name SPIHelper -emit-module-path %t/SPIHelper.swiftmodule -emit-module-interface-path %t/SPIHelper.swiftinterface -emit-private-module-interface-path %t/SPIHelper.private.swiftinterface -enable-library-evolution -swift-version 5 -parse-as-library /// Reading from swiftmodule -// RUN: %target-typecheck-verify-swift -I %t -verify-ignore-unknown +// RUN: %target-typecheck-verify-swift -enable-experimental-prespecialization -I %t -verify-ignore-unknown /// Reading from .private.swiftinterface // RUN: rm %t/SPIHelper.swiftmodule -// RUN: %target-typecheck-verify-swift -I %t -verify-ignore-unknown +// RUN: %target-typecheck-verify-swift -enable-experimental-prespecialization -I %t -verify-ignore-unknown /// Reading from .swiftinterface should still produce the same failures // RUN: rm %t/SPIHelper.private.swiftinterface diff --git a/test/SPI/run_spi_client.swift b/test/SPI/run_spi_client.swift index 91f61f890b19d..fddf8c33ff1da 100644 --- a/test/SPI/run_spi_client.swift +++ b/test/SPI/run_spi_client.swift @@ -3,7 +3,7 @@ // RUN: %empty-directory(%t) /// Compile the lib with SPI decls -// RUN: %target-build-swift-dylib(%t/%target-library-name(SPIHelper)) %S/Inputs/spi_helper.swift -emit-module 
-emit-module-path %t/SPIHelper.swiftmodule -module-name SPIHelper -enable-library-evolution +// RUN: %target-build-swift-dylib(%t/%target-library-name(SPIHelper)) -Xfrontend -enable-experimental-prespecialization %S/Inputs/spi_helper.swift -emit-module -emit-module-path %t/SPIHelper.swiftmodule -module-name SPIHelper -enable-library-evolution // RUN: %target-codesign %t/%target-library-name(SPIHelper) /// Client with SPI access diff --git a/test/SPI/spi_client.swift b/test/SPI/spi_client.swift index fbaf7fbaca9f3..649898d94d4c9 100644 --- a/test/SPI/spi_client.swift +++ b/test/SPI/spi_client.swift @@ -3,18 +3,18 @@ // RUN: %empty-directory(%t) // /// Compile the SPI lib -// RUN: %target-swift-frontend -emit-module %S/Inputs/spi_helper.swift -module-name SPIHelper -emit-module-path %t/SPIHelper.swiftmodule -emit-module-interface-path %t/SPIHelper.swiftinterface -emit-private-module-interface-path %t/SPIHelper.private.swiftinterface -enable-library-evolution -swift-version 5 -parse-as-library +// RUN: %target-swift-frontend -enable-experimental-prespecialization -emit-module %S/Inputs/spi_helper.swift -module-name SPIHelper -emit-module-path %t/SPIHelper.swiftmodule -emit-module-interface-path %t/SPIHelper.swiftinterface -emit-private-module-interface-path %t/SPIHelper.private.swiftinterface -enable-library-evolution -swift-version 5 -parse-as-library /// Reading from swiftmodule -// RUN: %target-typecheck-verify-swift -I %t -verify-ignore-unknown +// RUN: %target-typecheck-verify-swift -enable-experimental-prespecialization -I %t -verify-ignore-unknown /// Reading from .private.swiftinterface // RUN: rm %t/SPIHelper.swiftmodule -// RUN: %target-typecheck-verify-swift -I %t -verify-ignore-unknown +// RUN: %target-typecheck-verify-swift -enable-experimental-prespecialization -I %t -verify-ignore-unknown //// Reading from .swiftinterface should fail as it won't find the decls // RUN: rm %t/SPIHelper.private.swiftinterface -// RUN: not %target-swift-frontend -typecheck 
-I %t +// RUN: not %target-swift-frontend -enable-experimental-prespecialization -typecheck -I %t @_spi(HelperSPI) import SPIHelper diff --git a/test/SPI/spi_symbols.swift b/test/SPI/spi_symbols.swift index 8e44a754994b9..d4e7d7d6d338b 100644 --- a/test/SPI/spi_symbols.swift +++ b/test/SPI/spi_symbols.swift @@ -1,7 +1,7 @@ // REQUIRES: VENDOR=apple // RUN: %empty-directory(%t) -// RUN: %target-swift-frontend %S/Inputs/spi_helper.swift -emit-ir -o %t/spi_helper.ll -emit-tbd-path %t/spi_helper.tbd -module-name spi_helper +// RUN: %target-swift-frontend -enable-experimental-prespecialization %S/Inputs/spi_helper.swift -emit-ir -o %t/spi_helper.ll -emit-tbd-path %t/spi_helper.tbd -module-name spi_helper // RUN: cat %t/spi_helper.ll | %FileCheck -check-prefix=CHECK-IR %s // RUN: cat %t/spi_helper.tbd | %FileCheck -check-prefix=CHECK-TBD %s diff --git a/test/SPI/warn_on_ineffective_spi_import.swift b/test/SPI/warn_on_ineffective_spi_import.swift new file mode 100644 index 0000000000000..323013f838b78 --- /dev/null +++ b/test/SPI/warn_on_ineffective_spi_import.swift @@ -0,0 +1,19 @@ +/// Test the warning on an SPI import of the public interface of a module. + +// RUN: %empty-directory(%t) + +/// Compile the SPI lib. +// RUN: %target-swift-frontend -enable-experimental-prespecialization -emit-module %S/Inputs/spi_helper.swift -module-name SPIHelper -emit-module-path %t/SPIHelper.swiftmodule -emit-module-interface-path %t/SPIHelper.swiftinterface -emit-private-module-interface-path %t/SPIHelper.private.swiftinterface -enable-library-evolution -swift-version 5 -parse-as-library + +/// Reading from swiftmodule, no warning. +// RUN: %target-swift-frontend -typecheck %s -I %t + +/// Reading from .private.swiftinterface, no warning. +// RUN: rm %t/SPIHelper.swiftmodule +// RUN: %target-swift-frontend -typecheck %s -I %t + +/// Reading from the public .swiftinterface should produce the warning. 
+// RUN: rm %t/SPIHelper.private.swiftinterface +// RUN: %target-typecheck-verify-swift -I %t + +@_spi(SPIHelper) import SPIHelper // expected-warning {{'@_spi' import of 'SPIHelper' will not include any SPI symbols; 'SPIHelper' was built from the public interface at}} diff --git a/test/Serialization/Inputs/non-modular-header.h b/test/Serialization/Inputs/non-modular-header.h new file mode 100644 index 0000000000000..e6e1274b245ec --- /dev/null +++ b/test/Serialization/Inputs/non-modular-header.h @@ -0,0 +1,3 @@ +typedef struct { + int x; +} PlaceholderType; diff --git a/test/Serialization/async.swift b/test/Serialization/async.swift index ee56ec7288d35..d0275a936788b 100644 --- a/test/Serialization/async.swift +++ b/test/Serialization/async.swift @@ -9,5 +9,5 @@ import def_async func testDoSomethingBig() { - let _: () -> Int = doSomethingBig // expected-error{{cannot convert value of type '() async -> Int' to specified type '() -> Int'}} + let _: () -> Int = doSomethingBig // expected-error{{invalid conversion from 'async' function of type '() async -> Int' to synchronous function type '() -> Int'}} } diff --git a/test/Serialization/attr-actorindependent.swift b/test/Serialization/attr-actorindependent.swift index a2a7809db1888..f07c51599cff2 100644 --- a/test/Serialization/attr-actorindependent.swift +++ b/test/Serialization/attr-actorindependent.swift @@ -18,7 +18,6 @@ // MODULE-CHECK-NEXT: @actorIndependent var count: Int // MODULE-CHECK-NEXT: var actorCount: Int // MODULE-CHECK-NEXT: @actorIndependent(unsafe) func enqueue(partialTask: PartialAsyncTask) -// MODULE-CHECK-NEXT: var $__actor_storage: _DefaultActorQueue // MODULE-CHECK-NEXT: init() // MODULE-CHECK-NEXT: } diff --git a/test/Serialization/non-modular-clang-type.swift b/test/Serialization/non-modular-clang-type.swift new file mode 100644 index 0000000000000..a56215db7b6b0 --- /dev/null +++ b/test/Serialization/non-modular-clang-type.swift @@ -0,0 +1,18 @@ +// RUN: %empty-directory(%t) +// RUN: 
%target-swift-frontend(mock-sdk: %clang-importer-sdk) %s -enable-testing -module-name NonModularApp -emit-module -o %t/NonModularApp.swiftmodule -import-objc-header %S/Inputs/non-modular-header.h -DNON_MODULAR_APP -use-clang-function-types +// RUN: %target-swift-frontend(mock-sdk: %clang-importer-sdk) %s -typecheck -module-name NonModularAppTest -I %t -DNON_MODULAR_APP_TEST + +// CHECK: Clang function type is not serializable + +#if NON_MODULAR_APP +import ctypes +struct S { + static func f(_ : @convention(c, cType: "void (*)(PlaceholderType, size_t)") (PlaceholderType, Int) -> ()) {} +} +#endif + +#if NON_MODULAR_APP_TEST +@testable import NonModularApp + +S.f({ _, _ in }) +#endif diff --git a/test/SourceKit/CodeComplete/complete_missing_files.swift b/test/SourceKit/CodeComplete/complete_missing_files.swift new file mode 100644 index 0000000000000..18861369c1bc3 --- /dev/null +++ b/test/SourceKit/CodeComplete/complete_missing_files.swift @@ -0,0 +1,3 @@ +// RUN: %sourcekitd-test -req=complete -pos=1:1 %s -- /tmp/SOMETHING_DOES_NOT_EXIST_1.swift %s /tmp/SOMETHING_DOES_NOT_EXIST_2.swift | %FileCheck %s + +// CHECK: results: [ diff --git a/test/Syntax/Outputs/round_trip_parse_gen.swift.withkinds b/test/Syntax/Outputs/round_trip_parse_gen.swift.withkinds index 5bb514fb29d4b..cefe835634683 100644 --- a/test/Syntax/Outputs/round_trip_parse_gen.swift.withkinds +++ b/test/Syntax/Outputs/round_trip_parse_gen.swift.withkinds @@ -1,12 +1,12 @@ // RUN: rm -rf %t -// RUN: %swift-syntax-test -input-source-filename %s -parse-gen > %t +// RUN: %swift-syntax-test -enable-experimental-prespecialization -input-source-filename %s -parse-gen > %t // RUN: diff -u %s %t -// RUN: %swift-syntax-test -input-source-filename %s -parse-gen -print-node-kind > %t.withkinds +// RUN: %swift-syntax-test -enable-experimental-prespecialization -input-source-filename %s -parse-gen -print-node-kind > %t.withkinds // RUN: diff -u %S/Outputs/round_trip_parse_gen.swift.withkinds %t.withkinds -// RUN: 
%swift-syntax-test -input-source-filename %s -eof > %t +// RUN: %swift-syntax-test -enable-experimental-prespecialization -input-source-filename %s -eof > %t // RUN: diff -u %s %t -// RUN: %swift-syntax-test -serialize-raw-tree -input-source-filename %s > %t.dump -// RUN: %swift-syntax-test -deserialize-raw-tree -input-source-filename %t.dump -output-filename %t +// RUN: %swift-syntax-test -enable-experimental-prespecialization -serialize-raw-tree -input-source-filename %s > %t.dump +// RUN: %swift-syntax-test -enable-experimental-prespecialization -deserialize-raw-tree -input-source-filename %t.dump -output-filename %t // RUN: diff -u %s %t import ABC diff --git a/test/Syntax/round_trip_parse_gen.swift b/test/Syntax/round_trip_parse_gen.swift index 66ab03e9195b1..5fbd71ee204db 100644 --- a/test/Syntax/round_trip_parse_gen.swift +++ b/test/Syntax/round_trip_parse_gen.swift @@ -1,12 +1,12 @@ // RUN: rm -rf %t -// RUN: %swift-syntax-test -input-source-filename %s -parse-gen > %t +// RUN: %swift-syntax-test -enable-experimental-prespecialization -input-source-filename %s -parse-gen > %t // RUN: diff -u %s %t -// RUN: %swift-syntax-test -input-source-filename %s -parse-gen -print-node-kind > %t.withkinds +// RUN: %swift-syntax-test -enable-experimental-prespecialization -input-source-filename %s -parse-gen -print-node-kind > %t.withkinds // RUN: diff -u %S/Outputs/round_trip_parse_gen.swift.withkinds %t.withkinds -// RUN: %swift-syntax-test -input-source-filename %s -eof > %t +// RUN: %swift-syntax-test -enable-experimental-prespecialization -input-source-filename %s -eof > %t // RUN: diff -u %s %t -// RUN: %swift-syntax-test -serialize-raw-tree -input-source-filename %s > %t.dump -// RUN: %swift-syntax-test -deserialize-raw-tree -input-source-filename %t.dump -output-filename %t +// RUN: %swift-syntax-test -enable-experimental-prespecialization -serialize-raw-tree -input-source-filename %s > %t.dump +// RUN: %swift-syntax-test -enable-experimental-prespecialization 
-deserialize-raw-tree -input-source-filename %t.dump -output-filename %t // RUN: diff -u %s %t import ABC diff --git a/test/TBD/specialize_verify.swift b/test/TBD/specialize_verify.swift index e7942965a5c73..c7a72a0ee18cb 100644 --- a/test/TBD/specialize_verify.swift +++ b/test/TBD/specialize_verify.swift @@ -1,5 +1,5 @@ // REQUIRES: VENDOR=apple -// RUN: %target-swift-frontend -emit-ir -o/dev/null -O -module-name test -validate-tbd-against-ir=missing %s +// RUN: %target-swift-frontend -enable-experimental-prespecialization -emit-ir -o/dev/null -O -module-name test -validate-tbd-against-ir=missing %s @_specialize(exported: true, where T == Float) public func foo(_ x : T) -> T { diff --git a/test/attr/ApplicationMain/attr_main_throws_prints_error.swift b/test/attr/ApplicationMain/attr_main_throws_prints_error.swift index 2333737782256..0796a14233815 100644 --- a/test/attr/ApplicationMain/attr_main_throws_prints_error.swift +++ b/test/attr/ApplicationMain/attr_main_throws_prints_error.swift @@ -9,7 +9,7 @@ enum Err : Error { case or } -// CHECK: Fatal error: Error raised at top level: main.Err.or: +// CHECK: Fatal error: Error raised at top level: main.Err.or @main struct S { static func main() throws { diff --git a/test/attr/attr_prespecialize_experimental.swift b/test/attr/attr_prespecialize_experimental.swift new file mode 100644 index 0000000000000..bfe9a7e924705 --- /dev/null +++ b/test/attr/attr_prespecialize_experimental.swift @@ -0,0 +1,4 @@ +// RUN: %target-typecheck-verify-swift + +@_specialize(exported: true, where T == Int) // expected-error{{'exported: true' has no effect in '_specialize' attribute}} +public func myGenericFunc(_ t: T) {} diff --git a/test/attr/attr_specialize.swift b/test/attr/attr_specialize.swift index 9c329e603ce5a..2ee6b79f38893 100644 --- a/test/attr/attr_specialize.swift +++ b/test/attr/attr_specialize.swift @@ -1,5 +1,5 @@ -// RUN: %target-typecheck-verify-swift -// RUN: %target-swift-ide-test -print-ast-typechecked 
-source-filename=%s -disable-objc-attr-requires-foundation-module | %FileCheck %s +// RUN: %target-typecheck-verify-swift -enable-experimental-prespecialization +// RUN: %target-swift-ide-test -enable-experimental-prespecialization -print-ast-typechecked -source-filename=%s -disable-objc-attr-requires-foundation-module | %FileCheck %s struct S {} diff --git a/test/cmake/modules/SwiftTestUtils.cmake b/test/cmake/modules/SwiftTestUtils.cmake new file mode 100644 index 0000000000000..8be32b23c84f9 --- /dev/null +++ b/test/cmake/modules/SwiftTestUtils.cmake @@ -0,0 +1,37 @@ +# SwiftTestUtils.cmake +# +# Utility functions for Swift testing targets + +# Get the possible build flavors for testing +function(get_swift_test_build_flavors build_flavors_out_var sdk) + set(build_flavors "default") + if(SWIFT_ENABLE_MACCATALYST AND "${sdk}" STREQUAL "OSX") + list(APPEND build_flavors "ios-like") + endif() + + set(${build_flavors_out_var} ${build_flavors} PARENT_SCOPE) +endfunction() + +# Get the variant suffix for test targets and folders +function(get_swift_test_variant_suffix variant_suffix_out_var sdk arch build_flavor) + if(build_flavor STREQUAL "ios-like") + set(variant_suffix "-${SWIFT_SDK_${sdk}_LIB_SUBDIR}-maccatalyst-${arch}") + else() + set(variant_suffix "-${SWIFT_SDK_${sdk}_LIB_SUBDIR}-${arch}") + endif() + + set(${variant_suffix_out_var} "${variant_suffix}" PARENT_SCOPE) +endfunction() + + +# Get the variant triple for test targets +function(get_swift_test_versioned_target_triple variant_triple_out_var sdk arch build_flavor) + if(build_flavor STREQUAL "ios-like") + # Use the macCatalyst target triple and compiler resources for the iOS-like build flavor. 
+ set(variant_triple "${arch}-apple-ios13.0-macabi") + else() + get_versioned_target_triple(variant_triple ${sdk} ${arch} "${SWIFT_SDK_${sdk}_DEPLOYMENT_VERSION}") + endif() + + set(${variant_triple_out_var} "${variant_triple}" PARENT_SCOPE) +endfunction() diff --git a/test/decl/enum/enumtest.swift b/test/decl/enum/enumtest.swift index 3f5a450adf228..067b80a26ecfa 100644 --- a/test/decl/enum/enumtest.swift +++ b/test/decl/enum/enumtest.swift @@ -510,7 +510,24 @@ let _: GenericEnumWithStaticNone? = .none // expected-warning {{assuming yo // expected-note@-1 {{explicitly specify 'Optional' to silence this warning}}{{42-42=Optional}} // expected-note@-2 {{use 'GenericEnumWithStaticNone.none' instead}}{{42-42=GenericEnumWithStaticNone}} let _: GenericEnumWithStaticNone? = .none // Okay -let _: GenericEnumWithStaticNone? = .none // FIXME(SR-11535): This should be diagnosed + +let _: GenericEnumWithStaticNone? = .none // expected-warning {{assuming you mean 'GenericEnumWithStaticNone.none'; did you mean 'Optional>.none' instead?}} +// expected-note@-1 {{use 'Optional>.none' instead}} {{37-37=Optional>}} +// expected-note@-2 {{use 'GenericEnumWithStaticNone.none' instead}} {{37-37=GenericEnumWithStaticNone}} + +enum GenericStructWithStaticNone { + init() {} + static var none: GenericStructWithStaticNone { GenericStructWithStaticNone() } +} + +let _: GenericStructWithStaticNone? = .none // expected-warning {{assuming you mean 'Optional>.none'; did you mean 'GenericStructWithStaticNone.none' instead?}} +// expected-note@-1 {{explicitly specify 'Optional' to silence this warning}}{{44-44=Optional}} +// expected-note@-2 {{use 'GenericStructWithStaticNone.none' instead}}{{44-44=GenericStructWithStaticNone}} +let _: GenericStructWithStaticNone? = .none // Okay + +let _: GenericStructWithStaticNone? 
= .none // expected-warning {{assuming you mean 'GenericStructWithStaticNone.none'; did you mean 'Optional>.none' instead?}} +// expected-note@-1 {{use 'Optional>.none' instead}} {{39-39=Optional>}} +// expected-note@-2 {{use 'GenericStructWithStaticNone.none' instead}} {{39-39=GenericStructWithStaticNone}} enum GenericEnumWithoutNone { case a diff --git a/test/decl/func/async.swift b/test/decl/func/async.swift index 53a20f0a395a9..4f1b1db55cf12 100644 --- a/test/decl/func/async.swift +++ b/test/decl/func/async.swift @@ -6,6 +6,9 @@ func redecl1() async { } // expected-note{{previously declared here}} func redecl1() async throws { } // expected-error{{invalid redeclaration of 'redecl1()'}} +func redecl2() -> String { "" } // expected-note{{previously declared here}} +func redecl2() async -> String { "" } // expected-error{{invalid redeclaration of 'redecl2()'}} + // Override checking class Super { @@ -30,9 +33,9 @@ struct ConformsToP1: P1 { // expected-error{{type 'ConformsToP1' does not confor } protocol P2 { - func f() async // expected-note{{protocol requires function 'f()' with type '() async -> ()'; do you want to add a stub?}} + func f() async } -struct ConformsToP2: P2 { // expected-error{{type 'ConformsToP2' does not conform to protocol 'P2'}} - func f() { } // expected-note{{candidate is not 'async', but protocol requirement is}} +struct ConformsToP2: P2 { + func f() { } // okay } diff --git a/test/decl/func/dynamic_self.swift b/test/decl/func/dynamic_self.swift index 0203c5f1e0f97..ef2fffc5c49d8 100644 --- a/test/decl/func/dynamic_self.swift +++ b/test/decl/func/dynamic_self.swift @@ -44,7 +44,7 @@ extension P0 { } } -protocol P1: class { +protocol P1: AnyObject { func f() -> Self // okay func g(_ ds: Self) // okay @@ -85,7 +85,8 @@ class C1 { var x: Int = self // expected-error{{cannot convert value of type 'Self.Type' to specified type 'Int'}} // Can't utter Self within the body of a method. 
- var c1 = C1(int: 5) as Self // expected-error{{'C1' is not convertible to 'Self'; did you mean to use 'as!' to force downcast?}} + var c1 = C1(int: 5) as Self // expected-error{{'C1' is not convertible to 'Self'}} + // expected-note@-1{{did you mean to use 'as!' to force downcast?}} {{25-27=as!}} if b { return self.init(int: 5) } diff --git a/test/decl/inherit/inherit_anyobject_protocol.swift b/test/decl/inherit/inherit_anyobject_protocol.swift new file mode 100644 index 0000000000000..33675129c001b --- /dev/null +++ b/test/decl/inherit/inherit_anyobject_protocol.swift @@ -0,0 +1,7 @@ +// RUN: %target-typecheck-verify-swift + +protocol P: class {} +protocol P1: AnyObject {} +protocol P2 {} +protocol P3: class, P2 {} +protocol P4: P2, class {} // expected-error {{'class' must come first in the requirement list}} diff --git a/test/decl/inherit/inherit_anyobject_protocol_swift5.swift b/test/decl/inherit/inherit_anyobject_protocol_swift5.swift new file mode 100644 index 0000000000000..84ef9368880fc --- /dev/null +++ b/test/decl/inherit/inherit_anyobject_protocol_swift5.swift @@ -0,0 +1,10 @@ +// RUN: %target-typecheck-verify-swift -swift-version 5 + +protocol P: class {} +// expected-warning@-1 {{using 'class' keyword for protocol inheritance is deprecated; use 'AnyObject' instead}} {{13-18=AnyObject}} +protocol P1: AnyObject {} +protocol P2 {} +protocol P3: class, P2 {} +// expected-warning@-1 {{using 'class' keyword for protocol inheritance is deprecated; use 'AnyObject' instead}} {{14-19=AnyObject}} +protocol P4: P2, class {} // expected-error {{'class' must come first in the requirement list}} +// expected-warning@-1 {{using 'class' keyword for protocol inheritance is deprecated; use 'AnyObject' instead}} {{18-23=AnyObject}} diff --git a/test/decl/nested/type_in_function.swift b/test/decl/nested/type_in_function.swift index be79256cd8ced..7fa1c325d4b60 100644 --- a/test/decl/nested/type_in_function.swift +++ b/test/decl/nested/type_in_function.swift @@ -98,7 
+98,8 @@ class OuterGenericClass { func f5(x: T, y: U) { struct Local { // expected-error {{type 'Local' cannot be nested in generic function 'f5(x:y:)'}} func f() { - _ = 17 as T // expected-error{{'Int' is not convertible to 'T'}} {{14-16=as!}} + _ = 17 as T // expected-error{{'Int' is not convertible to 'T'}} + // expected-note@-1{{did you mean to use 'as!' to force downcast?}} {{14-16=as!}} _ = 17 as U // okay: refers to 'U' declared within the local class } typealias U = Int diff --git a/test/decl/var/async_let.swift b/test/decl/var/async_let.swift index c0df6346713bf..dfac654771128 100644 --- a/test/decl/var/async_let.swift +++ b/test/decl/var/async_let.swift @@ -2,7 +2,10 @@ // REQUIRES: concurrency -async let x = 1 // okay +func test() async { + async let x = 1 // okay + _ = await x +} struct X { async let x = 1 // expected-error{{'async let' can only be used on local declarations}} diff --git a/test/expr/cast/as_coerce.swift b/test/expr/cast/as_coerce.swift index 165d7b9eebafb..71670135ad949 100644 --- a/test/expr/cast/as_coerce.swift +++ b/test/expr/cast/as_coerce.swift @@ -72,13 +72,15 @@ var c: AnyObject = C3() //if let castX = c as! C4? {} // XXX TODO: Only suggest replacing 'as' with 'as!' if it would fix the error. -C3() as C4 // expected-error {{'C3' is not convertible to 'C4'; did you mean to use 'as!' to force downcast?}} {{6-8=as!}} +C3() as C4 // expected-error {{'C3' is not convertible to 'C4'}} +// expected-note@-1 {{did you mean to use 'as!' to force downcast?}} {{6-8=as!}} C3() as C5 // expected-error {{cannot convert value of type 'C3' to type 'C5' in coercion}} // Diagnostic shouldn't include @lvalue in type of c3. var c3 = C3() // XXX TODO: This should not suggest `as!` -c3 as C4 // expected-error {{'C3' is not convertible to 'C4'; did you mean to use 'as!' to force downcast?}} {{4-6=as!}} +c3 as C4 // expected-error {{'C3' is not convertible to 'C4'}} +// expected-note@-1{{did you mean to use 'as!' 
to force downcast?}} {{4-6=as!}} // Various incorrect diagnostics for explicit type conversions 1 as Double as Float // expected-error{{cannot convert value of type 'Double' to type 'Float' in coercion}} @@ -102,12 +104,14 @@ _ = "hello" as! String // expected-warning{{forced cast of 'String' to same type // QoI: Nimble as -> as! changes not covered by Fix-Its func f(_ x : String) {} -f("what" as Any as String) // expected-error {{'Any' is not convertible to 'String'; did you mean to use 'as!' to force downcast?}} {{17-19=as!}} +f("what" as Any as String) // expected-error {{'Any' is not convertible to 'String'}} +// expected-note@-1{{did you mean to use 'as!' to force downcast?}} {{17-19=as!}} f(1 as String) // expected-error{{cannot convert value of type 'Int' to type 'String' in coercion}} // Swift compiler segfaults while running the annotation tests let s : AnyObject = C3() -s as C3 // expected-error{{'AnyObject' is not convertible to 'C3'; did you mean to use 'as!' to force downcast?}} {{3-5=as!}} +s as C3 // expected-error{{'AnyObject' is not convertible to 'C3'}} +// expected-note@-1{{did you mean to use 'as!' to force downcast?}} {{3-5=as!}} // SR-6022 func sr6022() -> Any { return 0 } @@ -137,3 +141,16 @@ _ = sr6022 as! AnyObject // expected-warning {{forced cast from '() -> Any' to ' _ = sr6022 as? AnyObject // expected-warning {{conditional cast from '() -> Any' to 'AnyObject' always succeeds}} _ = sr6022_1 as! Any // expected-warning {{forced cast from '() -> ()' to 'Any' always succeeds; did you mean to use 'as'?}} _ = sr6022_1 as? Any // expected-warning {{conditional cast from '() -> ()' to 'Any' always succeeds}} + +// SR-13899 +let any: Any = 1 +if let int = any as Int { // expected-error {{'Any' is not convertible to 'Int'}} +// expected-note@-1 {{did you mean to use 'as?' to conditionally downcast?}} {{18-20=as?}} +} + +let _ = any as Int // expected-error {{'Any' is not convertible to 'Int'}} +// expected-note@-1 {{did you mean to use 'as!' 
to force downcast?}} {{13-15=as!}} +let _: Int = any as Int // expected-error {{'Any' is not convertible to 'Int'}} +// expected-note@-1 {{did you mean to use 'as!' to force downcast?}} {{18-20=as!}} +let _: Int? = any as Int // expected-error {{'Any' is not convertible to 'Int'}} +// expected-note@-1 {{did you mean to use 'as?' to conditionally downcast?}} {{19-21=as?}} diff --git a/test/expr/cast/cf.swift b/test/expr/cast/cf.swift index d54966ec5b92d..e06c35e31ff18 100644 --- a/test/expr/cast/cf.swift +++ b/test/expr/cast/cf.swift @@ -36,7 +36,8 @@ func testCFToNative(_ cfStr: CFString, cfMutableStr: CFMutableString) { func testNativeToCF(_ str: String) { var cfStr = str as CFString - var cfMutableStr = str as CFMutableString // expected-error{{'String' is not convertible to 'CFMutableString'}} {{26-28=as!}} + var cfMutableStr = str as CFMutableString // expected-error{{'String' is not convertible to 'CFMutableString'}} + // expected-note@-1{{did you mean to use 'as!' to force downcast?}} {{26-28=as!}} } func testCFToAnyObject(_ cfStr: CFString, cfMutableStr: CFMutableString, @@ -53,7 +54,8 @@ func testAnyObjectToCF(_ anyObject: AnyObject) { var _: CFTree = anyObject as! CFTree // No implicit conversions. - cfStr = anyObject // expected-error{{'AnyObject' is not convertible to 'CFString'; did you mean to use 'as!' to force downcast?}} {{20-20= as! CFString}} + cfStr = anyObject // expected-error{{'AnyObject' is not convertible to 'CFString'}} + // expected-note@-1{{did you mean to use 'as!' to force downcast?}} {{20-20= as! CFString}} _ = cfStr } diff --git a/test/expr/delayed-ident/optional_overload.swift b/test/expr/delayed-ident/optional_overload.swift new file mode 100644 index 0000000000000..cd2bfc0073b65 --- /dev/null +++ b/test/expr/delayed-ident/optional_overload.swift @@ -0,0 +1,47 @@ +// RUN: %target-typecheck-verify-swift -dump-ast > %t.dump +// RUN: %FileCheck %s < %t.dump + +// SR-13815 +extension Optional { + func sr13815() -> SR13815? 
{ SR13815() } + static func sr13815_2() -> SR13815? { SR13815() } + static func sr13815_3() -> SR13815? { SR13815() } + static var sr13815_wrongType: Int { 0 } + static var sr13815_overload: SR13815 { SR13815() } + init(overloaded: Void) { self = nil } +} + +struct SR13815 { + static var sr13815: SR13815? = SR13815() + static var sr13815_2: SR13815? = SR13815() + static var sr13815_wrongType: SR13815? { SR13815() } + static var p_SR13815: SR13815? { SR13815() } + static func sr13815_3() -> SR13815? { SR13815() } + static var sr13815_overload: SR13815? { SR13815() } + init(overloaded: Void) {} + init?(failable: Void) {} + init() {} +} + +protocol P_SR13815 {} +extension Optional: P_SR13815 where Wrapped: Equatable { + static func p_SR13815() {} +} + +let _: SR13815? = .sr13815 +let _: SR13815? = .sr13815_wrongType +let _: SR13815? = .init() +let _: SR13815? = .sr13815() // expected-error {{instance member 'sr13815' cannot be used on type 'SR13815?'}} +let _: SR13815? = .sr13815_2() +let _: SR13815? = .init(SR13815()) +let _: SR13815? = .init(overloaded: ()) +// If members exist on Optional and Wrapped, always choose the one on optional +// CHECK: declref_expr {{.*}} location={{.*}}optional_overload.swift:37 +// CHECK-SAME: decl=optional_overload.(file).Optional extension.init(overloaded:) +let _: SR13815? = .sr13815_overload +// Should choose the overload from Optional even if the Wrapped overload would otherwise have a better score +// CHECK: member_ref_expr {{.*}} location={{.*}}optional_overload.swift:41 +// CHECK-SAME: decl=optional_overload.(file).Optional extension.sr13815_overload +let _: SR13815? = .init(failable: ()) +let _: SR13815? = .sr13815_3() +let _: SR13815? 
= .p_SR13815 diff --git a/test/expr/expressions.swift b/test/expr/expressions.swift index 308a52f5b43c7..810a77116160b 100644 --- a/test/expr/expressions.swift +++ b/test/expr/expressions.swift @@ -841,7 +841,7 @@ func inoutTests(_ arr: inout Int) { inoutTests((&x, 24).0) // expected-error {{use of extraneous '&'}} - inoutTests((&x)) // expected-error {{use of extraneous '&'}} + inoutTests((&x)) // expected-error {{use of extraneous '&'}} {{15-16=(}} {{14-15=&}} inoutTests(&x) // inout not rejected as operand to assignment operator diff --git a/test/expr/postfix/dot/init_ref_delegation.swift b/test/expr/postfix/dot/init_ref_delegation.swift index e5ffb0be4d804..c0d5b23e45dfc 100644 --- a/test/expr/postfix/dot/init_ref_delegation.swift +++ b/test/expr/postfix/dot/init_ref_delegation.swift @@ -272,10 +272,10 @@ func foo(_ x: T, y: T.Type) where T: P { var ci4 = x.init(proto: "") // expected-error{{'init' is a member of the type; use 'type(of: ...)' to initialize a new object of the same dynamic type}} {{13-13=type(of: }} {{14-14=)}} var z = x - z.init(required: 0) // expected-error {{'init' is a member of the type; use assignment to initalize the value instead}} {{4-4= = }} - z.init(x: 0) // expected-error {{'init' is a member of the type; use assignment to initalize the value instead}} {{4-4= = }} - z.init() // expected-error {{'init' is a member of the type; use assignment to initalize the value instead}} {{4-4= = }} - z.init(proto: "") // expected-error {{'init' is a member of the type; use assignment to initalize the value instead}} {{4-4= = }} + z.init(required: 0) // expected-error {{'init' is a member of the type; use assignment to initialize the value instead}} {{4-4= = }} + z.init(x: 0) // expected-error {{'init' is a member of the type; use assignment to initialize the value instead}} {{4-4= = }} + z.init() // expected-error {{'init' is a member of the type; use assignment to initialize the value instead}} {{4-4= = }} + z.init(proto: "") // expected-error 
{{'init' is a member of the type; use assignment to initialize the value instead}} {{4-4= = }} var ci1a = z.init(required: 0) // expected-error {{'init' is a member of the type; use 'type(of: ...)' to initialize a new object of the same dynamic type}} {{14-14=type(of: }} {{15-15=)}} var ci2a = z.init(x: 0) // expected-error {{'init' is a member of the type; use 'type(of: ...)' to initialize a new object of the same dynamic type}} {{14-14=type(of: }} {{15-15=)}} @@ -520,7 +520,7 @@ struct Y { let x2: X init() { - x.init() // expected-error {{'init' is a member of the type; use assignment to initalize the value instead}} {{6-6= = }} + x.init() // expected-error {{'init' is a member of the type; use assignment to initialize the value instead}} {{6-6= = }} foo(x.init()) // expected-error {{'init' is a member of the type; use 'type(of: ...)' to initialize a new object of the same dynamic type}} {{9-9=type(of: }} {{10-10=)}} } @@ -537,7 +537,7 @@ struct MultipleMemberAccesses { init() { y = Y() y2 = Y() - y.x.init() // expected-error {{'init' is a member of the type; use assignment to initalize the value instead}} {{8-8= = }} + y.x.init() // expected-error {{'init' is a member of the type; use assignment to initialize the value instead}} {{8-8= = }} y2.x2.init() // expected-error {{'init' is a member of the type; use 'type(of: ...)' to initialize a new object of the same dynamic type}} {{5-5=type(of: }} {{10-10=)}} } } diff --git a/test/expr/unary/async_await.swift b/test/expr/unary/async_await.swift index 3f12e52815a5c..138ee830ba514 100644 --- a/test/expr/unary/async_await.swift +++ b/test/expr/unary/async_await.swift @@ -65,14 +65,14 @@ func testClosure() { await getInt() } - let _: () -> Int = closure // expected-error{{cannot convert value of type '() async -> Int' to specified type '() -> Int'}} + let _: () -> Int = closure // expected-error{{invalid conversion from 'async' function of type '() async -> Int' to synchronous function type '() -> Int'}} let closure2 = 
{ () async -> Int in print("here") return await getInt() } - let _: () -> Int = closure2 // expected-error{{cannot convert value of type '() async -> Int' to specified type '() -> Int'}} + let _: () -> Int = closure2 // expected-error{{invalid conversion from 'async' function of type '() async -> Int' to synchronous function type '() -> Int'}} } // Nesting async and await together @@ -84,7 +84,7 @@ enum HomeworkError : Error { func testThrowingAndAsync() async throws { _ = await try throwingAndAsync() - _ = try await throwingAndAsync() + _ = try await throwingAndAsync() // expected-error{{'await' must precede 'try'}}{{11-17=}}{{7-7=await }} _ = await (try throwingAndAsync()) _ = try (await throwingAndAsync()) @@ -136,13 +136,12 @@ func testStringInterpolation() async throws { _ = await "Eventually produces \(getInt())" } -// Make sure try await works too func invalidAsyncFunction() async { - _ = try await throwingAndAsync() // expected-error {{errors thrown from here are not handled}} + _ = await try throwingAndAsync() // expected-error {{errors thrown from here are not handled}} } func validAsyncFunction() async throws { - _ = try await throwingAndAsync() + _ = await try throwingAndAsync() } // Async let checking diff --git a/test/lit.cfg b/test/lit.cfg index e7031a7741122..dee3a4a5965d9 100644 --- a/test/lit.cfg +++ b/test/lit.cfg @@ -354,6 +354,7 @@ if run_os == 'openbsd' and run_cpu == 'amd64': run_ptrsize = '64' if ('64' in run_cpu or run_cpu == "s390x") else '32' run_ptrauth = 'ptrauth' if run_cpu == 'arm64e' else 'noptrauth' run_endian = 'little' if run_cpu != 's390x' else 'big' +run_objc_interop = 'nonobjc' # overwritten later sdk_overlay_link_path = "" sdk_overlay_linker_opt = "" @@ -436,8 +437,8 @@ if kIsWindows: config.swift_test_options, config.swift_driver_test_options)) ) else: - config.substitutions.append( ('%swift_driver', "env SDKROOT=%s %r %s %s %s" % (config.variant_sdk, config.swift, mcp_opt, config.swift_test_options, 
config.swift_driver_test_options)) ) - config.substitutions.append( ('%swiftc_driver', "env SDKROOT=%s %r -toolchain-stdlib-rpath -Xlinker -rpath -Xlinker /usr/lib/swift %s %s %s" % (config.variant_sdk, config.swiftc, mcp_opt, config.swift_test_options, config.swift_driver_test_options)) ) + config.substitutions.append( ('%swift_driver', "env SDKROOT=%s %r %s %s %s" % (shell_quote(config.variant_sdk), config.swift, mcp_opt, config.swift_test_options, config.swift_driver_test_options)) ) + config.substitutions.append( ('%swiftc_driver', "env SDKROOT=%s %r -toolchain-stdlib-rpath -Xlinker -rpath -Xlinker /usr/lib/swift %s %s %s" % (shell_quote(config.variant_sdk), config.swiftc, mcp_opt, config.swift_test_options, config.swift_driver_test_options)) ) config.substitutions.append( ('%sil-opt', "%r %s %s" % (config.sil_opt, mcp_opt, config.sil_test_options)) ) config.substitutions.append( ('%sil-func-extractor', "%r %s" % (config.sil_func_extractor, mcp_opt)) ) config.substitutions.append( ('%sil-llvm-gen', "%r %s" % (config.sil_llvm_gen, mcp_opt)) ) @@ -848,6 +849,7 @@ if run_vendor == 'apple': config.available_features.add('libdispatch') config.available_features.add('foundation') config.available_features.add('objc_interop') + run_objc_interop = "objc" # The "freestanding" tests will link against the static libswiftCore.a and # cannot use any of Obj-C / Dispatch / Foundation. @@ -1298,7 +1300,7 @@ elif (run_os in ['linux-gnu', 'linux-gnueabihf', 'freebsd', 'openbsd', 'windows- config.target_ld = "ld -L%r" % (make_path(test_resource_dir, config.target_sdk_name)) elif run_os == 'linux-androideabi' or run_os == 'linux-android': # The module triple for Android ARMv7 seems to be canonicalized in LLVM - # to be armv7-none-linux-android, without the "eabi" bit. Let's remove the + # to be armv7-unknown-linux-android, without the "eabi" bit. Let's remove the # same bit from the substitutions so the tests pass correctly. 
target_specific_module_triple = re.sub(r'androideabi', 'android', target_specific_module_triple) @@ -1671,7 +1673,6 @@ if config.target_sdk_name in simulator_sdks: else: config.substitutions.append(('%target-is-simulator', 'false')) - config.compiler_rt_libs = [] config.compiler_rt_platform = { 'iphoneos': 'ios', @@ -1725,6 +1726,8 @@ base_runtime_lib_name = ( if os.path.exists(make_path(compiler_rt_dir, base_runtime_lib_name)): config.available_features.add('c_runtime') +config.substitutions.append(('%target-objc-interop', run_objc_interop)) + # For testing the remote-run utility itself, see if we can find an sftp-server # binary. def find_sftp_server(): @@ -2048,8 +2051,7 @@ config.substitutions.append(('%llvm-cov', config.llvm_cov)) if hasattr(config, 'otool_classic'): config.substitutions.append(('%otool-classic', config.otool_classic)) -config.substitutions.append(('%FileCheck', - '%s %s --sanitize BUILD_DIR=%s --sanitize SOURCE_DIR=%s --use-filecheck %s %s' % ( +run_filecheck = '%s %s --sanitize BUILD_DIR=%s --sanitize SOURCE_DIR=%s --use-filecheck %s %s' % ( shell_quote(sys.executable), shell_quote(config.PathSanitizingFileCheck), # LLVM Lit performs realpath with the config path, so all paths are relative @@ -2061,7 +2063,9 @@ config.substitutions.append(('%FileCheck', shell_quote(os.path.realpath(swift_obj_root).replace("\\", "/")), shell_quote(os.path.realpath(config.swift_src_root).replace("\\", "/")), shell_quote(config.filecheck), - '--enable-windows-compatibility' if kIsWindows else ''))) + '--enable-windows-compatibility' if kIsWindows else '') + +config.substitutions.append(('%FileCheck', run_filecheck)) config.substitutions.append(('%raw-FileCheck', shell_quote(config.filecheck))) config.substitutions.append(('%import-libdispatch', getattr(config, 'import_libdispatch', ''))) @@ -2140,6 +2144,19 @@ else: config.substitutions.append( ('%diff', 'diff') ) config.substitutions.append( ('%long-tmp', '%t') ) +# A FileCheck that automatically supports a 
large variety of target +# conditionalization. +run_target_filecheck = '%s --check-prefix=CHECK --check-prefix=CHECK-%s --check-prefix=CHECK-%s --check-prefix=CHECK-%s --check-prefix=CHECK-%s --check-prefix=CHECK-%s --check-prefix=CHECK-%s -DINT=i%s' % ( + run_filecheck, + run_os, + run_cpu, + run_endian, + run_ptrsize, + run_ptrauth, + run_objc_interop, + run_ptrsize) +config.substitutions.append( ('%target-FileCheck', run_target_filecheck) ) + visual_studio_version = os.environ.get('VisualStudioVersion') if kIsWindows and visual_studio_version: config.available_features.add('MSVC_VER=%s' % visual_studio_version) diff --git a/test/stdlib/Builtins.swift b/test/stdlib/Builtins.swift index b5496c680082f..1be6786a41b9e 100644 --- a/test/stdlib/Builtins.swift +++ b/test/stdlib/Builtins.swift @@ -165,7 +165,7 @@ tests.test("array value witnesses") { expectEqual(NoisyLifeCount, NoisyDeathCount) } -protocol Classy : class {} +protocol Classy : AnyObject {} class A : Classy {} class B : A {} class C : B {} diff --git a/test/stdlib/Error.swift b/test/stdlib/Error.swift index 7c95c04c72481..d8f9b947b2442 100644 --- a/test/stdlib/Error.swift +++ b/test/stdlib/Error.swift @@ -15,7 +15,7 @@ protocol OtherProtocol { var otherProperty: String { get } } -protocol OtherClassProtocol : class { +protocol OtherClassProtocol : AnyObject { var otherClassProperty: String { get } } @@ -122,7 +122,7 @@ ErrorTests.test("try!/location") .skip(.custom({ _isFastAssertConfiguration() }, reason: "trap is not guaranteed to happen in -Ounchecked")) .crashOutputMatches(_isDebugAssertConfiguration() - ? "main/Error.swift, line 128" + ? 
"main/Error.swift:128" : "") .code { expectCrashLater() diff --git a/test/stdlib/ErrorBridged.swift b/test/stdlib/ErrorBridged.swift index b996dad5d213f..3ca707719b2bf 100644 --- a/test/stdlib/ErrorBridged.swift +++ b/test/stdlib/ErrorBridged.swift @@ -22,7 +22,7 @@ protocol OtherProtocol { var otherProperty: String { get } } -protocol OtherClassProtocol : class { +protocol OtherClassProtocol : AnyObject { var otherClassProperty: String { get } } diff --git a/test/stdlib/ForEachField.swift b/test/stdlib/ForEachField.swift index 5078278840dd4..e80375fafbd8b 100644 --- a/test/stdlib/ForEachField.swift +++ b/test/stdlib/ForEachField.swift @@ -101,6 +101,27 @@ struct ContainsObject { var obj: TestClass } +struct LetKeyPaths { + let int : Int + let double: Double +} + +protocol TestExisential {} + +struct KeyPathTypes { + weak var weakObj: TestClass? + unowned var unownedObj: TestClass + var obj: TestClass + var tuple: (Int, Int, Int) + var structField: Int + var function: (Int) -> (Int) + var optionalFunction: (Int) -> (Int)? 
+ var enumField: TestEnum + var existential: TestExisential + var existentialMetatype: Any.Type + var metatype: Int.Type +} + #if _runtime(_ObjC) import Foundation @@ -141,6 +162,31 @@ func checkFields( expectEqual(fields.count, count) } +@available(macOS 9999, iOS 9999, tvOS 9999, watchOS 9999, *) +func checkFieldsWithKeyPath( + of type: T.Type, + options: _EachFieldOptions = [], + fields: [String: PartialKeyPath] +) { + var count = 0 + + _forEachFieldWithKeyPath(of: T.self, options: options) { + charPtr, keyPath in + count += 1 + + let fieldName = String(cString: charPtr) + guard let checkKeyPath = fields[fieldName] else { + expectTrue(false, "Unexpected field '\(fieldName)'") + return true + } + + expectTrue(checkKeyPath == keyPath) + return true + } + + expectEqual(fields.count, count) +} + protocol ExistentialProtocol {} extension TestStruct: ExistentialProtocol {} @@ -252,6 +298,53 @@ if #available(macOS 10.15.4, iOS 13.4, tvOS 13.4, watchOS 6.2, *) { }) } + if #available(macOS 9999, iOS 9999, tvOS 9999, watchOS 9999, *) { + tests.test("StructKeyPath") { + checkFieldsWithKeyPath( + of: TestStruct.self, + fields: [ + "int": \TestStruct.int, + "double": \TestStruct.double, + "bool": \TestStruct.bool, + ]) + } + + tests.test("LetKeyPaths") { + checkFieldsWithKeyPath( + of: LetKeyPaths.self, + fields: [ + "int": \LetKeyPaths.int, + "double": \LetKeyPaths.double, + ]) + } + + tests.test("KeyPathTypes") { + checkFieldsWithKeyPath( + of: KeyPathTypes.self, + options: .ignoreUnknown, + fields: [ + "obj": \KeyPathTypes.obj, + "tuple": \KeyPathTypes.tuple, + "structField": \KeyPathTypes.structField, + "enumField": \KeyPathTypes.enumField, + "existential": \KeyPathTypes.existential, + "existentialMetatype": \KeyPathTypes.existentialMetatype, + ]) + } + + tests.test("TupleKeyPath") { + typealias TestTuple = (Int, Int, TestClass, TestStruct) + checkFieldsWithKeyPath( + of: TestTuple.self, + fields: [ + ".0": \TestTuple.0, + ".1": \TestTuple.1, + ".2": \TestTuple.2, + 
".3": \TestTuple.3, + ]) + } + } + func checkGenericStruct(_: T.Type) { let firstOffset = max(MemoryLayout.stride, MemoryLayout.alignment) diff --git a/test/stdlib/Inputs/CommonArrayTests.gyb b/test/stdlib/Inputs/CommonArrayTests.gyb index cd14aa47eedd9..c2b421c79dc26 100644 --- a/test/stdlib/Inputs/CommonArrayTests.gyb +++ b/test/stdlib/Inputs/CommonArrayTests.gyb @@ -5,6 +5,9 @@ % # - Suite -- an identifier for the test suite to append tests to. % # - ArrayType -- the type being tested. +// We use this global array to prevent ARC from eliminating temporary ARC +// traffic in nonUniqueCode below. It is only ever assigned to. +var globalArrayForNonUnique = ${ArrayType}() extension ${ArrayType} { typealias _BufferID = UnsafeRawPointer? @@ -107,7 +110,7 @@ ${Suite}.test("${ArrayType}/appendNonUnique") x.reserveCapacity(10002) let capacity = x.capacity for _ in 1...100 { - let y = x + globalArrayForNonUnique = x x.append(1) expectTrue(x.capacity == capacity) } @@ -120,7 +123,7 @@ ${Suite}.test("${ArrayType}/removeNonUnique") var x = ${ArrayType}(repeating: 27, count: 200) x.reserveCapacity(10002) for _ in 1...100 { - let y = x + globalArrayForNonUnique = x x.remove(at: 0) expectTrue(x.capacity < 1000) } @@ -133,7 +136,7 @@ ${Suite}.test("${ArrayType}/mutateNonUnique") var x = ${ArrayType}(repeating: 27, count: 200) x.reserveCapacity(10002) for _ in 1...100 { - let y = x + globalArrayForNonUnique = x x[0] = 0 expectTrue(x.capacity < 1000) } diff --git a/test/stdlib/Mirror.swift b/test/stdlib/Mirror.swift index c5a1501beb1ef..b3cc4f2f50efb 100644 --- a/test/stdlib/Mirror.swift +++ b/test/stdlib/Mirror.swift @@ -514,7 +514,7 @@ mirrors.test("struct/WrapNSArray") { // Check that Mirror correctly reflects weak/unowned refs to both // Swift and ObjC objects from Swift structs and classes. 
-protocol WeakUnownedTestsP1: class { +protocol WeakUnownedTestsP1: AnyObject { func f1() -> Int } diff --git a/test/stdlib/OptionalTraps.swift b/test/stdlib/OptionalTraps.swift index 9c489e9a021e5..421d2b171651a 100644 --- a/test/stdlib/OptionalTraps.swift +++ b/test/stdlib/OptionalTraps.swift @@ -37,7 +37,7 @@ OptionalTraps.test("UnwrapNone/location") { _isFastAssertConfiguration() }, reason: "this trap is not guaranteed to happen in -Ounchecked")) .crashOutputMatches(_isDebugAssertConfiguration() - ? "OptionalTraps.swift, line 45" + ? "OptionalTraps.swift:45:" : "") .code { expectCrashLater() diff --git a/test/stdlib/PrintFloat.swift.gyb b/test/stdlib/PrintFloat.swift.gyb index 6fff1e94ee3c8..482f7ab07d1e4 100644 --- a/test/stdlib/PrintFloat.swift.gyb +++ b/test/stdlib/PrintFloat.swift.gyb @@ -347,7 +347,7 @@ let PrintTests = TestSuite("FloatingPointPrinting") % for FloatType in ['Float16', 'Float', 'Double', 'Float80']: % if FloatType == 'Float16': -#if !(os(macOS) && arch(x86_64)) +#if !((os(macOS) || targetEnvironment(macCatalyst)) && arch(x86_64)) @available(macOS 11.0, iOS 14.0, watchOS 7.0, tvOS 14.0, *) % elif FloatType == 'Float80': #if !os(Windows) && (arch(i386) || arch(x86_64)) @@ -385,9 +385,7 @@ fileprivate func expectDescription(_ expected: String, _ object: ${FloatType}, // that is closest (as an infinitely-precise real number) to the original // binary float (interpreted as an infinitely-precise real number). 
% if FloatType == 'Float16': -@available(iOS 14.0, watchOS 7.0, tvOS 14.0, *) -@available(macOS, unavailable) -@available(macCatalyst, unavailable) +@available(macOS 11.0, iOS 14.0, watchOS 7.0, tvOS 14.0, *) % end fileprivate func expectAccurateDescription(_ object: ${FloatType}, _ message: @autoclosure () -> String = "", @@ -587,7 +585,7 @@ PrintTests.test("Printable_CDouble") { expectDescription("-1.0", CDouble(-1.0)) } -#if !(os(macOS) && arch(x86_64)) +#if !((os(macOS) || targetEnvironment(macCatalyst)) && arch(x86_64)) if #available(macOS 11.0, iOS 14.0, watchOS 7.0, tvOS 14.0, *) { PrintTests.test("Printable_Float16") { func asFloat16(_ f: Float16) -> Float16 { return f } diff --git a/test/stdlib/Reflection.swift b/test/stdlib/Reflection.swift index 24761d17fa70d..15a4f713767bf 100644 --- a/test/stdlib/Reflection.swift +++ b/test/stdlib/Reflection.swift @@ -118,7 +118,7 @@ print("Fooable double:") fooable = 2.5 dump(fooable) -protocol Barrable : class {} +protocol Barrable : AnyObject {} extension Best: Barrable {} // CHECK-LABEL: Barrable class: diff --git a/test/stdlib/Runtime.swift.gyb b/test/stdlib/Runtime.swift.gyb index e9b71a5186e49..02443c1aa41ce 100644 --- a/test/stdlib/Runtime.swift.gyb +++ b/test/stdlib/Runtime.swift.gyb @@ -536,7 +536,7 @@ Runtime.test("abstraction barrier on casting generic param bound to existential" class Malkovich: Malkovichable { var malkovich: String { return "malkovich" } } -protocol Malkovichable: class { +protocol Malkovichable: AnyObject { var malkovich: String { get } } diff --git a/test/stdlib/WeakMirror.swift b/test/stdlib/WeakMirror.swift index 02e88fdf33754..358d21ffe56b1 100644 --- a/test/stdlib/WeakMirror.swift +++ b/test/stdlib/WeakMirror.swift @@ -35,7 +35,7 @@ class NativeSwiftClass : NativeClassBoundExistential { } } -protocol NativeClassBoundExistential : class { +protocol NativeClassBoundExistential : AnyObject { var x: Int { get } } class NativeSwiftClassHasWeak { @@ -123,7 +123,7 @@ 
mirrors.test("class/NativeSwiftClassHasNativeWeakReferenceNoLeak") { import Foundation -@objc protocol ObjCClassExistential : class { +@objc protocol ObjCClassExistential : AnyObject { var weakProperty: AnyObject? { get set } var x: Int { get } } diff --git a/test/stmt/async.swift b/test/stmt/async.swift new file mode 100644 index 0000000000000..8519cb9cc37fb --- /dev/null +++ b/test/stmt/async.swift @@ -0,0 +1,10 @@ +// RUN: %target-typecheck-verify-swift -enable-experimental-concurrency + +// REQUIRES: concurrency + +func f() async -> Int { 0 } + +_ = await f() // expected-error{{'async' in a function that does not support concurrency}} + +async let y = await f() // expected-error{{'async let' in a function that does not support concurrency}} +// expected-error@-1{{'async' in a function that does not support concurrency}} diff --git a/test/type/subclass_composition.swift b/test/type/subclass_composition.swift index 76b1efb3fd850..c0d4ec55426f1 100644 --- a/test/type/subclass_composition.swift +++ b/test/type/subclass_composition.swift @@ -114,7 +114,8 @@ func basicSubtyping( // let _ = Unrelated() as AnyObject // let _ = Unrelated() as? AnyObject - let _ = anyObject as Unrelated // expected-error {{'AnyObject' is not convertible to 'Unrelated'; did you mean to use 'as!' to force downcast?}} + let _ = anyObject as Unrelated // expected-error {{'AnyObject' is not convertible to 'Unrelated'}} + //expected-note@-1 {{did you mean to use 'as!' to force downcast?}} {{21-23=as!}} let _ = anyObject as? Unrelated // No-ops @@ -199,13 +200,16 @@ func basicSubtyping( let _: Base & P2 = baseAndP2.protocolSelfReturn() // Downcasts - let _ = baseAndP2 as Derived // expected-error {{did you mean to use 'as!' to force downcast?}} + let _ = baseAndP2 as Derived //expected-error {{'Base & P2' is not convertible to 'Derived'}} + // expected-note@-1 {{did you mean to use 'as!' to force downcast?}} {{21-23=as!}} let _ = baseAndP2 as? 
Derived - let _ = baseAndP2 as Derived & P3 // expected-error {{did you mean to use 'as!' to force downcast?}} + let _ = baseAndP2 as Derived & P3 // expected-error {{'Base & P2' is not convertible to 'Derived & P3'}} + // expected-note@-1 {{did you mean to use 'as!' to force downcast?}} {{21-23=as!}} let _ = baseAndP2 as? Derived & P3 - let _ = base as Derived & P2 // expected-error {{did you mean to use 'as!' to force downcast?}} + let _ = base as Derived & P2 //expected-error {{'Base' is not convertible to 'Derived & P2'}} + // expected-note@-1 {{did you mean to use 'as!' to force downcast?}} let _ = base as? Derived & P2 // Invalid cases diff --git a/tools/SourceKit/tools/sourcekitd/bin/XPC/Service/XPCService.cpp b/tools/SourceKit/tools/sourcekitd/bin/XPC/Service/XPCService.cpp index e64538ce3f536..f9394664e33b9 100644 --- a/tools/SourceKit/tools/sourcekitd/bin/XPC/Service/XPCService.cpp +++ b/tools/SourceKit/tools/sourcekitd/bin/XPC/Service/XPCService.cpp @@ -222,6 +222,8 @@ static std::string getDiagnosticDocumentationPath() { return path.str().str(); } +static dispatch_queue_t msgHandlingQueue; + static void sourcekitdServer_peer_event_handler(xpc_connection_t peer, xpc_object_t event) { xpc_type_t type = xpc_get_type(event); @@ -245,8 +247,7 @@ static void sourcekitdServer_peer_event_handler(xpc_connection_t peer, assert(type == XPC_TYPE_DICTIONARY); // Handle the message xpc_retain(event); - dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT,0), - ^{ + dispatch_async(msgHandlingQueue, ^{ xpc_object_t contents = xpc_dictionary_get_value(event, "msg"); if (!contents) { @@ -324,7 +325,7 @@ static void sourcekitdServer_event_handler(xpc_connection_t peer) { // you can defer this call until after that initialization is done. 
xpc_connection_resume(peer); - dispatch_async(dispatch_get_main_queue(), ^{ + dispatch_barrier_async(msgHandlingQueue, ^{ getInitializationInfo(MainConnection); }); } @@ -368,6 +369,10 @@ int main(int argc, const char *argv[]) { LOG_WARN_FUNC("getrlimit failed: " << llvm::sys::StrError()); } + auto attr = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_CONCURRENT, + QOS_CLASS_DEFAULT, 0); + msgHandlingQueue = dispatch_queue_create("request-handling", attr); + xpc_main(sourcekitdServer_event_handler); return 0; } diff --git a/tools/driver/driver.cpp b/tools/driver/driver.cpp index 8774a76ea16e9..0f5f7ad34dbd2 100644 --- a/tools/driver/driver.cpp +++ b/tools/driver/driver.cpp @@ -129,6 +129,24 @@ static bool shouldRunAsSubcommand(StringRef ExecName, return true; } +static bool shouldDisallowNewDriver(StringRef ExecName, + const ArrayRef argv) { + // We are not invoking the driver, so don't forward. + if (ExecName != "swift" && ExecName != "swiftc") { + return true; + } + // If user specified using the old driver, don't forward. + if (llvm::find_if(argv, [](const char* arg) { + return StringRef(arg) == "-disallow-use-new-driver"; + }) != argv.end()) { + return true; + } + if (llvm::sys::Process::GetEnv("SWIFT_USE_OLD_DRIVER").hasValue()) { + return true; + } + return false; +} + static int run_driver(StringRef ExecName, const ArrayRef argv) { // Handle integrated tools. 
@@ -169,23 +187,16 @@ static int run_driver(StringRef ExecName, DiagnosticEngine Diags(SM); Diags.addConsumer(PDC); - std::string newDriverName; + std::string newDriverName = "swift-driver-new"; if (auto driverNameOp = llvm::sys::Process::GetEnv("SWIFT_USE_NEW_DRIVER")) { newDriverName = driverNameOp.getValue(); } - auto disallowForwarding = llvm::find_if(argv, [](const char* arg) { - return StringRef(arg) == "-disallow-use-new-driver"; - }) != argv.end(); // Forwarding calls to the swift driver if the C++ driver is invoked as `swift` // or `swiftc`, and an environment variable SWIFT_USE_NEW_DRIVER is defined. - if (!newDriverName.empty() && !disallowForwarding && - (ExecName == "swift" || ExecName == "swiftc")) { + if (!shouldDisallowNewDriver(ExecName, argv)) { SmallString<256> NewDriverPath(llvm::sys::path::parent_path(Path)); llvm::sys::path::append(NewDriverPath, newDriverName); - if (!llvm::sys::fs::exists(NewDriverPath)) { - Diags.diagnose(SourceLoc(), diag::remark_forwarding_driver_not_there, - NewDriverPath); - } else { + if (llvm::sys::fs::exists(NewDriverPath)) { SmallVector subCommandArgs; // Rewrite the program argument. 
subCommandArgs.push_back(NewDriverPath.c_str()); @@ -246,7 +257,7 @@ static int run_driver(StringRef ExecName, std::unique_ptr TQ = TheDriver.buildTaskQueue(*C); if (!TQ) return 1; - return C->performJobs(std::move(TQ)); + return C->performJobs(std::move(TQ)).exitCode; } return 0; diff --git a/tools/sil-opt/SILOpt.cpp b/tools/sil-opt/SILOpt.cpp index 685e31205f2f2..129534c126b48 100644 --- a/tools/sil-opt/SILOpt.cpp +++ b/tools/sil-opt/SILOpt.cpp @@ -106,6 +106,10 @@ static llvm::cl::opt EnableExperimentalConcurrency("enable-experimental-concurrency", llvm::cl::desc("Enable experimental concurrency model.")); +static llvm::cl::opt EnableExperimentalPrespecialization( + "enable-experimental-prespecialization", + llvm::cl::desc("Enable experimental prespecialization.")); + static llvm::cl::opt VerifyExclusivity("enable-verify-exclusivity", llvm::cl::desc("Verify the access markers used to enforce exclusivity.")); @@ -352,6 +356,9 @@ int main(int argc, char **argv) { Invocation.getLangOptions().EnableExperimentalConcurrency = EnableExperimentalConcurrency; + Invocation.getLangOptions().EnableExperimentalPrespecialization = + EnableExperimentalPrespecialization; + Invocation.getLangOptions().EnableObjCInterop = EnableObjCInterop ? true : DisableObjCInterop ?
false : llvm::Triple(Target).isOSDarwin(); diff --git a/tools/swift-ide-test/swift-ide-test.cpp b/tools/swift-ide-test/swift-ide-test.cpp index 4b0e5a35ddf5a..b3e94d9d22be1 100644 --- a/tools/swift-ide-test/swift-ide-test.cpp +++ b/tools/swift-ide-test/swift-ide-test.cpp @@ -397,6 +397,11 @@ DisableObjCAttrRequiresFoundationModule( llvm::cl::cat(Category), llvm::cl::init(false)); +static llvm::cl::opt EnableExperimentalPrespecialization( + "enable-experimental-prespecialization", + llvm::cl::desc("Enable experimental prespecialization"), + llvm::cl::cat(Category), llvm::cl::init(false)); + static llvm::cl::opt PrintStats("print-stats", llvm::cl::desc("Print statistics"), @@ -3903,6 +3908,8 @@ int main(int argc, char *argv[]) { } InitInvok.getLangOptions().EnableObjCAttrRequiresFoundation = !options::DisableObjCAttrRequiresFoundationModule; + InitInvok.getLangOptions().EnableExperimentalPrespecialization = + options::EnableExperimentalPrespecialization; InitInvok.getTypeCheckerOptions().DebugForbidTypecheckPrefix = options::DebugForbidTypecheckPrefix; InitInvok.getTypeCheckerOptions().DebugConstraintSolver = diff --git a/tools/swift-syntax-test/swift-syntax-test.cpp b/tools/swift-syntax-test/swift-syntax-test.cpp index b48941c305e6d..7fe41f23966c5 100644 --- a/tools/swift-syntax-test/swift-syntax-test.cpp +++ b/tools/swift-syntax-test/swift-syntax-test.cpp @@ -95,6 +95,11 @@ Action(llvm::cl::desc("Action (required):"), "of the EOF token, and dump the buffer from the start of the" "file to the EOF token"))); +static llvm::cl::opt EnableExperimentalPrespecialization( + "enable-experimental-prespecialization", + llvm::cl::desc("Enable experimental prespecialization"), + llvm::cl::init(false)); + static llvm::cl::opt InputSourceFilename("input-source-filename", llvm::cl::desc("Path to the input .swift file")); @@ -610,6 +615,8 @@ int parseFile( Invocation.getLangOptions().VerifySyntaxTree = options::VerifySyntaxTree; 
Invocation.getLangOptions().RequestEvaluatorGraphVizPath = options::GraphVisPath; Invocation.getLangOptions().DisablePoundIfEvaluation = true; + Invocation.getLangOptions().EnableExperimentalPrespecialization = + options::EnableExperimentalPrespecialization; Invocation.getFrontendOptions().InputsAndOutputs.addInputFile(InputFileName); diff --git a/unittests/Sema/CMakeLists.txt b/unittests/Sema/CMakeLists.txt index 95346cde1dc1e..2fb851a43ca22 100644 --- a/unittests/Sema/CMakeLists.txt +++ b/unittests/Sema/CMakeLists.txt @@ -2,7 +2,8 @@ add_swift_unittest(swiftSemaTests SemaFixture.cpp BindingInferenceTests.cpp - ConstraintSimplificationTests.cpp) + ConstraintSimplificationTests.cpp + UnresolvedMemberLookupTests.cpp) target_link_libraries(swiftSemaTests PRIVATE diff --git a/unittests/Sema/SemaFixture.cpp b/unittests/Sema/SemaFixture.cpp index 94bf7437d3fe6..88f8719237f52 100644 --- a/unittests/Sema/SemaFixture.cpp +++ b/unittests/Sema/SemaFixture.cpp @@ -76,6 +76,39 @@ Type SemaTest::getStdlibType(StringRef name) const { return Type(); } +NominalTypeDecl *SemaTest::getStdlibNominalTypeDecl(StringRef name) const { + auto typeName = Context.getIdentifier(name); + + auto *stdlib = Context.getStdlibModule(); + + llvm::SmallVector results; + stdlib->lookupValue(typeName, NLKind::UnqualifiedLookup, results); + + if (results.size() != 1) + return nullptr; + + return dyn_cast(results.front()); +} + +VarDecl *SemaTest::addExtensionVarMember(NominalTypeDecl *decl, + StringRef name, Type type) const { + auto *ext = ExtensionDecl::create(Context, SourceLoc(), nullptr, { }, DC, + nullptr); + decl->addExtension(ext); + ext->setExtendedNominal(decl); + + auto *VD = new (Context) VarDecl(/*isStatic=*/ true, VarDecl::Introducer::Var, + /*nameLoc=*/ SourceLoc(), + Context.getIdentifier(name), ext); + + ext->addMember(VD); + auto *pat = new (Context) NamedPattern(VD); + VD->setNamingPattern(pat); + pat->setType(type); + + return VD; +} + ProtocolType 
*SemaTest::createProtocol(llvm::StringRef protocolName, Type parent) { auto *PD = new (Context) diff --git a/unittests/Sema/SemaFixture.h b/unittests/Sema/SemaFixture.h index 88741394e025e..90cedc3284061 100644 --- a/unittests/Sema/SemaFixture.h +++ b/unittests/Sema/SemaFixture.h @@ -67,6 +67,11 @@ class SemaTest : public SemaTestBase { protected: Type getStdlibType(StringRef name) const; + NominalTypeDecl *getStdlibNominalTypeDecl(StringRef name) const; + + VarDecl *addExtensionVarMember(NominalTypeDecl *decl, StringRef name, + Type type) const; + ProtocolType *createProtocol(llvm::StringRef protocolName, Type parent = Type()); diff --git a/unittests/Sema/UnresolvedMemberLookupTests.cpp b/unittests/Sema/UnresolvedMemberLookupTests.cpp new file mode 100644 index 0000000000000..056729c50dc3b --- /dev/null +++ b/unittests/Sema/UnresolvedMemberLookupTests.cpp @@ -0,0 +1,90 @@ +//===--- UnresolvedMemberLookupTests.cpp --------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2020 Apple Inc. 
and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +#include "SemaFixture.h" +#include "swift/Sema/ConstraintSystem.h" + +using namespace swift; +using namespace swift::unittest; +using namespace swift::constraints; + +TEST_F(SemaTest, TestLookupAlwaysLooksThroughOptionalBase) { + auto *intTypeDecl = getStdlibNominalTypeDecl("Int"); + auto *optTypeDecl = getStdlibNominalTypeDecl("Optional"); + auto intType = intTypeDecl->getDeclaredType(); + auto intOptType = OptionalType::get(intType); + auto stringType = getStdlibType("String"); + + auto *intMember = addExtensionVarMember(intTypeDecl, "test", intOptType); + addExtensionVarMember(optTypeDecl, "test", stringType); + + auto *UME = new (Context) + UnresolvedMemberExpr(SourceLoc(), DeclNameLoc(), + DeclNameRef(Context.getIdentifier("test")), true); + auto *UMCRE = new (Context) UnresolvedMemberChainResultExpr(UME, UME); + + ConstraintSystem cs(DC, ConstraintSystemOptions()); + cs.generateConstraints(UMCRE, DC); + cs.addConstraint( + ConstraintKind::Conversion, cs.getType(UMCRE), intOptType, + cs.getConstraintLocator(UMCRE, ConstraintLocator::ContextualType)); + SmallVector solutions; + cs.solve(solutions); + + // We should have a solution. + ASSERT_EQ(solutions.size(), 1); + + auto &solution = solutions[0]; + auto *locator = cs.getConstraintLocator(UME, + ConstraintLocator::UnresolvedMember); + auto choice = solution.getOverloadChoice(locator).choice; + + // The `test` member on `Int` should be selected. 
+ ASSERT_EQ(choice.getDecl(), intMember); +} + +TEST_F(SemaTest, TestLookupPrefersResultsOnOptionalRatherThanBase) { + auto *intTypeDecl = getStdlibNominalTypeDecl("Int"); + auto *optTypeDecl = getStdlibNominalTypeDecl("Optional"); + auto intType = intTypeDecl->getDeclaredType(); + auto intOptType = OptionalType::get(intType); + + addExtensionVarMember(intTypeDecl, "test", intOptType); + auto *optMember = addExtensionVarMember(optTypeDecl, "test", intType); + + auto *UME = new (Context) + UnresolvedMemberExpr(SourceLoc(), DeclNameLoc(), + DeclNameRef(Context.getIdentifier("test")), true); + auto *UMCRE = new (Context) UnresolvedMemberChainResultExpr(UME, UME); + + ConstraintSystem cs(DC, ConstraintSystemOptions()); + cs.generateConstraints(UMCRE, DC); + cs.addConstraint( + ConstraintKind::Conversion, cs.getType(UMCRE), intOptType, + cs.getConstraintLocator(UMCRE, ConstraintLocator::ContextualType)); + SmallVector solutions; + cs.solve(solutions); + + // We should have a solution. + ASSERT_EQ(solutions.size(), 1); + + auto &solution = solutions[0]; + auto *locator = cs.getConstraintLocator(UME, + ConstraintLocator::UnresolvedMember); + auto choice = solution.getOverloadChoice(locator).choice; + auto score = solution.getFixedScore(); + + // The `test` member on `Optional` should be chosen over the member on `Int`, + // even though the score is otherwise worse. + ASSERT_EQ(score.Data[SK_ValueToOptional], 1); + ASSERT_EQ(choice.getDecl(), optMember); +} diff --git a/unittests/runtime/Actor.cpp b/unittests/runtime/Actor.cpp new file mode 100644 index 0000000000000..2a89024f04f93 --- /dev/null +++ b/unittests/runtime/Actor.cpp @@ -0,0 +1,345 @@ +//===--- Actor.cpp - Unit tests for the actor API -------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2017 Apple Inc. 
and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +#include "swift/ABI/Actor.h" +#include "swift/Runtime/Concurrency.h" +#include "swift/Runtime/Metadata.h" +#include "swift/Basic/STLExtras.h" +#include "llvm/ADT/Optional.h" +#include "gtest/gtest.h" +#include +#include + +using namespace swift; + +/// The current location. +static unsigned progressIndex = 0; +#define EXPECT_PROGRESS(NUMBER) \ + EXPECT_EQ((unsigned) (NUMBER), progressIndex++) + +enum { + FinishedIndex = 100000 +}; +static void finishTest() { + progressIndex = FinishedIndex; +} + +static std::vector globalQueue; +SWIFT_CC(swift) +static void enqueueGlobal(Job *job) { + assert(job); + + // Check that the job isn't already on the queue. + for (auto oldJob: globalQueue) { + EXPECT_NE(job, oldJob); + } + + // The queue will actually be executed starting from the back. + // Add the job after (i.e. before in execution order) all jobs + // with lower priority. + for (auto i = globalQueue.begin(), e = globalQueue.end(); i != e; ++i) { + if (job->getPriority() <= (*i)->getPriority()) { + globalQueue.insert(i, job); + return; + } + } + + // If the job's priority is higher than everything in the existing + // queue, set it as the new front of the queue. + globalQueue.push_back(job); +} + +static void run(llvm::function_ref fn) { + swift_task_enqueueGlobal_hook = &enqueueGlobal; + + progressIndex = 0; + + // Run the setup function. + fn(); + + // The setup function needs to add something to the queue. + EXPECT_FALSE(globalQueue.empty()); + + // Insertion does a priority sort, so we can just process in-order. + // But that order starts from the back. 
+ while (!globalQueue.empty()) { + auto job = globalQueue.back(); + globalQueue.pop_back(); + + job->run(ExecutorRef::generic()); + } + + EXPECT_EQ(FinishedIndex, progressIndex); + + swift_task_enqueueGlobal_hook = nullptr; +} + +namespace { + +/// A simple actor class. +class TestActor : public DefaultActor { +public: + TestActor(); + ~TestActor() { + swift_defaultActor_destroy(this); + } + bool HasBeenDestructed = false; +}; +static SWIFT_CC(swift) +void destroyTestActor(SWIFT_CONTEXT HeapObject *_object) { + delete static_cast(_object); +} +static const FullMetadata TestActorMetadata = { + { { &destroyTestActor }, { &VALUE_WITNESS_SYM(Bo) } }, + { { nullptr }, ClassFlags::UsesSwiftRefcounting, 0, 0, 0, 0, 0, 0 } +}; +TestActor::TestActor() : DefaultActor(&TestActorMetadata) { + swift_defaultActor_initialize(this); +} + +static TestActor *createActor() { + return new TestActor(); +} + +/// A very silly template that stores the latest instance of a particular +/// lambda in global storage and then returns a function pointer that +/// matches an async task continuation function signature. 
+template +class TaskContinuationFromLambda { + static llvm::Optional lambdaStorage; + + SWIFT_CC(swiftasync) + static void invoke(AsyncTask *task, ExecutorRef executor, + AsyncContext *context) { + (*lambdaStorage)(task, executor, static_cast(context)); + } + +public: + static TaskContinuationFunction *get(Fn &&fn) { + lambdaStorage.emplace(std::move(fn)); + return &invoke; + } +}; + +template +llvm::Optional TaskContinuationFromLambda::lambdaStorage; + +} // end anonymous namespace + +template +static std::pair +createTaskWithContext(JobPriority priority, Fn &&fn) { + auto invoke = + TaskContinuationFromLambda::get(std::move(fn)); + + auto pair = swift_task_create_f(JobFlags(JobKind::Task, priority), + /*parent*/ nullptr, + invoke, + sizeof(Context)); + return std::make_pair(pair.Task, + static_cast(pair.InitialContext)); +} + +template +static AsyncTask *createTask(JobPriority priority, Fn &&fn) { + return createTaskWithContext(priority, std::move(fn)) + .first; +} + +template +static void parkTask(AsyncTask *task, Context *context, Fn &&fn) { + auto invoke = + TaskContinuationFromLambda::get(std::move(fn)); + task->ResumeTask = invoke; + task->ResumeContext = context; +} + +namespace { +template +class TupleContext : public AsyncContext { +public: + using TupleType = std::tuple; + TupleType values; + + template auto get() { return std::get(values); } +}; + +/// This extremely silly template repeatedly rotates an argument list +/// until the last argument is first, then returns a pair of that and +/// a tuple of the remaining arguments. 
+template struct Decomposer { + template + static auto decompose(FirstTy &&first, OtherTys &&...others) { + return Decomposer::decompose(std::forward(others)..., + std::forward(first)); + } +}; +template <> struct Decomposer<0> { + template + static auto decompose(FirstTy &&first, OtherTys &&...others) { + return std::make_pair(std::move(first), + std::make_tuple(std::forward(others)...)); + } +}; + +/// This moderately silly template forwards a template argument pack. +template struct TupleContextTypeFor; +template struct TupleContextTypeFor> { + using type = TupleContext; +}; +} // end anonymous namespace + +template +static AsyncTask *createTaskStoring(JobPriority priority, + ArgTypes... args) { + auto fnAndTuple = Decomposer::decompose(args...); + + using TupleType = decltype(fnAndTuple.second); + using ContextType = typename TupleContextTypeFor::type; + + auto taskAndContext = + createTaskWithContext(priority, std::move(fnAndTuple.first)); + auto ptr = &taskAndContext.second->values; + new(ptr) TupleType(std::move(fnAndTuple.second)); + return taskAndContext.first; +} + +TEST(ActorTest, validateTestHarness) { + run([] { + auto task0 = createTask(JobPriority::Background, + [](AsyncTask *task, ExecutorRef executor, AsyncContext *context) { + EXPECT_PROGRESS(5); + EXPECT_PROGRESS(6); + finishTest(); + return context->resumeParent(task, executor); + }); + auto task1 = createTask(JobPriority::Default, + [](AsyncTask *task, ExecutorRef executor, AsyncContext *context) { + EXPECT_PROGRESS(1); + EXPECT_PROGRESS(2); + return context->resumeParent(task, executor); + }); + auto task2 = createTask(JobPriority::Default, + [](AsyncTask *task, ExecutorRef executor, AsyncContext *context) { + EXPECT_PROGRESS(3); + EXPECT_PROGRESS(4); + return context->resumeParent(task, executor); + }); + + swift_task_enqueueGlobal(task0); + swift_task_enqueueGlobal(task1); + swift_task_enqueueGlobal(task2); + EXPECT_PROGRESS(0); + }); +} + + +TEST(ActorTest, actorSwitch) { + run([] { + using 
Context = TupleContext; + + auto actor = createActor(); + auto task0 = createTaskStoring(JobPriority::Default, + (AsyncTask*) nullptr, actor, + [](AsyncTask *task, ExecutorRef executor, Context *context) { + EXPECT_PROGRESS(1); + EXPECT_TRUE(executor.isGeneric()); + EXPECT_EQ(nullptr, context->get<0>()); + std::get<0>(context->values) = task; + + parkTask(task, context, + [](AsyncTask *task, ExecutorRef executor, Context *context) { + EXPECT_PROGRESS(2); + EXPECT_FALSE(executor.isGeneric()); + EXPECT_EQ(ExecutorRef::forDefaultActor(context->get<1>()), + executor); + EXPECT_EQ(task, context->get<0>()); + parkTask(task, context, + [](AsyncTask *task, ExecutorRef executor, Context *context) { + EXPECT_PROGRESS(3); + EXPECT_TRUE(executor.isGeneric()); + EXPECT_EQ(task, context->get<0>()); + finishTest(); + return context->resumeParent(task, executor); + }); + return swift_task_switch(task, executor, ExecutorRef::generic()); + }); + return swift_task_switch(task, executor, + ExecutorRef::forDefaultActor(context->get<1>())); + }); + swift_task_enqueueGlobal(task0); + EXPECT_PROGRESS(0); + }); +} + +TEST(ActorTest, actorContention) { + run([] { + using Context = TupleContext; + auto actor = createActor(); + + // This test only really works because actors are FIFO. 
+ + auto task0 = createTaskStoring(JobPriority::Default, + (AsyncTask*) nullptr, actor, + [](AsyncTask *task, ExecutorRef executor, Context *context) { + EXPECT_PROGRESS(1); + EXPECT_TRUE(executor.isGeneric()); + EXPECT_EQ(nullptr, context->get<0>()); + std::get<0>(context->values) = task; + + parkTask(task, context, + [](AsyncTask *task, ExecutorRef executor, Context *context) { + EXPECT_PROGRESS(3); + EXPECT_FALSE(executor.isGeneric()); + EXPECT_EQ(ExecutorRef::forDefaultActor(context->get<1>()), + executor); + EXPECT_EQ(task, context->get<0>()); + parkTask(task, context, + [](AsyncTask *task, ExecutorRef executor, Context *context) { + EXPECT_PROGRESS(4); + EXPECT_TRUE(executor.isGeneric()); + EXPECT_EQ(task, context->get<0>()); + return context->resumeParent(task, executor); + }); + swift_task_enqueue(task, ExecutorRef::generic()); + }); + + swift_task_enqueue(task, ExecutorRef::forDefaultActor(context->get<1>())); + }); + swift_task_enqueueGlobal(task0); + + auto task1 = createTaskStoring(JobPriority::Background, + (AsyncTask*) nullptr, actor, + [](AsyncTask *task, ExecutorRef executor, Context *context) { + EXPECT_PROGRESS(2); + EXPECT_FALSE(executor.isGeneric()); + EXPECT_EQ(ExecutorRef::forDefaultActor(context->get<1>()), + executor); + EXPECT_EQ(nullptr, context->get<0>()); + std::get<0>(context->values) = task; + + parkTask(task, context, + [](AsyncTask *task, ExecutorRef executor, Context *context) { + EXPECT_PROGRESS(5); + EXPECT_TRUE(executor.isGeneric()); + EXPECT_EQ(task, context->get<0>()); + finishTest(); + return context->resumeParent(task, executor); + }); + + swift_task_enqueue(task, ExecutorRef::generic()); + }); + swift_task_enqueue(task1, ExecutorRef::forDefaultActor(actor)); + + EXPECT_PROGRESS(0); + }); +} diff --git a/unittests/runtime/CMakeLists.txt b/unittests/runtime/CMakeLists.txt index 5070c96876713..60f9944e1e64f 100644 --- a/unittests/runtime/CMakeLists.txt +++ b/unittests/runtime/CMakeLists.txt @@ -55,6 +55,7 @@ 
if(("${SWIFT_HOST_VARIANT_SDK}" STREQUAL "${SWIFT_PRIMARY_VARIANT_SDK}") AND if(SWIFT_ENABLE_EXPERIMENTAL_CONCURRENCY) list(APPEND PLATFORM_SOURCES + Actor.cpp TaskStatus.cpp ) list(APPEND PLATFORM_TARGET_LINK_LIBRARIES @@ -66,6 +67,7 @@ if(("${SWIFT_HOST_VARIANT_SDK}" STREQUAL "${SWIFT_PRIMARY_VARIANT_SDK}") AND set(LLVM_OPTIONAL_SOURCES weak.mm Refcounting.mm + Actor.cpp TaskStatus.cpp) add_swift_unittest(SwiftRuntimeTests @@ -77,6 +79,7 @@ if(("${SWIFT_HOST_VARIANT_SDK}" STREQUAL "${SWIFT_PRIMARY_VARIANT_SDK}") AND Enum.cpp Refcounting.cpp Stdlib.cpp + StackAllocator.cpp ${PLATFORM_SOURCES} # The runtime tests link to internal runtime symbols, which aren't exported diff --git a/unittests/runtime/CompatibilityOverride.cpp b/unittests/runtime/CompatibilityOverride.cpp index 6056f26117930..3c766952c3098 100644 --- a/unittests/runtime/CompatibilityOverride.cpp +++ b/unittests/runtime/CompatibilityOverride.cpp @@ -60,7 +60,7 @@ struct OverrideSection { #include "../../stdlib/public/runtime/CompatibilityOverride.def" }; -OverrideSection Overrides __attribute__((section("__DATA,__swift53_hooks"))) = { +OverrideSection Overrides __attribute__((section("__DATA,__swift54_hooks"))) = { 0, #define OVERRIDE(name, ret, attrs, ccAttrs, namespace, typedArgs, namedArgs) \ name ## Override, diff --git a/unittests/runtime/StackAllocator.cpp b/unittests/runtime/StackAllocator.cpp new file mode 100644 index 0000000000000..1989f37e4ce0f --- /dev/null +++ b/unittests/runtime/StackAllocator.cpp @@ -0,0 +1,116 @@ +//===--- StackAllocator.cpp - Unit tests for the StackAllocator -----------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2017 Apple Inc. 
and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +#include "../../stdlib/public/runtime/StackAllocator.h" +#include "gtest/gtest.h" + +using namespace swift; + +static constexpr size_t slabCapacity = 256; +static constexpr size_t firstSlabBufferCapacity = 140; +static constexpr size_t fitsIntoFirstSlab = 16; +static constexpr size_t fitsIntoSlab = slabCapacity - 16; +static constexpr size_t twoFitIntoSlab = slabCapacity / 2 - 32; +static constexpr size_t exceedsSlab = slabCapacity + 16; + +TEST(StackAllocatorTest, withPreallocatedSlab) { + + char firstSlab[firstSlabBufferCapacity]; + StackAllocator allocator(firstSlab, firstSlabBufferCapacity); + + char *mem1 = (char *)allocator.alloc(fitsIntoFirstSlab); + EXPECT_EQ(allocator.getNumAllocatedSlabs(), 0); + char *mem1a = (char *)allocator.alloc(fitsIntoFirstSlab); + EXPECT_EQ(allocator.getNumAllocatedSlabs(), 0); + + char *mem2 = (char *)allocator.alloc(exceedsSlab); + EXPECT_EQ(allocator.getNumAllocatedSlabs(), 1); + + char *mem3 = (char *)allocator.alloc(fitsIntoSlab); + EXPECT_EQ(allocator.getNumAllocatedSlabs(), 2); + + char *mem4 = (char *)allocator.alloc(fitsIntoSlab); + EXPECT_EQ(allocator.getNumAllocatedSlabs(), 3); + + allocator.dealloc(mem4); + allocator.dealloc(mem3); + EXPECT_EQ(allocator.getNumAllocatedSlabs(), 3); + + char *mem5 = (char *)allocator.alloc(twoFitIntoSlab); + EXPECT_EQ(allocator.getNumAllocatedSlabs(), 3); + char *mem6 = (char *)allocator.alloc(twoFitIntoSlab); + EXPECT_EQ(allocator.getNumAllocatedSlabs(), 3); + char *mem7 = (char *)allocator.alloc(twoFitIntoSlab); + EXPECT_EQ(allocator.getNumAllocatedSlabs(), 3); + + allocator.dealloc(mem7); + allocator.dealloc(mem6); + allocator.dealloc(mem5); + 
EXPECT_EQ(allocator.getNumAllocatedSlabs(), 3); + + char *mem8 = (char *)allocator.alloc(exceedsSlab); + EXPECT_EQ(allocator.getNumAllocatedSlabs(), 2); + + allocator.dealloc(mem8); + allocator.dealloc(mem2); + allocator.dealloc(mem1a); + allocator.dealloc(mem1); +} + +TEST(StackAllocatorTest, withoutPreallocatedSlab) { + + constexpr size_t slabCapacity = 256; + + StackAllocator allocator; + + size_t fitsIntoSlab = slabCapacity - 16; + size_t twoFitIntoSlab = slabCapacity / 2 - 32; + size_t exceedsSlab = slabCapacity + 16; + + char *mem1 = (char *)allocator.alloc(twoFitIntoSlab); + EXPECT_EQ(allocator.getNumAllocatedSlabs(), 1); + char *mem1a = (char *)allocator.alloc(twoFitIntoSlab); + EXPECT_EQ(allocator.getNumAllocatedSlabs(), 1); + + char *mem2 = (char *)allocator.alloc(exceedsSlab); + EXPECT_EQ(allocator.getNumAllocatedSlabs(), 2); + + char *mem3 = (char *)allocator.alloc(fitsIntoSlab); + EXPECT_EQ(allocator.getNumAllocatedSlabs(), 3); + + char *mem4 = (char *)allocator.alloc(fitsIntoSlab); + EXPECT_EQ(allocator.getNumAllocatedSlabs(), 4); + + allocator.dealloc(mem4); + allocator.dealloc(mem3); + EXPECT_EQ(allocator.getNumAllocatedSlabs(), 4); + + char *mem5 = (char *)allocator.alloc(twoFitIntoSlab); + EXPECT_EQ(allocator.getNumAllocatedSlabs(), 4); + char *mem6 = (char *)allocator.alloc(twoFitIntoSlab); + EXPECT_EQ(allocator.getNumAllocatedSlabs(), 4); + char *mem7 = (char *)allocator.alloc(twoFitIntoSlab); + EXPECT_EQ(allocator.getNumAllocatedSlabs(), 4); + + allocator.dealloc(mem7); + allocator.dealloc(mem6); + allocator.dealloc(mem5); + EXPECT_EQ(allocator.getNumAllocatedSlabs(), 4); + + char *mem8 = (char *)allocator.alloc(exceedsSlab); + EXPECT_EQ(allocator.getNumAllocatedSlabs(), 3); + + allocator.dealloc(mem8); + allocator.dealloc(mem2); + allocator.dealloc(mem1a); + allocator.dealloc(mem1); +} diff --git a/unittests/runtime/TaskStatus.cpp b/unittests/runtime/TaskStatus.cpp index bb73b6f89b8f4..9b327a401e172 100644 --- 
a/unittests/runtime/TaskStatus.cpp +++ b/unittests/runtime/TaskStatus.cpp @@ -80,7 +80,7 @@ static void withSimpleTask(T &&value, } static ExecutorRef createFakeExecutor(uintptr_t value) { - return {reinterpret_cast(value)}; + return ExecutorRef::forDefaultActor(reinterpret_cast(value)); } } // end anonymous namespace diff --git a/utils/build-script b/utils/build-script index 3b0aba9c3aa20..ae2416db5f205 100755 --- a/utils/build-script +++ b/utils/build-script @@ -50,9 +50,6 @@ from swift_build_support.swift_build_support.toolchain import host_toolchain # ----------------------------------------------------------------------------- # Constants -# TODO: Remove this constant, it's really not helpful. -HOME = os.environ.get("HOME", "/") - # These versions are community sourced. At any given time only the Xcode # version used by Swift CI is officially supported. See ci.swift.org _SUPPORTED_XCODE_BUILDS = [ @@ -506,6 +503,7 @@ class BuildScriptInvocation(object): pipes.quote(opt) for opt in cmake.common_options()), "--build-args=%s" % ' '.join( pipes.quote(arg) for arg in cmake.build_args()), + "--dsymutil-jobs", str(args.dsymutil_jobs), ] # Compute any product specific cmake arguments. 
@@ -1180,7 +1178,8 @@ def main_preset(): "build-presets.ini") ] - user_presets_file = os.path.join(HOME, '.swift-build-presets') + user_presets_file = os.path.join(os.getenv("HOME", "/"), + '.swift-build-presets') if os.path.isfile(user_presets_file): args.preset_file_names.append(user_presets_file) diff --git a/utils/build-script-impl b/utils/build-script-impl index ee12934ec9cec..f558eb89f08fe 100755 --- a/utils/build-script-impl +++ b/utils/build-script-impl @@ -71,6 +71,7 @@ KNOWN_SETTINGS=( swift-tools-num-parallel-lto-link-jobs "" "The number of parallel link jobs to use when compiling swift tools" use-gold-linker "" "Enable using the gold linker" workspace "${HOME}/src" "source directory containing llvm, clang, swift" + dsymutil-jobs "1" "number of parallel invocations of dsymutil" ## Build Tools host-cc "" "the path to CC, the 'clang' compiler for the host platform. **This argument is required**" @@ -484,8 +485,8 @@ function set_build_options_for_host() { watchos-* | \ watchsimulator-*) swift_cmake_options+=( - -DPython2_EXECUTABLE=$(xcrun -f python2.7) - -DPython3_EXECUTABLE=$(xcrun -f python3) + -DPython2_EXECUTABLE="$(xcrun -f python2.7)" + -DPython3_EXECUTABLE="$(xcrun -f python3)" ) case ${host} in macosx-x86_64) @@ -3043,6 +3044,25 @@ for host in "${ALL_HOSTS[@]}"; do done done +function printJSONTimestamp() { + local command=$1 + local kind=$2 + + echo "{ \"command\": \"${command}\", \"${kind}\": \"$(date "+%Y-%m-%dT%H:%M:%S")\" }" +} + +function printJSONStartTimestamp() { + local command=$1 + + printJSONTimestamp ${command} "start" +} + +function printJSONEndTimestamp() { + local command=$1 + + printJSONTimestamp ${command} "end" +} + for host in "${ALL_HOSTS[@]}"; do # Check if we should perform this action. if ! [[ $(should_execute_action "${host}-extractsymbols") ]]; then @@ -3075,6 +3095,9 @@ for host in "${ALL_HOSTS[@]}"; do # Instead, just echo we do "darwin_intall_extract_symbols". 
if [[ "${DRY_RUN}" ]]; then call darwin_install_extract_symbols + printJSONStartTimestamp dsymutil + echo xargs -n 1 -P ${DSYMUTIL_JOBS} dsymutil + printJSONEndTimestamp dsymutil else set -x @@ -3097,13 +3120,16 @@ for host in "${ALL_HOSTS[@]}"; do # # Exclude shell scripts and static archives. # Exclude swift-api-digester dSYM to reduce debug toolchain size. - # Run sequentially -- dsymutil is multithreaded and can be memory intensive + # Carefully tweak the amount of parallelism -- dsymutil can be memory intensive and + # as such too many instances can exhaust the memory and slow down/panic the machine + printJSONStartTimestamp dsymutil (cd "${host_symroot}" && find ./"${CURRENT_PREFIX}" -perm -0111 -type f -print | \ grep -v '.py$' | \ grep -v '.a$' | \ grep -v 'swift-api-digester' | \ - xargs -P 1 ${dsymutil_path}) + xargs -n 1 -P ${DSYMUTIL_JOBS} ${dsymutil_path}) + printJSONEndTimestamp dsymutil # Strip executables, shared libraries and static libraries in # `host_install_destdir`. diff --git a/utils/build_swift/build_swift/defaults.py b/utils/build_swift/build_swift/defaults.py index 86beac7bb44bb..db1476df9e894 100644 --- a/utils/build_swift/build_swift/defaults.py +++ b/utils/build_swift/build_swift/defaults.py @@ -22,7 +22,7 @@ __all__ = [ - # Command line configuarable + # Command line configurable 'BUILD_VARIANT', 'CMAKE_GENERATOR', 'COMPILER_VENDOR', @@ -38,6 +38,7 @@ 'DARWIN_INSTALL_PREFIX', 'LLVM_MAX_PARALLEL_LTO_LINK_JOBS', 'SWIFT_MAX_PARALLEL_LTO_LINK_JOBS', + 'DSYMUTIL_JOBS' # Constants ] @@ -62,6 +63,8 @@ DARWIN_INSTALL_PREFIX = ('/Applications/Xcode.app/Contents/Developer/' 'Toolchains/XcodeDefault.xctoolchain/usr') +DSYMUTIL_JOBS = 1 + def _system_memory(): """Returns the system memory as an int. 
None if the system memory cannot diff --git a/utils/build_swift/build_swift/driver_arguments.py b/utils/build_swift/build_swift/driver_arguments.py index 3838c9463f06f..ad0a73122143b 100644 --- a/utils/build_swift/build_swift/driver_arguments.py +++ b/utils/build_swift/build_swift/driver_arguments.py @@ -501,6 +501,13 @@ def create_argument_parser(): help='the maximum number of parallel link jobs to use when ' 'compiling swift tools.') + option('--dsymutil-jobs', store_int, + default=defaults.DSYMUTIL_JOBS, + metavar='COUNT', + help='the maximum number of parallel dsymutil jobs to use when ' + 'extracting symbols. Tweak with caution, since dsymutil ' + 'is memory intensive.') + option('--disable-guaranteed-normal-arguments', store_true, help='Disable guaranteed normal arguments') diff --git a/utils/build_swift/build_swift/migration.py b/utils/build_swift/build_swift/migration.py index 3a1a7be95705a..f08808183392c 100644 --- a/utils/build_swift/build_swift/migration.py +++ b/utils/build_swift/build_swift/migration.py @@ -33,17 +33,6 @@ ] -_SDK_TARGETS = { - 'OSX': StdlibDeploymentTarget.OSX.targets, - 'IOS': StdlibDeploymentTarget.iOS.targets, - 'IOS_SIMULATOR': StdlibDeploymentTarget.iOSSimulator.targets, - 'TVOS': StdlibDeploymentTarget.AppleTV.targets, - 'TVOS_SIMULATOR': StdlibDeploymentTarget.AppleTVSimulator.targets, - 'WATCHOS': StdlibDeploymentTarget.AppleWatch.targets, - 'WATCHOS_SIMULATOR': StdlibDeploymentTarget.AppleWatchSimulator.targets, -} - - # ----------------------------------------------------------------------------- class UnknownSDKError(Exception): @@ -70,7 +59,7 @@ def _flatten(iterable): return itertools.chain.from_iterable(iterable) def _swift_sdk_to_stdlib_targets(sdk): - targets = _SDK_TARGETS.get(sdk, None) + targets = StdlibDeploymentTarget.get_migrated_targets_for_sdk(sdk) if targets is None: raise UnknownSDKError(sdk) diff --git a/utils/build_swift/tests/build_swift/test_migration.py 
b/utils/build_swift/tests/build_swift/test_migration.py index 6f8c8c999e89f..7caf09920a6d0 100644 --- a/utils/build_swift/tests/build_swift/test_migration.py +++ b/utils/build_swift/tests/build_swift/test_migration.py @@ -18,6 +18,8 @@ import six +from swift_build_support.swift_build_support.targets import StdlibDeploymentTarget + # ----------------------------------------------------------------------------- # Helpers @@ -25,7 +27,7 @@ def _get_sdk_targets(sdk_names): targets = [] for sdk_name in sdk_names: - targets += migration._SDK_TARGETS[sdk_name] + targets += StdlibDeploymentTarget.get_migrated_targets_for_sdk(sdk_name) return targets @@ -43,7 +45,7 @@ class TestMigrateSwiftSDKsMeta(type): def __new__(cls, name, bases, attrs): # Generate tests for migrating each Swift SDK - for sdk_name in migration._SDK_TARGETS.keys(): + for sdk_name in StdlibDeploymentTarget.get_all_migrated_sdks(): test_name = 'test_migrate_swift_sdk_{}'.format(sdk_name) attrs[test_name] = cls.generate_migrate_swift_sdks_test(sdk_name) diff --git a/utils/build_swift/tests/expected_options.py b/utils/build_swift/tests/expected_options.py index fd1820ce9cf5f..8f13cca65b9c4 100644 --- a/utils/build_swift/tests/expected_options.py +++ b/utils/build_swift/tests/expected_options.py @@ -140,6 +140,7 @@ 'distcc': False, 'sccache': False, 'dry_run': False, + 'dsymutil_jobs': defaults.DSYMUTIL_JOBS, 'enable_asan': False, 'enable_experimental_differentiable_programming': True, 'enable_experimental_concurrency': True, @@ -662,6 +663,7 @@ class BuildScriptImplOption(_BaseOption): IntOption('--llvm-max-parallel-lto-link-jobs'), IntOption('--swift-tools-max-parallel-lto-link-jobs'), IntOption('-j', dest='build_jobs'), + IntOption('--dsymutil-jobs', dest='dsymutil_jobs'), AppendOption('--cross-compile-hosts'), AppendOption('--extra-cmake-options'), diff --git a/utils/swift_build_support/swift_build_support/products/ninja.py b/utils/swift_build_support/swift_build_support/products/ninja.py index 
d133c22fa9ded..97fc2722089bf 100644 --- a/utils/swift_build_support/swift_build_support/products/ninja.py +++ b/utils/swift_build_support/swift_build_support/products/ninja.py @@ -58,17 +58,19 @@ def build(self): osx_version_min = self.args.darwin_deployment_version_osx assert sysroot is not None env = { - "CXX": self.toolchain.cxx, + "CXX": shell._quote(self.toolchain.cxx), "CFLAGS": ( "-isysroot {sysroot} -mmacosx-version-min={osx_version}" - ).format(sysroot=sysroot, osx_version=osx_version_min), + ).format(sysroot=shell._quote(sysroot), + osx_version=osx_version_min), "LDFLAGS": ( "-isysroot {sysroot} -mmacosx-version-min={osx_version}" - ).format(sysroot=sysroot, osx_version=osx_version_min), + ).format(sysroot=shell._quote(sysroot), + osx_version=osx_version_min), } elif self.toolchain.cxx: env = { - "CXX": self.toolchain.cxx, + "CXX": shell._quote(self.toolchain.cxx), } # Ninja can only be built in-tree. Copy the source tree to the build diff --git a/utils/swift_build_support/swift_build_support/targets.py b/utils/swift_build_support/swift_build_support/targets.py index db92d053d99bf..90a19a5e4b43f 100644 --- a/utils/swift_build_support/swift_build_support/targets.py +++ b/utils/swift_build_support/swift_build_support/targets.py @@ -230,6 +230,16 @@ class StdlibDeploymentTarget(object): for platform in known_platforms for target in platform.targets) + _sdk_targets = { + 'OSX': OSX.targets, + 'IOS': iOS.targets, + 'IOS_SIMULATOR': iOSSimulator.targets, + 'TVOS': AppleTV.targets, + 'TVOS_SIMULATOR': AppleTVSimulator.targets, + 'WATCHOS': AppleWatch.targets, + 'WATCHOS_SIMULATOR': AppleWatchSimulator.targets, + } + @staticmethod def host_target(): """ @@ -315,6 +325,14 @@ def get_target_names(cls): return sorted([name for (name, target) in cls._targets_by_name.items()]) + @classmethod + def get_migrated_targets_for_sdk(cls, sdk_name): + return cls._sdk_targets.get(sdk_name, None) + + @classmethod + def get_all_migrated_sdks(cls): + return 
cls._sdk_targets.keys() + def install_prefix(): """ diff --git a/utils/webassembly/build-presets.ini b/utils/webassembly/build-presets.ini index b09672c06c86d..267f48c162e2a 100644 --- a/utils/webassembly/build-presets.ini +++ b/utils/webassembly/build-presets.ini @@ -9,6 +9,7 @@ skip-build-benchmarks llvm-targets-to-build=X86;AArch64;WebAssembly swift-darwin-supported-archs=x86_64 compiler-vendor=swiftwasm +enable-experimental-concurrency=0 [preset: webassembly-install] diff --git a/validation-test/BuildSystem/dsymutil_jobs.test b/validation-test/BuildSystem/dsymutil_jobs.test new file mode 100644 index 0000000000000..d20190363a36f --- /dev/null +++ b/validation-test/BuildSystem/dsymutil_jobs.test @@ -0,0 +1,10 @@ +# RUN: %empty-directory(%t) +# RUN: mkdir -p %t +# RUN: SKIP_XCODE_VERSION_CHECK=1 SWIFT_BUILD_ROOT=%t %swift_src_root/utils/build-script --dry-run --darwin-install-extract-symbols --dsymutil-jobs 5 --cmake %cmake 2>&1 | %FileCheck %s + +# REQUIRES: standalone_build,OS=macosx + +# CHECK: --- Extracting symbols --- +# CHECK: { "command": "dsymutil", "start": " +# CHECK-NEXT: xargs -n 1 -P 5 dsymutil +# CHECK-NEXT: { "command": "dsymutil", "end": " diff --git a/validation-test/SILOptimizer/large_nested_array.swift.gyb b/validation-test/SILOptimizer/large_nested_array.swift.gyb new file mode 100644 index 0000000000000..58160b576fb13 --- /dev/null +++ b/validation-test/SILOptimizer/large_nested_array.swift.gyb @@ -0,0 +1,27 @@ +// RUN: %empty-directory(%t) +// RUN: %gyb %s > %t/main.swift + +// The compiler should finish in less than 2 minutes. To give some slack, +// specify a timeout of 4 minutes. +// If the compiler needs more than 5 minutes, there is probably a real problem. +// So please don't just increase the timeout in case this fails. 
+ +// RUN: %{python} %S/../../test/Inputs/timeout.py 240 %target-swift-frontend -O -parse-as-library -sil-verify-none -c %t/main.swift -o %t/main.o + +// REQUIRES: swift_stdlib_no_asserts,optimized_stdlib +// REQUIRES: long_test +// REQUIRES: CPU=arm64 || CPU=x86_64 + +public struct TestStruct { + public static var a: [[Int]] { + var a: [[Int]] = Array(repeating: Array(repeating: 0, count: 4), count: 2000) + +% for i in range(2000): + a[${i}] = [${i * 4}, ${i * 4 + 1}, ${i * 4 + 2}, ${i * 4 + 3}] +% end + + return a + } +} + + diff --git a/validation-test/StdlibUnittest/ChildProcessShutdown/FailIfChildCrashesDuringShutdown.swift b/validation-test/StdlibUnittest/ChildProcessShutdown/FailIfChildCrashesDuringShutdown.swift index 024a88d7f3703..b596a2015b9b3 100644 --- a/validation-test/StdlibUnittest/ChildProcessShutdown/FailIfChildCrashesDuringShutdown.swift +++ b/validation-test/StdlibUnittest/ChildProcessShutdown/FailIfChildCrashesDuringShutdown.swift @@ -32,7 +32,7 @@ TestSuiteChildCrashes.test("passes") { // CHECK: [ RUN ] TestSuiteChildCrashes.passes // CHECK: [ OK ] TestSuiteChildCrashes.passes // CHECK: TestSuiteChildCrashes: All tests passed -// CHECK: stderr>>> Fatal error: Crash at exit: +// CHECK: stderr>>> {{.*}}Fatal error: Crash at exit // CHECK: stderr>>> CRASHED: SIG // CHECK: The child process failed during shutdown, aborting. 
// CHECK: abort() diff --git a/validation-test/StdlibUnittest/CrashingTests.swift b/validation-test/StdlibUnittest/CrashingTests.swift index 8520f18ef85b6..d574965da64c5 100644 --- a/validation-test/StdlibUnittest/CrashingTests.swift +++ b/validation-test/StdlibUnittest/CrashingTests.swift @@ -40,7 +40,7 @@ TestSuiteCrashes.test("crashesUnexpectedly1") { fatalErrorWithDelayIfNeeded("This should crash") } // CHECK: stdout>>> crashesUnexpectedly1 -// CHECK: stderr>>> Fatal error: This should crash: +// CHECK: stderr>>> {{.*}}Fatal error: This should crash // CHECK: stderr>>> CRASHED: SIG // CHECK: [ FAIL ] TestSuiteCrashes.crashesUnexpectedly1 @@ -64,7 +64,7 @@ TestSuiteCrashes.test("crashesUnexpectedly2") { fatalErrorWithDelayIfNeeded("This should crash") } // CHECK: stdout>>> crashesUnexpectedly2 -// CHECK: stderr>>> Fatal error: This should crash: +// CHECK: stderr>>> {{.*}}Fatal error: This should crash // CHECK: stderr>>> CRASHED: SIG // CHECK: [ FAIL ] TestSuiteCrashes.crashesUnexpectedly2 @@ -89,7 +89,7 @@ TestSuiteCrashes.test("crashesAsExpected1") { fatalErrorWithDelayIfNeeded("This should crash") } // CHECK: stdout>>> crashesAsExpected1 -// CHECK: stderr>>> Fatal error: This should crash: +// CHECK: stderr>>> {{.*}}Fatal error: This should crash // CHECK: stderr>>> OK: saw expected "crashed: sig // CHECK: [ OK ] TestSuiteCrashes.crashesAsExpected1 @@ -114,7 +114,7 @@ TestSuiteCrashes.test("crashesUnexpectedlyXfail") fatalErrorWithDelayIfNeeded("This should crash") } // CHECK: stdout>>> crashesUnexpectedlyXfail -// CHECK: stderr>>> Fatal error: This should crash: +// CHECK: stderr>>> {{.*}}Fatal error: This should crash // CHECK: stderr>>> CRASHED: SIG // CHECK: [ XFAIL ] TestSuiteCrashes.crashesUnexpectedlyXfail @@ -125,7 +125,7 @@ TestSuiteCrashes.test("crashesAsExpectedXfail") fatalErrorWithDelayIfNeeded("This should crash") } // CHECK: stdout>>> crashesAsExpectedXfail -// CHECK: stderr>>> Fatal error: This should crash: +// CHECK: stderr>>> {{.*}}Fatal 
error: This should crash // CHECK: stderr>>> OK: saw expected "crashed: sig // CHECK: [ UXPASS ] TestSuiteCrashes.crashesAsExpectedXfail @@ -136,7 +136,7 @@ TestSuiteCrashes.test("crashesWithMessagePasses") fatalErrorWithDelayIfNeeded("This should crash") } // CHECK: stdout>>> abcd -// CHECK: stderr>>> Fatal error: This should crash: +// CHECK: stderr>>> {{.*}}Fatal error: This should crash // CHECK: stderr>>> OK: saw expected "crashed: sig // CHECK: [ OK ] TestSuiteCrashes.crashesWithMessagePasses @@ -147,7 +147,7 @@ TestSuiteCrashes.test("crashesWithMessageFails") fatalErrorWithDelayIfNeeded("unexpected message") } // CHECK: stdout>>> This should crash -// CHECK: stderr>>> Fatal error: unexpected message: +// CHECK: stderr>>> {{.*}}Fatal error: unexpected message // CHECK: stderr>>> OK: saw expected "crashed: sig // CHECK: did not find expected string after crash: "This should crash" // CHECK: [ FAIL ] TestSuiteCrashes.crashesWithMessageFails @@ -162,7 +162,7 @@ TestSuiteCrashes.test("crashesWithMultipleMessagesPasses") fatalErrorWithDelayIfNeeded("This should crash and your little dog too") } // CHECK: stdout>>> abcd -// CHECK: stderr>>> Fatal error: This should crash and your little dog too: +// CHECK: stderr>>> {{.*}}Fatal error: This should crash and your little dog too // CHECK: stderr>>> OK: saw expected "crashed: sig // CHECK: [ OK ] TestSuiteCrashes.crashesWithMultipleMessagesPasses @@ -177,7 +177,7 @@ TestSuiteCrashes.test("crashesWithMultipleMessagesFails") fatalErrorWithDelayIfNeeded("unexpected message and your little dog too") } // CHECK: stdout>>> This should crash -// CHECK: stderr>>> Fatal error: unexpected message and your little dog too: +// CHECK: stderr>>> {{.*}}Fatal error: unexpected message and your little dog too // CHECK: stderr>>> OK: saw expected "crashed: sig // CHECK: did not find expected string after crash: "This should crash" // CHECK: did not find expected string after crash: "big dog" diff --git 
a/validation-test/compiler_crashers_2_fixed/rdar71816041.swift b/validation-test/compiler_crashers_2_fixed/rdar71816041.swift new file mode 100644 index 0000000000000..928b84c2e37b5 --- /dev/null +++ b/validation-test/compiler_crashers_2_fixed/rdar71816041.swift @@ -0,0 +1,9 @@ +// RUN: %target-swift-frontend -emit-ir -primary-file %s -enable-experimental-concurrency +// REQUIRES: concurrency + +func getIntAndString() async -> (Int, String) { (5, "1") } + +func testDecompose() async -> Int { + async let (i, s) = await getIntAndString() + return await i +} diff --git a/validation-test/stdlib/Dictionary.swift b/validation-test/stdlib/Dictionary.swift index e1c8f60b68005..14da61e731d1a 100644 --- a/validation-test/stdlib/Dictionary.swift +++ b/validation-test/stdlib/Dictionary.swift @@ -7,6 +7,10 @@ // RUN: %target-codesign %t/Dictionary && %line-directive %t/main.swift -- %target-run %t/Dictionary // REQUIRES: executable_test +// rdar71933996 +// UNSUPPORTED: swift_test_mode_optimize +// UNSUPPORTED: swift_test_mode_optimize_size + import StdlibUnittest import StdlibCollectionUnittest