diff --git a/README.md b/README.md
index 90af7937eff91..1defcd009a1bf 100644
--- a/README.md
+++ b/README.md
@@ -151,8 +151,8 @@ Below is more information about TensorFlow-related build arguments.
   * Default value: None.
 * `tensorflow-swift-apis`: A path to the [tensorflow/swift-apis](https://github.com/tensorflow/swift-apis) deep learning library repository.
   * Default value: `tensorflow-swift-apis` if the [tensorflow/swift-apis](https://github.com/tensorflow/swift-apis) repository is cloned. Otherwise, none.
-* `tensorflow-swift-bindings`: A generated TensorFlow Swift bindings file (`RawOpsGenerated.swift`) obtained from [tensorflow/swift-bindings](https://github.com/tensorflow/swift-bindings).
-  * Default value: `tensorflow-swift-bindings/RawOpsGenerated.swift` if the [tensorflow/swift-bindings](https://github.com/tensorflow/swift-bindings) repository is cloned. Otherwise, none.
+* `tensorflow-swift-bindings`: A path to the [tensorflow/swift-bindings](https://github.com/tensorflow/swift-bindings) repository.
+  * Default value: `tensorflow-swift-bindings` if the [tensorflow/swift-bindings](https://github.com/tensorflow/swift-bindings) repository is cloned. Otherwise, none.
 
 ### Build systems
diff --git a/cmake/modules/SwiftSource.cmake b/cmake/modules/SwiftSource.cmake
index a7fd7ec02eac1..208ab905b2bbc 100644
--- a/cmake/modules/SwiftSource.cmake
+++ b/cmake/modules/SwiftSource.cmake
@@ -240,7 +240,8 @@ function(_compile_swift_files
   # Also, disable it for DifferentiationUnittest because resilience changes
   # the AD code
   # that gets generated (leading to additional leaks)
   # (see: TF-328)
-  if(NOT "${SWIFTFILE_MODULE_NAME}" STREQUAL "TensorFlow" AND
+  if(NOT "${SWIFTFILE_MODULE_NAME}" STREQUAL "TensorFlowCore" AND
+     NOT "${SWIFTFILE_MODULE_NAME}" STREQUAL "TensorFlow" AND
      NOT "${SWIFTFILE_MODULE_NAME}" STREQUAL "DifferentiationUnittest")
     list(APPEND swift_flags "-Xfrontend" "-enable-resilience")
   endif()
diff --git a/include/swift/AST/ASTContext.h b/include/swift/AST/ASTContext.h
index 8ad2d6f4ab31b..403c21518da4e 100644
--- a/include/swift/AST/ASTContext.h
+++ b/include/swift/AST/ASTContext.h
@@ -481,16 +481,16 @@ class ASTContext final {
   CanType getAnyObjectType() const;
 
   // SWIFT_ENABLE_TENSORFLOW
-  /// Retrieve the decl for TensorFlow.TensorHandle iff the TensorFlow module
-  /// has been imported. Otherwise, this returns null.
+  /// Retrieve the decl for TensorFlowCore.TensorHandle iff the TensorFlowCore
+  /// module has been imported. Otherwise, this returns null.
   ClassDecl *getTensorHandleDecl() const;
 
-  /// Retrieve the decl for TensorFlow.TensorShape iff the TensorFlow module
-  /// has been imported. Otherwise, this returns null.
+  /// Retrieve the decl for TensorFlowCore.TensorShape iff the TensorFlowCore
+  /// module has been imported. Otherwise, this returns null.
   StructDecl *getTensorShapeDecl() const;
 
-  /// Retrieve the decl for TensorFlow.TensorDataType iff the TensorFlow module
-  /// has been imported. Otherwise, this returns null.
+  /// Retrieve the decl for TensorFlowCore.TensorDataType iff the TensorFlowCore
+  /// module has been imported. Otherwise, this returns null.
   StructDecl *getTensorDataTypeDecl() const;
 
   /// Retrieve the type for Swift._AutoDiffTape.
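The theme of this patch: the monolithic `TensorFlow` standard-library module is being split into a compiler-known core (`TensorFlowCore`, holding `TensorHandle`, `TensorShape`, `TensorDataType`, and the raw ops) plus a higher-level `TensorFlow` module built on top of it. A minimal sketch of user code after the split, assuming the high-level module continues to expose the core types (the re-export itself is not shown in this diff):

```swift
// Hypothetical user program after the module split; Tensor and friends
// now originate in TensorFlowCore. Assumes TensorFlow re-exports them.
import TensorFlow

let x = Tensor<Float>([1, 2, 3])
print(x + x) // [2.0, 4.0, 6.0]
```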
diff --git a/include/swift/AST/KnownIdentifiers.def b/include/swift/AST/KnownIdentifiers.def
index 949e9f55281ba..d1f7062ef494d 100644
--- a/include/swift/AST/KnownIdentifiers.def
+++ b/include/swift/AST/KnownIdentifiers.def
@@ -121,6 +121,7 @@ IDENTIFIER(withArguments)
 IDENTIFIER(withKeywordArguments)
 
 // SWIFT_ENABLE_TENSORFLOW
+IDENTIFIER(TensorFlowCore)
 IDENTIFIER(TensorFlow)
 // KeyPathIterable
 IDENTIFIER(AllKeyPaths)
diff --git a/lib/AST/ASTContext.cpp b/lib/AST/ASTContext.cpp
index 5f90215d026e1..4590484cfefec 100644
--- a/lib/AST/ASTContext.cpp
+++ b/lib/AST/ASTContext.cpp
@@ -821,14 +821,14 @@ CanType ASTContext::getAnyObjectType() const {
 }
 
 // SWIFT_ENABLE_TENSORFLOW
-/// Retrieve the decl for TensorFlow.TensorHandle iff the TensorFlow module has
-/// been imported. Otherwise, this returns null.
+/// Retrieve the decl for TensorFlowCore.TensorHandle iff the TensorFlowCore
+/// module has been imported. Otherwise, this returns null.
 ClassDecl *ASTContext::getTensorHandleDecl() const {
   if (getImpl().TensorHandleDecl)
     return getImpl().TensorHandleDecl;
 
   // See if the TensorFlow module was imported. If not, return null.
-  auto tfModule = getLoadedModule(Id_TensorFlow);
+  auto tfModule = getLoadedModule(Id_TensorFlowCore);
   if (!tfModule)
     return nullptr;
@@ -842,14 +842,14 @@ ClassDecl *ASTContext::getTensorHandleDecl() const {
   return nullptr;
 }
 
-/// Retrieve the decl for TensorFlow.TensorShape iff the TensorFlow module has
-/// been imported. Otherwise, this returns null.
+/// Retrieve the decl for TensorFlowCore.TensorShape iff the TensorFlowCore
+/// module has been imported. Otherwise, this returns null.
 StructDecl *ASTContext::getTensorShapeDecl() const {
   if (getImpl().TensorShapeDecl)
     return getImpl().TensorShapeDecl;
 
   // See if the TensorFlow module was imported. If not, return null.
-  auto tfModule = getLoadedModule(Id_TensorFlow);
+  auto tfModule = getLoadedModule(Id_TensorFlowCore);
   if (!tfModule)
     return nullptr;
@@ -863,14 +863,14 @@ StructDecl *ASTContext::getTensorShapeDecl() const {
   return nullptr;
 }
 
-/// Retrieve the decl for TensorFlow.TensorDataType iff the TensorFlow module has
-/// been imported. Otherwise, this returns null.
+/// Retrieve the decl for TensorFlowCore.TensorDataType iff the TensorFlowCore
+/// module has been imported. Otherwise, this returns null.
 StructDecl *ASTContext::getTensorDataTypeDecl() const {
   if (getImpl().TensorDataTypeDecl)
     return getImpl().TensorDataTypeDecl;
 
   // See if the TensorFlow module was imported. If not, return null.
-  auto tfModule = getLoadedModule(Id_TensorFlow);
+  auto tfModule = getLoadedModule(Id_TensorFlowCore);
   if (!tfModule)
     return nullptr;
@@ -987,7 +987,7 @@ ProtocolDecl *ASTContext::getProtocol(KnownProtocolKind kind) const {
   case KnownProtocolKind::TensorFlowDataTypeCompatible:
   case KnownProtocolKind::TensorSendableReceivable:
   case KnownProtocolKind::TensorProtocol:
-    M = getLoadedModule(Id_TensorFlow);
+    M = getLoadedModule(Id_TensorFlowCore);
     break;
   default:
     M = getStdlibModule();
@@ -1886,7 +1886,7 @@ ASTContext::getModule(ArrayRef<std::pair<Identifier, SourceLoc>> ModulePath) {
       (ModulePath[0].first == StdlibModuleName ||
        ModulePath[0].first == Id_Foundation ||
        // SWIFT_ENABLE_TENSORFLOW
-       ModulePath[0].first == Id_TensorFlow))
+       ModulePath[0].first == Id_TensorFlowCore))
     recordKnownProtocols(M);
   return M;
 }
diff --git a/lib/IRGen/IRGenSIL.cpp b/lib/IRGen/IRGenSIL.cpp
index eca280d5a25f1..21432012fca76 100644
--- a/lib/IRGen/IRGenSIL.cpp
+++ b/lib/IRGen/IRGenSIL.cpp
@@ -2007,7 +2007,7 @@ void IRGenSILFunction::visitGraphOperationInst(GraphOperationInst *i) {
   tf::GraphOperationInfo opInfo(i);
 
   // TODO: As an optimization, do this lookup once per CurSILFn
-  auto tfModule = astCtx.getLoadedModule(astCtx.Id_TensorFlow);
+  auto tfModule = astCtx.getLoadedModule(astCtx.Id_TensorFlowCore);
   assert(tfModule && "could not find TensorFlow module");
   auto inputTensorGroupProto =
       astCtx.getProtocol(KnownProtocolKind::TensorArrayProtocol);
diff --git a/lib/SIL/SILFunctionBuilder.cpp b/lib/SIL/SILFunctionBuilder.cpp
index f9b8f4fda3aa1..0d8a4b84c56c6 100644
--- a/lib/SIL/SILFunctionBuilder.cpp
+++ b/lib/SIL/SILFunctionBuilder.cpp
@@ -87,6 +87,8 @@ void SILFunctionBuilder::addFunctionAttributes(SILFunction *F,
       vjpName = SILDeclRef(vjpFn).mangle();
     // Get lowered argument indices.
     auto paramIndices = A->getParameterIndices();
+    if (paramIndices == nullptr)
+      continue;
     auto loweredParamIndices = paramIndices->getLowered(
         decl->getInterfaceType()->castTo<AnyFunctionType>());
     SILAutoDiffIndices indices(/*source*/ 0, loweredParamIndices);
diff --git a/lib/SILOptimizer/Mandatory/TFDeabstraction.cpp b/lib/SILOptimizer/Mandatory/TFDeabstraction.cpp
index 03100dba375e0..e6b565a8c9c49 100644
--- a/lib/SILOptimizer/Mandatory/TFDeabstraction.cpp
+++ b/lib/SILOptimizer/Mandatory/TFDeabstraction.cpp
@@ -2622,7 +2622,9 @@ void TFDeabstractionPass::run() {
   // If the TensorFlow module hasn't been imported by the program, don't do
   // anything. This avoids impacting compile time for non-TensorFlow using
   // Swift programs by doing extraneous analysis.
-  auto tfModule = ctx.getLoadedModule(ctx.Id_TensorFlow);
+  auto tfModule = ctx.getLoadedModule(ctx.Id_TensorFlowCore);
+  if (!tfModule)
+    tfModule = ctx.getLoadedModule(ctx.Id_TensorFlow);
   if (!tfModule)
     return;
@@ -2634,7 +2636,8 @@ void TFDeabstractionPass::run() {
   // TODO: Rework the heuristics in inlineCalls() to be smarter. In an ideal
   // world, we would be lazy about inlining, and only inline calls due to actual
   // inter-op value uses.
- if (module->getSwiftModule() == tfModule) + if (module->getSwiftModule() == ctx.getLoadedModule(ctx.Id_TensorFlowCore) || + module->getSwiftModule() == ctx.getLoadedModule(ctx.Id_TensorFlow)) return; TensorFunctionClassifier tfc; diff --git a/lib/SILOptimizer/Mandatory/TFPartition.cpp b/lib/SILOptimizer/Mandatory/TFPartition.cpp index b5653c090acf3..7d46c1c94d3b3 100644 --- a/lib/SILOptimizer/Mandatory/TFPartition.cpp +++ b/lib/SILOptimizer/Mandatory/TFPartition.cpp @@ -4482,7 +4482,9 @@ void TFPartition::run() { // If the TensorFlow module hasn't been imported by the program, don't do // anything. This avoids impacting compile time for non-TensorFlow using // Swift programs by doing extraneous analysis. - tfModule = ctx.getLoadedModule(ctx.Id_TensorFlow); + tfModule = ctx.getLoadedModule(ctx.Id_TensorFlowCore); + if (!tfModule) + tfModule = ctx.getLoadedModule(ctx.Id_TensorFlow); if (!tfModule) return; diff --git a/lib/SILOptimizer/PassManager/Passes.cpp b/lib/SILOptimizer/PassManager/Passes.cpp index af9dae2571d6e..4788da8f2d44f 100644 --- a/lib/SILOptimizer/PassManager/Passes.cpp +++ b/lib/SILOptimizer/PassManager/Passes.cpp @@ -144,7 +144,6 @@ void swift::runSILTFPartitionPass(SILModule &Module) { // Verify the module, if required. if (Module.getOptions().VerifyAll) Module.verify(); - } void swift::runSILOptimizationPassesWithFileSpecification(SILModule &M, diff --git a/lib/Sema/CSApply.cpp b/lib/Sema/CSApply.cpp index 9587140ce8670..4ba7e12dca696 100644 --- a/lib/Sema/CSApply.cpp +++ b/lib/Sema/CSApply.cpp @@ -2424,8 +2424,8 @@ namespace { // The result type must conform to TensorGroup or be a tuple of types that // conform to TensorGroup. - auto tfModule = ctx.getLoadedModule(ctx.Id_TensorFlow); - assert(tfModule && "could not find TensorFlow module"); + auto tfModule = ctx.getLoadedModule(ctx.Id_TensorFlowCore); + assert(tfModule && "could not find TensorFlowCore module"); auto tensorGroupProto = ctx.getProtocol(KnownProtocolKind::TensorGroup); assert(tensorGroupProto && "could not find TensorGroup protocol"); diff --git a/stdlib/public/CMakeLists.txt b/stdlib/public/CMakeLists.txt index 76ed423f4b6d7..b7e287a30b17f 100644 --- a/stdlib/public/CMakeLists.txt +++ b/stdlib/public/CMakeLists.txt @@ -69,6 +69,7 @@ endif() if(SWIFT_BUILD_STDLIB AND SWIFT_ENABLE_TENSORFLOW) # TODO: Add TensorFlow support for iOS/Raspberry Pi. add_subdirectory(CTensorFlow) + add_subdirectory(TensorFlowCore) add_subdirectory(TensorFlow) endif() diff --git a/stdlib/public/TensorFlow/ArrayOps.swift b/stdlib/public/TensorFlow/ArrayOps.swift deleted file mode 100644 index 619d2ad539171..0000000000000 --- a/stdlib/public/TensorFlow/ArrayOps.swift +++ /dev/null @@ -1,199 +0,0 @@ -//===-- ArrayOps.swift ----------------------------------------*- swift -*-===// -// -// This source file is part of the Swift.org open source project -// -// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors -// Licensed under Apache License v2.0 with Runtime Library Exception -// -// See https://swift.org/LICENSE.txt for license information -// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors -// -//===----------------------------------------------------------------------===// -// -// This file contains some Array ops that cannot be properly handled by #tfop. -// -// TODO: These should be deleted once we can properly generate raw ops for these. 
-// -//===----------------------------------------------------------------------===// - -import CTensorFlow - -public extension Raw { - /// Saves tensors in V2 checkpoint format. - /// - /// By default, saves the named tensors in full. If the caller wishes to save - /// specific slices of full tensors, "shape_and_slices" should be non-empty strings - /// and correspondingly well-formed. - /// - /// - Parameters: - /// - prefix: Must have a single element. The prefix of the V2 checkpoint to which we - /// write the tensors. - /// - tensor_names: shape {N}. The names of the tensors to be saved. - /// - shape_and_slices: shape {N}. The slice specs of the tensors to be saved. - /// Empty strings indicate that they are non-partitioned tensors. - /// - tensors: `N` tensors to save. - @inlinable @inline(__always) - static func saveV2( - prefix: StringTensor, - tensorNames: StringTensor, - shapeAndSlices: StringTensor, - tensors: [AnyTensor] - ) { - let s: CTFStatus = TF_NewStatus() - defer { TF_DeleteStatus(s) } - let op: CTFEOp = TFE_NewOp(_ExecutionContext.global.eagerContext, "SaveV2", s) - defer { TFE_DeleteOp(op) } - let _ = _TFCOpAddInputFromTensorGroup(op, prefix, s) - let _ = _TFCOpAddInputFromTensorGroup(op, tensorNames, s) - let _ = _TFCOpAddInputFromTensorGroup(op, shapeAndSlices, s) - let _ = _TFCOpAddInputFromAnyTensors(op, tensors, s) - let _ = _TFCOpSetAttrTypeArray(op, "dtypes", tensors.map { $0._tensorFlowDataType }) - return _TFCExecuteOp(op, s) - } - - /// Restores tensors from a V2 checkpoint. - /// - /// For backward compatibility with the V1 format, this Op currently allows - /// restoring from a V1 checkpoint as well: - /// - This Op first attempts to find the V2 index file pointed to by "prefix", and - /// if found proceed to read it as a V2 checkpoint; - /// - Otherwise the V1 read path is invoked. - /// Relying on this behavior is not recommended, as the ability to fall back to read - /// V1 might be deprecated and eventually removed. - /// - /// By default, restores the named tensors in full. If the caller wishes to restore - /// specific slices of stored tensors, "shape_and_slices" should be non-empty - /// strings and correspondingly well-formed. - /// - /// Callers must ensure all the named tensors are indeed stored in the checkpoint. - /// - /// - Parameters: - /// - prefix: Must have a single element. The prefix of a V2 checkpoint. - /// - tensor_names: shape {N}. The names of the tensors to be restored. - /// - shape_and_slices: shape {N}. The slice specs of the tensors to be restored. - /// Empty strings indicate that they are non-partitioned tensors. - /// - /// - Attr dtypes: shape {N}. The list of expected dtype for the tensors. Must match - /// those stored in the checkpoint. - /// - /// - Output tensors: shape {N}. The restored tensors, whose shapes are read from the - /// checkpoint directly. 
- @inlinable @inline(__always) - static func restoreV2( - prefix: StringTensor, - tensorNames: StringTensor, - shapeAndSlices: StringTensor, - dtypes: [TensorDataType] - ) -> [AnyTensor] { - let s: CTFStatus = TF_NewStatus() - defer { TF_DeleteStatus(s) } - let op: CTFEOp = TFE_NewOp(_ExecutionContext.global.eagerContext, "RestoreV2", s) - defer { TFE_DeleteOp(op) } - let _ = _TFCOpAddInputFromTensorGroup(op, prefix, s) - let _ = _TFCOpAddInputFromTensorGroup(op, tensorNames, s) - let _ = _TFCOpAddInputFromTensorGroup(op, shapeAndSlices, s) - let _ = _TFCOpSetAttrTypeArray(op, "dtypes", dtypes) - - var count: Int32 = Int32(dtypes.count) - let buffer: UnsafeMutablePointer = - UnsafeMutablePointer.allocate(capacity: Int(count)) - defer { buffer.deallocate() } - _TFCEagerExecute(op, UnsafeMutablePointer(buffer), &count, s) - checkOk(s) - - var out: [AnyTensor] = [] - var cursor = buffer - for type in dtypes { - out.append(makeTensor(dataType: type, owning: cursor.pointee)) - cursor = cursor.advanced(by: 1) - } - return out - } - - /// Splits a tensor into `numSplit` tensors along one dimension. - /// - /// - Parameters: - /// - splitDim: 0-D. The dimension along which to split. Must be in the range - /// `[-rank(value), rank(value))`. - /// - value: The tensor to split. - /// - numSplit: The number of splits to create. - /// - /// - Returns: Tensors whose shape matches that of `value` - /// except along `axis`, where their sizes are - /// `value.shape[axis] / numSplit`. - @inlinable @inline(__always) - static func split( - splitDim: Tensor, - value: Tensor, - numSplit: Int64 - ) -> [Tensor] { - let s: CTFStatus = TF_NewStatus() - defer { TF_DeleteStatus(s) } - let op: CTFEOp = TFE_NewOp(_ExecutionContext.global.eagerContext, "Split", s) - defer { TFE_DeleteOp(op) } - let _ = _TFCOpAddInputFromTensorGroup(op, splitDim, s) - let _ = _TFCOpAddInputFromTensorGroup(op, value, s) - TFE_OpSetAttrInt(op, "num_split", numSplit) - TFE_OpSetAttrType(op, "T", T.tensorFlowDataType._cDataType) - var count: Int32 = Int32(numSplit) - let buffer: UnsafeMutablePointer = - UnsafeMutablePointer.allocate(capacity: Int(count)) - defer { buffer.deallocate() } - _TFCEagerExecute(op, UnsafeMutablePointer(buffer), &count, s) - checkOk(s) - - var out: [Tensor] = [] - var cursor = buffer - for _ in 0..(handle: TensorHandle(_owning: cursor.pointee))) - cursor = cursor.advanced(by: 1) - } - return out - } - - /// Splits a tensor into `numSplit` tensors along one dimension. - /// - /// - Parameters: - /// - value: The tensor to split. - /// - sizeSplits: list containing the sizes of each output tensor along the split - /// dimension. Must sum to the dimension of value along split_dim. - /// Can contain one -1 indicating that dimension is to be inferred. - /// - splitDim: 0-D. The dimension along which to split. Must be in the range - /// `[-rank(value), rank(value))`. - /// - /// - Returns: Tensors whose shape matches that of `value` - /// except along `axis`, where their sizes are - /// `size_splits[i]`. 
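The `saveV2`/`restoreV2` wrappers above are exactly the kind of op `#tfop` cannot express, because the number of results is only known at run time (from `dtypes.count`). A hedged sketch of a checkpoint round trip through them; the path, tensor name, and `StringTensor` initializers are illustrative assumptions:

```swift
// Save one tensor to a V2 checkpoint, then read it back.
let weights = Tensor<Float>([[1, 2], [3, 4]])
Raw.saveV2(
  prefix: StringTensor("/tmp/model.ckpt"),  // assumed String initializer
  tensorNames: StringTensor(["weights"]),   // assumed [String] initializer
  shapeAndSlices: StringTensor([""]),       // "" = unpartitioned tensor
  tensors: [weights])
let restored = Raw.restoreV2(
  prefix: StringTensor("/tmp/model.ckpt"),
  tensorNames: StringTensor(["weights"]),
  shapeAndSlices: StringTensor([""]),
  dtypes: [Float.tensorFlowDataType])       // must match the saved dtypes
// `restored` is an [AnyTensor] with a single Float-typed element.
```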
- @inlinable @inline(__always) - static func splitV( - value: Tensor, - sizeSplits: Tensor, - splitDim: Tensor, - numSplit: Int64 - ) -> [Tensor] { - let s: CTFStatus = TF_NewStatus() - defer { TF_DeleteStatus(s) } - let op: CTFEOp = TFE_NewOp(_ExecutionContext.global.eagerContext, "SplitV", s) - defer { TFE_DeleteOp(op) } - let _ = _TFCOpAddInputFromTensorGroup(op, value, s) - let _ = _TFCOpAddInputFromTensorGroup(op, sizeSplits, s) - let _ = _TFCOpAddInputFromTensorGroup(op, splitDim, s) - TFE_OpSetAttrInt(op, "num_split", numSplit) - TFE_OpSetAttrType(op, "T", T.tensorFlowDataType._cDataType) - TFE_OpSetAttrType(op, "Tlen", Tlen.tensorFlowDataType._cDataType) - var count: Int32 = Int32(numSplit) - let buffer: UnsafeMutablePointer = - UnsafeMutablePointer.allocate(capacity: Int(count)) - defer { buffer.deallocate() } - _TFCEagerExecute(op, UnsafeMutablePointer(buffer), &count, s) - checkOk(s) - - var out: [Tensor] = [] - var cursor = buffer - for _ in 0..(handle: TensorHandle(_owning: cursor.pointee))) - cursor = cursor.advanced(by: 1) - } - return out - } -} diff --git a/stdlib/public/TensorFlow/CMakeLists.txt b/stdlib/public/TensorFlow/CMakeLists.txt index e3bb11fff4ceb..0c0d2f63d24c2 100644 --- a/stdlib/public/TensorFlow/CMakeLists.txt +++ b/stdlib/public/TensorFlow/CMakeLists.txt @@ -28,35 +28,17 @@ list(APPEND swift_stdlib_compile_flags "-Xllvm" "-sil-partial-specialization") list(APPEND swift_stdlib_compile_flags "-Xfrontend" "-enable-sil-ownership") list(APPEND swift_stdlib_compile_flags "-force-single-frontend-invocation") # FIXME(SR-7972): Some tests fail when TensorFlow is optimized. +# list(APPEND swift_stdlib_compile_flags "-O" "-whole-module-optimization") list(APPEND swift_stdlib_compile_flags "-Onone") list(APPEND swift_stdlib_compile_flags "-DCOMPILING_TENSORFLOW_MODULE") -set(SOURCES - CompilerRuntime.swift - CompositeMath.swift - Dataset.swift - DataTypes.swift - Execution.swift - Gradients.swift - Ops.swift - ShapedArray.swift - StringOps.swift - StringTensor.swift - Tensor.swift - TensorGroup.swift - TensorHandle.swift - TensorProtocol.swift - TensorShape.swift - Utilities.swift - ArrayOps.swift - Threading.swift - ExecuteOp.swift.gyb - # NumPy bridging for `ShapedArray` and `Tensor`. - PythonConversion.swift) +set(SOURCES "") -# Copy TensorFlow bindings file, if it exists. +# Copy TensorFlow bindings sources, if they exist. if (TENSORFLOW_SWIFT_BINDINGS) - list(APPEND SOURCES "${TENSORFLOW_SWIFT_BINDINGS}") + file(GLOB_RECURSE TENSORFLOW_SWIFT_BINDINGS_SOURCES + "${TENSORFLOW_SWIFT_BINDINGS}/*.swift") + list(APPEND SOURCES "${TENSORFLOW_SWIFT_BINDINGS_SOURCES}") endif() # Copy TensorFlow high-level API sources, if they exist. @@ -80,6 +62,7 @@ add_swift_target_library(swiftTensorFlow ${SWIFT_STDLIB_LIBRARY_BUILD_TYPES} IS_ INCORPORATE_OBJECT_LIBRARIES swiftCTensorFlow TARGET_SDKS OSX LINUX PRIVATE_LINK_LIBRARIES "${TF_LIBRARIES}" + SWIFT_MODULE_DEPENDS TensorFlowCore SWIFT_MODULE_DEPENDS SwiftOnoneSupport SWIFT_MODULE_DEPENDS_IOS Darwin SWIFT_MODULE_DEPENDS_OSX Darwin diff --git a/stdlib/public/TensorFlow/CompositeMath.swift b/stdlib/public/TensorFlow/CompositeMath.swift deleted file mode 100644 index 6a1541536eada..0000000000000 --- a/stdlib/public/TensorFlow/CompositeMath.swift +++ /dev/null @@ -1,51 +0,0 @@ -//===-- CompositeMath.swift -----------------------------------*- swift -*-===// -// -// This source file is part of the Swift.org open source project -// -// Copyright (c) 2014 - 2017 Apple Inc. 
and the Swift project authors -// Licensed under Apache License v2.0 with Runtime Library Exception -// -// See https://swift.org/LICENSE.txt for license information -// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors -// -//===----------------------------------------------------------------------===// -// -// This file contains composite math functions. Functions in this file are -// defined in terms of core ops that are differentiable, and therefore do not -// need custom gradients. -// -//===----------------------------------------------------------------------===// - -/// Computes `sigmoid` of the specified tensor element-wise. -/// Specifically, computes `1 / (1 + exp(-x))`. -@inlinable @inline(__always) -@differentiable(vjp: _vjpSigmoid(_:) where T : TensorFlowFloatingPoint) -public func sigmoid(_ x: Tensor) -> Tensor { - return Raw.sigmoid(x) -} - -/// Computes `relu` of the specified tensor element-wise. -/// Specifically, computes `max(0, x)`. -@inlinable @inline(__always) -@differentiable(vjp: _vjpRelu(_:) where T : TensorFlowFloatingPoint) -public func relu(_ x: Tensor) -> Tensor { - return max(0, x) -} - -/// Computes the softmax of the specified tensor along the last axis. -/// Specifically, computes `exp(x) / exp(x).sum(alongAxes: -1)`. -@inlinable @inline(__always) -@differentiable(vjp: _vjpSoftmax(_:) where T : TensorFlowFloatingPoint) -public func softmax(_ x: Tensor) -> Tensor { - return Raw.softmax(logits: x) -} - -/// Computes the softmax of the specified tensor along the specified axis. -/// Specifically, computes `exp(x) / exp(x).sum(alongAxes: axis)`. -@inlinable @inline(__always) -public func softmax( - _ x: Tensor, alongAxis axis: Int -) -> Tensor { - let expx = exp(x) - return expx / expx.sum(alongAxes: axis) -} diff --git a/stdlib/public/TensorFlow/Dataset.swift b/stdlib/public/TensorFlow/Dataset.swift deleted file mode 100644 index 568f386d00f42..0000000000000 --- a/stdlib/public/TensorFlow/Dataset.swift +++ /dev/null @@ -1,211 +0,0 @@ -//===-- Dataset.swift -----------------------------------------*- swift -*-===// -// -// This source file is part of the Swift.org open source project -// -// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors -// Licensed under Apache License v2.0 with Runtime Library Exception -// -// See https://swift.org/LICENSE.txt for license information -// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors -// -//===----------------------------------------------------------------------===// -// -// The dataset API. -// -//===----------------------------------------------------------------------===// - -/// The default graph seed. -/// -/// - Note: See TensorFlow's `python.framework.random_seed.DEFAULT_GRAPH_SEED`. -@usableFromInline let _defaultGraphSeed: Int64 = 87654321 - -/// Returns the local seeds an operation should use given an op-specific seed. -/// -/// Given operation-specific seed, `seed`, this helper function returns two -/// seeds derived from graph-level and op-level seeds. Many random operations -/// internally use the two seeds to allow user to change the seed globally for a -/// graph, or for only specific operations. -/// -/// - Note: See TensorFlow's `python.framework.random_seed.get_seed`. -/// -// TODO: There's no support for TF's "global seed" yet, so we always use the -// default graph seed as the first seed. Need to investigate the best way to -// model TF's "global seed". 
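Concretely, the helper defined just below always pins the graph-level seed to `_defaultGraphSeed` and passes the caller's value through as the op-level seed. A sketch (illustrative only, since `_tensorSeeds` is internal to the module):

```swift
let (graphSeed, opSeed) = _tensorSeeds(Tensor<Int64>(42))
// graphSeed == Tensor(87654321), the fixed _defaultGraphSeed
// opSeed    == Tensor(42), the caller-supplied op-level seed
```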
-@usableFromInline @inline(__always) -func _tensorSeeds(_ seed: Tensor) -> (Tensor, Tensor) { - return (Tensor(_defaultGraphSeed), seed) -} - -//===----------------------------------------------------------------------===// -// Single value dataset -//===----------------------------------------------------------------------===// - -/// Represents a potentially large set of elements. -/// -/// A `Dataset` can be used to represent an input pipeline as a collection of -/// element tensors. -@_fixed_layout -public struct Dataset { - public let _handle: VariantHandle - - @inlinable @inline(__always) - public init(_handle: VariantHandle) { - self._handle = _handle - } -} - -public extension Dataset { - @inlinable @inline(__always) - init(randomSeed: Int64) { - let (seed1, seed2) = _tensorSeeds(Tensor(randomSeed)) - self.init( - _handle: #tfop("RandomDataset", seed1, seed2, - output_types$dtype: Element._typeList, - output_shapes: Element._unknownShapeList) - ) - } -} - -public extension Dataset { - /// Creates a dataset from a batch of elements as a tensor. - @inlinable @inline(__always) - init(elements: Element) { - // A dataset creation op only runs on TF CPU. - self.init( - _handle: #tfop( - "TensorSliceDataset", [elements], - Toutput_types$dtype: Element._typeList, - output_shapes: Element._unknownShapeList - ) - ) - } -} - -extension Dataset : Sequence { - public typealias Iterator = DatasetIterator - - /// Returns an iterator over the elements of this dataset. - @inlinable @inline(__always) - public func makeIterator() -> DatasetIterator { - let resource: ResourceHandle = - #tfop("AnonymousIterator", output_types$dtype: Element._typeList, - output_shapes: Element._unknownShapeList) - #tfop("MakeIterator", _handle, resource) as Void - return DatasetIterator(_handle: resource) - } -} - -public extension Dataset { - // Note that this Dataset API implementation uses an experimental tracing - // feature, which is not robust and does not have great diagnostics yet. - @inlinable @inline(__always) - func map( - _ transform: (Element) -> ResultElement - ) -> Dataset { - return Dataset( - _handle: #tfop( - "MapDataset", _handle, [Tensor(0)], - f$func: _tffunc(transform), - Targuments$dtype: [Int32.tensorFlowDataType], - output_types$dtype: ResultElement._typeList, - output_shapes: ResultElement._unknownShapeList - ) - ) - } - - @inlinable @inline(__always) - func filter( - _ isIncluded: (Element) -> Tensor - ) -> Dataset { - return Dataset( - _handle: #tfop( - "FilterDataset", _handle, [Tensor(0)], - predicate$func: _tffunc(isIncluded), - Targuments$dtype: [Int32.tensorFlowDataType], - output_types$dtype: Element._typeList, - output_shapes: Element._unknownShapeList - ) - ) - } -} - -public extension Dataset { - @inlinable @inline(__always) - func shuffled( - sampleCount: Int, randomSeed: Int64 - ) -> Dataset { - let (seed1, seed2) = _tensorSeeds(Tensor(randomSeed)) - return Dataset( - _handle: #tfop( - "ShuffleDataset", _handle, Tensor(Int64(sampleCount)), seed1, seed2, - output_types$dtype: Element._typeList, - output_shapes: Element._unknownShapeList - ) - ) - } - - @inlinable @inline(__always) - func batched(_ batchSize: Int) -> Dataset { - return Dataset( - _handle: #tfop( - "BatchDataset", _handle, Tensor(Int64(batchSize)), - output_types$dtype: Element._typeList, - output_shapes: Element._unknownShapeList - ) - ) - } -} - -/// The type that allows iteration over a dataset's elements. 
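Taken together, the `Dataset` API above supports small eager input pipelines. A usage sketch; the `randomNormal` initializer and shapes are illustrative assumptions:

```swift
// Slice 100 vectors of 4 floats into a dataset, transform, and batch.
let samples = Tensor<Float>(randomNormal: [100, 4])  // assumed initializer
let pipeline = Dataset(elements: samples)            // TensorSliceDataset
  .map { $0 * 2 }                                    // per-element transform
  .batched(10)
for batch in pipeline {
  print(batch.shape)                                 // [10, 4]
}
```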
-@_fixed_layout -public struct DatasetIterator { - @usableFromInline let _handle: ResourceHandle - - @usableFromInline @inline(__always) - internal init(_handle: ResourceHandle) { - self._handle = _handle - } -} - -extension DatasetIterator : IteratorProtocol { - /// Advances to the next element and returns it, or `nil` if no next element - /// exists. - @inlinable @inline(__always) - public mutating func next() -> Element? { - let optional: VariantHandle = - #tfop("IteratorGetNextAsOptional", _handle, - output_types$dtype: Element._typeList, - output_shapes: Element._unknownShapeList) - guard _TFGetScalarOrDie(#tfop("OptionalHasValue", optional)) else { - return nil - } - return #tfop("OptionalGetValue", optional, - output_types$dtype: Element._typeList, - output_shapes: Element._unknownShapeList) as Element - } -} - -/// A 2-tuple-like struct that conforms to TensorGroup that represents a tuple -/// of 2 types conforming to TensorGroup. -@_fixed_layout -public struct Zip2TensorGroup : TensorGroup { - public var first: T - public var second: U - - public init(_ first: T, _ second: U) { - self.first = first - self.second = second - } -} - -// TODO(SR-9156): This does not work in graph mode. -@inlinable @inline(__always) -public func zip( - _ dataset1: Dataset, _ dataset2: Dataset -) -> Dataset> { - let handle: VariantHandle = #tfop( - "ZipDataset", Zip2TensorGroup(dataset1._handle, dataset2._handle), - output_types$dtype: Zip2TensorGroup._typeList, - output_shapes: Zip2TensorGroup._unknownShapeList) - return Dataset(_handle: handle) -} diff --git a/stdlib/public/TensorFlow/ExecuteOp.swift.gyb b/stdlib/public/TensorFlow/ExecuteOp.swift.gyb deleted file mode 100644 index 92e04ed5319f9..0000000000000 --- a/stdlib/public/TensorFlow/ExecuteOp.swift.gyb +++ /dev/null @@ -1,48 +0,0 @@ -//===-- ExecuteOp.swift.gyb -----------------------------------*- swift -*-===// -// -// This source file is part of the Swift.org open source project -// -// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors -// Licensed under Apache License v2.0 with Runtime Library Exception -// -// See https://swift.org/LICENSE.txt for license information -// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors -// -//===----------------------------------------------------------------------===// -// -// This file contains _TFCExecuteOp which allows dispatching an op and -// returning an arbitrary set of tensor-groups. -// -// TODO: A nice wrapper for TFEOp could possibly make this simpler to use. This -// may need to be extended in order to work with multiple tfops. -// -//===----------------------------------------------------------------------===// - -@usableFromInline -func _TFCExecuteOp(_ op: CTFEOp, _ s: CTFStatus) { - var count: Int32 = 0 - var unused: CTensorHandle? - _TFCEagerExecute(op, &unused, &count, s) - checkOk(s) -} - -%for n in range(1, 11): -// Calls _TFCEagerExecute under the hood and unpacks into TensorGroup conforming -// types. 
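The gyb loop below stamps out one `_TFCExecuteOp` overload per result arity from 1 through 10. For readability, roughly what the n = 2 instantiation expands to (hand-reconstructed from the template, so treat as approximate):

```swift
// Approximate n = 2 expansion of the _TFCExecuteOp template below.
@usableFromInline
func _TFCExecuteOp<T0 : TensorGroup, T1 : TensorGroup>(
  _ op: CTFEOp, _ s: CTFStatus
) -> (T0, T1) {
  var count: Int32 = T0._tensorHandleCount + T1._tensorHandleCount
  let buffer: UnsafeMutablePointer<CTensorHandle> =
    UnsafeMutablePointer.allocate(capacity: Int(count))
  defer { buffer.deallocate() }
  _TFCEagerExecute(op, UnsafeMutablePointer(buffer), &count, s)
  checkOk(s)
  // Each TensorGroup claims a contiguous slice of the result handles.
  let off0: Int32 = 0
  let off1: Int32 = off0 + T0._tensorHandleCount
  return (T0.init(_owning: buffer.advanced(by: Int(off0))),
          T1.init(_owning: buffer.advanced(by: Int(off1))))
}
```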
-@usableFromInline -func _TFCExecuteOp<${", ".join(["T" + str(i) + " : TensorGroup" for i in range(n)])}> - (_ op: CTFEOp, _ s: CTFStatus) - -> (${", ".join(["T" + str(i) for i in range(n)])}) { - - var count: Int32 = ${" + ".join(["T" + str(i) + "._tensorHandleCount" for i in range(n)])} - let buffer: UnsafeMutablePointer = - UnsafeMutablePointer.allocate(capacity: Int(count)) - defer { buffer.deallocate() } - _TFCEagerExecute(op, UnsafeMutablePointer(buffer), &count, s) - checkOk(s) -%for i in range(n): -let off${i}: Int32 = ${"0" if i == 0 else "off" + str(i - 1) + " + T" + str(i - 1) + "._tensorHandleCount"} -%end - return (${", ".join(["T" + str(i) + ".init(_owning: buffer.advanced(by: Int(off" + str(i) + ")))" for i in range(n)])}) -} -%end diff --git a/stdlib/public/TensorFlow/Gradients.swift b/stdlib/public/TensorFlow/Gradients.swift deleted file mode 100644 index fee63b347af06..0000000000000 --- a/stdlib/public/TensorFlow/Gradients.swift +++ /dev/null @@ -1,636 +0,0 @@ -//===-- Gradients.swift ---------------------------------------*- swift -*-===// -// -// This source file is part of the Swift.org open source project -// -// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors -// Licensed under Apache License v2.0 with Runtime Library Exception -// -// See https://swift.org/LICENSE.txt for license information -// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors -// -//===----------------------------------------------------------------------===// -// -// This file contains vector-Jacobian product (VJP) definitions for Tensor ops. -// -// Terminology: -// - originalValue (f): The function being differentiated, or the result of that -// function. -// - VJP (f'): The function as the result of differentiation, computing -// the vector-Jacobian products with respect to all arguments, or the result -// of that function. -// -// For more information, visit: -// https://en.wikipedia.org/wiki/Automatic_differentiation -// -// Every function in this file is the VJP of some corresponding function -// defined in Ops.swift, with respect to all arguments. The attribute -// '@differentiable(vjp: ...)' is used to register a function's VJP. The -// automatic differentiation pass identifies these VJPs and chains them -// together to produce arbitrary differentiable programs. -// -// NOTE: -// - Currently, we do not want to expose VJP functions to users. The name of -// each VJP function should start with an underscore. -// -// TODO: -// - Fix VJPs for broadcasting ops (need to perform reduction). 
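For orientation, both operator styles defined in this file (method-style on `Differentiable` and the free functions) drive the same pullback machinery, seeded with `Tensor(1)`. A sketch:

```swift
let x = Tensor<Float>([1, 2, 3])
let g1 = x.gradient { x in (x * x).sum() }       // method style
let g2 = gradient(at: x) { x in (x * x).sum() }  // free-function style
// g1 == g2 == [2.0, 4.0, 6.0]
```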
-// -//===----------------------------------------------------------------------===// - -infix operator .== : ComparisonPrecedence -infix operator .> : ComparisonPrecedence - -//===----------------------------------------------------------------------===// -// Method-style differential operators -//===----------------------------------------------------------------------===// - -public extension Differentiable { - @inlinable - func gradient( - in f: @differentiable (Self) -> Tensor - ) -> CotangentVector { - return self.pullback(in: f)(Tensor(1)) - } - - @inlinable - func valueWithGradient( - in f: @differentiable (Self) -> Tensor - ) -> (value: Tensor, gradient: CotangentVector) { - let (y, pb) = self.valueWithPullback(in: f) - return (y, pb(Tensor(1))) - } - - @inlinable - func gradient( - at x: T, in f: @differentiable (Self, T) -> Tensor - ) -> (CotangentVector, T.CotangentVector) { - return self.pullback(at: x, in: f)(Tensor(1)) - } - - @inlinable - func valueWithGradient( - at x: T, in f: @differentiable (Self, T) -> Tensor - ) -> (value: Tensor, gradient: (CotangentVector, T.CotangentVector)) { - let (y, pb) = self.valueWithPullback(at: x, in: f) - return (y, pb(Tensor(1))) - } -} - -//===----------------------------------------------------------------------===// -// Free-function-style differential operators -//===----------------------------------------------------------------------===// - -// Value with gradient - -@inlinable -public func valueWithGradient( - at x: T, in f: @differentiable (T) -> Tensor -) -> (value: Tensor, gradient: T.CotangentVector) -where T : Differentiable, R : TensorFlowFloatingPoint { - let (y, pullback) = valueWithPullback(at: x, in: f) - return (y, pullback(Tensor(1))) -} - -@inlinable -public func valueWithGradient( - at x: T, _ y: U, in f: @differentiable (T, U) -> Tensor -) -> (value: Tensor, gradient: (T.CotangentVector, U.CotangentVector)) - where T : Differentiable, U : Differentiable, - R : TensorFlowFloatingPoint { - let (y, pullback) = valueWithPullback(at: x, y, in: f) - return (y, pullback(Tensor(1))) -} - -@inlinable -public func valueWithGradient( - at x: T, _ y: U, _ z: V, in f: @differentiable (T, U, V) -> Tensor -) -> (value: Tensor, - gradient: (T.CotangentVector, U.CotangentVector, V.CotangentVector)) - where T : Differentiable, U : Differentiable, V : Differentiable, - R : TensorFlowFloatingPoint { - let (y, pullback) = valueWithPullback(at: x, y, z, in: f) - return (y, pullback(Tensor(1))) -} - -// Value with gradient (curried) - -@inlinable -public func valueWithGradient( - of f: @escaping @differentiable (T) -> Tensor -) -> (T) -> (value: Tensor, gradient: T.CotangentVector) - where T : Differentiable, R : TensorFlowFloatingPoint { - return { x in valueWithGradient(at: x, in: f) } -} - -@inlinable -public func valueWithGradient( - of f: @escaping @differentiable (T, U) -> Tensor -) -> (T, U) - -> (value: Tensor, gradient: (T.CotangentVector, U.CotangentVector)) - where T : Differentiable, U : Differentiable, - R : TensorFlowFloatingPoint { - return { x, y in valueWithGradient(at: x, y, in: f) } -} - -@inlinable -public func valueWithGradient( - of f: @escaping @differentiable (T, U, V) -> Tensor -) -> (T, U, V) - -> (value: Tensor, - gradient: (T.CotangentVector, U.CotangentVector, V.CotangentVector)) - where T : Differentiable, U : Differentiable, V : Differentiable, - R : TensorFlowFloatingPoint { - return { x, y, z in valueWithGradient(at: x, y, z, in: f) } -} - -// Gradient - -@inlinable -public func gradient( - at x: T, in f: 
@differentiable (T) -> Tensor -) -> T.CotangentVector - where T : Differentiable, R : TensorFlowFloatingPoint { - return pullback(at: x, in: f)(Tensor(1)) -} - -@inlinable -public func gradient( - at x: T, _ y: U, in f: @differentiable (T, U) -> Tensor -) -> (T.CotangentVector, U.CotangentVector) - where T : Differentiable, U : Differentiable, - R : TensorFlowFloatingPoint { - return pullback(at: x, y, in: f)(Tensor(1)) -} - -@inlinable -public func gradient( - at x: T, _ y: U, _ z: V, in f: @differentiable (T, U, V) -> Tensor -) -> (T.CotangentVector, U.CotangentVector, V.CotangentVector) - where T : Differentiable, U : Differentiable, V : Differentiable, - R : TensorFlowFloatingPoint { - return pullback(at: x, y, z, in: f)(Tensor(1)) -} - -// Gradient (curried) - -@inlinable -public func gradient( - of f: @escaping @differentiable (T) -> Tensor -) -> (T) -> T.CotangentVector - where T : Differentiable, R : TensorFlowFloatingPoint { - return { x in gradient(at: x, in: f) } -} - -@inlinable -public func gradient( - of f: @escaping @differentiable (T, U) -> Tensor -) -> (T, U) -> (T.CotangentVector, U.CotangentVector) - where T : Differentiable, U : Differentiable, - R : TensorFlowFloatingPoint { - return { x, y in gradient(at: x, y, in: f) } -} - -@inlinable -public func gradient( - of f: @escaping @differentiable (T, U, V) -> Tensor -) -> (T, U, V) -> (T.CotangentVector, U.CotangentVector, V.CotangentVector) - where T : Differentiable, U : Differentiable, V : Differentiable, - R : TensorFlowFloatingPoint { - return { x, y, z in gradient(at: x, y, z, in: f) } -} - -//===----------------------------------------------------------------------===// -// Elementwise binary -//===----------------------------------------------------------------------===// - -extension Tensor where Scalar : TensorFlowFloatingPoint { - @inlinable - static func _vjpAdd( - lhs: Tensor, rhs: Tensor - ) -> (Tensor, (Tensor) -> (Tensor, Tensor)) { - return (lhs + rhs, { - [lhsShape = lhs.shapeTensor, rhsShape = rhs.shapeTensor] v in - return (v.unbroadcast(toShape: lhsShape), v.unbroadcast(toShape: rhsShape)) - }) - } - - @inlinable - static func _vjpSubtract( - lhs: Tensor, rhs: Tensor - ) -> (Tensor, (Tensor) -> (Tensor, Tensor)) { - return (lhs - rhs, { - [lhsShape = lhs.shapeTensor, rhsShape = rhs.shapeTensor] v in - return (v.unbroadcast(toShape: lhsShape), - -v.unbroadcast(toShape: rhsShape)) - }) - } - - @inlinable - static func _vjpMultiply( - lhs: Tensor, rhs: Tensor - ) -> (Tensor, (Tensor) -> (Tensor, Tensor)) { - return (lhs * rhs, { - [lhsShape = lhs.shapeTensor, rhsShape = rhs.shapeTensor] v in - ((rhs * v).unbroadcast(toShape: lhsShape), - (lhs * v).unbroadcast(toShape: rhsShape)) - }) - } - - @inlinable - static func _vjpDivide( - lhs: Tensor, rhs: Tensor - ) -> (Tensor, (Tensor) -> (Tensor, Tensor)) { - return (lhs / rhs, { - [lhsShape = lhs.shapeTensor, rhsShape = rhs.shapeTensor] v in - ((v / rhs).unbroadcast(toShape: lhsShape), - ((-lhs) / rhs.squared() * v).unbroadcast(toShape: rhsShape)) - }) - } -} - -extension Tensor where Scalar : TensorFlowFloatingPoint { - @inlinable - static func _vjpAdd( - lhs: Tensor, rhs: Scalar - ) -> (Tensor, (Tensor) -> (Tensor, Scalar)) { - return (lhs + rhs, { v in (v, v.sum().scalarized()) }) - } - - @inlinable - static func _vjpAdd( - lhs: Scalar, rhs: Tensor - ) -> (Tensor, (Tensor) -> (Scalar, Tensor)) { - return (lhs + rhs, { v in (v.sum().scalarized(), v) }) - } - - @inlinable - static func _vjpSubtract( - lhs: Tensor, rhs: Scalar - ) -> (Tensor, (Tensor) -> 
(Tensor, Scalar)) { - return (lhs - rhs, { v in (v, 0 - v.sum().scalarized()) }) - } - - @inlinable - static func _vjpSubtract( - lhs: Scalar, rhs: Tensor - ) -> (Tensor, (Tensor) -> (Scalar, Tensor)) { - return (lhs - rhs, { v in (v.sum().scalarized(), 0 - v) }) - } - - @inlinable - static func _vjpMultiply( - lhs: Tensor, rhs: Scalar - ) -> (Tensor, (Tensor) -> (Tensor, Scalar)) { - return (lhs * rhs, { v in (v * rhs, (v * lhs).sum().scalarized()) }) - } - - @inlinable - static func _vjpMultiply( - lhs: Scalar, rhs: Tensor - ) -> (Tensor, (Tensor) -> (Scalar, Tensor)) { - return (lhs * rhs, { v in ((v * rhs).sum().scalarized(), v * lhs) }) - } - - @inlinable - static func _vjpDivide( - lhs: Tensor, rhs: Scalar - ) -> (Tensor, (Tensor) -> (Tensor, Scalar)) { - return (lhs / rhs, { v in - (v / rhs, (v * (0 - lhs) / Tensor(rhs).squared()).sum().scalarized()) - }) - } - - @inlinable - static func _vjpDivide( - lhs: Scalar, rhs: Tensor - ) -> (Tensor, (Tensor) -> (Scalar, Tensor)) { - return (lhs / rhs, { v in - ((v / rhs).sum().scalarized(), v * -lhs / rhs.squared()) - }) - } -} - -@inlinable -func _vjpMinMaxHelper( - _ x: Tensor, _ y: Tensor, originalValue: Tensor, vector: Tensor -) -> (Tensor, Tensor) { - let denom = 1 + Tensor(x .== y) - let dfdx = vector * Tensor(x .== originalValue) / denom - let dfdy = vector * Tensor(y .== originalValue) / denom - return (dfdx.unbroadcast(like: x), dfdy.unbroadcast(like: y)) -} - -@inlinable -func _vjpMax( - _ x: Tensor, _ y: Tensor -) -> (Tensor, (Tensor) -> (Tensor, Tensor)) { - let value = max(x, y) - return (value, - { v in _vjpMinMaxHelper(x, y, originalValue: value, vector: v) }) -} - -@inlinable -func _vjpMin( - _ x: Tensor, _ y: Tensor -) -> (Tensor, (Tensor) -> (Tensor, Tensor)) { - let value = min(x, y) - return (value, - { v in _vjpMinMaxHelper(x, y, originalValue: value, vector: v) }) -} - -@inlinable -func _vjpPow( - _ x: Tensor, _ y: Tensor -) -> (Tensor, (Tensor) -> (Tensor, Tensor)) { - let value = pow(x, y) - return (value, { v in - ((v * y * pow(x, y-1)).unbroadcast(like: x), - (v * log(x) * value).unbroadcast(like: y)) - }) -} - -//===----------------------------------------------------------------------===// -// Elementwise unary -//===----------------------------------------------------------------------===// - -extension Tensor where Scalar : TensorFlowFloatingPoint { - @inlinable - static func _vjpNegate(_ x: Tensor) -> (Tensor, (Tensor) -> Tensor) { - return (-x, { v in -v }) - } -} - -@inlinable -func _vjpAbs( - _ x: Tensor -) -> (Tensor, (Tensor) -> Tensor) { - let sign = Raw.sign(x) - return (abs(x), { v in v * sign }) -} - -@inlinable -func _vjpLog( - _ x: Tensor -) -> (Tensor, (Tensor) -> Tensor) { - return (log(x), { v in v / x }) -} - -@inlinable -func _vjpSin( - _ x: Tensor -) -> (Tensor, (Tensor) -> Tensor) { - return (sin(x), { v in v * cos(x) }) -} - -@inlinable -func _vjpCos( - _ x: Tensor -) -> (Tensor, (Tensor) -> Tensor) { - return (cos(x), { v in -v * sin(x) }) -} - -@inlinable -func _vjpTan( - _ x: Tensor -) -> (Tensor, (Tensor) -> Tensor) { - let value = tan(x) - return (value, { v in v * (1 + value.squared()) }) -} - -@inlinable -func _vjpSinh( - _ x: Tensor -) -> (Tensor, (Tensor) -> Tensor) { - return (sinh(x), { v in v * cosh(x) }) -} - -@inlinable -func _vjpCosh( - _ x: Tensor -) -> (Tensor, (Tensor) -> Tensor) { - return (cosh(x), { v in v * sinh(x) }) -} - -@inlinable -func _vjpTanh( - _ x: Tensor -) -> (Tensor, (Tensor) -> Tensor) { - let value = tanh(x) - return (value, { v in v * (1 - 
value.squared()) }) -} - -@inlinable -func _vjpExp( - _ x: Tensor -) -> (Tensor, (Tensor) -> Tensor) { - let value = exp(x) - return (value, { v in value * v }) -} - -@inlinable -func _vjpCeil( - _ x: Tensor -) -> (Tensor, (Tensor) -> Tensor) { - return (ceil(x), { _ in Tensor(0).broadcast(like: x) }) -} - -@inlinable -func _vjpFloor( - _ x: Tensor -) -> (Tensor, (Tensor) -> Tensor) { - return (floor(x), { _ in Tensor(0).broadcast(like: x) }) -} - -@inlinable -func _vjpSqrt( - _ x: Tensor -) -> (Tensor, (Tensor) -> Tensor) { - let value = sqrt(x) - return (value, { v in v / (2 * value) }) -} - -@inlinable -func _vjpRsqrt( - _ x: Tensor -) -> (Tensor, (Tensor) -> Tensor) { - let value = rsqrt(x) - return (value, { v in -v / 2 * value }) -} - -@inlinable -func _vjpLogSoftmax( - _ x: Tensor -) -> (Tensor, (Tensor) -> Tensor) { - let value = logSoftmax(x) - return (value, { v in - v - v.sum(alongAxes: -1) * exp(value) - }) -} - -extension Tensor where Scalar : TensorFlowFloatingPoint { - @inlinable - func _vjpSquared() -> (Tensor, (Tensor) -> Tensor) { - return (squared(), { 2 * self * $0 }) - } -} - -//===----------------------------------------------------------------------===// -// Linear algebra -//===----------------------------------------------------------------------===// - -@inlinable -func _vjpMatmul( - _ lhs: Tensor, _ rhs: Tensor -) -> (Tensor, (Tensor) -> (Tensor, Tensor)) { - let value = matmul(lhs, rhs) - return (value, { v in - return (matmul(v, rhs.transposed()), matmul(lhs.transposed(), v)) - }) -} - -// TODO: We have to define a custom VJP on • because AD can't yet -// differentiate generic methods. After AD can differentiate generic methods, -// remove the custom VJP. -extension Tensor where Scalar : TensorFlowFloatingPoint { - @inlinable - static func _vjpMatmulOperator( - lhs: Tensor, rhs: Tensor - ) -> (Tensor, (Tensor) -> (Tensor, Tensor)) { - return _vjpMatmul(lhs, rhs) - } - - @inlinable - func _vjpTransposed( - withPermutations permutations: Tensor - ) -> (Tensor, (Tensor) -> Tensor) { - let value = transposed(withPermutations: permutations) - return (value, { $0.transposed(withPermutations: permutations) }) - } - - @inlinable - func _vjpTransposed( - withPermutations permutations: [Int] - ) -> (Tensor, (Tensor) -> Tensor) { - let value = transposed(withPermutations: permutations) - return (value, { $0.transposed(withPermutations: permutations) }) - } - - @inlinable - func _vjpTransposed( - withPermutations permutations: Int... 
- ) -> (Tensor, (Tensor) -> Tensor) { - let value = transposed(withPermutations: permutations) - return (value, { $0.transposed(withPermutations: permutations) }) - } - - @inlinable - func _vjpTransposed() -> (Tensor, (Tensor) -> Tensor) { - return (transposed(), { $0.transposed() }) - } -} - -//===----------------------------------------------------------------------===// -// Shape transformations -//===----------------------------------------------------------------------===// - -extension Tensor where Scalar : TensorFlowFloatingPoint { - @inlinable - func _vjpReshaped( - toShape newShape: Tensor - ) -> (Tensor, (Tensor) -> Tensor) { - let value = reshaped(toShape: newShape) - return (value, { [shape = shapeTensor] v in - v.reshaped(toShape: shape) - }) - } - - @inlinable - func _vjpSqueezingShape(at axes: [Int]) -> (Tensor, (Tensor) -> Tensor) { - let value = squeezingShape(at: axes) - return (value, { [shape = shapeTensor] v in - v.reshaped(toShape: shape) - }) - } - - @inlinable - func _vjpExpandingShape(at axes: [Int]) -> (Tensor, (Tensor) -> Tensor) { - let value = self.expandingShape(at: axes) - return (value, { v in - v.squeezingShape(at: axes) - }) - } -} - -//===----------------------------------------------------------------------===// -// Reduction -//===----------------------------------------------------------------------===// - -extension Tensor where Scalar : TensorFlowFloatingPoint { - @inlinable - func _vjpSum(alongAxes axes: Tensor) -> (Tensor, (Tensor) -> Tensor) { - let value = sum(alongAxes: axes) - return (value, { [shape = shapeTensor] in $0.broadcast(toShape: shape) }) - } - - @inlinable - func _vjpSum( - squeezingAxes axes: Tensor - ) -> (Tensor, (Tensor) -> Tensor) { - let value = sum(squeezingAxes: axes) - return (value, { [shape = shapeTensor] v in - let unsqueezed = v.expandingShape(at: axes.scalars.map { Int($0) }) - return unsqueezed.broadcast(toShape: shape) - }) - } - - @inlinable - func _vjpMean(alongAxes axes: Tensor) -> (Tensor, (Tensor) -> Tensor) { - let value = mean(alongAxes: axes) - let count = Raw.gather(params: shapeTensor, indices: axes).product() - return (value, { [shape = shapeTensor] in - $0.broadcast(toShape: shape) / Tensor(count) - }) - } - - @inlinable - func _vjpMean( - squeezingAxes axes: Tensor - ) -> (Tensor, (Tensor) -> Tensor) { - let value = mean(squeezingAxes: axes) - let count = Raw.gather(params: shapeTensor, indices: axes).product() - return (value, { [shape = shapeTensor] v in - let unsqueezed = v.expandingShape(at: axes.scalars.map { Int($0) }) - return unsqueezed.broadcast(toShape: shape) / Tensor(count) - }) - } -} - -//===----------------------------------------------------------------------===// -// Composite math -//===----------------------------------------------------------------------===// - -@inlinable -func _vjpSigmoid( - _ x: Tensor -) -> (Tensor, (Tensor) -> Tensor) { - let value = sigmoid(x) - return (value, { v in Raw.sigmoidGrad(value, dy: v) }) -} - -@inlinable -func _vjpSoftmax( - _ x: Tensor -) -> (Tensor, (Tensor) -> Tensor) { - let value = softmax(x) - return (value, { v in - let sumChannels = (v * value).sum(alongAxes: -1) - return (v - sumChannels) * value - }) -} - -@inlinable -func _vjpRelu( - _ x: Tensor -) -> (Tensor, (Tensor) -> Tensor) { - return (relu(x), { v in Tensor(x .> 0) * v }) -} diff --git a/stdlib/public/TensorFlow/Ops.swift b/stdlib/public/TensorFlow/Ops.swift deleted file mode 100644 index b3e7be2a665f6..0000000000000 --- a/stdlib/public/TensorFlow/Ops.swift +++ /dev/null @@ -1,1995 
+0,0 @@ -//===-- Ops.swift ------------------------------------------*- swift -*-===// -// -// This source file is part of the Swift.org open source project -// -// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors -// Licensed under Apache License v2.0 with Runtime Library Exception -// -// See https://swift.org/LICENSE.txt for license information -// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors -// -//===----------------------------------------------------------------------===// -// -// This file contains definitions of most tensor operations. -// -//===----------------------------------------------------------------------===// - -//===----------------------------------------------------------------------===// -// Ops and Convenience Methods -//===----------------------------------------------------------------------===// -// -// The majority of the Tensor API is implemented in terms of 'ops' that are -// partitioned out to the TensorFlow graph when the compiler runs. These -// ops are intentially designed to reflect TensorFlow ops, but provide nicer -// Swift syntax for accessing them. In addition to the core ops themselves, -// we also define some helper function wrappers, e.g. to make things symmetric -// and generally feel nice to use. -// -// The ops themselves are defined by the primitive #tfop(...) syntax, here are -// some examples: -// result = #tfop("Add", lhs, rhs) -// result = #tfop("Const", dtype: Float.self, value$tensor: 4.0) -// -// The first parameter to this syntax is the TensorFlow op name as a string. -// After that, the inputs are specified, and then attributes are specified -// with their name as the keyword argument. -// -// Inputs and outputs must be of TensorHandle, ResourceHandle, or VariantHandle -// type. These are magic types known to the compiler. -// - -infix operator ++ : AdditionPrecedence - -infix operator .< : ComparisonPrecedence -infix operator .<= : ComparisonPrecedence -infix operator .>= : ComparisonPrecedence -infix operator .> : ComparisonPrecedence -infix operator .== : ComparisonPrecedence -infix operator .!= : ComparisonPrecedence -infix operator .= - -// TODO: -// - Consider explicit broadcasting for elementwise binary ops when -// scalarization and rank getter are implemented. - -//===----------------------------------------------------------------------===// -// Scalar type cast -//===----------------------------------------------------------------------===// - -public extension Tensor where Scalar : Numeric { - /// Perform an element-wise type conversion from a `Bool` tensor. - @inlinable @inline(__always) - init(_ other: Tensor) { - self = Raw.cast(other) - } -} - -//===----------------------------------------------------------------------===// -// Additive group -//===----------------------------------------------------------------------===// - -extension Tensor : AdditiveArithmetic where Scalar : Numeric { - /// A scalar zero tensor. - @inlinable - public static var zero: Tensor { - @inline(__always) - get { - return Tensor(zeros: []) - } - } - - /// Adds two tensors and produces their sum. - /// - Note: `+` supports broadcasting. - @inlinable @inline(__always) - @differentiable( - vjp: _vjpAdd(lhs:rhs:) - where Scalar : TensorFlowFloatingPoint - ) - public static func + (lhs: Tensor, rhs: Tensor) -> Tensor { - return Raw.add(lhs, rhs) - } - - /// Subtracts one tensor from another and produces their difference. - /// - Note: `-` supports broadcasting. 
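The scalar overloads that follow all reduce to tensor-tensor ops by promoting the scalar operand, so arithmetic broadcasts in the usual way. A sketch using the operators in this extension:

```swift
let m = Tensor<Float>([[1, 2], [3, 4]])
print(m + 10)  // [[11.0, 12.0], [13.0, 14.0]]; scalar promoted via Tensor(rhs)
print(1 - m)   // [[0.0, -1.0], [-2.0, -3.0]]
var acc = m
acc += m       // compound assignment, defined below in this extension
```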
- @inlinable @inline(__always) - @differentiable( - vjp: _vjpSubtract(lhs:rhs:) - where Scalar : TensorFlowFloatingPoint - ) - public static func - (lhs: Tensor, rhs: Tensor) -> Tensor { - return Raw.sub(lhs, rhs) - } -} - -//===----------------------------------------------------------------------===// -// Vector space -//===----------------------------------------------------------------------===// - -extension Tensor : VectorNumeric where Scalar : Numeric { - /// Multiplies the scalar with every scalar of the tensor and produces the - /// product. - @inlinable @inline(__always) - @differentiable( - vjp: _vjpMultiply(lhs:rhs:) - where Scalar : TensorFlowFloatingPoint - ) - public static func * (lhs: Scalar, rhs: Tensor) -> Tensor { - return Tensor(lhs) * rhs - } -} - -extension Tensor : ShapedVectorNumeric where Scalar : Numeric {} - -extension Tensor : Differentiable where Scalar : TensorFlowFloatingPoint { - public typealias TangentVector = Tensor - public typealias CotangentVector = Tensor - public typealias AllDifferentiableVariables = Tensor - @inlinable @inline(__always) - public func tangentVector(from cotangent: CotangentVector) -> TangentVector { - return cotangent - } -} - -//===----------------------------------------------------------------------===// -// Additional element-wise operators -//===----------------------------------------------------------------------===// - -public extension Tensor where Scalar : Numeric { - /// Adds the scalar to every scalar of the tensor and produces the sum. - @inlinable @inline(__always) - @differentiable(vjp: _vjpAdd(lhs:rhs:) where Scalar : TensorFlowFloatingPoint) - static func + (lhs: Scalar, rhs: Tensor) -> Tensor { - return Tensor(lhs) + rhs - } - - /// Adds the scalar to every scalar of the tensor and produces the sum. - @inlinable @inline(__always) - @differentiable(vjp: _vjpAdd(lhs:rhs:) where Scalar : TensorFlowFloatingPoint) - static func + (lhs: Tensor, rhs: Scalar) -> Tensor { - return lhs + Tensor(rhs) - } - - /// Subtracts the scalar from every scalar of the tensor and produces the - /// difference. - @inlinable @inline(__always) - @differentiable( - vjp: _vjpSubtract(lhs:rhs:) - where Scalar : TensorFlowFloatingPoint - ) - static func - (lhs: Scalar, rhs: Tensor) -> Tensor { - return Tensor(lhs) - rhs - } - - /// Subtracts the scalar from every scalar of the tensor and produces the - /// difference. - @inlinable @inline(__always) - @differentiable( - vjp: _vjpSubtract(lhs:rhs:) - where Scalar : TensorFlowFloatingPoint - ) - static func - (lhs: Tensor, rhs: Scalar) -> Tensor { - return lhs - Tensor(rhs) - } - - /// Adds two tensors and stores the result in the left-hand-side variable. - /// - Note: `+=` supports broadcasting. - @inlinable @inline(__always) - static func += (lhs: inout Tensor, rhs: Tensor) { - lhs = lhs + rhs - } - - /// Adds the scalar to every scalar of the tensor and stores the result in the - /// left-hand-side variable. - @inlinable @inline(__always) - static func += (lhs: inout Tensor, rhs: Scalar) { - lhs = lhs + rhs - } - - /// Subtracts the second tensor from the first and stores the result in the - /// left-hand-side variable. - /// - Note: `-=` supports broadcasting. - @inlinable @inline(__always) - static func -= (lhs: inout Tensor, rhs: Tensor) { - lhs = lhs - rhs - } - - /// Subtracts the scalar from every scalar of the tensor and stores the result - /// in the left-hand-side variable. 
- @inlinable @inline(__always) - static func -= (lhs: inout Tensor, rhs: Scalar) { - lhs = lhs - rhs - } - - /// Multiplies two tensors and produces their product. - /// - Note: `*` supports broadcasting. - @inlinable @inline(__always) - @differentiable( - vjp: _vjpMultiply(lhs:rhs:) - where Scalar : TensorFlowFloatingPoint - ) - static func * (lhs: Tensor, rhs: Tensor) -> Tensor { - return Raw.mul(lhs, rhs) - } - - /// Multiplies the scalar with every scalar of the tensor and produces the - /// product. - @inlinable @inline(__always) - @differentiable( - vjp: _vjpMultiply(lhs:rhs:) - where Scalar : TensorFlowFloatingPoint - ) - static func * (lhs: Tensor, rhs: Scalar) -> Tensor { - return lhs * Tensor(rhs) - } - - /// Multiplies two tensors and stores the result in the left-hand-side - /// variable. - /// - Note: `*=` supports broadcasting. - @inlinable @inline(__always) - static func *= (lhs: inout Tensor, rhs: Tensor) { - lhs = lhs * rhs - } - - @inlinable @inline(__always) - static func *= (lhs: inout Tensor, rhs: Scalar) { - lhs = lhs * rhs - } - - /// Returns the quotient of dividing the first tensor by the second. - /// - Note: `/` supports broadcasting. - @inlinable @inline(__always) - @differentiable( - vjp: _vjpDivide(lhs:rhs:) - where Scalar : TensorFlowFloatingPoint - ) - static func / (lhs: Tensor, rhs: Tensor) -> Tensor { - return Raw.div(lhs, rhs) - } - - /// Returns the quotient of dividing the scalar by the tensor, broadcasting - /// the scalar. - @inlinable @inline(__always) - @differentiable( - vjp: _vjpDivide(lhs:rhs:) - where Scalar : TensorFlowFloatingPoint - ) - static func / (lhs: Scalar, rhs: Tensor) -> Tensor { - return Tensor(lhs) / rhs - } - - /// Returns the quotient of dividing the tensor by the scalar, broadcasting - /// the scalar. - @inlinable @inline(__always) - @differentiable( - vjp: _vjpDivide(lhs:rhs:) - where Scalar : TensorFlowFloatingPoint - ) - static func / (lhs: Tensor, rhs: Scalar) -> Tensor { - return lhs / Tensor(rhs) - } - - /// Divides the first tensor by the second and stores the quotient in the - /// left-hand-side variable. - @inlinable @inline(__always) - static func /= (lhs: inout Tensor, rhs: Tensor) { - lhs = lhs / rhs - } - - /// Divides the tensor by the scalar, broadcasting the scalar, and stores the - /// quotient in the left-hand-side variable. - @inlinable @inline(__always) - static func /= (lhs: inout Tensor, rhs: Scalar) { - lhs = lhs / rhs - } - - /// Returns the remainder of dividing the first tensor by the second. - /// - Note: `%` supports broadcasting. - @inlinable @inline(__always) - static func % (lhs: Tensor, rhs: Tensor) -> Tensor { - return Raw.mod(lhs, rhs) - } - - /// Returns the remainder of dividing the tensor by the scalar, broadcasting - /// the scalar. - @inlinable @inline(__always) - static func % (lhs: Tensor, rhs: Scalar) -> Tensor { - return lhs % Tensor(rhs) - } - - /// Returns the remainder of dividing the scalar by the tensor, broadcasting - /// the scalar. - @inlinable @inline(__always) - static func % (lhs: Scalar, rhs: Tensor) -> Tensor { - return Tensor(lhs) % rhs - } - - /// Divides the first tensor by the second and stores the remainder in the - /// left-hand-side variable. - @inlinable @inline(__always) - static func %= (lhs: inout Tensor, rhs: Tensor) { - lhs = lhs % rhs - } - - /// Divides the tensor by the scalar and stores the remainder in the - /// left-hand-side variable. 
- @inlinable @inline(__always) - static func %= (lhs: inout Tensor, rhs: Scalar) { - lhs = lhs % rhs - } -} - -//===----------------------------------------------------------------------===// -// Linear algebra -//===----------------------------------------------------------------------===// - -/// Performs matrix multiplication with another tensor and produces the -/// result. -@inlinable @inline(__always) -@differentiable( - vjp: _vjpMatmul(_:_:) - where Scalar : TensorFlowFloatingPoint -) -public func matmul( - _ lhs: Tensor, _ rhs: Tensor -) -> Tensor { - // Default arguments specified explicitly to avoid "external declarations of - // SILFunctions with shared visibility is not allowed" SILVerifier error in - // "tests/AutoDiff/tensor_autodiff_runtime.swift". - return Raw.matMul(lhs, rhs, transposeA: false, transposeB: false) -} - -infix operator • : MultiplicationPrecedence - -public extension Tensor where Scalar : Numeric { - // TODO: We have to define a custom VJP on • because AD can't yet - // differentiate generic methods. After AD can differentiate generic methods, - // remove the custom VJP. - - /// Performs matrix multiplication between two tensors and produces the - /// result. - @inlinable @inline(__always) - @differentiable( - vjp: _vjpMatmulOperator(lhs:rhs:) - where Scalar : TensorFlowFloatingPoint - ) - static func • (lhs: Tensor, rhs: Tensor) -> Tensor { - return matmul(lhs, rhs) - } -} - -//===----------------------------------------------------------------------===// -// Element-wise binary comparison -//===----------------------------------------------------------------------===// - -public extension Tensor where Scalar : Numeric & Comparable { - /// Computes `lhs < rhs` element-wise and returns a `Tensor` of Boolean - /// scalars. - @inlinable @inline(__always) - static func .< (lhs: Tensor, rhs: Tensor) -> Tensor { - return Raw.less(lhs, rhs) - } - - /// Computes `lhs <= rhs` element-wise and returns a `Tensor` of Boolean - /// scalars. - @inlinable @inline(__always) - static func .<= (lhs: Tensor, rhs: Tensor) -> Tensor { - return Raw.lessEqual(lhs, rhs) - } - - /// Computes `lhs > rhs` element-wise and returns a `Tensor` of Boolean - /// scalars. - @inlinable @inline(__always) - static func .> (lhs: Tensor, rhs: Tensor) -> Tensor { - return Raw.greater(lhs, rhs) - } - - /// Computes `lhs >= rhs` element-wise and returns a `Tensor` of Boolean - /// scalars. - @inlinable @inline(__always) - static func .>= (lhs: Tensor, rhs: Tensor) -> Tensor { - return Raw.greaterEqual(lhs, rhs) - } - - /// Computes `lhs < rhs` element-wise and returns a `Tensor` of Boolean - /// scalars. - /// - Note: `.<` supports broadcasting. - @inlinable @inline(__always) - static func .< (lhs: Scalar, rhs: Tensor) -> Tensor { - return Raw.less(Tensor(lhs), rhs) - } - - /// Computes `lhs <= rhs` element-wise and returns a `Tensor` of Boolean - /// scalars. - /// - Note: `.<=` supports broadcasting. - @inlinable @inline(__always) - static func .<= (lhs: Scalar, rhs: Tensor) -> Tensor { - return Raw.lessEqual(Tensor(lhs), rhs) - } - - /// Computes `lhs > rhs` element-wise and returns a `Tensor` of Boolean - /// scalars. - /// - Note: `.>` supports broadcasting. - @inlinable @inline(__always) - static func .> (lhs: Scalar, rhs: Tensor) -> Tensor { - return Raw.greater(Tensor(lhs), rhs) - } - - /// Computes `lhs >= rhs` element-wise and returns a `Tensor` of Boolean - /// scalars. - /// - Note: `.>=` supports broadcasting. 
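// [Editorial note] Illustrative sketch, not part of the original file: the
// dotted comparison operators in this section are element-wise and produce a
// `Tensor` of `Bool` scalars, rather than reducing to a single `Bool`:
//
//     let x = Tensor<Float>([1, 2, 3])
//     let mask = x .> 1.5   // [false, true, true]
//     mask.any()            // true (see the Bool reductions later in the file)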
- @inlinable @inline(__always) - static func .>= (lhs: Scalar, rhs: Tensor) -> Tensor { - return Raw.greaterEqual(Tensor(lhs), rhs) - } - - /// Computes `lhs < rhs` element-wise and returns a `Tensor` of Boolean - /// scalars. - /// - Note: `.<` supports broadcasting. - @inlinable @inline(__always) - static func .< (lhs: Tensor, rhs: Scalar) -> Tensor { - return Raw.less(lhs, Tensor(rhs)) - } - - /// Computes `lhs <= rhs` element-wise and returns a `Tensor` of Boolean - /// scalars. - /// - Note: `.<=` supports broadcasting. - @inlinable @inline(__always) - static func .<= (lhs: Tensor, rhs: Scalar) -> Tensor { - return Raw.lessEqual(lhs, Tensor(rhs)) - } - - /// Computes `lhs > rhs` element-wise and returns a `Tensor` of Boolean - /// scalars. - /// - Note: `.>` supports broadcasting. - @inlinable @inline(__always) - static func .> (lhs: Tensor, rhs: Scalar) -> Tensor { - return Raw.greater(lhs, Tensor(rhs)) - } - - /// Computes `lhs >= rhs` element-wise and returns a `Tensor` of Boolean - /// scalars. - /// - Note: `.>=` supports broadcasting. - @inlinable @inline(__always) - static func .>= (lhs: Tensor, rhs: Scalar) -> Tensor { - return Raw.greaterEqual(lhs, Tensor(rhs)) - } -} - -extension Tensor : Comparable where Scalar : Numeric & Comparable { - /// Returns a Boolean value indicating whether the value of the first argument - /// is lexicographically less than that of the second argument. - @inlinable @inline(__always) - public static func < (lhs: Tensor, rhs: Tensor) -> Bool { - return (lhs .< rhs).all() - } - - /// Returns a Boolean value indicating whether the value of the first argument - /// is lexicographically less than or equal to that of the second argument. - @inlinable @inline(__always) - public static func <= (lhs: Tensor, rhs: Tensor) -> Bool { - return (lhs .<= rhs).all() - } - - /// Returns a Boolean value indicating whether the value of the first argument - /// is lexicographically greater than that of the second argument. - @inlinable @inline(__always) - public static func > (lhs: Tensor, rhs: Tensor) -> Bool { - return (lhs .> rhs).all() - } - - /// Returns a Boolean value indicating whether the value of the first argument - /// is lexicographically greater than or equal to that of the second argument. - @inlinable @inline(__always) - public static func >= (lhs: Tensor, rhs: Tensor) -> Bool { - return (lhs .>= rhs).all() - } -} - -public extension Tensor where Scalar : Numeric & Comparable { - /// Returns a Boolean value indicating whether the value of the first argument - /// is lexicographically less than that of the second argument. - @inlinable @inline(__always) - static func < (lhs: Tensor, rhs: Scalar) -> Bool { - return (lhs .< rhs).all() - } - - /// Returns a Boolean value indicating whether the value of the first argument - /// is lexicographically less than or equal to that of the second argument. - @inlinable @inline(__always) - static func <= (lhs: Tensor, rhs: Scalar) -> Bool { - return (lhs .<= rhs).all() - } - - /// Returns a Boolean value indicating whether the value of the first argument - /// is lexicographically greater than that of the second argument. - @inlinable @inline(__always) - static func > (lhs: Tensor, rhs: Scalar) -> Bool { - return (lhs .> rhs).all() - } - - /// Returns a Boolean value indicating whether the value of the first argument - /// is lexicographically greater than or equal to that of the second argument. 
-  @inlinable @inline(__always)
-  static func >= (lhs: Tensor, rhs: Scalar) -> Bool {
-    return (lhs .>= rhs).all()
-  }
-}
-
-public extension Tensor where Scalar : Equatable {
-  /// Computes `lhs == rhs` element-wise and returns a `Tensor` of Boolean
-  /// scalars.
-  /// - Note: `.==` supports broadcasting.
-  @inlinable @inline(__always)
-  static func .==(lhs: Tensor, rhs: Tensor) -> Tensor<Bool> {
-    return Raw.equal(lhs, rhs)
-  }
-
-  /// Computes `lhs != rhs` element-wise and returns a `Tensor` of Boolean
-  /// scalars.
-  /// - Note: `.!=` supports broadcasting.
-  @inlinable @inline(__always)
-  static func .!=(lhs: Tensor, rhs: Tensor) -> Tensor<Bool> {
-    return Raw.notEqual(lhs, rhs)
-  }
-
-  /// Computes `lhs == rhs` element-wise and returns a `Tensor` of Boolean
-  /// scalars.
-  /// - Note: `.==` supports broadcasting.
-  @inlinable @inline(__always)
-  static func .==(lhs: Scalar, rhs: Tensor) -> Tensor<Bool> {
-    return Tensor(lhs) .== rhs
-  }
-
-  /// Computes `lhs != rhs` element-wise and returns a `Tensor` of Boolean
-  /// scalars.
-  /// - Note: `.!=` supports broadcasting.
-  @inlinable @inline(__always)
-  static func .!=(lhs: Scalar, rhs: Tensor) -> Tensor<Bool> {
-    return Tensor(lhs) .!= rhs
-  }
-
-  /// Computes `lhs == rhs` element-wise and returns a `Tensor` of Boolean
-  /// scalars.
-  /// - Note: `.==` supports broadcasting.
-  @inlinable @inline(__always)
-  static func .==(lhs: Tensor, rhs: Scalar) -> Tensor<Bool> {
-    return lhs .== Tensor(rhs)
-  }
-
-  /// Computes `lhs != rhs` element-wise and returns a `Tensor` of Boolean
-  /// scalars.
-  /// - Note: `.!=` supports broadcasting.
-  @inlinable @inline(__always)
-  static func .!=(lhs: Tensor, rhs: Scalar) -> Tensor<Bool> {
-    return lhs .!= Tensor(rhs)
-  }
-}
-
-infix operator ≈ : ComparisonPrecedence
-
-public extension Tensor where Scalar : FloatingPoint & Equatable {
-  /// Returns a `Tensor` of Boolean values indicating whether the elements of
-  /// `self` are approximately equal to those of `other`.
-  @inlinable @inline(__always)
-  func elementsApproximatelyEqual(_ other: Tensor,
-                                  tolerance: Double = 0.00001) -> Tensor<Bool> {
-    return Raw.approximateEqual(self, other, tolerance: tolerance)
-  }
-}
-
-public extension Tensor where Scalar == Bool {
-  /// Computes `!self` element-wise.
-  @inlinable @inline(__always)
-  func elementsLogicalNot() -> Tensor {
-    return Raw.logicalNot(self)
-  }
-
-  /// Computes `self && other` element-wise.
-  /// - Note: `&&` supports broadcasting.
-  @inlinable @inline(__always)
-  func elementsLogicalAnd(_ other: Tensor) -> Tensor {
-    return Raw.logicalAnd(self, other)
-  }
-
-  /// Computes `self && other` element-wise, broadcasting `other`.
-  @inlinable @inline(__always)
-  func elementsLogicalAnd(_ other: Scalar) -> Tensor {
-    return elementsLogicalAnd(Tensor(other))
-  }
-
-  /// Computes `self || other` element-wise.
-  @inlinable @inline(__always)
-  func elementsLogicalOr(_ other: Tensor) -> Tensor {
-    return Raw.logicalOr(self, other)
-  }
-
-  /// Computes `self || other` element-wise, broadcasting `other`.
-  @inlinable @inline(__always)
-  func elementsLogicalOr(_ other: Scalar) -> Tensor {
-    return elementsLogicalOr(Tensor(other))
-  }
-}
-
-//===----------------------------------------------------------------------===//
-// Transforms
-//===----------------------------------------------------------------------===//
-
-public extension Tensor {
-  /// Returns a transposed tensor, with dimensions permuted in the specified
-  /// order.
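// [Editorial note] Illustrative sketch of the transposition APIs declared
// here, not part of the original file:
//
//     let m = Tensor<Float>(shape: [2, 3], scalars: [1, 2, 3, 4, 5, 6])
//     m.transposed()                        // shape [3, 2]
//     m.transposed(withPermutations: 1, 0)  // same permutation, spelled out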
- @inlinable @inline(__always) - @differentiable( - wrt: self, vjp: _vjpTransposed(withPermutations:) - where Scalar : TensorFlowFloatingPoint - ) - func transposed( - withPermutations permutations: Tensor - ) -> Tensor { - return Raw.transpose(self, perm: permutations) - } - - /// Returns a transposed tensor, with dimensions permuted in the specified - /// order. - @inlinable @inline(__always) - @differentiable( - wrt: self, vjp: _vjpTransposed(withPermutations:) - where Scalar : TensorFlowFloatingPoint - ) - func transposed(withPermutations permutations: [Int]) -> Tensor { - let permutations = permutations.map(Int32.init) - return transposed(withPermutations: Tensor(permutations)) - } - - /// Returns a transposed tensor, with dimensions permuted in the specified - /// order. - @inlinable @inline(__always) - @differentiable( - wrt: self, vjp: _vjpTransposed(withPermutations:) - where Scalar : TensorFlowFloatingPoint - ) - func transposed(withPermutations permutations: Int...) -> Tensor { - return transposed(withPermutations: permutations) - } - - /// Returns a transposed tensor, with dimensions permuted in reverse order. - @inlinable @inline(__always) - @differentiable( - wrt: self, vjp: _vjpTransposed() - where Scalar : TensorFlowFloatingPoint - ) - func transposed() -> Tensor { - let defaultPermutations = rankTensor - 1 - Tensor( - rangeFrom: 0, to: Int32(rank), stride: 1 - ) - return transposed(withPermutations: Tensor(defaultPermutations)) - } -} - -public extension Tensor { - /// Returns a concatenated tensor of the given tensors. - /// - Precondition: The tensors must have the same dimensions, except for the - /// specified axis. - /// - Precondition: The axis must be in the range `-rank..], alongAxis axis: Int = 0) { - self = Raw.concatV2(tensors, axis: Tensor(Int32(axis))) - } - - /// Concatenates tensors along the specified axis. - /// - Precondition: The tensors must have the same dimensions, except for the - /// specified axis. - /// - Precondition: The axis must be in the range `-rank.. Tensor { - return Raw.concatV2([self, other], axis: Tensor(Int32(axis))) - } - - /// Concatenation operator. - /// - Note: `++` is a custom operator that does not exist in Swift, but does - /// in Haskell/Scala. Its addition is not an insignificant language change - /// and may be controversial. The existence/naming of `++` will be discussed - /// during a later API design phase. - @inlinable @inline(__always) - @differentiable(where Scalar : TensorFlowFloatingPoint) - static func ++ (lhs: Tensor, rhs: Tensor) -> Tensor { - return lhs.concatenated(with: rhs) - } -} - -internal extension Tensor where Scalar : TensorFlowFloatingPoint { - @inlinable @inline(__always) - func _vjpConcatenated(with other: Tensor, alongAxis axis: Int) - -> (Tensor, (Tensor) -> (Tensor, Tensor)) { - let idx = axis < 0 ? axis + rank : axis - let splits = Tensor([shapeTensor[idx], other.shapeTensor[idx]]) - return (concatenated(with: other, alongAxis: axis), { result in - let ret: (TensorHandle, TensorHandle) = #tfop("SplitV", - result, - splits, - Tensor(Int32(axis)), - num_split: Int64(2), - T$dtype: Scalar.tensorFlowDataType, - Tlen$dtype: Int32.tensorFlowDataType) - return (Tensor(handle: ret.0), Tensor(handle: ret.1)) - }) - } -} - -//===----------------------------------------------------------------------===// -// Element-wise math functions -//===----------------------------------------------------------------------===// - -// Export Glibc/Darwin math functions. 
We should not require users to import -// Foundation/Darwin/Glibc in order to use scalar math functions. -// -#if os(macOS) || os(iOS) || os(watchOS) || os(tvOS) -@_exported import Darwin.C -#else -@_exported import Glibc -#endif -// -// FIXME(rxwei): Scoped imports are not yet supported in parseable module -// interfaces, so `@_exported import` won't work. When that becomes supported, -// switch to `@_exported import` by removing `import Darwin.C/Glibc` above and -// uncommenting the following lines. In the meantime, consider using indirect -// wrappers for each function so that random libc symbols won't be leaked to -// users' code completion. -// -// #if os(macOS) || os(iOS) || os(watchOS) || os(tvOS) -// @_exported import func Darwin.C.sin -// @_exported import func Darwin.C.cos -// @_exported import func Darwin.C.tan -// @_exported import func Darwin.C.sinf -// @_exported import func Darwin.C.cosf -// @_exported import func Darwin.C.tanf -// @_exported import func Darwin.C.sinh -// @_exported import func Darwin.C.cosh -// @_exported import func Darwin.C.tanh -// @_exported import func Darwin.C.sinhf -// @_exported import func Darwin.C.coshf -// @_exported import func Darwin.C.tanhf -// @_exported import func Darwin.C.log -// @_exported import func Darwin.C.logf -// @_exported import func Darwin.C.exp -// @_exported import func Darwin.C.expf -// @_exported import func Darwin.C.pow -// @_exported import func Darwin.C.powf -// #else -// @_exported import func Glibc.sin -// @_exported import func Glibc.cos -// @_exported import func Glibc.tan -// @_exported import func Glibc.sinf -// @_exported import func Glibc.cosf -// @_exported import func Glibc.tanf -// @_exported import func Glibc.sinh -// @_exported import func Glibc.cosh -// @_exported import func Glibc.tanh -// @_exported import func Glibc.sinhf -// @_exported import func Glibc.coshf -// @_exported import func Glibc.tanhf -// @_exported import func Glibc.log -// @_exported import func Glibc.logf -// @_exported import func Glibc.exp -// @_exported import func Glibc.expf -// @_exported import func Glibc.pow -// @_exported import func Glibc.powf -// #endif - -public extension Tensor where Scalar : SignedNumeric { - /// Computes the negation of the specified tensor element-wise. - @inlinable @inline(__always) - @differentiable( - vjp: _vjpNegate(_:) - where Scalar : TensorFlowFloatingPoint - ) - static prefix func - (rhs: Tensor) -> Tensor { - return Raw.neg(rhs) - } -} - -/// Computes the absolute value of the specified tensor element-wise. -@inlinable @inline(__always) -@differentiable(vjp: _vjpAbs(_:) where T : TensorFlowFloatingPoint) -public func abs(_ x: Tensor) -> Tensor { - return Raw.abs(x) -} - -/// Computes the natural logarithm of the specified tensor element-wise. -@inlinable @inline(__always) -@differentiable(vjp: _vjpLog(_:) where T : TensorFlowFloatingPoint) -public func log(_ x: Tensor) -> Tensor { - return Raw.log(x) -} - -/// Computes `sin` of the specified tensor element-wise. -@inlinable @inline(__always) -@differentiable(vjp: _vjpSin(_:) where T : TensorFlowFloatingPoint) -public func sin(_ x: Tensor) -> Tensor { - return Raw.sin(x) -} - -/// Computes `cos` of the specified tensor element-wise. -@inlinable @inline(__always) -@differentiable(vjp: _vjpCos(_:) where T : TensorFlowFloatingPoint) -public func cos(_ x: Tensor) -> Tensor { - return Raw.cos(x) -} - -/// Computes `tan` of the specified tensor element-wise. 
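// [Editorial note] Illustrative sketch, not part of the original file: the
// free math functions in this section apply the corresponding TensorFlow op
// to every element, preserving the tensor's shape:
//
//     let x = Tensor<Float>([0, 1, 2])
//     let y = sqrt(exp(x))   // element-wise, still shape [3]
//     let z = pow(x, 2)      // the scalar exponent is broadcast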
-@inlinable @inline(__always) -@differentiable(vjp: _vjpTan(_:) where T : TensorFlowFloatingPoint) -public func tan(_ x: Tensor) -> Tensor { - return Raw.tan(x) -} - -/// Computes `sinh` of the specified tensor element-wise. -@inlinable @inline(__always) -@differentiable(vjp: _vjpSinh(_:) where T : TensorFlowFloatingPoint) -public func sinh(_ x: Tensor) -> Tensor { - return Raw.sinh(x) -} - -/// Computes `cosh` of the specified tensor element-wise. -@inlinable @inline(__always) -@differentiable(vjp: _vjpCosh(_:) where T : TensorFlowFloatingPoint) -public func cosh(_ x: Tensor) -> Tensor { - return Raw.cosh(x) -} - -/// Computes `tanh` of the specified tensor element-wise. -@inlinable @inline(__always) -@differentiable(vjp: _vjpTanh(_:) where T : TensorFlowFloatingPoint) -public func tanh(_ x: Tensor) -> Tensor { - return Raw.tanh(x) -} - -/// Computes the square root of the specified tensor element-wise. -@inlinable @inline(__always) -@differentiable(vjp: _vjpSqrt(_:) where T : TensorFlowFloatingPoint) -public func sqrt(_ x: Tensor) -> Tensor { - return Raw.sqrt(x) -} - -/// Computes the inverse square root of the specified tensor element-wise. -@inlinable @inline(__always) -@differentiable(vjp: _vjpRsqrt(_:) where T : TensorFlowFloatingPoint) -public func rsqrt(_ x: Tensor) -> Tensor { - return Raw.rsqrt(x) -} - -/// Computes `exp` of the specified tensor element-wise. -@inlinable @inline(__always) -@differentiable(vjp: _vjpExp(_:) where T : TensorFlowFloatingPoint) -public func exp(_ x: Tensor) -> Tensor { - return Raw.exp(x) -} - -/// Computes the ceiling of the specified tensor element-wise. -@inlinable @inline(__always) -@differentiable(vjp: _vjpCeil(_:) where T : TensorFlowFloatingPoint) -public func ceil(_ x: Tensor) -> Tensor { - return Raw.ceil(x) -} - -/// Computes the floor of the specified tensor element-wise. -@inlinable @inline(__always) -@differentiable(vjp: _vjpFloor(_:) where T : TensorFlowFloatingPoint) -public func floor(_ x: Tensor) -> Tensor { - return Raw.floor(x) -} - -/// Computes the power of the first tensor to the second tensor. -@inlinable @inline(__always) -@differentiable(vjp: _vjpPow(_:_:) where T : TensorFlowFloatingPoint) -public func pow(_ lhs: Tensor, _ rhs: Tensor) -> Tensor - where T : FloatingPoint { - return Raw.pow(lhs, rhs) -} - -/// Computes the power of the scalar to the tensor, broadcasting the scalar. -@inlinable @inline(__always) -// @differentiable(where T : TensorFlowFloatingPoint) -public func pow(_ lhs: T, _ rhs: Tensor) -> Tensor - where T : FloatingPoint { - return pow(Tensor(lhs), rhs) -} - -/// Computes the power of the tensor to the scalar, broadcasting the scalar. -@inlinable @inline(__always) -// @differentiable(where T : TensorFlowFloatingPoint) -public func pow(_ lhs: Tensor, _ rhs: T) -> Tensor - where T : FloatingPoint { - return pow(lhs, Tensor(rhs)) -} - -/// Computes the element-wise maximum of two tensors. -/// - Note: `max` supports broadcasting. -@inlinable @inline(__always) -@differentiable(vjp: _vjpMax(_:_:) where T : TensorFlowFloatingPoint) -public func max(_ lhs: Tensor, _ rhs: Tensor) -> Tensor - where T : Numeric & Comparable { - return Raw.maximum(lhs, rhs) -} - -/// Computes the element-wise maximum of the scalar and the tensor, broadcasting -/// the scalar. 
-@inlinable @inline(__always)
-// @differentiable(where T : TensorFlowFloatingPoint)
-public func max<T>(_ lhs: T, _ rhs: Tensor<T>) -> Tensor<T>
-  where T : Numeric & Comparable {
-  return max(Tensor(lhs), rhs)
-}
-
-/// Computes the element-wise maximum of the tensor and the scalar, broadcasting
-/// the scalar.
-@inlinable @inline(__always)
-// @differentiable(where T : TensorFlowFloatingPoint)
-public func max<T>(_ lhs: Tensor<T>, _ rhs: T) -> Tensor<T>
-  where T : Numeric & Comparable {
-  return max(lhs, Tensor(rhs))
-}
-
-/// Computes the element-wise minimum of two tensors.
-/// - Note: `min` supports broadcasting.
-@inlinable @inline(__always)
-@differentiable(vjp: _vjpMin(_:_:) where T : TensorFlowFloatingPoint)
-public func min<T>(_ lhs: Tensor<T>, _ rhs: Tensor<T>) -> Tensor<T>
-  where T : Numeric & Comparable {
-  return Raw.minimum(lhs, rhs)
-}
-
-/// Computes the element-wise minimum of the scalar and the tensor, broadcasting
-/// the scalar.
-@inlinable @inline(__always)
-// @differentiable(where T : TensorFlowFloatingPoint)
-public func min<T>(_ lhs: T, _ rhs: Tensor<T>) -> Tensor<T>
-  where T : Numeric & Comparable {
-  return min(Tensor(lhs), rhs)
-}
-
-/// Computes the element-wise minimum of the tensor and the scalar, broadcasting
-/// the scalar.
-@inlinable @inline(__always)
-// @differentiable(where T : TensorFlowFloatingPoint)
-public func min<T>(_ lhs: Tensor<T>, _ rhs: T) -> Tensor<T>
-  where T : Numeric & Comparable {
-  return min(lhs, Tensor(rhs))
-}
-
-public extension Tensor where Scalar : Numeric {
-  /// Computes the square of the tensor.
-  @inlinable @inline(__always)
-  @differentiable(
-    wrt: self, vjp: _vjpSquared()
-    where Scalar : TensorFlowFloatingPoint
-  )
-  func squared() -> Tensor {
-    return Raw.square(self)
-  }
-}
-
-/// Computes the log-softmax of the specified tensor element-wise.
-@inlinable @inline(__always)
-@differentiable(vjp: _vjpLogSoftmax(_:) where T : TensorFlowFloatingPoint)
-public func logSoftmax<T : TensorFlowFloatingPoint>(_ x: Tensor<T>) -> Tensor<T> {
-  return Raw.logSoftmax(logits: x)
-}
-
-//===----------------------------------------------------------------------===//
-// Selection
-//===----------------------------------------------------------------------===//
-
-public extension Tensor {
-  /// Replaces elements of this tensor with `other` in the lanes where `mask` is
-  /// `true`.
-  ///
-  /// - Precondition: `self` and `other` must have the same shape. If
-  ///   `self` and `other` are scalar, then `mask` must also be scalar. If
-  ///   `self` and `other` have rank greater than or equal to `1`, then `mask`
-  ///   must either have the same shape as `self` or be a 1-D `Tensor` such
-  ///   that `mask.scalarCount == self.shape[0]`.
-  @inlinable
-  @differentiable(wrt: (self, other),
-                  vjp: _vjpReplacing where Scalar : TensorFlowFloatingPoint)
-  func replacing(with other: Tensor,
-                 where mask: Tensor<Bool>) -> Tensor {
-    return Raw.select(condition: mask, t: self, e: other)
-  }
-}
-
-public extension Tensor where Scalar : TensorFlowFloatingPoint {
-  @inlinable
-  internal func _vjpReplacing(with other: Tensor, where mask: Tensor<Bool>)
-    -> (Tensor, (Tensor) -> (Tensor, Tensor)) {
-    return (replacing(with: other, where: mask), { v in
-      let zeros = Tensor(zeros: v.shape)
-      return (v.replacing(with: zeros, where: mask),
-              zeros.replacing(with: v, where: mask))
-    })
-  }
-}
-
-//===----------------------------------------------------------------------===//
-// Reduction
-//===----------------------------------------------------------------------===//
-
-public extension Tensor where Scalar == Bool {
-  /// Returns `true` if all scalars are equal to `true`. Otherwise, returns
-  /// `false`.
-  // NOTE: This overload is necessary, otherwise `all()` would refer
-  // to the variadic method `all(squeezingAxes:)` with zero indices.
-  @inlinable @inline(__always)
-  func all() -> Bool {
-    let axes = Tensor<Int32>(rangeFrom: 0, to: Int32(rank), stride: 1)
-    return _TFGetScalarOrDie(Raw.all(self, reductionIndices: axes).handle)
-  }
-
-  /// Returns `true` if any scalars are equal to `true`. Otherwise, returns
-  /// `false`.
-  // NOTE: This overload is necessary, otherwise `any()` would refer
-  // to the variadic method `any(squeezingAxes:)` with zero indices.
-  @inlinable @inline(__always)
-  func any() -> Bool {
-    let axes = Tensor<Int32>(rangeFrom: 0, to: Int32(rank), stride: 1)
-    return _TFGetScalarOrDie(Raw.any(self, reductionIndices: axes).handle)
-  }
-
-  /// Performs a logical AND operation along the specified axes. The reduced
-  /// dimensions are removed.
-  /// - Parameter axes: The dimensions to reduce.
-  /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
-  @inlinable @inline(__always)
-  func all(squeezingAxes axes: Int...) -> Tensor {
-    let axes = axes.map(Int32.init)
-    return Raw.all(self, reductionIndices: Tensor<Int32>(axes), keepDims: false)
-  }
-
-  /// Performs a logical OR operation along the specified axes. The reduced
-  /// dimensions are removed.
-  /// - Parameter axes: The dimensions to reduce.
-  /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
-  @inlinable @inline(__always)
-  func any(squeezingAxes axes: Int...) -> Tensor {
-    let axes = axes.map(Int32.init)
-    return Raw.any(self, reductionIndices: Tensor<Int32>(axes), keepDims: false)
-  }
-
-  /// Performs a logical AND operation along the specified axes. The reduced
-  /// dimensions are retained with value 1.
-  /// - Parameter axes: The dimensions to reduce.
-  /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
-  @inlinable @inline(__always)
-  func all(alongAxes axes: Int...) -> Tensor {
-    let axes = axes.map(Int32.init)
-    return Raw.all(self, reductionIndices: Tensor<Int32>(axes), keepDims: true)
-  }
-
-  /// Performs a logical OR operation along the specified axes. The reduced
-  /// dimensions are retained with value 1.
-  /// - Parameter axes: The dimensions to reduce.
-  /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
-  @inlinable @inline(__always)
-  func any(alongAxes axes: Int...) -> Tensor {
-    let axes = axes.map(Int32.init)
-    return Raw.any(self, reductionIndices: Tensor<Int32>(axes), keepDims: true)
-  }
-}
-
-public extension Tensor where Scalar : Numeric & Comparable {
-  // NOTE: This overload is necessary, otherwise `min()` would refer
-  // to the variadic method `min(squeezingAxes:)` with zero indices.
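// [Editorial note] Illustrative sketch, not part of the original file: the
// Boolean reductions above differ in whether the reduced dimensions are
// squeezed out or kept with size 1:
//
//     let b = Tensor<Bool>(shape: [2, 2], scalars: [true, true, false, true])
//     b.all()                  // false
//     b.any(squeezingAxes: 1)  // shape [2]: [true, true]
//     b.all(alongAxes: 1)      // shape [2, 1]: [[true], [false]]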
-  @inlinable @inline(__always)
-  func min() -> Tensor {
-    let axes = Tensor<Int32>(rangeFrom: 0, to: Int32(rank), stride: 1)
-    return Raw.min(self, reductionIndices: axes)
-  }
-
-  // NOTE: This overload is necessary, otherwise `max()` would refer
-  // to the variadic method `max(squeezingAxes:)` with zero indices.
-  @inlinable @inline(__always)
-  func max() -> Tensor {
-    let axes = Tensor<Int32>(rangeFrom: 0, to: Int32(rank), stride: 1)
-    return Raw.max(self, reductionIndices: axes)
-  }
-
-  /// Returns the maximum values along the specified axes. The reduced
-  /// dimensions are removed.
-  /// - Parameter axes: The dimensions to reduce.
-  /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
-  @inlinable @inline(__always)
-  func max(squeezingAxes axes: [Int]) -> Tensor {
-    let axes = axes.map(Int32.init)
-    return Raw.max(self, reductionIndices: Tensor<Int32>(axes), keepDims: false)
-  }
-
-  /// Returns the maximum values along the specified axes. The reduced
-  /// dimensions are removed.
-  /// - Parameter axes: The dimensions to reduce.
-  /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
-  @inlinable @inline(__always)
-  func max(squeezingAxes axes: Int...) -> Tensor {
-    return max(squeezingAxes: axes)
-  }
-
-  /// Returns the minimum values along the specified axes. The reduced
-  /// dimensions are removed.
-  /// - Parameter axes: The dimensions to reduce.
-  /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
-  @inlinable @inline(__always)
-  func min(squeezingAxes axes: [Int]) -> Tensor {
-    let axes = axes.map(Int32.init)
-    return Raw.min(self, reductionIndices: Tensor<Int32>(axes), keepDims: false)
-  }
-
-  /// Returns the minimum values along the specified axes. The reduced
-  /// dimensions are removed.
-  /// - Parameter axes: The dimensions to reduce.
-  /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
-  @inlinable @inline(__always)
-  func min(squeezingAxes axes: Int...) -> Tensor {
-    return min(squeezingAxes: axes)
-  }
-
-  /// Returns the indices of the maximum values along the specified axis. The
-  /// reduced dimension is removed.
-  /// - Parameter axis: The dimension to reduce.
-  /// - Precondition: `axis` must be in the range `-rank..<rank`.
-  @inlinable @inline(__always)
-  func argmax(squeezingAxis axis: Int) -> Tensor<Int32> {
-    return Raw.argMax(self, dimension: Tensor<Int32>(Int32(axis)))
-  }
-
-  /// Returns the indices of the minimum values along the specified axis. The
-  /// reduced dimension is removed.
-  /// - Parameter axis: The dimension to reduce.
-  /// - Precondition: `axis` must be in the range `-rank..<rank`.
-  @inlinable @inline(__always)
-  func argmin(squeezingAxis axis: Int) -> Tensor<Int32> {
-    return Raw.argMin(self, dimension: Tensor<Int32>(Int32(axis)))
-  }
-
-  /// Returns the minimum along the specified axes. The reduced dimensions are
-  /// retained with value 1.
-  /// - Parameter axes: The dimensions to reduce.
-  /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
-  @inlinable @inline(__always)
-  func min(alongAxes axes: [Int]) -> Tensor {
-    let axes = axes.map(Int32.init)
-    return Raw.min(self, reductionIndices: Tensor<Int32>(axes), keepDims: true)
-  }
-
-  /// Returns the minimum along the specified axes. The reduced dimensions are
-  /// retained with value 1.
-  /// - Parameter axes: The dimensions to reduce.
-  /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
-  @inlinable @inline(__always)
-  func min(alongAxes axes: Int...) -> Tensor {
-    return min(alongAxes: axes)
-  }
-
-  /// Returns the maximum along the specified axes. The reduced dimensions are
-  /// retained with value 1.
-  /// - Parameter axes: The dimensions to reduce.
-  /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
-  @inlinable @inline(__always)
-  func max(alongAxes axes: [Int]) -> Tensor {
-    let axes = axes.map(Int32.init)
-    return Raw.max(self, reductionIndices: Tensor<Int32>(axes), keepDims: true)
-  }
-
-  /// Returns the maximum along the specified axes. The reduced dimensions are
-  /// retained with value 1.
-  /// - Parameter axes: The dimensions to reduce.
-  /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
-  @inlinable @inline(__always)
-  func max(alongAxes axes: Int...) ->
Tensor { - return max(alongAxes: axes) - } - - /// Returns the index of the maximum value of the flattened scalars. - @inlinable @inline(__always) - func argmax() -> Tensor { - return flattened().argmax(squeezingAxis: 0) - } - - /// Returns the index of the minimum value of the flattened scalars. - @inlinable @inline(__always) - func argmin() -> Tensor { - return flattened().argmin(squeezingAxis: 0) - } -} - -// MARK: - Numeric reduction - -public extension Tensor where Scalar : Numeric { - // MARK: - Sum - - /// Returns the sum along the specified axes. The reduced dimensions are - /// removed. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank...rank`. - @inlinable @inline(__always) - @differentiable( - wrt: self, vjp: _vjpSum(squeezingAxes:) - where Scalar : TensorFlowFloatingPoint - ) - func sum(squeezingAxes axes: Tensor) -> Tensor { - return Raw.sum(self, reductionIndices: Tensor(axes), keepDims: false) - } - - /// Returns the sum along the specified axes. The reduced dimensions are - /// removed. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank...rank`. - @inlinable @inline(__always) - @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint) - func sum(squeezingAxes axes: [Int]) -> Tensor { - // TODO(TF-433): Remove workaround for differentiating `map`. - let axes = {axes.map(Int32.init)}() - return sum(squeezingAxes: Tensor(axes)) - } - - /// Returns the sum along the specified axes. The reduced dimensions are - /// removed. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank...rank`. - @inlinable @inline(__always) - @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint) - func sum(squeezingAxes axes: Int...) -> Tensor { - return sum(squeezingAxes: axes) - } - - @inlinable @inline(__always) - @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint) - func sum() -> Tensor { - return flattened().sum(squeezingAxes: 0) - } - - /// Returns the sum along the specified axes. The reduced dimensions are - /// retained with value 1. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank..) -> Tensor { - return Raw.sum(self, reductionIndices: axes, keepDims: true) - } - - /// Returns the sum along the specified axes. The reduced dimensions are - /// retained with value 1. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - // TODO(TF-433): Remove workaround for differentiating `map`. - let axes = {axes.map(Int32.init)}() - return sum(alongAxes: Tensor(axes)) - } - - /// Returns the sum along the specified axes. The reduced dimensions are - /// retained with value 1. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - return sum(alongAxes: axes) - } - - // MARK: - Product - - /// Returns the product along the specified axes. The reduced dimensions are - /// removed. - /// - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank...rank`. - // TODO: Make this @differentiable. 
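// [Editorial note] Illustrative sketch, not part of the original file: the
// numeric reductions follow the same squeezing-vs-retaining convention as
// the Boolean ones:
//
//     let x = Tensor<Float>(shape: [2, 3], scalars: [1, 2, 3, 4, 5, 6])
//     x.sum()                  // 21
//     x.sum(squeezingAxes: 0)  // shape [3]: [5, 7, 9]
//     x.sum(alongAxes: 0)      // shape [1, 3]: [[5, 7, 9]]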
- @inlinable @inline(__always) - func product(squeezingAxes axes: Tensor) -> Tensor { - return Raw.prod(self, reductionIndices: axes, keepDims: false) - } - - /// Returns the product along the specified axes. The reduced dimensions are - /// removed. - /// - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank...rank`. - @inlinable @inline(__always) - func product(squeezingAxes axes: [Int]) -> Tensor { - // TODO(TF-433): Remove workaround for differentiating `map`. - let axes = {axes.map(Int32.init)}() - return product(squeezingAxes: Tensor(axes)) - } - - /// Returns the product along the specified axes. The reduced dimensions are - /// removed. - /// - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank...rank`. - @inlinable @inline(__always) - func product(squeezingAxes axes: Int...) -> Tensor { - return product(squeezingAxes: axes) - } - - @inlinable @inline(__always) - func product() -> Tensor { - return flattened().product(squeezingAxes: 0) - } - - /// Returns the product along the specified axes. The reduced dimensions are - /// retained with value 1. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank..) -> Tensor { - return Raw.prod(self, reductionIndices: axes, keepDims: true) - } - - /// Returns the product along the specified axes. The reduced dimensions are - /// retained with value 1. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - // TODO(TF-433): Remove workaround for differentiating `map`. - let axes = {axes.map(Int32.init)}() - return product(alongAxes: Tensor(axes)) - } - - /// Returns the product along the specified axes. The reduced dimensions are - /// retained with value 1. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - return product(alongAxes: axes) - } - - // MARK: - Mean - - /// Returns the arithmetic mean along the specified axes. The reduced - /// dimensions are removed. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank...rank`. - @inlinable @inline(__always) - @differentiable( - wrt: self, vjp: _vjpMean(squeezingAxes:) - where Scalar : TensorFlowFloatingPoint - ) - func mean(squeezingAxes axes: Tensor) -> Tensor { - return Raw.mean(self, reductionIndices: axes, keepDims: false) - } - - /// Returns the arithmetic mean along the specified axes. The reduced - /// dimensions are removed. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank...rank`. - @inlinable @inline(__always) - @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint) - func mean(squeezingAxes axes: [Int]) -> Tensor { - // TODO(TF-433): Remove workaround for differentiating `map`. - let axes = {axes.map(Int32.init)}() - return mean(squeezingAxes: Tensor(axes)) - } - - /// Returns the arithmetic mean along the specified axes. The reduced - /// dimensions are removed. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank...rank`. - @inlinable @inline(__always) - @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint) - func mean(squeezingAxes axes: Int...) 
-> Tensor { - return mean(squeezingAxes: axes) - } - - @inlinable @inline(__always) - @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint) - func mean() -> Tensor { - return flattened().mean(squeezingAxes: [0]) - } - - /// Returns the arithmetic mean along the specified axes. The reduced - /// dimensions are retained with value 1. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank..) -> Tensor { - return Raw.mean(self, reductionIndices: axes, keepDims: true) - } - - /// Returns the arithmetic mean along the specified axes. The reduced - /// dimensions are retained with value 1. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - // TODO(TF-433): Remove workaround for differentiating `map`. - let axes = {axes.map(Int32.init)}() - return mean(alongAxes: Tensor(axes)) - } - - /// Returns the arithmetic mean along the specified axes. The reduced - /// dimensions are retained with value 1. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - return mean(alongAxes: axes) - } - - // MARK: - Variance - - /// Returns the variance along the specified axes. The reduced dimensions are - /// removed. Does not apply Bessel's correction. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank..) -> Tensor { - let squaredDiff = (self - mean(alongAxes: axes)).squared() - return squaredDiff.mean(squeezingAxes: axes) - } - - /// Returns the variance along the specified axes. The reduced dimensions are - /// removed. Does not apply Bessel's correction. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - // TODO(TF-433): Remove workaround for differentiating `map`. - let axes = {axes.map(Int32.init)}() - return variance(squeezingAxes: Tensor(axes)) - } - - /// Returns the variance along the specified axes. The reduced dimensions are - /// retained with value 1. Does not apply Bessel's correction. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - return variance(squeezingAxes: axes) - } - - @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint) - @inlinable @inline(__always) - func variance() -> Tensor { - let mean = self.mean() - let squaredDiff = (self - mean).squared() - return squaredDiff.mean() - } - - /// Returns the variance along the specified axes. The reduced dimensions are - /// retained with value 1. Does not apply Bessel's correction. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank..) -> Tensor { - let squaredDiff = (self - mean(alongAxes: axes)).squared() - return squaredDiff.mean(alongAxes: axes) - } - - /// Returns the variance along the specified axes. The reduced dimensions are - /// retained with value 1. Does not apply Bessel's correction. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - // TODO(TF-433): Remove workaround for differentiating `map`. - let axes = {axes.map(Int32.init)}() - return variance(alongAxes: Tensor(axes)) - } - - /// Returns the variance along the specified axes. The reduced dimensions are - /// retained with value 1. 
Does not apply Bessel's correction.
-  /// - Parameter axes: The dimensions to reduce.
-  /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
-  @inlinable @inline(__always)
-  @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint)
-  func variance(alongAxes axes: Int...) -> Tensor {
-    return variance(alongAxes: axes)
-  }
-}
-
-// TODO: Consider making the return type be generic over `FloatingPoint` types
-// so that `self`'s scalar type can be any `Numeric` type.
-public extension Tensor where Scalar : TensorFlowFloatingPoint {
-  /// Returns the standard deviation of the elements along the specified axes.
-  /// The reduced dimensions are removed. Does not apply Bessel's correction.
-  ///
-  /// - Parameter axes: The dimensions to reduce.
-  /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
-  @inlinable @inline(__always)
-  @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint)
-  func standardDeviation(squeezingAxes axes: Tensor<Int32>) -> Tensor {
-    return sqrt(variance(squeezingAxes: axes))
-  }
-
-  /// Returns the standard deviation of the elements along the specified axes.
-  /// The reduced dimensions are removed. Does not apply Bessel's correction.
-  ///
-  /// - Parameter axes: The dimensions to reduce.
-  /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
-  @inlinable @inline(__always)
-  @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint)
-  func standardDeviation(squeezingAxes axes: [Int]) -> Tensor {
-    return sqrt(variance(squeezingAxes: axes))
-  }
-
-  /// Returns the standard deviation of the elements along the specified axes.
-  /// The reduced dimensions are removed. Does not apply Bessel's correction.
-  ///
-  /// - Parameter axes: The dimensions to reduce.
-  /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
-  @inlinable @inline(__always)
-  @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint)
-  func standardDeviation(squeezingAxes axes: Int...) -> Tensor {
-    return standardDeviation(squeezingAxes: axes)
-  }
-
-  /// Returns the standard deviation of all the elements in this tensor.
-  /// Does not apply Bessel's correction.
-  @inlinable @inline(__always)
-  @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint)
-  func standardDeviation() -> Tensor {
-    // Reduce along all dimensions.
-    return standardDeviation(squeezingAxes: Array(0..<rank))
-  }
-
-  /// Returns the standard deviation of the elements along the specified axes.
-  /// The reduced dimensions are retained with value `1`. Does not apply
-  /// Bessel's correction.
-  ///
-  /// - Parameter axes: The dimensions to reduce.
-  /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
-  @inlinable @inline(__always)
-  @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint)
-  func standardDeviation(alongAxes axes: Tensor<Int32>) -> Tensor {
-    return sqrt(variance(alongAxes: axes))
-  }
-
-  /// Returns the standard deviation of the elements along the specified axes.
-  /// The reduced dimensions are retained with value `1`. Does not apply
-  /// Bessel's correction.
-  ///
-  /// - Parameter axes: The dimensions to reduce.
-  /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
-  @inlinable @inline(__always)
-  @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint)
-  func standardDeviation(alongAxes axes: [Int]) -> Tensor {
-    // TODO(TF-433): Remove workaround for differentiating `map`.
-    let axes = {axes.map(Int32.init)}()
-    return standardDeviation(alongAxes: Tensor<Int32>(axes))
-  }
-
-  /// Returns the standard deviation of the elements along the specified axes.
-  /// The reduced dimensions are retained with value `1`. Does not apply
-  /// Bessel's correction.
-  ///
-  /// - Parameter axes: The dimensions to reduce.
-  /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
-  @inlinable @inline(__always)
-  @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint)
-  func standardDeviation(alongAxes axes: Int...) -> Tensor {
-    return sqrt(variance(alongAxes: axes))
-  }
-}
-
-//===----------------------------------------------------------------------===//
-// Tensor properties
-//===----------------------------------------------------------------------===//
-
-public extension Tensor {
-  /// The rank of the tensor, represented as a `Tensor<Int32>`.
-  @inlinable
-  var rankTensor: Tensor<Int32> {
-    @inline(__always)
-    get {
-      return Raw.rank(self)
-    }
-  }
-
-  /// The dimensions of the tensor, represented as a `Tensor<Int32>`.
-  @inlinable
-  var shapeTensor: Tensor<Int32> {
-    @inline(__always)
-    get {
-      return Raw.shape(self)
-    }
-  }
-
-  /// The number of scalars in the tensor, represented as a `Tensor<Int32>`.
- @inlinable - var scalarCountTensor: Tensor { - @inline(__always) - get { - return Raw.size(self) - } - } -} - -//===----------------------------------------------------------------------===// -// Broadcasting -//===----------------------------------------------------------------------===// - -public extension Tensor { - @inlinable @inline(__always) - func broadcast(toShape shape: Tensor) -> Tensor { - return Raw.broadcastTo(self, shape: shape) - } - - @inlinable @inline(__always) - func broadcast(to shape: TensorShape) -> Tensor { - return broadcast(toShape: Tensor(shape.dimensions.map(Int32.init))) - } - - /// Broadcast to the same shape as the specified `Tensor`. - /// - Precondition: The specified shape must be compatible for broadcasting. - @inlinable @inline(__always) - func broadcast(like other: Tensor) -> Tensor { - return broadcast(toShape: other.shapeTensor) - } -} - -public extension Tensor where Scalar : Numeric { - @inlinable - func unbroadcast(toShape otherShape: Tensor) -> Tensor { - let rankDiff = (rankTensor - otherShape.scalarCountTensor).rankLifted() - let ones: Tensor = Raw.fill(dims: rankDiff, value: Tensor(1)) - let paddedShape = ones ++ otherShape - let nonEqualIndices = paddedShape .!= shapeTensor - let broadcastIndices = Raw.where_(nonEqualIndices).flattened() - let unbroadcasted: Tensor = Raw.sum( - self, reductionIndices: Tensor(broadcastIndices), keepDims: false) - return Raw.reshape(unbroadcasted, shape: otherShape) - } - - @inlinable @inline(__always) - func unbroadcast(like other: Tensor) -> Tensor { - return unbroadcast(toShape: other.shapeTensor) - } - - @inlinable @inline(__always) - func unbroadcast(to shape: TensorShape) -> Tensor { - return unbroadcast(toShape: Tensor(shape.dimensions.map(Int32.init))) - } - - @inlinable @inline(__always) - static func .= (lhs: inout Tensor, rhs: Tensor) { - lhs = rhs.broadcast(like: lhs) - } -} - -//===----------------------------------------------------------------------===// -// Padding -//===----------------------------------------------------------------------===// - -public extension Tensor where Scalar : Numeric { - /// Returns a padded tensor according to the specified padding sizes. - @inlinable - func padded( - forSizes sizes: [(before: Int, after: Int)], - with value: Scalar = 0 - ) -> Tensor { - let paddings = Tensor( - shape: [sizes.count, 2], - scalars: sizes.flatMap { [Int32($0.before), Int32($0.after)] } - ) - return Raw.padV2(self, paddings: paddings, constantValues: Tensor(value)) - } -} - -//===----------------------------------------------------------------------===// -// Indexing and slicing -//===----------------------------------------------------------------------===// - -// TODO: Negative indexing and strides syntax. - -public extension Tensor { - /// Extracts a slice from the tensor defined by lower and upper bounds for - /// each dimension. - /// - /// - Parameter lowerBounds: The lower bounds at each dimension. - /// - Parameter upperBounds: The upper bounds at each dimension. - @inlinable - @differentiable(wrt: self) - func slice(lowerBounds: [Int], upperBounds: [Int]) -> Tensor { - // TODO: Precondition `lowerBounds.count == upperBounds.count`, - // preferably in graph. - // TODO: Differentiating control flow is not supported yet, thus the thunks. 
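    // [Editorial note] Illustrative example, not part of the original file:
    //
    //     let m = Tensor<Float>(shape: [3, 3],
    //                           scalars: (0..<9).map(Float.init))
    //     let s = m.slice(lowerBounds: [1, 0], upperBounds: [3, 2])
    //     // s has shape [2, 2]: rows 1..<3, columns 0..<2.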
- let lowerBoundsTensor = Tensor({lowerBounds.map(Int32.init)}()) - let upperBoundsTensor = Tensor({upperBounds.map(Int32.init)}()) - return slice( - lowerBounds: lowerBoundsTensor, - sizes: upperBoundsTensor - lowerBoundsTensor) - } - - @inlinable - @differentiable(wrt: self, vjp: _vjpSlice) - func slice(lowerBounds: Tensor, sizes: Tensor) -> Tensor { - return Raw.slice(self, begin: lowerBounds, size: sizes) - } - - @inlinable - internal func _vjpSlice( - lowerBounds: Tensor, - sizes: Tensor - ) -> (Tensor, (Tensor) -> Tensor) { - let value = slice(lowerBounds: lowerBounds, sizes: sizes) - let afterPaddings = shapeTensor - value.shapeTensor - lowerBounds - return (value, { [after = afterPaddings] v in - let beforePaddings = lowerBounds.expandingShape(at: 1) - let afterPaddings = after.expandingShape(at: 1) - let paddings = Tensor( - concatenating: [beforePaddings, afterPaddings], alongAxis: 1) - return Raw.pad(v, paddings: paddings) - }) - } -} - -public enum TensorRange : TensorRangeExpression { - case ellipsis - case newAxis - case squeezeAxis - case index(Int) - case range(Range, stride: Int) - case closedRange(ClosedRange, stride: Int) - case partialRangeFrom(PartialRangeFrom, stride: Int) - case partialRangeUpTo(PartialRangeUpTo, stride: Int) - case partialRangeThrough(PartialRangeThrough, stride: Int) - - public var tensorRange: TensorRange { return self } -} - -extension TensorRange : Equatable { - public static func == (lhs: TensorRange, rhs: TensorRange) -> Bool { - switch (lhs, rhs) { - case (.ellipsis, .ellipsis), - (.newAxis, .newAxis), - (.squeezeAxis, .squeezeAxis): - return true - case (let .index(i1), let .index(i2)): return i1 == i2 - case (let .range(r1, s1), let .range(r2, s2)): return r1 == r2 && s1 == s2 - case (let .closedRange(r1, s1), let .closedRange(r2, s2)): - return r1 == r2 && s1 == s2 - case (let .partialRangeFrom(r1, s1), let .partialRangeFrom(r2, s2)): - return r1.lowerBound == r2.lowerBound && s1 == s2 - case (let .partialRangeUpTo(r1, s1), let .partialRangeUpTo(r2, s2)): - return r1.upperBound == r2.upperBound && s1 == s2 - case (let .partialRangeThrough(r1, s1), let .partialRangeThrough(r2, s2)): - return r1.upperBound == r2.upperBound && s1 == s2 - default: return false - } - } -} - -public protocol TensorRangeExpression { - var tensorRange: TensorRange { get } -} - -// TODO: Cannot extend non-nominal type 'UnboundedRange'. -// extension UnboundedRange : TensorRangeExpression { -// public var tensorRange: TensorRange { return .ellipsis } -// } - -extension Int : TensorRangeExpression { - public var tensorRange: TensorRange { return .index(self) } -} - -extension Range : TensorRangeExpression where Bound == Int { - public var tensorRange: TensorRange { - return .range(self, stride: 1) - } -} - -extension ClosedRange : TensorRangeExpression where Bound == Int { - public var tensorRange: TensorRange { - return .closedRange(self, stride: 1) - } -} - -extension PartialRangeFrom : TensorRangeExpression where Bound == Int { - public var tensorRange: TensorRange { - return .partialRangeFrom(self, stride: 1) - } -} - -extension PartialRangeUpTo : TensorRangeExpression where Bound == Int { - public var tensorRange: TensorRange { - return .partialRangeUpTo(self, stride: 1) - } -} - -extension PartialRangeThrough : TensorRangeExpression where Bound == Int { - public var tensorRange: TensorRange { - return .partialRangeThrough(self, stride: 1) - } -} - -infix operator .. 
: StridedRangeFormationPrecedence -precedencegroup StridedRangeFormationPrecedence { - associativity: left - higherThan: CastingPrecedence - lowerThan: RangeFormationPrecedence -} - -public extension Range where Bound == Int { - static func .. (range: Range, stride: Int) -> TensorRange { - return .range(range, stride: stride) - } -} - -public extension ClosedRange where Bound == Int { - static func .. (range: ClosedRange, stride: Int) -> TensorRange { - return .closedRange(range, stride: stride) - } -} - -public extension PartialRangeFrom where Bound == Int { - static func .. (range: PartialRangeFrom, stride: Int) -> TensorRange { - return .partialRangeFrom(range, stride: stride) - } -} - -public extension PartialRangeUpTo where Bound == Int { - static func .. (range: PartialRangeUpTo, stride: Int) -> TensorRange { - return .partialRangeUpTo(range, stride: stride) - } -} - -public extension PartialRangeThrough where Bound == Int { - static func .. (range: PartialRangeThrough, stride: Int) -> TensorRange { - return .partialRangeThrough(range, stride: stride) - } -} - -public extension Tensor { - @_fixed_layout @usableFromInline - internal struct IndexPath { - @usableFromInline - let begin, end, strides: Tensor - - @usableFromInline - let beginMask, endMask, ellipsisMask, newAxisMask, squeezeAxisMask: Int64 - - @inlinable - public init( - begin: Tensor, end: Tensor, strides: Tensor, - beginMask: Int64, endMask: Int64, ellipsisMask: Int64, newAxisMask: Int64, - squeezeAxisMask: Int64 - ) { - self.begin = begin - self.end = end - self.strides = strides - self.beginMask = beginMask - self.endMask = endMask - self.ellipsisMask = ellipsisMask - self.newAxisMask = newAxisMask - self.squeezeAxisMask = squeezeAxisMask - } - } - - @inlinable - @differentiable(wrt: self, vjp: _vjpSubscript) - internal subscript(_ indexPath: IndexPath) -> Tensor { - get { - return Raw.stridedSlice( - self, begin: indexPath.begin, end: indexPath.end, - strides: indexPath.strides, beginMask: indexPath.beginMask, - endMask: indexPath.endMask, ellipsisMask: indexPath.ellipsisMask, - newAxisMask: indexPath.newAxisMask, - shrinkAxisMask: indexPath.squeezeAxisMask) - } - set { - self = Raw.tensorStridedSliceUpdate( - self, begin: indexPath.begin, end: indexPath.end, - strides: indexPath.strides, value: newValue, - beginMask: indexPath.beginMask, endMask: indexPath.endMask, - ellipsisMask: indexPath.ellipsisMask, - newAxisMask: indexPath.newAxisMask, - shrinkAxisMask: indexPath.squeezeAxisMask) - } - } - - @inlinable - // TODO: @differentiable(wrt: self) - subscript(_ ranges: TensorRangeExpression...) 
-> Tensor { - get { - return self[IndexPath(ranges.map { $0.tensorRange })] - } - set { - self[IndexPath(ranges.map { $0.tensorRange })] = newValue - } - } - - @usableFromInline - internal func _vjpSubscript( - _ indexPath: IndexPath - ) -> (Tensor, (Tensor) -> Tensor) { - return (self[indexPath], { [shape = shapeTensor] v in - Raw.stridedSliceGrad( - shape: shape, begin: indexPath.begin, end: indexPath.end, - strides: indexPath.strides, dy: v, beginMask: indexPath.beginMask, - endMask: indexPath.endMask, ellipsisMask: indexPath.ellipsisMask, - newAxisMask: indexPath.newAxisMask, - shrinkAxisMask: indexPath.squeezeAxisMask) - }) - } -} - -internal extension Tensor.IndexPath { - @inlinable - init(_ ranges: [TensorRange]) { - precondition(!ranges.isEmpty, "The tensor range collection cannot be empty.") - precondition(ranges.count { $0 == TensorRange.ellipsis } < 2, - "Only one ellipsis is allowed per tensor range collection.") - - var begin = [Int32](repeating: 0, count: ranges.count) - var end = [Int32](repeating: 0, count: ranges.count) - var strides = [Int32](repeating: 1, count: ranges.count) - var beginMask: Int64 = 0 - var endMask: Int64 = 0 - var ellipsisMask: Int64 = 0 - var newAxisMask: Int64 = 0 - var squeezeAxisMask: Int64 = 0 - for (i, index) in ranges.enumerated() { - switch index { - case .ellipsis: ellipsisMask |= 1 << i - case .newAxis: newAxisMask |= 1 << i - case .squeezeAxis: squeezeAxisMask |= 1 << i - case .index(let index): - begin[i] = Int32(index) - end[i] = Int32(index) + 1 - squeezeAxisMask |= 1 << i - case .range(let range, let stride): - begin[i] = Int32(range.lowerBound) - end[i] = Int32(range.upperBound) - strides[i] = Int32(stride) - case .closedRange(let range, let stride): - begin[i] = Int32(range.lowerBound) - switch Int32(range.upperBound) { - case -1: endMask |= 1 << i - case let u: end[i] = u + 1 - } - strides[i] = Int32(stride) - case .partialRangeFrom(let range, let stride): - begin[i] = Int32(range.lowerBound) - strides[i] = Int32(stride) - endMask |= 1 << i - case .partialRangeUpTo(let range, let stride): - end[i] = Int32(range.upperBound) - strides[i] = Int32(stride) - beginMask |= 1 << i - case .partialRangeThrough(let range, let stride): - end[i] = Int32(range.upperBound) + 1 - strides[i] = Int32(stride) - beginMask |= 1 << i - } - } - - self.begin = Tensor(begin) - self.end = Tensor(end) - self.strides = Tensor(strides) - self.beginMask = beginMask - self.endMask = endMask - self.ellipsisMask = ellipsisMask - self.newAxisMask = newAxisMask - self.squeezeAxisMask = squeezeAxisMask - } -} diff --git a/stdlib/public/TensorFlow/PythonConversion.swift b/stdlib/public/TensorFlow/PythonConversion.swift deleted file mode 100644 index 7677c9d97496e..0000000000000 --- a/stdlib/public/TensorFlow/PythonConversion.swift +++ /dev/null @@ -1,172 +0,0 @@ -//===-- PythonConversion.swift --------------------------------*- swift -*-===// -// -// This source file is part of the Swift.org open source project -// -// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors -// Licensed under Apache License v2.0 with Runtime Library Exception -// -// See https://swift.org/LICENSE.txt for license information -// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors -// -//===----------------------------------------------------------------------===// -// -// This file defines conversions between Python types & custom TensorFlow types. 
-//
-//===----------------------------------------------------------------------===//
-
-#if canImport(Python)
-import Python
-
-/// The `numpy` Python module.
-/// Note: Global variables are lazy, so the following declaration won't produce
-/// a Python import error until it is first used.
-private let np = Python.import("numpy")
-
-private func debugLogNumpyError(_ message: String) {
-  debugLog("NumPy conversion error: " + message)
-}
-
-extension ShapedArray : ConvertibleFromNumpyArray
-  where Scalar : NumpyScalarCompatible {
-  /// Creates a `ShapedArray` with the same shape and scalars as the specified
-  /// `numpy.ndarray` instance.
-  ///
-  /// - Parameter numpyArray: The `numpy.ndarray` instance to convert.
-  /// - Precondition: The `numpy` Python package must be installed.
-  /// - Precondition: `numpyArray` must have a compatible scalar `dtype`.
-  public init?(numpy numpyArray: PythonObject) {
-    // Check if input is a `numpy.ndarray` instance.
-    guard Python.isinstance(numpyArray, np.ndarray) == true else {
-      debugLogNumpyError("""
-        PythonObject input has type '\(Python.type(numpyArray))' and is not \
-        an instance of 'numpy.ndarray'.
-        """)
-      return nil
-    }
-    // Check if the dtype of the `ndarray` is compatible with the `Scalar`
-    // type.
-    guard Scalar.numpyScalarTypes.contains(numpyArray.dtype) else {
-      debugLogNumpyError("""
-        'numpy.ndarray' dtype '\(numpyArray.dtype)' is incompatible with \
-        Swift type '\(Scalar.self)'.
-        """)
-      return nil
-    }
-
-    let pyShape = numpyArray.__array_interface__["shape"]
-    guard let shape = [Int](pyShape) else {
-      debugLogNumpyError("cannot access shape of 'numpy.ndarray' instance.")
-      return nil
-    }
-
-    // Make sure that the array is contiguous in memory. This does a copy if
-    // the array is not already contiguous in memory.
-    let contiguousNumpyArray = np.ascontiguousarray(numpyArray)
-
-    guard let ptrVal =
-      UInt(contiguousNumpyArray.__array_interface__["data"].tuple2.0) else {
-      debugLogNumpyError("cannot access data of 'numpy.ndarray' instance.")
-      return nil
-    }
-    // Note: `ptr` is not nil even if the `ndarray` is empty (i.e. has a shape
-    // of `(0,)`).
-    guard let ptr = UnsafePointer<Scalar>(bitPattern: ptrVal) else {
-      fatalError("'numpy.ndarray' data pointer was nil")
-    }
-    // This code avoids calling `init(shape: [Int], scalars: S)`,
-    // which inefficiently copies scalars one by one. Instead,
-    // `init(shape: [Int], scalars: [Scalar])` is called, which efficiently
-    // does a `memcpy` of the entire `scalars` array.
-    // Unnecessary copying is minimized.
-    let dummyPointer = UnsafeMutablePointer<Scalar>.allocate(capacity: 1)
-    let scalarCount = shape.reduce(1, *)
-    var scalars: [Scalar] = Array(repeating: dummyPointer.move(),
-                                  count: scalarCount)
-    dummyPointer.deallocate()
-    scalars.withUnsafeMutableBufferPointer { buffPtr in
-      buffPtr.baseAddress!.assign(from: ptr, count: scalarCount)
-    }
-    self.init(shape: shape, scalars: scalars)
-  }
-}
-
-extension Tensor : ConvertibleFromNumpyArray
-  where Scalar : NumpyScalarCompatible {
-  /// Creates a tensor with the same shape and scalars as the specified
-  /// `numpy.ndarray` instance.
-  ///
-  /// - Parameter numpyArray: The `numpy.ndarray` instance to convert.
-  /// - Precondition: The `numpy` Python package must be installed.
-  /// - Returns: `numpyArray` converted to a `Tensor`. Returns `nil` if
-  ///   `numpyArray` does not have a compatible scalar `dtype`.
-  public init?(numpy numpyArray: PythonObject) {
-    // Check if input is a `numpy.ndarray` instance.
- guard Python.isinstance(numpyArray, np.ndarray) == true else { - debugLogNumpyError(""" - PythonObject input has type '\(Python.type(numpyArray))' and is not \ - an instance of 'numpy.ndarray'. - """) - return nil - } - // Check if the dtype of the `ndarray` is compatible with the `Scalar` - // type. - guard Scalar.numpyScalarTypes.contains(numpyArray.dtype) else { - debugLogNumpyError(""" - 'numpy.ndarray' dtype '\(numpyArray.dtype)' is incompatible with \ - Swift type '\(Scalar.self)'. - """) - return nil - } - - let pyShape = numpyArray.__array_interface__["shape"] - guard let dimensions = [Int](pyShape) else { - debugLogNumpyError("cannot access shape of 'numpy.ndarray' instance.") - return nil - } - let shape = TensorShape(dimensions) - - // Make sure that the array is contiguous in memory. This does a copy if - // the array is not already contiguous in memory. - let contiguousNumpyArray = np.ascontiguousarray(numpyArray) - - guard let ptrVal = - UInt(contiguousNumpyArray.__array_interface__["data"].tuple2.0) else { - debugLogNumpyError("cannot access data of 'numpy.ndarray' instance.") - return nil - } - // Note: `ptr` is not nil even if the `ndarray` is empty (i.e. has a shape - // of `(0,)`). - guard let ptr = UnsafePointer(bitPattern: ptrVal) else { - fatalError("'numpy.ndarray' data pointer was nil") - } - let buffPtr = UnsafeBufferPointer(start: ptr, - count: Int(shape.contiguousSize)) - self.init(shape: shape, scalars: buffPtr) - } -} - -extension ShapedArray where Scalar : NumpyScalarCompatible { - /// Creates a `numpy.ndarray` instance with the same shape and scalars as - /// this `ShapedArray`. - /// - /// - Precondition: The `numpy` Python package must be installed. - public func makeNumpyArray() -> PythonObject { - return scalars.makeNumpyArray().reshape(shape) - } -} - -extension Tensor where Scalar : NumpyScalarCompatible { - /// Creates a `numpy.ndarray` instance with the same shape and scalars as - /// this tensor. - /// - /// - Precondition: The `numpy` Python package must be installed. - public func makeNumpyArray() -> PythonObject { return array.makeNumpyArray() } -} - -extension TensorShape : PythonConvertible { - public var pythonObject: PythonObject { - return dimensions.pythonObject - } -} - -#endif // canImport(Python) diff --git a/stdlib/public/TensorFlow/ShapedArray.swift b/stdlib/public/TensorFlow/ShapedArray.swift deleted file mode 100644 index 43de753d4acf4..0000000000000 --- a/stdlib/public/TensorFlow/ShapedArray.swift +++ /dev/null @@ -1,1201 +0,0 @@ -//===-- ShapedArray.swift -------------------------------------*- swift -*-===// -// -// This source file is part of the Swift.org open source project -// -// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors -// Licensed under Apache License v2.0 with Runtime Library Exception -// -// See https://swift.org/LICENSE.txt for license information -// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors -// -//===----------------------------------------------------------------------===// - -import Swift -import CTensorFlow - -//===----------------------------------------------------------------------===// -// TensorBuffer -//===----------------------------------------------------------------------===// - -/// `TensorBuffer` is the internal storage of `ShapedArray`. This buffer has -/// two modes of storage: 'native' and 'tensorFlow'. 
In 'native' mode, the
-/// buffer object stores a pointer to contiguous scalars; in 'tensorFlow'
-/// mode, the buffer object stores a `TF_Tensor*` and bridges to TensorFlow.
-/// In either mode, the buffer object owns the memory and will deallocate it
-/// on `deinit`.
-@_fixed_layout @usableFromInline
-internal final class TensorBuffer<Scalar> {
-  typealias Shape = [Int]
-
-  /// A reference type wrapping a Swift Array.
-  /// - Note: an Array is used as the native storage for `TensorBuffer`. To make
-  ///   in-place mutation possible when the array is stored in an enum value, the
-  ///   array must be wrapped in a reference type.
-  @_fixed_layout @usableFromInline
-  final class BoxedArray {
-    var array: [Scalar]
-
-    init(_ array: __owned [Scalar]) {
-      self.array = array
-    }
-  }
-
-  enum Allocation {
-    case native(BoxedArray)
-    case tensorFlow(CTensor)
-  }
-
-  let allocation: Allocation
-  let count: Int
-
-  deinit {
-    debugLog("De-initializing tensor buffer.")
-    switch allocation {
-    case .native:
-      debugLog("Deallocating underlying buffer.")
-    case let .tensorFlow(cTensor):
-      debugLog("Deleting underlying tensor.")
-      TF_DeleteTensor(cTensor)
-    }
-    debugLog("Returning from deinit of TensorBuffer.")
-  }
-
-  init(allocation: Allocation, count: Int) {
-    self.allocation = allocation
-    self.count = count
-  }
-}
-
-// TF Tensor-specific initializer.
-extension TensorBuffer where Scalar : _TensorFlowDataTypeCompatible {
-  /// Creates a local tensor buffer from a C `TF_Tensor*` value and takes
-  /// ownership of the value.
-  convenience init(owning cTensor: CTensor, count: Int) {
-    debugLog("Initializing TensorBuffer with a cTensor of \(count) elements.")
-    let actualCount = (0..<TF_NumDims(cTensor)).reduce(1) { accumulator, next in
-      accumulator * Int(TF_Dim(cTensor, next))
-    }
-    assert(actualCount == count)
-    self.init(allocation: .tensorFlow(cTensor), count: count)
-  }
-}
-
-// Factory methods.
-extension TensorBuffer {
-  static func create(
-    count: Int,
-    withInitializer body: (UnsafeMutableBufferPointer<Scalar>) -> Void
-  ) -> TensorBuffer {
-    /// Since `Scalar` may be any generic type, it is not possible to construct
-    /// an instance of `Scalar` directly for use with the
-    /// `Array(repeating:count:)` initializer. The workaround here is to
-    /// allocate a dummy `Scalar` pointer of size 1 and to use the pointee value
-    /// as the `repeatedValue` of the initializer.
-    let dummyPointer = UnsafeMutablePointer<Scalar>.allocate(capacity: 1)
-    var array = Array(repeating: dummyPointer.move(), count: count)
-    dummyPointer.deallocate()
-    array.withUnsafeMutableBufferPointer { body($0) }
-    return TensorBuffer(allocation: .native(BoxedArray(array)), count: count)
-  }
-}
-
-// Unsafe address accessor.
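One note on the factory just above: the dummy-pointer workaround predates `Array(unsafeUninitializedCapacity:initializingWith:)`. A minimal sketch of the modern alternative, assuming a Swift 5.1 or later standard library (`makeScalars` is a hypothetical name, not part of this patch):

```swift
// Hypothetical replacement for the factory above; no dummy element is needed.
func makeScalars<T>(
  count: Int, initializer: (UnsafeMutableBufferPointer<T>) -> Void
) -> [T] {
  return [T](unsafeUninitializedCapacity: count) { buffer, initializedCount in
    initializer(buffer)        // caller fills all `count` elements
    initializedCount = count   // mark the whole buffer as initialized
  }
}
```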
-extension TensorBuffer { - func withUnsafeMutableBufferPointer( - _ body: (inout UnsafeMutableBufferPointer) throws -> R - ) rethrows -> R { - switch allocation { - case let .native(box): - return try box.array.withUnsafeMutableBufferPointer { bufferPtr in - try body(&bufferPtr) - } - case let .tensorFlow(cTensor): - let startAddress = TF_TensorData(cTensor) - .assumingMemoryBound(to: Scalar.self) - var bufferPointer = UnsafeMutableBufferPointer( - start: startAddress, count: count - ) - return try body(&bufferPointer) - } - } - - func withUnsafeBufferPointer( - _ body: (UnsafeBufferPointer) throws -> R - ) rethrows -> R { - switch allocation { - case let .native(box): - return try box.array.withUnsafeBufferPointer { bufferPtr in - return try body(bufferPtr) - } - case let .tensorFlow(cTensor): - let startAddress = TF_TensorData(cTensor) - .assumingMemoryBound(to: Scalar.self) - let bufferPointer = UnsafeBufferPointer( - start: startAddress, count: count - ) - return try body(bufferPointer) - } - } -} - -//===----------------------------------------------------------------------===// -// ShapedArrayProtocol, the protocol unifying ShapedArray and ShapedArraySlice. -//===----------------------------------------------------------------------===// - -public protocol _ShapedArrayProtocol - : RandomAccessCollection, MutableCollection { - associatedtype Scalar - - /// The number of dimensions of the array. - var rank: Int { get } - /// The shape of the array. - var shape: [Int] { get } - /// The total number of scalars in the array. - var scalarCount: Int { get } - - /// Creates an array with the specified shape and contiguous scalars in - /// row-major order. - /// - Precondition: The number of scalars must equal the product of the - /// dimensions of the shape. - init(shape: [Int], scalars: [Scalar]) - - /// Creates an array with the specified shape and sequence of scalars in - /// row-major order. - /// - Precondition: The number of scalars must equal the product of the - /// dimensions of the shape. - init(shape: [Int], scalars: S) where S.Element == Scalar - - /// Calls a closure with a pointer to the array’s contiguous storage. - /// - Parameter body: A closure with an `UnsafeBufferPointer` parameter that - /// points to the contiguous storage for the array. If no such storage - /// exists, it is created. If body has a return value, that value is also - /// used as the return value for the `withUnsafeBufferPointer(_:)` method. - /// The pointer argument is valid only for the duration of the method’s - /// execution. - func withUnsafeBufferPointer( - _ body: (UnsafeBufferPointer) throws -> R - ) rethrows -> R - - /// Calls the given closure with a pointer to the array’s mutable contiguous - /// storage. - /// - Parameter body: A closure with an `UnsafeMutableBufferPointer` parameter - /// that points to the contiguous storage for the array. If no such storage - /// exists, it is created. If body has a return value, that value is also - /// used as the return value for the `withUnsafeMutableBufferPointer(_:)` - /// method. The pointer argument is valid only for the duration of the - /// method’s execution. - mutating func withUnsafeMutableBufferPointer( - _ body: (inout UnsafeMutableBufferPointer) throws -> R - ) rethrows -> R -} - -public extension _ShapedArrayProtocol { - /// The scalars of the array in row-major order. 
- var scalars: [Scalar] { - get { - return withUnsafeBufferPointer(Array.init) - } - set { - precondition(newValue.count == scalarCount, "Scalar count mismatch.") - withUnsafeMutableBufferPointer { ptr in - ptr.baseAddress!.initialize(from: newValue, count: newValue.count) - } - } - } - - /// Returns `true` if the array has rank 0. - var isScalar: Bool { - return rank == 0 - } - - /// Returns the single scalar element if the array has rank 0 and `nil` - /// otherwise. - var scalar: Scalar? { - get { - guard rank == 0 else { return nil } - return scalars.first - } - set { - precondition(isScalar, "Array does not have shape [].") - guard let newValue = newValue else { - preconditionFailure("New scalar value cannot be nil.") - } - scalars[0] = newValue - } - } -} - -public extension _ShapedArrayProtocol where Scalar : Equatable { - static func == (lhs: Self, rhs: Other) -> Bool - where Other : _ShapedArrayProtocol, Scalar == Other.Scalar { - return lhs.shape == rhs.shape && lhs.scalars.elementsEqual(rhs.scalars) - } -} - -public extension _ShapedArrayProtocol { - /// Returns the number of element arrays in an array (equivalent to the first - /// dimension). - /// - Note: `count` is distinct from `scalarCount`, which represents the total - /// number of scalars. - var count: Int { - return shape.first ?? 0 - } -} - -internal extension _ShapedArrayProtocol { - /// Returns the scalar count for an element of the array. - var scalarCountPerElement: Int { - return shape.isEmpty ? 0 : shape.dropFirst().reduce(1, *) - } - - /// Returns the scalar index corresponding to an index in the leading - /// dimension of the array. - func scalarIndex(fromIndex index: Int) -> Int { - return scalarCountPerElement * index - } - - /// Returns the range of scalars corresponding to a range in the leading - /// dimension of the array. - func scalarSubrange( - from arraySubrange: Range - ) -> Range { - return scalarIndex(fromIndex: arraySubrange.lowerBound) - ..< scalarIndex(fromIndex: arraySubrange.upperBound) - } -} - -fileprivate extension String { - /// Returns a string of the specified length, padded with whitespace to the - /// left. - func leftPadded(toLength length: Int) -> String { - return repeatElement(" ", count: max(0, length - count)) + self - } -} - -// Common public protocol implementations. - -fileprivate extension _ShapedArrayProtocol - where Element : _ShapedArrayProtocol, Element == Element.Element -{ - /// Returns the whitespace separator between elements, given the current - /// indent level. - func separator(indentLevel: Int) -> String { - if rank == 1 { - return ", " - } - return String(repeating: "\n", count: rank - 1) + - String(repeating: " ", count: indentLevel + 1) - } - - /// A textual representation of the 1-D shaped array, starting at the given - /// indent level. Returns a summarized description if `summarizing` is true - /// and the element count exceeds twice the `edgeElementCount`. - /// - /// - Parameters: - /// - indentLevel: The indentation level. - /// - edgeElementCount: The maximum number of elements to print before and - /// after summarization via ellipses (`...`). - /// - maxScalarLength: The length of the longest scalar description in the - /// entire original array-to-print. - /// - maxScalarCountPerLine: The maximum number of scalars to print per - /// line, used when printing 1-D vectors. - /// - summarizing: If true, summarize description if element count exceeds - /// twice `edgeElementCount`. 
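Before the helper that follows, a rough illustration of the summarization behavior the doc comment above describes (the exact padding and spacing depend on the helpers below, so the printed form is an assumption):

```swift
// Illustrative only: a 1-D array summarized with edgeElementCount = 2.
let xs = ShapedArray(shape: [8], scalars: Array(0..<8))
print(xs.description(edgeElementCount: 2, summarizing: true))
// Expected form: [0, 1, ..., 6, 7]
```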
-  func vectorDescription(
-    indentLevel: Int, edgeElementCount: Int, maxScalarLength: Int,
-    maxScalarCountPerLine: Int, summarizing: Bool
-  ) -> String {
-    // Get scalar descriptions.
-    func scalarDescription(_ element: Element) -> String {
-      let description = String(describing: element)
-      return description.leftPadded(toLength: maxScalarLength)
-    }
-    var scalarDescriptions: [String] = []
-    if summarizing && count > 2 * edgeElementCount {
-      scalarDescriptions +=
-        prefix(edgeElementCount).map(scalarDescription)
-      scalarDescriptions += ["..."]
-      scalarDescriptions +=
-        suffix(edgeElementCount).map(scalarDescription)
-    } else {
-      scalarDescriptions += map(scalarDescription)
-    }
-
-    // Combine scalar descriptions into lines, based on the scalar count per
-    // line.
-    let lines = stride(
-      from: scalarDescriptions.startIndex, to: scalarDescriptions.endIndex,
-      by: maxScalarCountPerLine
-    ).map { i -> ArraySlice<String> in
-      let upperBound = Swift.min(
-        i.advanced(by: maxScalarCountPerLine), scalarDescriptions.count)
-      return scalarDescriptions[i..<upperBound]
-    }
-    // Return lines joined with separators.
-    let lineSeparator = ",\n" + String(repeating: " ", count: indentLevel + 1)
-    return lines.enumerated().reduce(into: "[") { result, entry in
-      let (i, line) = entry
-      result += line.joined(separator: ", ")
-      result += i != lines.count - 1 ? lineSeparator : ""
-    } + "]"
-  }
-
-  /// A textual representation of the shaped array, starting at the given
-  /// indent level. Returns a summarized description if `summarizing` is true
-  /// and the element count exceeds twice the `edgeElementCount`.
-  func description(
-    indentLevel: Int, edgeElementCount: Int, maxScalarLength: Int,
-    maxScalarCountPerLine: Int, summarizing: Bool
-  ) -> String {
-    // Handle scalars.
-    if let scalar = scalar {
-      return String(describing: scalar)
-    }
-    // Handle vectors, which have special line-width-sensitive logic.
-    if rank == 1 {
-      return vectorDescription(
-        indentLevel: indentLevel, edgeElementCount: edgeElementCount,
-        maxScalarLength: maxScalarLength,
-        maxScalarCountPerLine: maxScalarCountPerLine, summarizing: summarizing)
-    }
-    // Handle higher-rank tensors.
-    func elementDescription(_ element: Element) -> String {
-      return element.description(
-        indentLevel: indentLevel + 1, edgeElementCount: edgeElementCount,
-        maxScalarLength: maxScalarLength,
-        maxScalarCountPerLine: maxScalarCountPerLine, summarizing: summarizing)
-    }
-    var elementDescriptions: [String] = []
-    if summarizing && count > 2 * edgeElementCount {
-      elementDescriptions += prefix(edgeElementCount).map(elementDescription)
-      elementDescriptions += ["..."]
-      elementDescriptions += suffix(edgeElementCount).map(elementDescription)
-    } else {
-      elementDescriptions += map(elementDescription)
-    }
-    // Return lines joined with separators.
-    let lineSeparator = "," +
-      String(repeating: "\n", count: rank - 1) +
-      String(repeating: " ", count: indentLevel + 1)
-    return elementDescriptions.enumerated().reduce(into: "[") { result, entry in
-      let (i, elementDescription) = entry
-      result += elementDescription
-      result += i != elementDescriptions.count - 1 ? lineSeparator : ""
-    } + "]"
-  }
-}
-
-public extension _ShapedArrayProtocol
-  where Element : _ShapedArrayProtocol, Element == Element.Element
-{
-  /// A textual representation of the shaped array. Returns a summarized
-  /// description if `summarizing` is true and the element count exceeds twice
-  /// the `edgeElementCount`.
-  ///
-  /// - Parameters:
-  ///   - lineWidth: The max line width for printing. Used to determine number
-  ///     of scalars to print per line.
-  ///   - edgeElementCount: The maximum number of elements to print before and
-  ///     after summarization via ellipses (`...`).
-  ///   - summarizing: If true, summarize the description if the element count
-  ///     exceeds twice `edgeElementCount`.
-  func description(
-    lineWidth: Int = 80, edgeElementCount: Int = 3, summarizing: Bool = false
-  ) -> String {
-    // Compute number of scalars to print per line.
-    let maxScalarLength =
-      scalars.lazy.map { String(describing: $0).count }.max() ?? 3
-    let maxScalarCountPerLine = Swift.max(1, lineWidth / maxScalarLength)
-    // Call helper.
- return description( - indentLevel: 0, edgeElementCount: edgeElementCount, - maxScalarLength: maxScalarLength, - maxScalarCountPerLine: maxScalarCountPerLine, - summarizing: summarizing) - } - - /// A full, non-pretty-printed textual representation of the shaped array, - /// showing all scalars. - var fullDescription: String { - if let scalar = scalar { - return String(describing: scalar) - } - return "[\( map({"\($0.fullDescription)"}).joined(separator: ", ") )]" - } -} - -fileprivate extension _ShapedArrayProtocol where Scalar : Equatable { - func _isEqual(to other: Self) -> Bool { - return shape == other.shape && withUnsafeBufferPointer { selfBuf in - other.withUnsafeBufferPointer { otherBuf in - selfBuf.elementsEqual(otherBuf) - } - } - } -} - -//===----------------------------------------------------------------------===// -// ShapedArray -//===----------------------------------------------------------------------===// - -/// `ShapedArray` is a multi-dimensional array. It has a shape, which has type -/// `[Int]` and defines the array dimensions, and uses a `TensorBuffer` -/// internally as storage. -@_fixed_layout -public struct ShapedArray : _ShapedArrayProtocol { - /// Contiguous memory storing scalars. - internal var buffer: TensorBuffer - - /// The dimensions of the array. - public private(set) var shape: [Int] - - /// Creates a `ShapedArray` from a `TensorBuffer` and a shape. - internal init(buffer: __owned TensorBuffer, shape: __owned [Int]) { - precondition(buffer.count == shape.reduce(1, *), - "The scalar count of the buffer does not match the shape.") - self.buffer = buffer - self.shape = shape - debugLog("Done initializing ShapedArray from TensorBuffer.") - } -} - -fileprivate extension ShapedArray { - mutating func ensureUniquelyReferenced() { - if isKnownUniquelyReferenced(&buffer) { return } - let oldBuffer = buffer - debugLog("Unique reference check") - buffer = TensorBuffer.create(count: scalarCount) { buffPtr in - let ptr = buffPtr.baseAddress! - oldBuffer.withUnsafeBufferPointer { oldBuffPtr in - let oldPtr = oldBuffPtr.baseAddress! - ptr.initialize(from: oldPtr, count: scalarCount) - } - } - } -} - -internal extension ShapedArray where Scalar : _TensorFlowDataTypeCompatible { - @usableFromInline - init(owning cTensor: CTensor) { - // Including \(Scalar.self) into the message would cause non-deterministic - // crashes. - debugLog("Initializing ShapedArray from CTensor.") - shape = (0..(allocation: .native(.init(scalars)), - count: scalars.count) - self.init(buffer: buffer, shape: shape) - } - - /// Creates a `ShapedArray` with the specified shape and sequence of scalars - /// in row-major order. - /// - Precondition: The number of scalars must equal the product of the - /// dimensions of the shape. - init(shape: __owned [Int], - scalars: __shared S) where S.Element == Scalar { - let scalarCount = shape.reduce(1, *) - let buffer = TensorBuffer.create(count: scalarCount) { buffPtr in - let ptr = buffPtr.baseAddress! - // TODO: Refactor with better pointer initializers in Swift 4.1. - var i = 0 - for scalar in scalars { - guard i < scalarCount else { break } - ptr.advanced(by: i).initialize(to: scalar) - i += 1 - } - // If the sequence has fewer elements than the shape needs, this is a - // precondition failure. - precondition(i == scalarCount, - "The sequence has fewer elements than needed by the shape.") - } - self.init(buffer: buffer, shape: shape) - } - - /// Creates a `ShapedArray` from a scalar value. 
-  init(_ scalar: __owned Scalar) {
-    self.init(
-      buffer: TensorBuffer(allocation: .native(.init([scalar])), count: 1),
-      shape: []
-    )
-  }
-
-  /// Creates a `ShapedArray` with the specified shape and a single, repeated
-  /// scalar value.
-  /// - Parameters:
-  ///   - shape: The shape of the `ShapedArray`.
-  ///   - repeatedValue: The scalar value to repeat.
-  @inlinable @inline(__always)
-  @available(*, deprecated, renamed: "init(repeating:shape:)")
-  init(shape: __owned [Int], repeating repeatedValue: __owned Scalar) {
-    self.init(repeating: repeatedValue, shape: shape)
-  }
-
-  /// Creates a `ShapedArray` with the specified shape and a single, repeated
-  /// scalar value.
-  /// - Parameters:
-  ///   - repeatedValue: The scalar value to repeat.
-  ///   - shape: The shape of the `ShapedArray`.
-  init(repeating repeatedValue: __owned Scalar, shape: __owned [Int]) {
-    let scalarCount = shape.reduce(1, *)
-    let buffer = TensorBuffer(
-      allocation: .native(.init(Array(repeating: repeatedValue,
-                                      count: scalarCount))),
-      count: scalarCount
-    )
-    self.init(buffer: buffer, shape: shape)
-  }
-}
-
-extension ShapedArray : RandomAccessCollection, MutableCollection {
-  public typealias Index = Int
-  public typealias Element = ShapedArraySlice<Scalar>
-  public typealias SubSequence = ShapedArraySlice<Scalar>
-
-  public var indices: Range<Int> {
-    return 0..<count
-  }
-
-  public var startIndex: Int {
-    return 0
-  }
-
-  public var endIndex: Int {
-    return count
-  }
-
-  /// Access the element array specified by an index in the leading dimension.
-  /// - Parameter index: Index of the element array.
-  public subscript(index: Int) -> Element {
-    get {
-      precondition(!isScalar,
-                   "Scalar has no elements and cannot be subscripted.")
-      precondition(index < endIndex, "ShapedArray index is out of range")
-      precondition(index >= startIndex,
-                   "Negative ShapedArray index is out of range")
-      return ShapedArraySlice(base: self, baseIndices: [index])
-    }
-    set {
-      precondition(!isScalar,
-                   "Scalar has no elements and cannot be subscripted.")
-      precondition(index < endIndex, "ShapedArray index is out of range")
-      precondition(index >= startIndex,
-                   "Negative ShapedArray index is out of range")
-      precondition(shape.dropFirst().elementsEqual(newValue.shape),
-                   "Element shape mismatch")
-      let scalarIndex = self.scalarIndex(fromIndex: index)
-      withUnsafeMutableBufferPointer { destBuffPtr in
-        let ptr = destBuffPtr.baseAddress!.advanced(by: scalarIndex)
-        newValue.withUnsafeBufferPointer { srcBuffPtr in
-          ptr.initialize(from: srcBuffPtr.baseAddress!, count: srcBuffPtr.count)
-        }
-      }
-    }
-  }
-
-  /// Access the subarray specified by a contiguous range of indices.
-  /// - Parameter bounds: Contiguous range of indices.
- public subscript(bounds: Range) -> SubSequence { - get { - precondition(!isScalar, - "Scalar has no elements and cannot be subscripted.") - precondition( - bounds.lowerBound >= startIndex && bounds.lowerBound <= endIndex && - bounds.upperBound >= startIndex && bounds.upperBound <= endIndex, - "ShapedArray indices are out of range") - return ShapedArraySlice(base: self, bounds: bounds) - } - set { - precondition(!isScalar, - "Scalar has no elements and cannot be subscripted.") - precondition( - indices ~= bounds.lowerBound && indices ~= bounds.upperBound - 1, - "ShapedArray indices are out of range") - let subArrayShape = [bounds.count] + shape.dropFirst() - precondition(subArrayShape == newValue.shape, - "Subarray shape mismatch.") - let scalarIndex = self.scalarIndex(fromIndex: bounds.lowerBound) - withUnsafeMutableBufferPointer { destBuffPtr in - let ptr = destBuffPtr.baseAddress!.advanced(by: scalarIndex) - newValue.withUnsafeBufferPointer { srcBuffPtr in - ptr.initialize(from: srcBuffPtr.baseAddress!, count: srcBuffPtr.count) - } - } - } - } -} - -public extension ShapedArray { - /// Calls a closure with a pointer to the array’s contiguous storage. - /// - Parameter body: A closure with an `UnsafeBufferPointer` parameter that - /// points to the contiguous storage for the array. If no such storage - /// exists, it is created. If body has a return value, that value is also - /// used as the return value for the `withUnsafeBufferPointer(_:)` method. - /// The pointer argument is valid only for the duration of the method’s - /// execution. - func withUnsafeBufferPointer( - _ body: (UnsafeBufferPointer) throws -> Result - ) rethrows -> Result { - return try buffer.withUnsafeBufferPointer { ptr in - try body(ptr) - } - } - - /// Calls the given closure with a pointer to the array’s mutable contiguous - /// storage. - /// - Parameter body: A closure with an `UnsafeMutableBufferPointer` parameter - /// that points to the contiguous storage for the array. If no such storage - /// exists, it is created. If body has a return value, that value is also - /// used as the return value for the `withUnsafeMutableBufferPointer(_:)` - /// method. The pointer argument is valid only for the duration of the - /// method’s execution. - mutating func withUnsafeMutableBufferPointer( - _ body: (inout UnsafeMutableBufferPointer) throws -> Result - ) rethrows -> Result { - ensureUniquelyReferenced() - return try buffer.withUnsafeMutableBufferPointer { ptr in - try body(&ptr) - } - } -} - -// Tensor conversion. -extension ShapedArray where Scalar : TensorFlowScalar { - var byteCount: Int { - return MemoryLayout.stride * scalarCount - } - - @usableFromInline - __consuming func makeTensorHandle() -> TensorHandle { - // This initializer is designed to optimize conversion from TF-allocated - // `ShapedArray` instances. - switch buffer.allocation { - case let .native(box): - precondition(rank <= Int32.max, """ - Conversion to TensorHandle is undefined when rank exceeds Int32.max. - """) - precondition(shape.allSatisfy { $0 <= Int32.max }, """ - Conversion to TensorHandle is undefined when shape dimensions exceed \ - Int32.max. - """) - return TensorHandle( - shape: shape, - scalarsInitializer: { addr in - addr.initialize(from: box.array, count: scalarCount) - } - ) - case let .tensorFlow(cTensor): - return TensorHandle(copyingFromCTensor: cTensor) - } - } -} - -// Tensor conversion. 
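Because the mutable accessor above funnels every write through `ensureUniquelyReferenced()`, `ShapedArray` keeps value semantics even though `TensorBuffer` is a class. A short sketch, grounded in the code above:

```swift
// Copy-on-write in action: `b` keeps the original storage when `a` mutates.
var a = ShapedArray(shape: [2, 2], scalars: [1, 2, 3, 4])
let b = a                                  // shares the underlying TensorBuffer
a.withUnsafeMutableBufferPointer { buffer in
  buffer[0] = 100                          // triggers a unique copy of `a` first
}
// a.scalars == [100, 2, 3, 4]; b.scalars == [1, 2, 3, 4]
```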
-public extension Tensor {
-  init(_ array: __owned ShapedArray<Scalar>) {
-    self.init(handle: array.makeTensorHandle())
-  }
-}
-
-// Array literal conversion.
-extension ShapedArray : ExpressibleByArrayLiteral
-  where Scalar : TensorFlowScalar {
-  public typealias ArrayLiteralElement = _TensorElementLiteral<Scalar>
-  @inlinable @inline(__always)
-  public init(arrayLiteral elements: _TensorElementLiteral<Scalar>...) {
-    self = Tensor(_tensorElementLiterals: elements).array
-  }
-}
-
-// Equatable conformance.
-extension ShapedArray : Equatable where Scalar : Equatable {
-  public static func == (lhs: ShapedArray, rhs: ShapedArray) -> Bool {
-    return lhs._isEqual(to: rhs)
-  }
-}
-
-// Hashable conformance.
-extension ShapedArray : Hashable where Scalar : Hashable {
-  public func hash(into hasher: inout Hasher) {
-    hasher.combine(shape)
-    hasher.combine(scalars)
-  }
-}
-
-// String conversion.
-extension ShapedArray : CustomStringConvertible {
-  /// A textual representation of this `ShapedArray`.
-  ///
-  /// - Note: use `fullDescription` for a non-pretty-printed description showing
-  ///   all scalars.
-  public var description: String {
-    // Summarize if there are more than 1000 scalars.
-    let summarizing = scalarCount > 1000
-    return description(summarizing: summarizing)
-  }
-}
-
-// Xcode Playground display conversion.
-extension ShapedArray : CustomPlaygroundDisplayConvertible {
-  public var playgroundDescription: Any {
-    return description
-  }
-}
-
-// Mirror representation, used by debugger/REPL.
-extension ShapedArray : CustomReflectable {
-  public var customMirror: Mirror {
-    return Mirror(self, children: [], displayStyle: .struct)
-  }
-}
-
-// Codable conformance.
-extension ShapedArray : Codable where Scalar : Codable {
-  private enum CodingKeys: String, CodingKey {
-    case shape
-    case scalars
-  }
-
-  public init(from decoder: Decoder) throws {
-    let container = try decoder.container(keyedBy: CodingKeys.self)
-    let shape = try container.decode([Int].self, forKey: .shape)
-    let scalars = try container.decode([Scalar].self, forKey: .scalars)
-    self.init(shape: shape, scalars: scalars)
-  }
-
-  public func encode(to encoder: Encoder) throws {
-    var container = encoder.container(keyedBy: CodingKeys.self)
-    try container.encode(shape, forKey: .shape)
-    try container.encode(scalars, forKey: .scalars)
-  }
-}
-
-//===----------------------------------------------------------------------===//
-// ShapedArraySlice
-//===----------------------------------------------------------------------===//
-
-/// A contiguous slice of a `ShapedArray` or `ShapedArraySlice` instance.
-///
-/// `ShapedArraySlice` enables fast, efficient operations on contiguous slices
-/// of `ShapedArray` instances. `ShapedArraySlice` instances do not have their
-/// own storage. Instead, they provide a view onto the storage of their base
-/// `ShapedArray`. `ShapedArraySlice` can represent two different kinds of
-/// slices: element arrays and subarrays.
-///
-/// Element arrays are subdimensional elements of a `ShapedArray`: their rank
-/// is one less than that of their base. Element array slices are obtained by
-/// indexing a `ShapedArray` instance with a singular `Int` index.
-///
-/// For example:
-///
-///     var matrix = ShapedArray(shape: [2, 2], scalars: [0, 1, 2, 3])
-///     // `matrix` represents [[0, 1], [2, 3]].
-///
-///     let element = matrix[0]
-///     // `element` is a `ShapedArraySlice` with shape [2]. It is an element
-///     // array, specifically the first element in `matrix`: [0, 1].
-///
-///     matrix[1] = ShapedArraySlice(shape: [2], scalars: [4, 8])
-///     // The second element in `matrix` has been mutated.
-///     // `matrix` now represents [[0, 1], [4, 8]].
-///
-/// Subarrays are a contiguous range of the elements in a `ShapedArray`.
-/// The rank of a subarray is the same as that of its base, but its leading
-/// dimension is the count of the slice range. Subarray slices are obtained by
-/// indexing a `ShapedArray` with a `Range<Int>` that represents a range of
-/// elements (in the leading dimension). Methods like `prefix(:)` and
-/// `suffix(:)` that internally index with a range also produce subarrays.
-///
-/// For example:
-///
-///     let zeros = ShapedArray(repeating: 0, shape: [3, 2])
-///     var matrix = ShapedArray(shape: [3, 2], scalars: Array(0..<6))
-///     // `zeros` represents [[0, 0], [0, 0], [0, 0]].
-///     // `matrix` represents [[0, 1], [2, 3], [4, 5]].
-///
-///     let subarray = matrix.prefix(2)
-///     // `subarray` is a `ShapedArraySlice` with shape [2, 2]. It is a slice
-///     // of the first 2 elements in `matrix` and represents [[0, 1], [2, 3]].
-///
-///     matrix[0..<2] = zeros.prefix(2)
-///     // The first 2 elements in `matrix` have been mutated.
-///     // `matrix` now represents [[0, 0], [0, 0], [4, 5]].
-@_fixed_layout
-public struct ShapedArraySlice<Scalar> : _ShapedArrayProtocol {
-  /// The underlying `ShapedArray` of the slice.
-  @usableFromInline internal var base: ShapedArray<Scalar>
-  /// The subdimensional indices of a slice.
-  @usableFromInline internal var baseIndices: [Int]
-  /// The subarray bounds of a slice.
-  @usableFromInline internal var bounds: Range<Int>?
-
-  /// Creates a `ShapedArraySlice` from a base `ShapedArray`, with the specified
-  /// subdimensional indices and subarray bounds.
-  @inlinable
-  internal init(
-    base: __owned ShapedArray<Scalar>,
-    baseIndices indices: __owned [Int] = [],
-    bounds: Range<Int>? = nil
-  ) {
-    precondition(indices.count <= base.rank,
-                 "Number of base indices exceeds base rank")
-    precondition(zip(base.shape, indices).allSatisfy { $1 >= 0 && $1 < $0 },
-                 "Base indices are out of range")
-    self.base = base
-    self.baseIndices = indices
-    self.bounds = bounds
-  }
-}
-
-public extension ShapedArraySlice {
-  /// Indexing depth of this slice, i.e. the difference in rank between the base
-  /// and the slice.
-  internal var indexingDepth: Int {
-    return baseIndices.count
-  }
-
-  /// The number of dimensions of the array.
-  var rank: Int {
-    return base.rank - indexingDepth
-  }
-
-  /// The shape of the array.
-  var shape: [Int] {
-    if let bounds = bounds {
-      return [bounds.count] + Array(base.shape.dropFirst(indexingDepth + 1))
-    }
-    return Array(base.shape.dropFirst(indexingDepth))
-  }
-
-  /// The total number of scalars in the array.
-  var scalarCount: Int {
-    return shape.reduce(1, *)
-  }
-}
-
-// Slice initializers.
-public extension ShapedArraySlice {
-  /// Creates a `ShapedArraySlice` with the specified shape and contiguous
-  /// scalars in row-major order.
-  /// - Precondition: The number of scalars must equal the product of the
-  ///   dimensions of the shape.
-  init(shape: __owned [Int], scalars: __owned [Scalar]) {
-    self.init(base: ShapedArray(shape: shape, scalars: scalars))
-  }
-
-  /// Creates a `ShapedArraySlice` with the specified shape and sequence of
-  /// scalars in row-major order.
-  /// - Precondition: The number of scalars must equal the product of the
-  ///   dimensions of the shape.
-  init<S : Sequence>(shape: __owned [Int],
-                     scalars: __shared S) where S.Element == Scalar {
-    self.init(base: ShapedArray(shape: shape, scalars: scalars))
-  }
-
-  /// Creates a `ShapedArraySlice` from a scalar value.
-  init(_ scalar: __owned Scalar) {
-    self.init(base: ShapedArray(scalar))
-  }
-
-  /// Creates a `ShapedArraySlice` with the specified shape and a single,
-  /// repeated scalar value.
-  /// - Parameters:
-  ///   - repeatedValue: The scalar value to repeat.
-  ///   - shape: The shape of the `ShapedArraySlice`.
-  @inlinable @inline(__always)
-  @available(*, deprecated, renamed: "init(repeating:shape:)")
-  init(shape: __owned [Int], repeating repeatedValue: __owned Scalar) {
-    self.init(repeating: repeatedValue, shape: shape)
-  }
-
-  /// Creates a `ShapedArraySlice` with the specified shape and a single,
-  /// repeated scalar value.
-  /// - Parameters:
-  ///   - repeatedValue: The scalar value to repeat.
-  ///   - shape: The shape of the `ShapedArraySlice`.
-  init(repeating repeatedValue: __owned Scalar, shape: __owned [Int]) {
-    self.init(base: ShapedArray(repeating: repeatedValue, shape: shape))
-  }
-}
-
-internal extension ShapedArraySlice {
-  /// The range of scalars from the base `ShapedArray` represented by a
-  /// `ShapedArraySlice`.
-  var scalarRange: Range<Int> {
-    let trimmedShape = base.shape.dropFirst()
-    var (start, end) = baseIndices.enumerated()
-      .reduce((0, base.scalarCount)) { (acc, next) in
-        let stride = trimmedShape.dropFirst(next.offset).reduce(1, *)
-        if next.offset == indexingDepth - 1 {
-          let temp = acc.0 + next.element * stride
-          return (temp, temp + stride)
-        }
-        return (acc.0 + next.element * stride, acc.1)
-      }
-    if let bounds = bounds {
-      let stride = trimmedShape.dropFirst(indexingDepth).reduce(1, *)
-      let oldStart = start
-      start = start + bounds.startIndex * stride
-      end = oldStart + bounds.endIndex * stride
-    }
-    return start..<end
-  }
-}
-
-public extension ShapedArraySlice {
-  /// Calls a closure with a pointer to the `ShapedArraySlice`’s contiguous
-  /// storage.
-  /// - Parameter body: A closure with an `UnsafeBufferPointer` parameter that
-  ///   points to the contiguous storage for the `ShapedArraySlice`. If no such
-  ///   storage exists, it is created. If body has a return value, that value is
-  ///   also used as the return value for the `withUnsafeBufferPointer(_:)`
-  ///   method. The pointer argument is valid only for the duration of the
-  ///   method’s execution.
-  func withUnsafeBufferPointer<Result>(
-    _ body: (UnsafeBufferPointer<Scalar>) throws -> Result
-  ) rethrows -> Result {
-    return try base.withUnsafeBufferPointer { baseBuffPtr in
-      let basePtr = baseBuffPtr.baseAddress!
-      let ptr = UnsafeBufferPointer(
-        start: basePtr.advanced(by: scalarRange.startIndex),
-        count: scalarRange.count
-      )
-      return try body(ptr)
-    }
-  }
-
-  /// Calls the given closure with a pointer to the `ShapedArraySlice`’s mutable
-  /// contiguous storage.
-  /// - Parameter body: A closure with an `UnsafeMutableBufferPointer` parameter
-  ///   that points to the contiguous storage for the `ShapedArraySlice`. If no
-  ///   such storage exists, it is created. If body has a return value, that
-  ///   value is also used as the return value for the
-  ///   `withUnsafeMutableBufferPointer(_:)` method. The pointer argument is
-  ///   valid only for the duration of the method’s execution.
-  mutating func withUnsafeMutableBufferPointer<Result>(
-    _ body: (inout UnsafeMutableBufferPointer<Scalar>) throws -> Result
-  ) rethrows -> Result {
-    // NOTE: Copying `scalarRange` to a local variable here is necessary for
-    // exclusive access.
-    let scalarRange = self.scalarRange
-    return try base.withUnsafeMutableBufferPointer { baseBuffPtr in
-      let basePtr = baseBuffPtr.baseAddress!
-      var ptr = UnsafeMutableBufferPointer(
-        start: basePtr.advanced(by: scalarRange.startIndex),
-        count: scalarRange.count
-      )
-      return try body(&ptr)
-    }
-  }
-}
-
-extension ShapedArraySlice : RandomAccessCollection, MutableCollection {
-  public typealias Index = Int
-  public typealias Element = ShapedArraySlice
-  public typealias SubSequence = ShapedArraySlice
-
-  public var indices: Range<Int> {
-    if let bounds = bounds {
-      return bounds
-    } else if indexingDepth < base.rank {
-      return 0..<count
-    }
-    return 0..<0
-  }
-
-  public var startIndex: Int {
-    return indices.lowerBound
-  }
-
-  public var endIndex: Int {
-    return indices.upperBound
-  }
-
-  /// Access the element array specified by an index in the leading dimension.
-  /// - Parameter index: Index of the element array.
-  public subscript(index: Int) -> Element {
-    get {
-      precondition(!isScalar,
-                   "Scalar has no elements and cannot be subscripted.")
-      precondition(index < endIndex, "ShapedArraySlice index is out of range")
-      precondition(index >= startIndex,
-                   "ShapedArraySlice index is out of range (before startIndex)")
-      return ShapedArraySlice(base: base,
-                              baseIndices: baseIndices + [index],
-                              bounds: nil)
-    }
-    set {
-      precondition(!isScalar,
-                   "Scalar has no elements and cannot be subscripted.")
-      precondition(index < endIndex, "ShapedArraySlice index is out of range")
-      precondition(index >= startIndex,
-                   "ShapedArraySlice index is out of range (before startIndex)")
-      precondition(shape.dropFirst().elementsEqual(newValue.shape),
-                   "Element shape mismatch")
-      let scalarIndex = self.scalarIndex(fromIndex: index)
-      withUnsafeMutableBufferPointer { destBuffPtr in
-        let ptr = destBuffPtr.baseAddress!.advanced(by: scalarIndex)
-        newValue.withUnsafeBufferPointer { srcBuffPtr in
-          ptr.initialize(from: srcBuffPtr.baseAddress!, count: srcBuffPtr.count)
-        }
-      }
-    }
-  }
-
-  /// Access the subarray specified by a contiguous range of indices.
-  /// - Parameter bounds: Contiguous range of indices.
-  public subscript(bounds: Range<Int>) -> SubSequence {
-    get {
-      precondition(!isScalar,
-                   "Scalar has no elements and cannot be subscripted")
-      precondition(
-        indices ~= bounds.lowerBound && indices ~= bounds.upperBound - 1,
-        "ShapedArraySlice indices are out of range")
-      return ShapedArraySlice(base: base,
-                              baseIndices: baseIndices,
-                              bounds: bounds)
-    }
-    set {
-      precondition(!isScalar,
-                   "Scalar has no elements and cannot be subscripted")
-      precondition(
-        indices ~= bounds.lowerBound && indices ~= bounds.upperBound - 1,
-        "ShapedArraySlice indices are out of range")
-      let subArrayShape = [bounds.count] + shape.dropFirst()
-      precondition(subArrayShape == newValue.shape, "Subarray shape mismatch")
-      let scalarIndex = self.scalarIndex(fromIndex: bounds.lowerBound)
-      withUnsafeMutableBufferPointer { destBuffPtr in
-        let ptr = destBuffPtr.baseAddress!.advanced(by: scalarIndex)
-        newValue.withUnsafeBufferPointer { srcBuffPtr in
-          ptr.initialize(from: srcBuffPtr.baseAddress!, count: srcBuffPtr.count)
-        }
-      }
-    }
-  }
-}
-
-// Tensor conversion.
-public extension ShapedArraySlice where Scalar : TensorFlowScalar {
-  init(_ tensor: __shared Tensor<Scalar>) {
-    self.init(base: tensor.array)
-  }
-}
-
-// Array literal conversion.
-extension ShapedArraySlice : ExpressibleByArrayLiteral
-  where Scalar : TensorFlowScalar {
-  public typealias ArrayLiteralElement = _TensorElementLiteral<Scalar>
-  @inlinable @inline(__always)
-  public init(arrayLiteral elements: _TensorElementLiteral<Scalar>...) {
-    self.init(base: Tensor(_tensorElementLiterals: elements).array)
-  }
-}
-
-// Equatable conformance.
-extension ShapedArraySlice : Equatable where Scalar : Equatable {
-  public static func == (lhs: ShapedArraySlice, rhs: ShapedArraySlice) -> Bool {
-    return lhs._isEqual(to: rhs)
-  }
-}
-
-// Hashable conformance.
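To tie the slice kinds together, a short sketch of element-array and subarray slicing, based on the APIs above:

```swift
// Element arrays drop the leading dimension; subarrays keep the rank.
var matrix = ShapedArray(shape: [3, 2], scalars: [0, 1, 2, 3, 4, 5])
let row = matrix[1]          // element array: rank 1, shape [2], scalars [2, 3]
let front = matrix[0..<2]    // subarray: rank 2, shape [2, 2]
matrix[2] = ShapedArraySlice(shape: [2], scalars: [8, 9])
// matrix.scalars == [0, 1, 2, 3, 8, 9]. `row` and `front` are unaffected:
// each slice captured its base ShapedArray by value when it was created.
```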
-extension ShapedArraySlice : Hashable where Scalar : Hashable { - public func hash(into hasher: inout Hasher) { - hasher.combine(shape) - hasher.combine(scalars) - } -} - -// String conversion. -extension ShapedArraySlice : CustomStringConvertible { - /// A textual representation of this `ShapedArraySlice`. - /// - /// - Note: use `fullDescription` for a non-pretty-printed representation - /// showing all scalars. - public var description: String { - // Summarize if there are more than 1000 scalars. - let summarizing = scalarCount > 1000 - return description(summarizing: summarizing) - } -} - -// Xcode Playground display conversion. -extension ShapedArraySlice : CustomPlaygroundDisplayConvertible { - public var playgroundDescription: Any { - return description - } -} - -// Mirror representation, used by debugger/REPL. -extension ShapedArraySlice : CustomReflectable { - public var customMirror: Mirror { - return Mirror(self, children: [], displayStyle: .struct) - } -} - -// Codable conformance. -extension ShapedArraySlice : Codable where Scalar : Codable { - private enum CodingKeys : String, CodingKey { - case shape - case scalars - } - - public func encode(to encoder: Encoder) throws { - var container = encoder.container(keyedBy: CodingKeys.self) - try container.encode(shape, forKey: .shape) - try container.encode(scalars, forKey: .scalars) - } - - public init(from decoder: Decoder) throws { - let container = try decoder.container(keyedBy: CodingKeys.self) - let shape = try container.decode([Int].self, forKey: .shape) - let scalars = try container.decode([Scalar].self, forKey: .scalars) - self.init(shape: shape, scalars: scalars) - } -} diff --git a/stdlib/public/TensorFlow/StringOps.swift b/stdlib/public/TensorFlow/StringOps.swift deleted file mode 100644 index 7c2ff9715cfb4..0000000000000 --- a/stdlib/public/TensorFlow/StringOps.swift +++ /dev/null @@ -1,29 +0,0 @@ -//===-- StringOps.swift --------------------------------------*- swift -*-===// -// -// This source file is part of the Swift.org open source project -// -// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors -// Licensed under Apache License v2.0 with Runtime Library Exception -// -// See https://swift.org/LICENSE.txt for license information -// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors -// -//===----------------------------------------------------------------------===// -// -// This file contains definitions of most string tensor operations. -// -//===----------------------------------------------------------------------===// - -//===----------------------------------------------------------------------===// -// Element-wise binary comparison -//===----------------------------------------------------------------------===// - -public extension StringTensor { - /// Computes `self == other` element-wise. - /// - Note: `elementsEqual` supports broadcasting. - @inlinable @inline(__always) - func elementsEqual(_ other: StringTensor) -> Tensor { - return #tfop("Equal", self.handle, other.handle, - T$dtype: String.tensorFlowDataType) - } -} diff --git a/stdlib/public/TensorFlow/StringTensor.swift b/stdlib/public/TensorFlow/StringTensor.swift deleted file mode 100644 index 162a9dc48bbd4..0000000000000 --- a/stdlib/public/TensorFlow/StringTensor.swift +++ /dev/null @@ -1,151 +0,0 @@ -//===-- StringTensor.swift -----------------------------------*- swift -*-===// -// -// This source file is part of the Swift.org open source project -// -// Copyright (c) 2014 - 2017 Apple Inc. 
and the Swift project authors
-// Licensed under Apache License v2.0 with Runtime Library Exception
-//
-// See https://swift.org/LICENSE.txt for license information
-// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
-//
-//===----------------------------------------------------------------------===//
-//
-// Defines the StringTensor type.
-//
-//===----------------------------------------------------------------------===//
-
-import CTensorFlow
-
-//===----------------------------------------------------------------------===//
-// StringTensor
-//===----------------------------------------------------------------------===//
-
-/// `StringTensor` is a multi-dimensional array whose elements are `String`s.
-@_fixed_layout
-public struct StringTensor {
-  /// The underlying `TensorHandle`.
-  /// - Note: `handle` is public to allow user defined ops, but should not
-  ///   normally be used otherwise.
-  public let handle: TensorHandle<String>
-
-  @inlinable
-  public init(handle: TensorHandle<String>) {
-    self.handle = handle
-  }
-}
-
-//===----------------------------------------------------------------------===//
-// Compiler intrinsics
-//===----------------------------------------------------------------------===//
-
-/// This compiler builtin is known by the partitioning pass, which recognizes it
-/// and promotes calls to it to being in graph when it can. This signature was
-/// designed to align with the requirements of the `Const` TensorFlow operation.
-@usableFromInline @inline(never)
-@_silgen_name("__tf_string_tensor_from_strings")
-func _TFStringTensorFromStrings(
-  _ scalars: [String], shape: [Int]
-) -> TensorHandle<String> {
-  let contiguousSize = shape.reduce(1, *)
-  precondition(scalars.count == contiguousSize,
-               "The number of scalars does not match the shape.")
-
-  // utf8CString is null-terminated. TF APIs want the strings without
-  // null-terminators.
-  let cStrings = scalars.map { $0.utf8CString.dropLast() }
-
-  let tfEncodedSizes = cStrings.map { TF_StringEncodedSize($0.count) }
-
-  // Format information copied from tensorflow/c/c_api.h:
-  // The format for TF_STRING tensors is:
-  //   start_offset: array[uint64]
-  //   data: byte[...]
-  //
-  // The string length (as a varint), followed by the contents of the string
-  // is encoded at data[start_offset[i]].
-
-  // The size of the "start_offset" region.
-  let startOffsetsByteCount = scalars.count * MemoryLayout<UInt64>.stride
-
-  // The size of the "data" region.
-  let dataByteCount = tfEncodedSizes.reduce(0, +) * MemoryLayout<Int8>.stride
-
-  return TensorHandle(
-    shape: shape,
-    byteCount: startOffsetsByteCount + dataByteCount,
-    bufferInitializer: { tensorBuffer in
-      // Initialize the "start_offset" region.
-      var startOffset: UInt64 = 0
-      var startOffsetAddr =
-        tensorBuffer.bindMemory(to: UInt64.self, capacity: scalars.count)
-      for tfEncodedSize in tfEncodedSizes {
-        startOffsetAddr.initialize(to: startOffset)
-        startOffsetAddr = startOffsetAddr.advanced(by: 1)
-        startOffset = startOffset + UInt64(tfEncodedSize)
-      }
-
-      // Initialize the "data" region.
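As a concrete reading of the layout comment above, here is the encoding this function produces for a hypothetical 1-D tensor `["hi", "worlds"]` (an assumed input; both lengths are under 128, so each varint is a single byte):

```swift
// Worked example of the documented TF_STRING layout (values are assumptions):
//   scalars             = ["hi", "worlds"]
//   tfEncodedSizes      = [1 + 2, 1 + 6] = [3, 7]   // varint length + contents
//   start_offset region = [0, 3] as uint64          // 2 * 8 = 16 bytes
//   data region         = 02 'h' 'i'  06 'w' 'o' 'r' 'l' 'd' 's'  // 10 bytes
//   total byteCount     = 16 + 10 = 26
```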
-      var dataAddr = tensorBuffer.advanced(by: startOffsetsByteCount)
-        .bindMemory(to: Int8.self, capacity: dataByteCount)
-      let status = TF_NewStatus()
-      for (cString, tfEncodedSize) in zip(cStrings, tfEncodedSizes) {
-        _ = cString.withUnsafeBufferPointer { buffer in
-          TF_StringEncode(buffer.baseAddress, buffer.count, dataAddr,
-                          tfEncodedSize, status)
-        }
-        checkOk(status)
-        dataAddr = dataAddr.advanced(by: tfEncodedSize)
-      }
-      TF_DeleteStatus(status)
-    }
-  )
-}
-
-@usableFromInline @inline(never)
-@_silgen_name("__tf_string_tensor_from_string")
-func _TFStringTensorFromString(_ scalar: String) -> TensorHandle<String> {
-  return _TFStringTensorFromStrings([scalar], shape: [])
-}
-
-@usableFromInline @inline(never)
-@_silgen_name("__tf_string_tensor_from_strings_1d")
-func _TFStringTensorFromStrings1D(_ scalars: [String]) -> TensorHandle<String> {
-  return _TFStringTensorFromStrings(scalars, shape: [scalars.count])
-}
-
-//===----------------------------------------------------------------------===//
-// Initialization
-//===----------------------------------------------------------------------===//
-
-public extension StringTensor {
-  /// Creates a tensor from a scalar value.
-  @inlinable @inline(__always)
-  init(_ value: String) {
-    self.init(handle: _TFStringTensorFromString(value))
-  }
-
-  /// Creates a 1D tensor from contiguous scalars.
-  ///
-  /// - Parameters:
-  ///   - vector: The scalar contents of the tensor.
-  ///
-  @inlinable @inline(__always)
-  init(_ vector: [String]) {
-    self.init(handle: _TFStringTensorFromStrings1D(vector))
-  }
-}
-
-//===----------------------------------------------------------------------===//
-// Array conversion
-//===----------------------------------------------------------------------===//
-
-public extension StringTensor {
-  var array: ShapedArray<String> {
-    debugLog("Returning a host copy of string array.")
-    return handle.makeHostCopy()
-  }
-
-  var scalars: [String] {
-    return array.scalars
-  }
-}
diff --git a/stdlib/public/TensorFlow/Tensor.swift b/stdlib/public/TensorFlow/Tensor.swift
deleted file mode 100644
index 9ab73ac850ea9..0000000000000
--- a/stdlib/public/TensorFlow/Tensor.swift
+++ /dev/null
@@ -1,915 +0,0 @@
-//===-- Tensor.swift ------------------------------------------*- swift -*-===//
-//
-// This source file is part of the Swift.org open source project
-//
-// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
-// Licensed under Apache License v2.0 with Runtime Library Exception
-//
-// See https://swift.org/LICENSE.txt for license information
-// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
-//
-//===----------------------------------------------------------------------===//
-//
-// This is the core Tensor abstraction, which is conceptually equivalent to a
-// NumPy ndarray. It carries no rank information in its static type, so it can
-// be used by model developers who don't want it.
-//
-//===----------------------------------------------------------------------===//
-
-#if os(macOS) || os(iOS) || os(watchOS) || os(tvOS)
-import Darwin
-#else
-import Glibc
-#endif
-import CTensorFlow
-
-infix operator .== : ComparisonPrecedence
-
-//===----------------------------------------------------------------------===//
-// Tensor
-//===----------------------------------------------------------------------===//
-
-/// `Tensor` is a multi-dimensional array used for computation. It is a wrapper
-/// around a `TensorHandle`.
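The doc comment above captures the key design point: rank lives in the value, not in the static type. A quick sketch, using the initializers defined later in this file:

```swift
// Both values have the same static type, Tensor<Float>; rank is dynamic.
let vector = Tensor<Float>([1, 2, 3])                             // rank 1
let matrix = Tensor<Float>(shape: [2, 2], scalars: [1, 2, 3, 4])  // rank 2
```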
-@_fixed_layout -public struct Tensor : TensorProtocol { - /// The underlying `TensorHandle`. - /// - Note: `handle` is public to allow user defined ops, but should not - /// normally be used otherwise. - public let handle: TensorHandle - - @inlinable - public init(handle: TensorHandle) { - self.handle = handle - } -} - -//===----------------------------------------------------------------------===// -// Compiler intrinsics -//===----------------------------------------------------------------------===// -// -// By default, when a `Tensor` value is implicitly passed between host and -// tensor code, the partitioning pass will generate a warning. Users can -// indicate that they are doing something intentionally by using these methods, -// which silences the warning. -// -// TODO: These would be nicer defined as builtins rather than "well known -// functions". - -@usableFromInline @inline(never) -@_silgen_name("__tf_to_accel") -func _TFToAcclerator(_ handle: TensorHandle) -> TensorHandle { - return handle -} - -@usableFromInline @inline(never) -@_silgen_name("__tf_to_host") -func _TFToHost(_ handle: TensorHandle) - -> TensorHandle { - return handle -} - -/// This function converts a `TensorHandle` that is known to have a 0-d value -/// into the scalar that it produces. This is intended for use in op definitions -/// where it is known that the op always returns a 0-d tensor. It is not for use -/// in general code. -@inlinable @inline(__always) -func _TFGetScalarOrDie( - _ handle: TensorHandle -) -> Scalar { - return Scalar._getScalarOrDie(handle) -} - -/// This function converts a `TensorHandle` into a scalar if it is 0-d, or -/// returns nil otherwise. -@inlinable @inline(__always) -func _TFGetScalar( - _ handle: TensorHandle -) -> Scalar? { - return Scalar._getScalar(handle) -} - -/// This compiler builtin is known by the partitioning pass, which recognizes it -/// and promotes calls to it to being in graph when it can. This signature was -/// designed to align with the requirements of the `Const` TensorFlow operation. -@usableFromInline @inline(never) -@_silgen_name("__tf_tensor_from_scalars") -func _TFTensorFromScalars( - _ scalars: [Scalar], shape: [Int] -) -> TensorHandle { - let contiguousSize = shape.reduce(1, *) - precondition(scalars.count == contiguousSize, - "The number of scalars does not match the shape.") - return TensorHandle( - shape: shape, - scalarsInitializer: { addr in - scalars.withUnsafeBufferPointer { ptr in - addr.assign(from: ptr.baseAddress!, count: contiguousSize) - } - } - ) -} - -/// In graph mode, the deabstraction pass transforms this function call to -/// either a "Const" graph_op (if `scalar` is a compile-time constant), or a -/// "tfc.scalarToTensor" graph_op. In the latter case, the partition pass uses -/// it to do scalar promotion, and transforms it away before entering graph -/// lowering. e.g. For user code: -/// let x_scalar = x_tensor.mean() -/// let y_scalar = y_tensor.mean() -/// let z_scalar = x_scalar + y_scalar -/// let z_tensor = Tensor(z_scalar) -/// -/// The scalar addition can be promoted into graph, through the -/// "tfc.scalarToTensor" graph_op generated from Tensor(z_scalar). In this -/// example, the _getScalarOrDie() call generated from mean() will be "cancelled -/// out" with "tfc.scalarToTensor", such that we avoid generating scalar on the -/// host, and then converting it back to a graph tensor. -/// -/// In eager mode, this function is executed directly. 
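A hedged illustration of the two 0-d helpers above; both are internal to the module, so this only contrasts their failure modes rather than showing user-facing API:

```swift
// _TFGetScalarOrDie traps on non-0-d input; _TFGetScalar returns nil instead.
let zeroD = Tensor<Float>(3.14)
let s1: Float = _TFGetScalarOrDie(zeroD.handle)   // 3.14
let s2: Float? = _TFGetScalar(zeroD.handle)       // Optional(3.14)
```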
-@usableFromInline @inline(never) -@_silgen_name("__tf_tensor_from_scalar") -func _TFTensorFromScalar( - _ scalar: Scalar -) -> TensorHandle { - return _TFTensorFromScalars([scalar], shape: []) -} - -@usableFromInline @inline(never) -@_silgen_name("__tf_tensor_from_scalars_1d") -func _TFTensorFromScalars1D(_ scalars: [Scalar]) - -> TensorHandle { - return _TFTensorFromScalars(scalars, shape: [scalars.count]) -} - -@inlinable @inline(__always) -func _TFHoistable(_ fn: () -> TensorHandle) - -> TensorHandle { - return Scalar._hoistableClosure(fn) -} - -//===----------------------------------------------------------------------===// -// Memory transfer markers -//===----------------------------------------------------------------------===// - -public extension Tensor { - /// Mark memory transfer to accelerator. - /// - Parameters: - /// - shape: When sending the tensor to a TF XLA device (including TPU), - /// must specify the tensor shape as required by XLA compilation. - @inlinable @inline(__always) - func toAccelerator(shape: TensorShape) -> Tensor { - let tensor = toAccelerator() - // If the tensor is to be sent from host to TPU, the shape is specified on - // TF CPU first, before TF CPU sends the tensor to TPU. - let ret: TensorHandle = #tfop( - "Identity", - tensor, - T$dtype: Scalar.tensorFlowDataType, - __shapes: [shape], - __device: "/job:localhost/replica:0/task:0/device:CPU:0") - return Tensor(handle: ret) - } - - /// Mark memory transfer to accelerator. - @inlinable @inline(__always) - func toAccelerator() -> Tensor { - return Tensor(handle: _TFToAcclerator(handle)) - } - - /// Mark memory transfer to host. - /// - Parameters: - /// - shape: When sending the tensor to a TF XLA device (including TPU), - /// must specify the tensor shape as required by XLA compilation. - @inlinable @inline(__always) - func toHost(shape: TensorShape) -> Tensor { - // If the `self` tensor resides on TPU, the shape is specified on that - // device first, before outfeeding the tensor to CPU, a required step for - // sending the tensor to the host. - let tensor: TensorHandle = - #tfop("Identity", self, T$dtype: Scalar.tensorFlowDataType, - __shapes: [shape]) - return Tensor(handle: tensor).toHost() - } - - /// Mark memory transfer to host. - @inlinable @inline(__always) - func toHost() -> Tensor { - return Tensor(handle: _TFToHost(handle)) - } -} - -//===----------------------------------------------------------------------===// -// Initialization -//===----------------------------------------------------------------------===// - -public extension Tensor where Scalar : Numeric { - /// Perform an element-wise conversion from another `Tensor`. - @inlinable @inline(__always) - @differentiable( - vjp: _vjpCast where Scalar : TensorFlowFloatingPoint, - OtherScalar: TensorFlowFloatingPoint) - init(_ other: Tensor) { - self = Raw.cast(other) - } -} - -internal extension Tensor where Scalar : TensorFlowFloatingPoint { - @inlinable - static func _vjpCast( - _ other: Tensor - ) -> (Tensor, (Tensor) -> Tensor) { - return (Tensor(other), { v in Tensor(v) }) - } -} - -public extension Tensor { - /// Creates a tensor from a scalar value. 
-public extension Tensor {
-  /// Creates a tensor from a scalar value.
-  @inlinable @inline(__always)
-  @differentiable(vjp: _vjpScalarInit where Scalar : TensorFlowFloatingPoint)
-  init(_ value: Scalar) {
-    self.init(handle: _TFTensorFromScalar(value))
-  }
-}
-
-internal extension Tensor where Scalar : TensorFlowFloatingPoint {
-  @inlinable
-  static func _vjpScalarInit(_ value: Scalar) -> (Tensor, (Tensor) -> Scalar) {
-    return (Tensor(value), { $0.scalarized() })
-  }
-}
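A brief sketch of the scalar initializer and its pullback described above (hypothetical values; Swift for TensorFlow toolchain assumed):

    import TensorFlow

    let x = Tensor<Float>(3.5)  // rank-0 tensor, built via __tf_tensor_from_scalar
    print(x.rank == 0)          // true; the VJP maps a cotangent tensor back to
                                // a scalar via scalarized()
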
-public extension Tensor {
-  /// Creates a tensor from an array of tensors (which may themselves be
-  /// scalars).
-  @inlinable @inline(__always)
-  init(_ elements: [Tensor]) {
-    self = Raw.pack(elements)
-  }
-
-  /// Creates a 1D tensor from contiguous scalars.
-  ///
-  /// - Parameters:
-  ///   - vector: The scalar contents of the tensor.
-  ///
-  @inlinable @inline(__always)
-  init(_ vector: [Scalar]) {
-    self.init(handle: _TFTensorFromScalars1D(vector))
-  }
-
-  /// Creates a 1D tensor from contiguous scalars.
-  ///
-  /// - Parameters:
-  ///   - vector: The scalar contents of the tensor.
-  ///
-  @inlinable @inline(__always)
-  init<C : RandomAccessCollection>(_ vector: C) where C.Element == Scalar {
-    let handle = _TFHoistable {
-      TensorHandle<Scalar>(
-        shape: [vector.count],
-        scalarsInitializer: { addr in
-          var currentAddr = addr
-          for scalar in vector {
-            currentAddr.initialize(to: scalar)
-            currentAddr = currentAddr.advanced(by: 1)
-          }
-        }
-      )
-    }
-    self.init(handle: handle)
-  }
-
-  /// Creates a tensor with the specified shape and contiguous scalars in
-  /// row-major order.
-  ///
-  /// - Parameters:
-  ///   - shape: The shape of the tensor.
-  ///   - scalars: The scalar contents of the tensor.
-  /// - Precondition: The number of scalars must equal the product of the
-  ///   dimensions of the shape.
-  ///
-  @inlinable @inline(__always)
-  init(shape: TensorShape, scalars: [Scalar]) {
-    // NOTE: We use `_TFTensorFromScalars` here so the compiler can try to
-    // promote constants and avoid copies.
-    self.init(handle: _TFTensorFromScalars(scalars, shape: shape.dimensions))
-  }
-
-  /// Creates a tensor with the specified shape and contiguous scalars in
-  /// row-major order.
-  ///
-  /// - Parameters:
-  ///   - shape: The shape of the tensor.
-  ///   - scalars: The scalar contents of the tensor.
-  /// - Precondition: The number of scalars must equal the product of the
-  ///   dimensions of the shape.
-  ///
-  @inlinable @inline(__always)
-  init(shape: TensorShape, scalars: UnsafeBufferPointer<Scalar>) {
-    let handle: TensorHandle<Scalar> = _TFHoistable {
-      precondition(scalars.count == shape.contiguousSize)
-      return TensorHandle(
-        shape: shape.dimensions,
-        scalarsInitializer: { addr in
-          addr.initialize(from: scalars.baseAddress!,
-                          count: shape.contiguousSize)
-        }
-      )
-    }
-    self.init(handle: handle)
-  }
-
-  /// Creates a tensor with the specified shape and contiguous scalars in
-  /// row-major order.
-  ///
-  /// - Parameters:
-  ///   - shape: The shape of the tensor.
-  ///   - scalars: The scalar contents of the tensor.
-  /// - Precondition: The number of scalars must equal the product of the
-  ///   dimensions of the shape.
-  ///
-  @inlinable @inline(__always)
-  init<C : Collection>(shape: TensorShape, scalars: C)
-    where C.Element == Scalar {
-    let handle: TensorHandle<Scalar> = _TFHoistable {
-      precondition(scalars.count == shape.contiguousSize)
-      return TensorHandle(
-        shape: shape.dimensions,
-        scalarsInitializer: { addr in
-          var currentAddr = addr
-          for scalar in scalars {
-            currentAddr.initialize(to: scalar)
-            currentAddr = currentAddr.advanced(by: 1)
-          }
-        }
-      )
-    }
-    self.init(handle: handle)
-  }
-}
-
-public extension Tensor {
-  /// Creates a tensor with the specified shape and a single, repeated scalar
-  /// value.
-  ///
-  /// - Parameters:
-  ///   - shape: The dimensions of the tensor.
-  ///   - repeatedValue: The scalar value to repeat.
-  @inlinable @inline(__always)
-  @available(*, deprecated, renamed: "init(repeating:shape:)")
-  init(shape: TensorShape, repeating repeatedValue: Scalar) {
-    self.init(repeating: repeatedValue, shape: shape)
-  }
-
-  /// Creates a tensor with the specified shape and a single, repeated scalar value.
-  ///
-  /// - Parameters:
-  ///   - repeatedValue: The scalar value to repeat.
-  ///   - shape: The dimensions of the tensor.
-  @inlinable @inline(__always)
-  @differentiable(vjp: _vjpInit(repeating:shape:)
-                  where Scalar : TensorFlowFloatingPoint)
-  init(repeating repeatedValue: Scalar, shape: TensorShape) {
-    self = Raw.fill(dims: Tensor<Int32>(shape.dimensions.map(Int32.init)),
-                    value: Tensor(repeatedValue))
-  }
-}
-
-internal extension Tensor where Scalar : TensorFlowFloatingPoint {
-  @inlinable
-  static func _vjpInit(
-    repeating repeatedValue: Scalar,
-    shape: TensorShape
-  ) -> (Tensor, (Tensor) -> Scalar) {
-    return (Tensor(repeating: repeatedValue, shape: shape),
-            { $0.sum().scalarized() })
-  }
-}
-
-public extension Tensor {
-  /// Creates a tensor by broadcasting the given scalar to a given rank with
-  /// all dimensions being 1.
-  @inlinable @inline(__always)
-  // @differentiable(where Scalar : TensorFlowFloatingPoint)
-  init(broadcasting scalar: Scalar, rank: Int) {
-    self = Tensor(scalar).reshaped(to: TensorShape(repeating: 1, count: rank))
-  }
-
-  /// Creates a tensor of shape `[4]` from a 4-tuple.
-  /// - Note: This is intended for internal use, for example, to initialize a
-  ///   tensor attribute from `convolved2D`'s `strides` argument.
-  @inlinable @inline(__always)
-  internal init(_ scalars: (Scalar, Scalar, Scalar, Scalar)) {
-    self.init([scalars.0, scalars.1, scalars.2, scalars.3])
-  }
-}
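The initializers above compose as follows in user code (a hedged sketch; values hypothetical, Swift for TensorFlow toolchain assumed):

    import TensorFlow

    let v = Tensor<Float>([1, 2, 3])                             // 1-D, shape [3]
    let m = Tensor<Float>(shape: [2, 2], scalars: [1, 2, 3, 4])  // row-major
    let z = Tensor<Float>(repeating: 0, shape: [2, 3])
    let b = Tensor<Float>(broadcasting: 1, rank: 3)              // shape [1, 1, 1]
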
-//===----------------------------------------------------------------------===//
-// Initialization syntax
-//===----------------------------------------------------------------------===//
-
-// Background story on `TensorElementLiteral` and why it's necessary:
-//
-// Very importantly, we want users to be able to implicitly convert an array
-// literal to a tensor. At first glance, a straightforward implementation would
-// be conforming `Tensor` to `ExpressibleByArrayLiteral` with
-// `ExpressibleBy(Float|Int|Bool)Literal` as a base case. However, it is not
-// that simple. We have binary operators that take `(Tensor, Scalar)`, `(Scalar,
-// Tensor)` as well as `(Tensor, Tensor)`. When `Tensor`s are convertible from
-// both a scalar and an array literal, a scalar-tensor binary operator like `+`
-// will not type check.
-//
-// One way to work around it is to define all tensor-tensor operators in a
-// protocol extension, and all tensor-scalar and scalar-tensor operators on
-// concrete `Tensor`. Protocol extensions are less favorable than concrete
-// implementations, so the compiler will prefer the concrete implementation for
-// a scalar-tensor operation. However, this would cause enormous code bloat and
-// is entirely a hack.
-//
-// To resolve ambiguity, `Tensor` should not be expressible by scalar literal.
-// There's already a lightweight syntax for converting a scalar to a tensor:
-// `Tensor(x)`, so there is no strong need for implicit conversion. But we need
-// to find a way to give `ExpressibleByArrayLiteral` a base case: what would the
-// `ArrayLiteralElement` be if we want to support both `[1,2,3]` and `[[[1,2],
-// [1,2]]]`? In the first case the array literal element is an integer, while
-// in the second case the array literal itself should be a tensor. Based on this
-// observation, we come up with an intermediate type: `TensorElementLiteral` as
-// the `ArrayLiteralElement` of `Tensor`. By making `TensorElementLiteral`
-// expressible by both array literal and scalar literal, `Tensor` can now be
-// converted from an arbitrary-dimensional array literal.
-//
-// Due to protocol requirements, `TensorElementLiteral` has to be
-// public. It is never supposed to be used directly by any user, so the library
-// convention is to prepend an underscore to its name, making it
-// `_TensorElementLiteral`.
-//
-// It would be nice to be able to remove this type when we can systematically
-// resolve tensor-scalar/scalar-tensor op ambiguity someday, either through an
-// improved `Expressible` model, or by introducing an attribute to tell the type
-// checker which function to prefer when ambiguity occurs.
-
-/// Represents a literal element for conversion to a `Tensor`.
-///
-/// - Note: Do not ever use this API directly. This is implicitly created
-///   during the conversion from an array literal to a `Tensor`, and is purely
-///   for implementation purposes.
-@_fixed_layout
-public struct _TensorElementLiteral<Scalar> : TensorProtocol
-  where Scalar : TensorFlowScalar {
-
-  @usableFromInline let tensor: Tensor<Scalar>
-
-  @inlinable
-  public var handle: TensorHandle<Scalar> {
-    return tensor.handle
-  }
-
-  @inlinable
-  public init(handle: TensorHandle<Scalar>) {
-    tensor = Tensor(handle: handle)
-  }
-}
-
-extension _TensorElementLiteral : ExpressibleByBooleanLiteral
-  where Scalar : ExpressibleByBooleanLiteral {
-  public typealias BooleanLiteralType = Scalar.BooleanLiteralType
-  @inlinable @inline(__always)
-  public init(booleanLiteral: BooleanLiteralType) {
-    tensor = Tensor(Scalar(booleanLiteral: booleanLiteral))
-  }
-}
-
-extension _TensorElementLiteral : ExpressibleByIntegerLiteral
-  where Scalar : ExpressibleByIntegerLiteral {
-  public typealias IntegerLiteralType = Scalar.IntegerLiteralType
-  @inlinable @inline(__always)
-  public init(integerLiteral: IntegerLiteralType) {
-    tensor = Tensor(Scalar(integerLiteral: integerLiteral))
-  }
-}
-
-extension _TensorElementLiteral : ExpressibleByFloatLiteral
-  where Scalar : ExpressibleByFloatLiteral {
-  public typealias FloatLiteralType = Scalar.FloatLiteralType
-  @inlinable @inline(__always)
-  public init(floatLiteral: FloatLiteralType) {
-    tensor = Tensor(Scalar(floatLiteral: floatLiteral))
-  }
-}
-extension _TensorElementLiteral : ExpressibleByArrayLiteral {
-  public typealias ArrayLiteralElement = _TensorElementLiteral<Scalar>
-  @inlinable @inline(__always)
-  public init(arrayLiteral elements: _TensorElementLiteral<Scalar>...) {
-    // Attr T (non-optional in the op definition) need not be specified when we
-    // run the op as part of a graph function, but needs to be specified when we
-    // run it via the eager C API.
-    let handle: TensorHandle<Scalar> = #tfop("Pack", elements,
-                                             T$dtype: Scalar.tensorFlowDataType)
-    tensor = Tensor(handle: handle)
-  }
-}
-
-extension Tensor : ExpressibleByArrayLiteral {
-  /// The type of the elements of an array literal.
-  public typealias ArrayLiteralElement = _TensorElementLiteral<Scalar>
-
-  /// Creates a tensor initialized with the given elements.
-  /// - Note: This is for conversion from tensor element literals. This is a
-  ///   separate method because `ShapedArray` initializers need to call it.
-  @inlinable @inline(__always)
-  internal init(
-    _tensorElementLiterals elements: [_TensorElementLiteral<Scalar>]
-  ) {
-    self.init(handle: #tfop("Pack", elements,
-                            T$dtype: Scalar.tensorFlowDataType))
-  }
-
-  /// Creates a tensor initialized with the given elements.
-  @inlinable @inline(__always)
-  public init(arrayLiteral elements: _TensorElementLiteral<Scalar>...) {
-    self.init(_tensorElementLiterals: elements)
-  }
-}
-
-//===----------------------------------------------------------------------===//
-// Properties
-//===----------------------------------------------------------------------===//
-
-public extension Tensor {
-  /// The number of dimensions of the `Tensor`.
-  @inlinable
-  var rank: Int {
-    @inline(__always)
-    @_semantics("autodiff.nonvarying")
-    get {
-      return Int(_TFGetScalarOrDie(rankTensor.handle))
-    }
-  }
-
-  /// The dimensions of the `Tensor`.
-  @inlinable
-  var shape: TensorShape {
-    @inline(__always)
-    @_semantics("autodiff.nonvarying")
-    get {
-      return TensorShape(shapeTensor.scalars.map(Int.init))
-    }
-  }
-
-  /// The number of scalars in the `Tensor`.
-  @inlinable
-  var scalarCount: Int {
-    @inline(__always)
-    get {
-      return Int(_TFGetScalarOrDie(scalarCountTensor.handle))
-    }
-  }
-}
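With both conformances in place, nested array literals of any depth type-check, and the shape-describing properties read back what was built (sketch; assumes a Swift for TensorFlow toolchain):

    import TensorFlow

    let m: Tensor<Float> = [[1, 2], [3, 4]]  // built through _TensorElementLiteral
    print(m.rank)         // 2
    print(m.shape)        // [2, 2]
    print(m.scalarCount)  // 4
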
-//===----------------------------------------------------------------------===//
-// Numeric initialization
-//===----------------------------------------------------------------------===//
-
-public extension Tensor where Scalar : Numeric {
-  /// Creates a tensor with all scalars set to zero.
-  ///
-  /// - Parameter shape: The dimensions of the tensor.
-  @inlinable @inline(__always)
-  init(zeros shape: TensorShape) {
-    self.init(repeating: 0, shape: shape)
-  }
-
-  /// Creates a tensor with all scalars set to one.
-  ///
-  /// - Parameter shape: The dimensions of the tensor.
-  @inlinable @inline(__always)
-  init(ones shape: TensorShape) {
-    self.init(repeating: 1, shape: shape)
-  }
-
-  /// Creates a 1-D tensor representing a sequence from a starting value to, but
-  /// not including, an end value, stepping by the specified amount.
-  ///
-  /// - Parameters:
-  ///   - start: The starting value to use for the sequence. If the sequence
-  ///     contains any values, the first one is `start`.
-  ///   - end: An end value to limit the sequence. `end` is never an element of
-  ///     the resulting sequence.
-  ///   - stride: The amount to step by with each iteration. `stride` must be
-  ///     positive.
-  ///
-  @inlinable @inline(__always)
-  init(rangeFrom start: Scalar, to end: Scalar, stride: Scalar) {
-    self = Raw.range(
-      start: Tensor(start),
-      limit: Tensor(end),
-      delta: Tensor(stride))
-  }
-
-  /// Creates a one-hot tensor at given indices. The locations represented by
-  /// `indices` take value `onValue` (`1` by default), while all other locations
-  /// take value `offValue` (`0` by default). If the input `indices` is rank
-  /// `n`, the new tensor will have rank `n+1`. The new axis is created at
-  /// dimension `axis` (by default, the new axis is appended at the end).
-  ///
-  /// If `indices` is a scalar, the new tensor's shape will be a vector of
-  /// length `depth`.
-  ///
-  /// If `indices` is a vector of length `features`, the output shape will be:
-  ///     features x depth, if axis == -1
-  ///     depth x features, if axis == 0
-  ///
-  /// If `indices` is a matrix (batch) with shape `[batch, features]`, the
-  /// output shape will be:
-  ///     batch x features x depth, if axis == -1
-  ///     batch x depth x features, if axis == 1
-  ///     depth x batch x features, if axis == 0
-  ///
-  /// - Parameters:
-  ///   - indices: A `Tensor` of indices.
-  ///   - depth: A scalar defining the depth of the one hot dimension.
-  ///   - onValue: A scalar defining the value at the location referred to by
-  ///     some index in `indices`.
-  ///   - offValue: A scalar defining the value at a location that is not
-  ///     referred to by any index in `indices`.
-  ///   - axis: The axis to fill. The default is `-1`, a new inner-most axis.
-  ///
-  @inlinable @inline(__always)
-  init(oneHotAtIndices indices: Tensor<Int32>, depth: Int,
-       onValue: Scalar = 1, offValue: Scalar = 0, axis: Int = -1) {
-    self = Raw.oneHot(
-      indices: indices,
-      depth: Tensor<Int32>(Int32(depth)),
-      onValue: Tensor(onValue),
-      offValue: Tensor(offValue),
-      axis: Int64(axis)
-    )
-  }
-}
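A brief sketch of these numeric initializers (hypothetical values; Swift for TensorFlow toolchain assumed):

    import TensorFlow

    let zeros = Tensor<Float>(zeros: [2, 2])
    let range = Tensor<Float>(rangeFrom: 0, to: 10, stride: 2)  // [0, 2, 4, 6, 8]
    // Indices [0, 2] with depth 3: one-hot rows along the new last axis.
    let oneHot = Tensor<Float>(oneHotAtIndices: Tensor<Int32>([0, 2]), depth: 3)
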
-//===----------------------------------------------------------------------===//
-// Shape transformations
-//===----------------------------------------------------------------------===//
-
-public extension TensorFlowScalar {
-  /// Convert to a tensor with the specified rank, with all dimensions equal to
-  /// 1.
-  @inlinable @inline(__always)
-  func makeTensor(rank: Int) -> Tensor<Self> {
-    return Tensor(repeating: self, shape: TensorShape(rank))
-  }
-}
-
-public extension Tensor {
-  /// Reshape to the shape of the specified `Tensor`.
-  /// - Precondition: The number of scalars matches the new shape.
-  @inlinable @inline(__always)
-  @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint)
-  func reshaped(like other: Tensor) -> Tensor {
-    return reshaped(toShape: other.shapeTensor)
-  }
-
-  /// Reshape to the specified shape.
-  /// - Precondition: The number of scalars matches the new shape.
-  @inlinable @inline(__always)
-  @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint)
-  func reshaped(to newShape: TensorShape) -> Tensor {
-    // TODO(TF-433): Remove workaround for differentiating `map`.
-    return reshaped(toShape: Tensor<Int32>({newShape.dimensions.map(Int32.init)}()))
-  }
-
-  /// Reshape to the specified `Tensor` representing a shape.
-  /// - Precondition: The number of scalars matches the new shape.
-  @inlinable @inline(__always)
-  @differentiable(
-    wrt: self, vjp: _vjpReshaped(toShape:)
-    where Scalar : TensorFlowFloatingPoint
-  )
-  func reshaped(toShape newShape: Tensor<Int32>) -> Tensor {
-    return Raw.reshape(self, shape: newShape)
-  }
-
-  /// Return a copy of the tensor collapsed into a 1-D `Tensor`, in row-major
-  /// order.
-  @inlinable @inline(__always)
-  @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint)
-  func flattened() -> Tensor {
-    return reshaped(to: [-1])
-  }
-
-  /// Returns a rank-lifted `Tensor` with a leading dimension of 1.
-  @inlinable @inline(__always)
-  @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint)
-  func rankLifted() -> Tensor {
-    return expandingShape(at: 0)
-  }
-
-  /// Returns a shape-expanded `Tensor`, with a dimension of 1 inserted at the
-  /// specified shape indices.
-  @inlinable @inline(__always)
-  @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint)
-  func expandingShape(at axes: Int...) -> Tensor {
-    return expandingShape(at: axes)
-  }
-
-  /// Returns a shape-expanded `Tensor`, with a dimension of 1 inserted at the
-  /// specified shape indices.
-  @inlinable @inline(__always)
-  @differentiable(
-    wrt: self, vjp: _vjpExpandingShape(at:)
-    where Scalar : TensorFlowFloatingPoint
-  )
-  func expandingShape(at axes: [Int]) -> Tensor {
-    var res = self
-    for i in axes { res = Raw.expandDims(res, dim: Tensor<Int32>(Int32(i))) }
-    return res
-  }
-
-  /// Remove the specified dimensions of size 1 from the shape of a tensor. If
-  /// no dimensions are specified, then all dimensions of size 1 will be
-  /// removed.
-  @inlinable @inline(__always)
-  @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint)
-  func squeezingShape(at axes: Int...) -> Tensor {
-    return squeezingShape(at: axes)
-  }
-
-  /// Remove the specified dimensions of size 1 from the shape of a tensor. If
-  /// no dimensions are specified, then all dimensions of size 1 will be
-  /// removed.
-  @inlinable @inline(__always)
-  @differentiable(
-    wrt: self, vjp: _vjpSqueezingShape(at:)
-    where Scalar : TensorFlowFloatingPoint
-  )
-  func squeezingShape(at axes: [Int]) -> Tensor {
-    return Raw.squeeze(self, squeezeDims: axes.map(Int32.init))
-  }
-
-  /// Reshape to scalar.
-  /// - Precondition: The tensor has exactly one scalar.
-  @inlinable
-  @differentiable(wrt: self,
-                  vjp: _vjpScalarized where Scalar : TensorFlowFloatingPoint)
-  func scalarized() -> Scalar {
-    return _TFGetScalarOrDie(reshaped(to: []).handle)
-  }
-}
-
-extension Tensor where Scalar : TensorFlowFloatingPoint {
-  @inlinable
-  func _vjpScalarized() -> (Scalar, (Scalar) -> Tensor) {
-    return (scalarized(), { v in Tensor(v) })
-  }
-}
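How the shape transformations above chain together (sketch; hypothetical values):

    import TensorFlow

    let t = Tensor<Float>(rangeFrom: 0, to: 6, stride: 1)  // shape [6]
    let m = t.reshaped(to: [2, 3])
    let e = m.expandingShape(at: 0)           // shape [1, 2, 3]
    let s = e.squeezingShape(at: 0)           // back to [2, 3]
    let x = Tensor<Float>(42).scalarized()    // 42.0, a host scalar
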
-//===----------------------------------------------------------------------===//
-// Scalar conversion
-//===----------------------------------------------------------------------===//
-
-public extension Tensor {
-  /// Returns `true` if `rank` is equal to 0 and `false` otherwise.
-  @inlinable
-  var isScalar: Bool {
-    @inline(__always)
-    get {
-      return rank == 0
-    }
-  }
-
-  /// Returns the single scalar element if `rank` is equal to 0 and `nil`
-  /// otherwise.
-  @inlinable
-  var scalar: Scalar? {
-    @inline(__always)
-    get {
-      return Scalar(self)
-    }
-  }
-}
-
-public extension TensorFlowScalar {
-  @inlinable @inline(__always)
-  init?(_ tensor: Tensor<Self>) {
-    guard let scalar = _TFGetScalar(tensor.handle) else {
-      return nil
-    }
-    self = scalar
-  }
-}
-
-//===----------------------------------------------------------------------===//
-// Equality
-//===----------------------------------------------------------------------===//
-
-extension Tensor : Equatable where Scalar : Equatable {
-  @inlinable @inline(__always)
-  public static func == (lhs: Tensor, rhs: Tensor) -> Bool {
-    return (lhs .== rhs).all()
-  }
-
-  @inlinable @inline(__always)
-  public static func != (lhs: Tensor, rhs: Tensor) -> Bool {
-    return (lhs .== rhs).any()
-  }
-}
-
-//===----------------------------------------------------------------------===//
-// Description and visualization
-//===----------------------------------------------------------------------===//
-
-// String conversion.
-extension Tensor : CustomStringConvertible {
-  /// A textual representation of the tensor.
-  ///
-  /// - Note: use `fullDescription` for a non-pretty-printed description showing
-  ///   all scalars.
-  public var description: String {
-    return array.description
-  }
-}
-
-public extension Tensor {
-  /// A textual representation of the tensor. Returns a summarized description
-  /// if `summarizing` is true and the element count exceeds twice the
-  /// `edgeElementCount`.
-  ///
-  /// - Parameters:
-  ///   - lineWidth: The max line width for printing. Used to determine number
-  ///     of scalars to print per line.
-  ///   - edgeElementCount: The maximum number of elements to print before and
-  ///     after summarization via ellipses (`...`).
-  ///   - summarizing: If true, summarize description if element count exceeds
-  ///     twice `edgeElementCount`.
-  func description(
-    lineWidth: Int = 80, edgeElementCount: Int = 3, summarizing: Bool = false
-  ) -> String {
-    return array.description(
-      lineWidth: lineWidth, edgeElementCount: edgeElementCount,
-      summarizing: summarizing)
-  }
-
-  /// A full, non-pretty-printed textual representation of the tensor, showing
-  /// all scalars.
-  var fullDescription: String {
-    return array.fullDescription
-  }
-}
-
-// Xcode Playground display conversion.
-extension Tensor : CustomPlaygroundDisplayConvertible {
-  public var playgroundDescription: Any {
-    return description
-  }
-}
-
-// Mirror representation, used by debugger/REPL.
-extension Tensor : CustomReflectable {
-  public var customMirror: Mirror {
-    return Mirror(self, children: [], displayStyle: .struct)
-  }
-}
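The equality and conversion APIs above behave as follows (sketch; hypothetical values):

    import TensorFlow

    let a = Tensor<Float>([1, 2, 3])
    print(a == a)                          // true: elementwise .== reduced with all()
    print(Tensor<Float>(7).scalar ?? -1)   // 7.0, since rank == 0
    print(a.scalar ?? -1)                  // -1.0: rank 1, so scalar is nil
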
-//===----------------------------------------------------------------------===//
-// Array conversion
-//===----------------------------------------------------------------------===//
-
-public extension Tensor {
-  @inlinable
-  var array: ShapedArray<Scalar> {
-    @inline(__always)
-    get {
-      debugLog("Returning a host copy of array.")
-      internalConsistencyCheck(toHost().handle.isConcrete)
-
-      // This is considered to be a well known way to produce a copy to the
-      // host, so an "implicit copy to host" warning should not be produced.
-      return toHost().handle.makeHostCopy()
-    }
-  }
-
-  @inlinable
-  var scalars: [Scalar] {
-    return array.scalars
-  }
-}
-
-//===----------------------------------------------------------------------===//
-// Codable conformance
-//===----------------------------------------------------------------------===//
-
-extension Tensor : Codable where Scalar : Codable {
-  @inlinable
-  public func encode(to encoder: Encoder) throws {
-    var container = encoder.singleValueContainer()
-    try container.encode(array)
-  }
-
-  @inlinable
-  public init(from decoder: Decoder) throws {
-    let container = try decoder.singleValueContainer()
-    let array = try container.decode(ShapedArray<Scalar>.self)
-    self.init(array)
-  }
-}
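Since the conformance encodes through `ShapedArray`, a round trip through Foundation's JSON coders is a reasonable smoke test (sketch; assumes Foundation alongside the TensorFlow module):

    import Foundation
    import TensorFlow

    let original = Tensor<Float>(shape: [2, 2], scalars: [1, 2, 3, 4])
    let data = try JSONEncoder().encode(original)
    let restored = try JSONDecoder().decode(Tensor<Float>.self, from: data)
    print(restored == original)  // true
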
diff --git a/stdlib/public/TensorFlowCore/CMakeLists.txt b/stdlib/public/TensorFlowCore/CMakeLists.txt
new file mode 100644
index 0000000000000..e63c7ed2545ec
--- /dev/null
+++ b/stdlib/public/TensorFlowCore/CMakeLists.txt
@@ -0,0 +1,66 @@
+#===--- CMakeLists.txt - Build the TensorFlow support library ------------===#
+#
+# This source file is part of the Swift.org open source project
+#
+# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
+# Licensed under Apache License v2.0 with Runtime Library Exception
+#
+# See https://swift.org/LICENSE.txt for license information
+# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
+#
+#===----------------------------------------------------------------------===#
+#
+# SWIFT_ENABLE_TENSORFLOW
+#
+#===----------------------------------------------------------------------===#
+
+if(NOT SWIFT_ENABLE_TENSORFLOW)
+  return()
+endif()
+
+find_package(TensorFlow REQUIRED)
+message(STATUS "Building TensorFlowCore.")
+
+set(CMAKE_BUILD_WITH_INSTALL_RPATH TRUE)
+set(swift_stdlib_compile_flags "${SWIFT_RUNTIME_SWIFT_COMPILE_FLAGS}")
+list(APPEND swift_stdlib_compile_flags "-Xllvm" "-sil-inline-generics")
+list(APPEND swift_stdlib_compile_flags "-Xllvm" "-sil-partial-specialization")
+list(APPEND swift_stdlib_compile_flags "-Xfrontend" "-enable-sil-ownership")
+list(APPEND swift_stdlib_compile_flags "-force-single-frontend-invocation")
+# FIXME(SR-7972): Some tests fail when TensorFlow is optimized.
+# list(APPEND swift_stdlib_compile_flags "-O" "-whole-module-optimization")
+list(APPEND swift_stdlib_compile_flags "-Onone")
+list(APPEND swift_stdlib_compile_flags "-DCOMPILING_TENSORFLOW_MODULE")
+
+set(SOURCES
+  CompilerRuntime.swift
+  DataTypes.swift
+  Execution.swift
+  Helpers.swift
+  TensorGroup.swift
+  TensorHandle.swift
+  TensorProtocol.swift
+  TensorShape.swift
+  Threading.swift
+  Utilities.swift)
+
+add_swift_target_library(swiftTensorFlowCore ${SWIFT_STDLIB_LIBRARY_BUILD_TYPES} IS_STDLIB
+  "${SOURCES}"
+
+  INCORPORATE_OBJECT_LIBRARIES swiftCTensorFlow
+  TARGET_SDKS OSX LINUX
+  PRIVATE_LINK_LIBRARIES "${TF_LIBRARIES}"
+  SWIFT_MODULE_DEPENDS SwiftOnoneSupport
+  SWIFT_MODULE_DEPENDS_IOS Darwin
+  SWIFT_MODULE_DEPENDS_OSX Darwin
+  SWIFT_MODULE_DEPENDS_TVOS Darwin
+  SWIFT_MODULE_DEPENDS_WATCHOS Darwin
+  SWIFT_MODULE_DEPENDS_LINUX Glibc
+  SWIFT_MODULE_DEPENDS_FREEBSD Glibc
+  SWIFT_MODULE_DEPENDS_CYGWIN Glibc
+  SWIFT_MODULE_DEPENDS_HAIKU Glibc
+  ${TENSORFLOW_DEPENDS_PYTHON}
+  SWIFT_COMPILE_FLAGS "${swift_stdlib_compile_flags}"
+  LINK_FLAGS "${SWIFT_RUNTIME_SWIFT_LINK_FLAGS}"
+  INSTALL_IN_COMPONENT stdlib
+  EXTRA_RPATHS "${SWIFT_TENSORFLOW_TARGET_LIB_DIR}")
diff --git a/stdlib/public/TensorFlow/CompilerRuntime.swift b/stdlib/public/TensorFlowCore/CompilerRuntime.swift
similarity index 96%
rename from stdlib/public/TensorFlow/CompilerRuntime.swift
rename to stdlib/public/TensorFlowCore/CompilerRuntime.swift
index e2870f42d0b63..ce3554682de3f 100644
--- a/stdlib/public/TensorFlow/CompilerRuntime.swift
+++ b/stdlib/public/TensorFlowCore/CompilerRuntime.swift
@@ -71,7 +71,8 @@ public enum _ExecutionMode : Equatable {
 /// `addEagerOpToGraph()`). When the trace is finalized (via `finalize()`), the
 /// trace graph function can then be executed (via `execute()`) by the eager
 /// runtime.
-private class TraceContext {
+@usableFromInline
+internal class TraceContext {
   let status: CTFStatus = TF_NewStatus()

   /// The trace graph, which will be converted to a trace graph function
@@ -140,6 +141,7 @@ private class TraceContext {
     TF_DeleteStatus(status)
   }

+  @usableFromInline
   func addEagerOpToGraph(_ op: CTFEOp,
                          _ retvals: UnsafeMutablePointer<OpaquePointer?>,
                          _ retvalCount: UnsafeMutablePointer<Int32>,
@@ -432,11 +434,13 @@ private class TraceContext {
 }

 // This enum keeps track of whether we are building or executing a trace.
-private enum TracingState {
+@usableFromInline
+internal enum TracingState {
   case notTracing
   case tracing(TraceContext)

   // Return nil if we are not in tracing mode.
+  @usableFromInline
   var context: TraceContext? {
     guard case let .tracing(context) = self else { return nil }
     return context
@@ -448,7 +452,8 @@ private enum TracingState {
 // @_frozen // SR-9739
 public enum _RuntimeConfig {
   // TODO: change this and subsequent properties from static to thread local.
-  fileprivate static var traceState: TracingState = .notTracing
+  @usableFromInline
+  internal static var traceState: TracingState = .notTracing

   /// Used to create unique trace graph function names.
   fileprivate static var traceGraphFunctionCounter = 0
@@ -626,7 +631,7 @@ public final class _ExecutionContext {
   public let tensorFlowConfig: UnsafeMutablePointer<TF_Buffer>

   /// The TFE_Context object.
-  @usableFromInline let eagerContext: CTFEContext
+  public let eagerContext: CTFEContext

   // NOTE: the following properties are intentionally not implemented as an enum
   // due to high churn, *please do not refactor for Swiftiness*.
@@ -972,17 +977,16 @@ public func _graph(
+public func _graph<State : _TensorArrayProtocolEnhanced, Data : TensorGroup>(
   with state: State,
   in fn: (State, Data) -> State
 ) -> (State, Data) -> State {
-  let graphFunction: (State, Data) -> (State, Tensor<Float>?) =
+  let graphFunction: (State, Data) -> (State, TensorHandle<Float>?) =
     withoutActuallyEscaping(fn) { escapableFn in
       let wrappedFn = {
         // The result argument needs to be a type that conforms to TensorGroup.
-        // We are arbitrarily picking Tensor<Float> here.
-        (s: State, d: Data) -> (State, Tensor<Float>?) in
+        // We are arbitrarily picking TensorHandle<Float> here.
+        (s: State, d: Data) -> (State, TensorHandle<Float>?) in
         (escapableFn(s, d), nil)
       }
       return _graphInternal(with: state, in: wrappedFn)
@@ -1179,49 +1183,49 @@ public extension _ExecutionContext {
   }
 }

-@usableFromInline
-internal func dumpTensorContent<Scalar : _TensorFlowDataTypeCompatible>(
-  _ inputTensor: CTensorHandle, _: Scalar.Type
-) {
-  assert(TFE_TensorHandleIsConcrete(inputTensor) != 0)
-
-  let array = ShapedArray<Scalar>(cTensorHandle: inputTensor)
-  debugLog("Rank is \(array.rank), shape is \(array.shape).")
-  debugLog("""
-    The content of the \(array.scalars.count) scalars are: \
-    \(array.scalars).
-    """)
-}
-
-@usableFromInline
-internal func dumpCTensorHandleContent(
-  _ idx: Int,
-  _ inputTensorHandle: CTensorHandle) {
-  if TFE_TensorHandleIsConcrete(inputTensorHandle) == 0 {
-    debugLog("Skip dumpping a symbolic tensor handle.")
-    return
-  }
-
-  let dType: TF_DataType = TFE_TensorHandleDataType(inputTensorHandle)
-  debugLog("Tensor \(idx) has TF data type \(dType).")
-  switch dType {
-  case TF_UINT8: dumpTensorContent(inputTensorHandle, UInt8.self)
-  case TF_INT8: dumpTensorContent(inputTensorHandle, Int8.self)
-  case TF_UINT16: dumpTensorContent(inputTensorHandle, UInt16.self)
-  case TF_INT16: dumpTensorContent(inputTensorHandle, Int16.self)
-  case TF_UINT32: dumpTensorContent(inputTensorHandle, UInt32.self)
-  case TF_INT32: dumpTensorContent(inputTensorHandle, Int32.self)
-  case TF_UINT64: dumpTensorContent(inputTensorHandle, UInt64.self)
-  case TF_INT64: dumpTensorContent(inputTensorHandle, Int64.self)
-  case TF_FLOAT: dumpTensorContent(inputTensorHandle, Float.self)
-  case TF_DOUBLE: dumpTensorContent(inputTensorHandle, Double.self)
-  case TF_BOOL: dumpTensorContent(inputTensorHandle, Bool.self)
-  // TODO: Handle `TF_BFloat16`? BFloat16 does not have a host-side
-  // representation and cannot be printed directly. Consider calling into TF
-  // runtime.
-  default: fatalError("Unsupported dtype \(dType)")
-  }
-}
+// @usableFromInline
+// internal func dumpTensorContent<Scalar : _TensorFlowDataTypeCompatible>(
+//   _ inputTensor: CTensorHandle, _: Scalar.Type
+// ) {
+//   assert(TFE_TensorHandleIsConcrete(inputTensor) != 0)
+
+//   let array = ShapedArray<Scalar>(cTensorHandle: inputTensor)
+//   debugLog("Rank is \(array.rank), shape is \(array.shape).")
+//   debugLog("""
+//     The content of the \(array.scalars.count) scalars are: \
+//     \(array.scalars).
+// """) +// } + +// @usableFromInline +// internal func dumpCTensorHandleContent( +// _ idx: Int, +// _ inputTensorHandle: CTensorHandle) { +// if TFE_TensorHandleIsConcrete(inputTensorHandle) == 0 { +// debugLog("Skip dumpping a symbolic tensor handle.") +// return +// } + +// let dType: TF_DataType = TFE_TensorHandleDataType(inputTensorHandle) +// debugLog("Tensor \(idx) has TF data type \(dType).") +// switch dType { +// case TF_UINT8: dumpTensorContent(inputTensorHandle, UInt8.self) +// case TF_INT8: dumpTensorContent(inputTensorHandle, Int8.self) +// case TF_UINT16: dumpTensorContent(inputTensorHandle, UInt16.self) +// case TF_INT16: dumpTensorContent(inputTensorHandle, Int16.self) +// case TF_UINT32: dumpTensorContent(inputTensorHandle, UInt32.self) +// case TF_INT32: dumpTensorContent(inputTensorHandle, Int32.self) +// case TF_UINT64: dumpTensorContent(inputTensorHandle, UInt64.self) +// case TF_INT64: dumpTensorContent(inputTensorHandle, Int64.self) +// case TF_FLOAT: dumpTensorContent(inputTensorHandle, Float.self) +// case TF_DOUBLE: dumpTensorContent(inputTensorHandle, Double.self) +// case TF_BOOL: dumpTensorContent(inputTensorHandle, Bool.self) +// // TODO: Handle `TF_BFloat16`? BFloat16 does not have a host-side +// // representation and cannot be printed directly. Consider calling into TF +// // runtime. +// default: fatalError("Unsupported dtype \(dType)") +// } +// } private class TFEState { let status: CTFStatus = TF_NewStatus() @@ -1434,9 +1438,9 @@ public final class _TensorComputation { debugLog("Populating the op's input list.") for (i, inputTensorHandle) in inputTensorHandles.enumerated() { - if _RuntimeConfig.printsDebugLog { - dumpCTensorHandleContent(i, inputTensorHandle) - } + // if _RuntimeConfig.printsDebugLog { + // dumpCTensorHandleContent(i, inputTensorHandle) + // } state.addInput(inputTensorHandle) } @@ -1567,12 +1571,12 @@ public extension _TensorComputation { } } -@usableFromInline +@inlinable @_cdecl("_swift_tfc_EagerExecute") -func _TFCEagerExecute(_ op: CTFEOp, - _ retvals: UnsafeMutablePointer, - _ retvalCount: UnsafeMutablePointer, - _ status: CTFStatus) { +public func _TFCEagerExecute(_ op: CTFEOp, + _ retvals: UnsafeMutablePointer, + _ retvalCount: UnsafeMutablePointer, + _ status: CTFStatus) { if _RuntimeConfig.printsDebugLog { debugLog("Calling _TFCEagerExecute() over: ") TFE_OpPrintDebugString(op) @@ -1585,6 +1589,7 @@ func _TFCEagerExecute(_ op: CTFEOp, } else { debugLog("Executing eager op \(op).") TFE_Execute(op, retvals, retvalCount, status) + checkOk(status) } } @@ -1771,11 +1776,6 @@ public protocol AnyTensor { var _tensorFlowDataType: TensorDataType { get } } -extension Tensor : AnyTensor { - public var _rawTensorHandle: CTensorHandle { return handle._cTensorHandle } - public var _tensorFlowDataType: TensorDataType { return Scalar.tensorFlowDataType } -} - @usableFromInline func _TFCOpAddInputFromAnyTensors( _ op: CTFEOp, _ tensors: [AnyTensor], _ status: CTFStatus diff --git a/stdlib/public/TensorFlow/DataTypes.swift b/stdlib/public/TensorFlowCore/DataTypes.swift similarity index 90% rename from stdlib/public/TensorFlow/DataTypes.swift rename to stdlib/public/TensorFlowCore/DataTypes.swift index 4b466d09b9691..4f8aab17ceca5 100644 --- a/stdlib/public/TensorFlow/DataTypes.swift +++ b/stdlib/public/TensorFlowCore/DataTypes.swift @@ -31,48 +31,11 @@ public struct TensorDataType { public var _cDataType: TF_DataType @inlinable - internal init(_ cDataType: TF_DataType) { + public init(_ cDataType: TF_DataType) { self._cDataType = cDataType } 
 }

-@usableFromInline
-internal func makeTensor(
-  dataType: TensorDataType,
-  owning pointer: CTensorHandle
-) -> AnyTensor {
-  switch dataType._cDataType {
-  case TF_BOOL:
-    return Tensor<Bool>(handle: TensorHandle(_owning: pointer))
-  case TF_INT8:
-    return Tensor<Int8>(handle: TensorHandle(_owning: pointer))
-  case TF_UINT8:
-    return Tensor<UInt8>(handle: TensorHandle(_owning: pointer))
-  case TF_INT16:
-    return Tensor<Int16>(handle: TensorHandle(_owning: pointer))
-  case TF_UINT16:
-    return Tensor<UInt16>(handle: TensorHandle(_owning: pointer))
-  case TF_INT32:
-    return Tensor<Int32>(handle: TensorHandle(_owning: pointer))
-  case TF_UINT32:
-    return Tensor<UInt32>(handle: TensorHandle(_owning: pointer))
-  case TF_INT64:
-    return Tensor<Int64>(handle: TensorHandle(_owning: pointer))
-  case TF_UINT64:
-    return Tensor<UInt64>(handle: TensorHandle(_owning: pointer))
-  case TF_BFLOAT16:
-    return Tensor<BFloat16>(handle: TensorHandle(_owning: pointer))
-  case TF_FLOAT:
-    return Tensor<Float>(handle: TensorHandle(_owning: pointer))
-  case TF_DOUBLE:
-    return Tensor<Double>(handle: TensorHandle(_owning: pointer))
-  case TF_STRING:
-    fatalError("StringTensor does not conform to AnyTensor")
-  default:
-    fatalError("Unhandled type: \(dataType)")
-  }
-}
-
 /// A data type compatible with TensorFlow.
 public protocol _TensorFlowDataTypeCompatible {
   /// The underlying TensorFlow data type.
@@ -133,7 +96,7 @@ extension Double : TensorFlowFloatingPoint {}
 private func _TFGetScalarOrDieImpl<Scalar : TensorFlowScalar>(
   _ handle: TensorHandle<Scalar>
 ) -> Scalar {
-  return handle.makeHostCopy().scalar!
+  return handle.hostScalar()!
 }

 // This is the implementation of the _getScalar requirement for each concrete
@@ -144,7 +107,7 @@ private func _TFGetScalarOrDieImpl(
 private func _TFGetScalarImpl<Scalar : TensorFlowScalar>(
   _ handle: TensorHandle<Scalar>
 ) -> Scalar? {
-  return handle.makeHostCopy().scalar
+  return handle.hostScalar()
 }

 extension Bool : TensorFlowScalar {
diff --git a/stdlib/public/TensorFlow/Execution.swift b/stdlib/public/TensorFlowCore/Execution.swift
similarity index 100%
rename from stdlib/public/TensorFlow/Execution.swift
rename to stdlib/public/TensorFlowCore/Execution.swift
diff --git a/stdlib/public/TensorFlowCore/Helpers.swift b/stdlib/public/TensorFlowCore/Helpers.swift
new file mode 100644
index 0000000000000..decd3cf42da94
--- /dev/null
+++ b/stdlib/public/TensorFlowCore/Helpers.swift
@@ -0,0 +1,188 @@
+//===-- Helpers.swift ----------------------------------------*- swift -*-===//
+//
+// This source file is part of the Swift.org open source project
+//
+// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
+// Licensed under Apache License v2.0 with Runtime Library Exception
+//
+// See https://swift.org/LICENSE.txt for license information
+// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
+//
+//===----------------------------------------------------------------------===//

+import CTensorFlow
+
+//===----------------------------------------------------------------------===//
+// Tensor
+//===----------------------------------------------------------------------===//
+
+@inlinable @inline(never)
+@_silgen_name("__tf_to_accel")
+public func _TFToAccelerator<Scalar>(_ handle: TensorHandle<Scalar>) -> TensorHandle<Scalar> {
+  return handle
+}
+
+@inlinable @inline(never)
+@_silgen_name("__tf_to_host")
+public func _TFToHost<Scalar>(_ handle: TensorHandle<Scalar>)
+  -> TensorHandle<Scalar> {
+  return handle
+}
+
+/// This function converts a `TensorHandle` that is known to have a 0-d value
+/// into the scalar that it produces. This is intended for use in op definitions
+/// where it is known that the op always returns a 0-d tensor. It is not for use
+/// in general code.
+@inlinable @inline(__always)
+public func _TFGetScalarOrDie<Scalar : TensorFlowScalar>(
+  _ handle: TensorHandle<Scalar>
+) -> Scalar {
+  return Scalar._getScalarOrDie(handle)
+}
+
+/// This function converts a `TensorHandle` into a scalar if it is 0-d, or
+/// returns nil otherwise.
+@inlinable @inline(__always)
+public func _TFGetScalar<Scalar : TensorFlowScalar>(
+  _ handle: TensorHandle<Scalar>
+) -> Scalar? {
+  return Scalar._getScalar(handle)
+}
+
+/// This compiler builtin is known by the partitioning pass, which recognizes it
+/// and promotes calls to it to being in graph when it can. This signature was
+/// designed to align with the requirements of the `Const` TensorFlow operation.
+@inlinable @inline(never)
+@_silgen_name("__tf_tensor_from_scalars")
+public func _TFTensorFromScalars<Scalar : TensorFlowScalar>(
+  _ scalars: [Scalar], shape: [Int]
+) -> TensorHandle<Scalar> {
+  let contiguousSize = shape.reduce(1, *)
+  precondition(scalars.count == contiguousSize,
+               "The number of scalars does not match the shape.")
+  return TensorHandle(
+    shape: shape,
+    scalarsInitializer: { addr in
+      scalars.withUnsafeBufferPointer { ptr in
+        addr.assign(from: ptr.baseAddress!, count: contiguousSize)
+      }
+    }
+  )
+}
+
+/// In graph mode, the deabstraction pass transforms this function call to
+/// either a "Const" graph_op (if `scalar` is a compile-time constant), or a
+/// "tfc.scalarToTensor" graph_op. In the latter case, the partition pass uses
+/// it to do scalar promotion, and transforms it away before entering graph
+/// lowering. e.g. For user code:
+///   let x_scalar = x_tensor.mean()
+///   let y_scalar = y_tensor.mean()
+///   let z_scalar = x_scalar + y_scalar
+///   let z_tensor = Tensor(z_scalar)
+///
+/// The scalar addition can be promoted into graph, through the
+/// "tfc.scalarToTensor" graph_op generated from Tensor(z_scalar). In this
+/// example, the _getScalarOrDie() call generated from mean() will be "cancelled
+/// out" with "tfc.scalarToTensor", such that we avoid generating scalar on the
+/// host, and then converting it back to a graph tensor.
+///
+/// In eager mode, this function is executed directly.
+@inlinable @inline(never)
+@_silgen_name("__tf_tensor_from_scalar")
+public func _TFTensorFromScalar<Scalar : TensorFlowScalar>(
+  _ scalar: Scalar
+) -> TensorHandle<Scalar> {
+  return _TFTensorFromScalars([scalar], shape: [])
+}
+
+@inlinable @inline(never)
+@_silgen_name("__tf_tensor_from_scalars_1d")
+public func _TFTensorFromScalars1D<Scalar : TensorFlowScalar>(_ scalars: [Scalar])
+  -> TensorHandle<Scalar> {
+  return _TFTensorFromScalars(scalars, shape: [scalars.count])
+}
+
+@inlinable @inline(__always)
+public func _TFHoistable<Scalar : TensorFlowScalar>(_ fn: () -> TensorHandle<Scalar>)
+  -> TensorHandle<Scalar> {
+  return Scalar._hoistableClosure(fn)
+}
+
+//===----------------------------------------------------------------------===//
+// StringTensor
+//===----------------------------------------------------------------------===//
+
+/// This compiler builtin is known by the partitioning pass, which recognizes it
+/// and promotes calls to it to being in graph when it can. This signature was
+/// designed to align with the requirements of the `Const` TensorFlow operation.
+@inlinable @inline(never)
+@_silgen_name("__tf_string_tensor_from_strings")
+public func _TFStringTensorFromStrings(
+  _ scalars: [String], shape: [Int]
+) -> TensorHandle<String> {
+  let contiguousSize = shape.reduce(1, *)
+  precondition(scalars.count == contiguousSize,
+               "The number of scalars does not match the shape.")
+
+  // utf8CString is null-terminated. TF APIs want the strings without
+  // null-terminators.
+  let cStrings = scalars.map { $0.utf8CString.dropLast() }
+
+  let tfEncodedSizes = cStrings.map { TF_StringEncodedSize($0.count) }
+
+  // Format information copied from tensorflow/c/c_api.h:
+  // The format for TF_STRING tensors is:
+  //   start_offset: array[uint64]
+  //   data: byte[...]
+  //
+  // The string length (as a varint), followed by the contents of the string
+  // is encoded at data[start_offset[i]]].
+
+  // The size of the "start_offset" region.
+  let startOffsetsByteCount = scalars.count * MemoryLayout<UInt64>.stride
+
+  // The size of the "data" region.
+  let dataByteCount = tfEncodedSizes.reduce(0, +) * MemoryLayout<UInt8>.stride
+
+  return TensorHandle(
+    shape: shape,
+    byteCount: startOffsetsByteCount + dataByteCount,
+    bufferInitializer: { tensorBuffer in
+      // Initialize the "start_offset" region.
+      var startOffset: UInt64 = 0
+      var startOffsetAddr =
+        tensorBuffer.bindMemory(to: UInt64.self, capacity: scalars.count)
+      for tfEncodedSize in tfEncodedSizes {
+        startOffsetAddr.initialize(to: startOffset)
+        startOffsetAddr = startOffsetAddr.advanced(by: 1)
+        startOffset = startOffset + UInt64(tfEncodedSize)
+      }
+
+      // Initialize the "data" region.
+      var dataAddr = tensorBuffer.advanced(by: startOffsetsByteCount)
+        .bindMemory(to: Int8.self, capacity: dataByteCount)
+      let status = TF_NewStatus()
+      for (cString, tfEncodedSize) in zip(cStrings, tfEncodedSizes) {
+        _ = cString.withUnsafeBufferPointer { buffer in
+          TF_StringEncode(buffer.baseAddress, buffer.count, dataAddr,
+                          tfEncodedSize, status)
+        }
+        checkOk(status)
+        dataAddr = dataAddr.advanced(by: tfEncodedSize)
+      }
+      TF_DeleteStatus(status)
+    }
+  )
+}
+
+@inlinable @inline(never)
+@_silgen_name("__tf_string_tensor_from_string")
+public func _TFStringTensorFromString(_ scalar: String) -> TensorHandle<String> {
+  return _TFStringTensorFromStrings([scalar], shape: [])
+}
+
+@inlinable @inline(never)
+@_silgen_name("__tf_string_tensor_from_strings_1d")
+public func _TFStringTensorFromStrings1D(_ scalars: [String]) -> TensorHandle<String> {
+  return _TFStringTensorFromStrings(scalars, shape: [scalars.count])
+}
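The offset-table arithmetic above can be checked in isolation with plain Swift (illustrative sizes only; TF_StringEncodedSize additionally accounts for each string's varint length prefix):

    // For ["ab", "c"]: one UInt64 start offset per string precedes the data region.
    let strings = ["ab", "c"]
    let startOffsetsByteCount = strings.count * MemoryLayout<UInt64>.stride
    print(startOffsetsByteCount)  // 16 on 64-bit platforms
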
diff --git a/stdlib/public/TensorFlow/TensorGroup.swift b/stdlib/public/TensorFlowCore/TensorGroup.swift
similarity index 69%
rename from stdlib/public/TensorFlow/TensorGroup.swift
rename to stdlib/public/TensorFlowCore/TensorGroup.swift
index 02577eb73d8c4..bf0dbee912dbf 100644
--- a/stdlib/public/TensorFlow/TensorGroup.swift
+++ b/stdlib/public/TensorFlowCore/TensorGroup.swift
@@ -153,93 +153,3 @@ extension VariantHandle : TensorGroup {
     self.init(owning: tensorHandles!.pointee)
   }
 }
-
-extension Tensor : TensorGroup {
-  @inlinable
-  public static var _unknownShapeList: [TensorShape?] {
-    return [nil]
-  }
-
-  @inlinable
-  public static var _typeList: [TensorDataType] {
-    return [Scalar.tensorFlowDataType]
-  }
-
-  public func _unpackTensorHandles(
-    into address: UnsafeMutablePointer<CTensorHandle>?) {
-    address!.initialize(to: handle._cTensorHandle)
-  }
-
-  public init(_owning tensorHandles: UnsafePointer<CTensorHandle>?) {
-    self.init(handle: TensorHandle(_owning: tensorHandles!.pointee))
-  }
-}
-
-extension _TensorElementLiteral : TensorGroup {
-  @inlinable
-  public static var _unknownShapeList: [TensorShape?] {
-    return [nil]
-  }
-
-  @inlinable
-  public static var _typeList: [TensorDataType] {
-    return [Scalar.tensorFlowDataType]
-  }
-
-  public func _unpackTensorHandles(
-    into address: UnsafeMutablePointer<CTensorHandle>?) {
-    address!.initialize(to: handle._cTensorHandle)
-  }
-
-  public init(_owning tensorHandles: UnsafePointer<CTensorHandle>?) {
-    self.init(handle: TensorHandle(_owning: tensorHandles!.pointee))
-  }
-}
-
-extension StringTensor : TensorGroup {
-  @inlinable
-  public static var _unknownShapeList: [TensorShape?] {
-    return [nil]
-  }
-
-  @inlinable
-  public static var _typeList: [TensorDataType] {
-    return [String.tensorFlowDataType]
-  }
-
-  public func _unpackTensorHandles(
-    into address: UnsafeMutablePointer<CTensorHandle>?) {
-    address!.initialize(to: handle._cTensorHandle)
-  }
-
-  public init(_owning tensorHandles: UnsafePointer<CTensorHandle>?) {
-    self.init(handle: TensorHandle(_owning: tensorHandles!.pointee))
-  }
-}
-
-extension Array : TensorArrayProtocol where Element : TensorGroup {
-  public func _unpackTensorHandles(into address: UnsafeMutablePointer<CTensorHandle>?) {
-    var ptr = address
-    for elem in self {
-      elem._unpackTensorHandles(into: ptr)
-      ptr = ptr!.advanced(by: Int(elem._tensorHandleCount))
-    }
-  }
-
-  public var _tensorHandleCount: Int32 {
-    return Element._tensorHandleCount * Int32(count)
-  }
-
-  public var _typeList: [TensorDataType] {
-    return Array([[TensorDataType]](
-      repeating: Element._typeList,
-      count: Int(Element._tensorHandleCount)).joined())
-  }
-
-  public init(_owning tensorHandles: UnsafePointer<CTensorHandle>?, count: Int) {
-    let size = count / Int(Element._tensorHandleCount)
-    self = Array((0..<size).map {
-      Element(_owning: tensorHandles?.advanced(
-        by: $0 * Int(Element._tensorHandleCount)))
-    })
-  }
-}
diff --git a/stdlib/public/TensorFlow/TensorHandle.swift b/stdlib/public/TensorFlowCore/TensorHandle.swift
rename from stdlib/public/TensorFlow/TensorHandle.swift
rename to stdlib/public/TensorFlowCore/TensorHandle.swift
--- a/stdlib/public/TensorFlow/TensorHandle.swift
+++ b/stdlib/public/TensorFlowCore/TensorHandle.swift
@@ ... @@ public final class TensorHandle<Scalar> : _AnyTensorHandle
     super.init(base: cTensorHandle)
   }

-  @usableFromInline
-  convenience init(copyingFromCTensor cTensor: CTensor) {
+  @inlinable
+  public convenience init(copyingFromCTensor cTensor: CTensor) {
     let status = TF_NewStatus()
     let cTensorHandle = TFE_NewTensorHandle(cTensor, status)
     checkOk(status)
@@ -71,8 +71,8 @@ public final class TensorHandle : _AnyTensorHandle
   ///
   /// `bufferInitializer` receives a buffer with exactly `byteCount` bytes of
   /// capacity. `bufferInitializer` must initialize the entire buffer.
-  @usableFromInline
-  convenience init(
+  @inlinable
+  public convenience init(
     shape: [Int],
     byteCount: Int,
     bufferInitializer: (UnsafeMutableRawPointer) -> Void
@@ -104,17 +104,17 @@ extension TensorHandle where Scalar : TensorFlowScalar {
   /// hold the scalars in a tensor with shape `shape`. `scalarsInitializer`
   /// must initialize the entire buffer, with contiguous scalars in row-major
   /// order.
-  @usableFromInline
-  convenience init(
+  @inlinable
+  public convenience init(
     shape: [Int],
     scalarsInitializer: (UnsafeMutablePointer<Scalar>) -> Void
   ) {
     let contiguousSize = shape.reduce(1, *)
     let byteCount = contiguousSize * MemoryLayout<Scalar>.stride
-    self.init(shape: shape, byteCount: byteCount) { buffer in
-      scalarsInitializer(buffer.bindMemory(to: Scalar.self,
-                                           capacity: contiguousSize))
-    }
+    self.init(shape: shape, byteCount: byteCount, bufferInitializer: { buffer in
+      let pointer = buffer.bindMemory(to: Scalar.self, capacity: contiguousSize)
+      scalarsInitializer(pointer)
+    })
   }
 }

@@ -124,10 +124,23 @@ internal extension TensorHandle {
   /// - Returns: A `ShapedArray`.
   @usableFromInline
   @inline(never)
-  func makeHostCopy() -> ShapedArray<Scalar> {
+  func hostScalar() -> Scalar? {
     internalConsistencyCheck(isConcrete)
-    debugLog("Calling makeHostCopy() with c handle \(_cTensorHandle)")
-    return ShapedArray(cTensorHandle: _cTensorHandle)
+    debugLog("Calling hostScalar() with c handle \(_cTensorHandle)")
+    internalConsistencyCheck(TFE_TensorHandleIsConcrete(_cTensorHandle) != 0)
+    let status = TF_NewStatus()
+    let cTensor = TFE_TensorHandleResolve(_cTensorHandle, status)
+    checkOk(status)
+    TF_DeleteStatus(status)
+    internalConsistencyCheck(cTensor != nil)
+    debugLog("# of dims is \(TF_NumDims(cTensor!))")
+    debugLog("Returning a shaped array.")
+    defer { TF_DeleteTensor(cTensor!) }
+    guard TF_NumDims(cTensor!) == 0 else { return nil }
+    let startAddress = TF_TensorData(cTensor!)
+      .assumingMemoryBound(to: Scalar.self)
+    let bufferPointer = UnsafeBufferPointer(start: startAddress, count: 1)
+    return bufferPointer[0]
   }
 }

@@ -148,7 +161,7 @@ extension TensorHandle : TensorSendableReceivable {
     tensorHandle = TensorHandle(_owning: cTensorHandle!)
     if _RuntimeConfig.printsDebugLog {
       debugLog("The received tensor of id \(tensorID) has content:")
-      dumpTensorContent(tensorHandle._cTensorHandle, Scalar.self)
+      // dumpTensorContent(tensorHandle._cTensorHandle, Scalar.self)
     }
     return tensorHandle
   }
@@ -158,7 +171,7 @@ extension TensorHandle : TensorSendableReceivable {
     _ tensorID: Int) {
     if _RuntimeConfig.printsDebugLog {
       debugLog("Sending tensor of id \(tensorID) and type \(Scalar.self) with:")
-      dumpTensorContent(_cTensorHandle, Scalar.self)
+      // dumpTensorContent(_cTensorHandle, Scalar.self)
     }
     let status = TF_NewStatus()
     internalConsistencyCheck(status != nil)
@@ -179,22 +192,6 @@ extension TensorHandle : TensorSendableReceivable {
   }
 }

-internal extension ShapedArray where Scalar : _TensorFlowDataTypeCompatible {
-  @usableFromInline
-  @inline(never)
-  init(cTensorHandle: CTensorHandle) {
-    internalConsistencyCheck(TFE_TensorHandleIsConcrete(cTensorHandle) != 0)
-    let status = TF_NewStatus()
-    let cTensor = TFE_TensorHandleResolve(cTensorHandle, status)
-    checkOk(status)
-    TF_DeleteStatus(status)
-    internalConsistencyCheck(cTensor != nil)
-    debugLog("# of dims is \(TF_NumDims(cTensor!))")
-    debugLog("Returning a shaped array.")
-    self.init(owning: cTensor!)
-  }
-}
-
 /// `ResourceHandle` is the type used by ops and the `#tfop()` syntax to
 /// represent TensorFlow "resource" values.
 public final class ResourceHandle : _AnyTensorHandle {
diff --git a/stdlib/public/TensorFlow/TensorProtocol.swift b/stdlib/public/TensorFlowCore/TensorProtocol.swift
similarity index 100%
rename from stdlib/public/TensorFlow/TensorProtocol.swift
rename to stdlib/public/TensorFlowCore/TensorProtocol.swift
diff --git a/stdlib/public/TensorFlow/TensorShape.swift b/stdlib/public/TensorFlowCore/TensorShape.swift
similarity index 100%
rename from stdlib/public/TensorFlow/TensorShape.swift
rename to stdlib/public/TensorFlowCore/TensorShape.swift
diff --git a/stdlib/public/TensorFlow/Threading.swift b/stdlib/public/TensorFlowCore/Threading.swift
similarity index 100%
rename from stdlib/public/TensorFlow/Threading.swift
rename to stdlib/public/TensorFlowCore/Threading.swift
diff --git a/stdlib/public/TensorFlow/Utilities.swift b/stdlib/public/TensorFlowCore/Utilities.swift
similarity index 54%
rename from stdlib/public/TensorFlow/Utilities.swift
rename to stdlib/public/TensorFlowCore/Utilities.swift
index 99f1911aecdcd..67399691096c7 100644
--- a/stdlib/public/TensorFlow/Utilities.swift
+++ b/stdlib/public/TensorFlowCore/Utilities.swift
@@ -29,8 +29,8 @@ import CTensorFlow
 /// in debug mode), to help shake out more bugs and facilitate debugging in the
 /// early project phases. It can be replaced with plain assert() later, when we
 /// have a more mature code base.
-@usableFromInline
-func internalConsistencyCheck(
+@inlinable
+public func internalConsistencyCheck(
   _ predicate: Bool,
   _ errMessage: String = "TF runtime assertion failure",
   file: StaticString = #file,
@@ -41,11 +41,17 @@ func internalConsistencyCheck(
   }
 }

-@usableFromInline
-func checkOk(_ s: CTFStatus?, file: StaticString = #file, line: UInt = #line) {
-  internalConsistencyCheck(TF_GetCode(s) == TF_OK,
-                           String(cString: TF_Message(s)),
-                           file: file, line: line)
+@inlinable
+public func checkOk(
+  _ s: CTFStatus?,
+  file: StaticString = #file,
+  line: UInt = #line
+) {
+  internalConsistencyCheck(
+    TF_GetCode(s) == TF_OK,
+    String(cString: TF_Message(s)),
+    file: file,
+    line: line)
 }

 //===----------------------------------------------------------------------===//
@@ -56,19 +62,19 @@ func checkOk(_ s: CTFStatus?, file: StaticString = #file, line: UInt = #line) {
 // should check that the pointer is not NULL.

 /// The `TF_Session *` type.
-@usableFromInline typealias CTFSession = OpaquePointer
+public typealias CTFSession = OpaquePointer

 /// The `TF_Status *` type.
-@usableFromInline typealias CTFStatus = OpaquePointer
+public typealias CTFStatus = OpaquePointer

 /// The `TF_Graph*` type.
-@usableFromInline typealias CTFGraph = OpaquePointer
+public typealias CTFGraph = OpaquePointer

 /// The `TF_Function*` type.
-@usableFromInline typealias CTFFunction = OpaquePointer
+public typealias CTFFunction = OpaquePointer

 /// The `TF_Tensor *` type.
-@usableFromInline typealias CTensor = OpaquePointer
+public typealias CTensor = OpaquePointer

 /// The `TF_TensorHandle *` type.
 ///
@@ -80,25 +86,25 @@ public typealias CTensorHandle = OpaquePointer
 public typealias CTFEContext = OpaquePointer

 /// The `TFE_Op *` type.
-@usableFromInline typealias CTFEOp = OpaquePointer
+public typealias CTFEOp = OpaquePointer

 /// The `TF_OperationDescription *` type.
-@usableFromInline typealias CTFOperationDescription = OpaquePointer
+public typealias CTFOperationDescription = OpaquePointer

 /// The `TFE_TraceContext *` type.
-@usableFromInline typealias CTFETraceContext = OpaquePointer
+public typealias CTFETraceContext = OpaquePointer

 //===----------------------------------------------------------------------===//
 // Logging
 //===----------------------------------------------------------------------===//

 #if os(macOS) || os(iOS) || os(watchOS) || os(tvOS)
-let stderr = __stderrp
-let stdout = __stdoutp
+@usableFromInline let stderr = __stderrp
+@usableFromInline let stdout = __stdoutp
 #endif

 /// Log to standard error.
-func logToStderr(_ message: StaticString) {
+public func logToStderr(_ message: StaticString) {
   message.utf8Start
     .withMemoryRebound(to: Int8.self, capacity: message.utf8CodeUnitCount) {
       _ = fputs($0, stderr)
@@ -106,14 +112,14 @@ func logToStderr(_ message: StaticString) {
 }

 /// Log to standard error.
-func logToStderr(_ message: String) {
+public func logToStderr(_ message: String) {
   _ = fputs(message, stderr)
 }

-@usableFromInline
-func debugLog(_ message: @autoclosure () -> String,
-              file: StaticString = #file,
-              line: UInt = #line) {
+@inlinable
+public func debugLog(_ message: @autoclosure () -> String,
+                     file: StaticString = #file,
+                     line: UInt = #line) {
   if _RuntimeConfig.printsDebugLog {
     print("[\(file):\(line)] \(message())")
     // This helps dump more log before a crash.
@@ -127,58 +133,11 @@ func debugLog(_ message: @autoclosure () -> String,

 /// Given the address of a `TF_Buffer` and a file path, write the buffer's
 /// contents to the file.
-func writeContents(of buffer: UnsafePointer<TF_Buffer>,
-                   toFile path: String) {
+public func writeContents(
+  of buffer: UnsafePointer<TF_Buffer>,
+  toFile path: String
+) {
   let fp = fopen(path, "w+")
   fwrite(buffer.pointee.data, /*size*/ 1, /*count*/ buffer.pointee.length, fp)
   fclose(fp)
 }
-
-//===----------------------------------------------------------------------===//
-// Unit test utilities
-//===----------------------------------------------------------------------===//
-// TODO: Move this section to a unit-test only Swift module, once the google
-// internal lit based test infra can handle importing additional Swift modules.
-
-/// This is a generic host-only op that hides the details of its impl in the SIL
-/// code. This makes reading/writing SIL based compiler unit tests simple.
-@inline(never)
-public func _hostOp<T>(_ x: T) {
-  print(x)
-}
-
-@inline(never)
-public func _hostOp<Scalar>(_ x: Tensor<Scalar>) {
-  print(x)
-}
-
-@inline(never)
-public func _hostOp<Scalar : TensorFlowScalar>(_ x: TensorHandle<Scalar>) {
-  print(Tensor(handle: x))
-}
-
-/// Some TPU ops (e.g. infeed/outfeed) require tensor shape info, which the APIs
-/// below can provide.
-///
-/// TODO: Remove these helper APIs, when we have a better shape
-/// inference/propagation design.
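With these symbols now public, client code can opt into runtime tracing directly (sketch; assumes `_RuntimeConfig.printsDebugLog` is settable from outside the module, consistent with its use above):

    import TensorFlow

    _RuntimeConfig.printsDebugLog = true
    debugLog("About to execute an eager op.")  // prints with a [file:line] prefix
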
-@inlinable @inline(__always)
-public func _scalarTensorWithShape<Scalar : TensorFlowScalar>(
-  _ x: Tensor<Scalar>
-) -> Tensor<Scalar> {
-  let ret: TensorHandle<Scalar> =
-    #tfop("Identity", x, T$dtype: Scalar.tensorFlowDataType,
-          __shapes: [TensorShape()])
-  return Tensor(handle: ret)
-}
-
-@inlinable @inline(__always)
-public func _addScalarTensorsWithShape<Scalar : TensorFlowScalar>(
-  _ x: Tensor<Scalar>,
-  _ y: Tensor<Scalar>
-) -> Tensor<Scalar> {
-  let ret: TensorHandle<Scalar> =
-    #tfop("Add", x, y, T$dtype: Scalar.tensorFlowDataType,
-          __shapes: [TensorShape()])
-  return Tensor(handle: ret)
-}
diff --git a/test/TensorFlow/deabstraction_finished.swift b/test/TensorFlow/deabstraction_finished.swift
index 4966bca0cd24c..7e08eeb7a0f27 100644
--- a/test/TensorFlow/deabstraction_finished.swift
+++ b/test/TensorFlow/deabstraction_finished.swift
@@ -42,8 +42,8 @@ public func constexprCall(a: Tensor<Float>, idx: Tensor<Int32>) -> Tensor<Float>
 CHECK-LABEL: --- TFPartition Accelerator Result: {{.*}}constexprCall
 CHECK: [[A:%.*]] = graph_op "Const"() {dtype$dtype: i32 3, value$tensor: i32 0
 CHECK: [[B:%.*]] = graph_op "Const"
- CHECK: [[C:%.*]] = graph_op "Const"
- CHECK: [[RESULT:%.*]] = graph_op "OneHot"(%0 : $TensorHandle<Int32>, [[A]] : $TensorHandle<Int32>, [[B]] : $TensorHandle<Float>, [[C]] : $TensorHandle<Float>) {T$dtype: i32 1, TI$dtype: i32 3, axis: i64 1, __device: "/job:localhost/replica:0/task:0/device:CPU:0"} : $TensorHandle<Float>
+ CHECK: [[C:%.*]] = graph_op "Const"() {dtype$dtype: i32 1, value$tensor: f32 0x0 /* 0 */, __device: "ALL_DEVICES"} : $TensorHandle<Float> // user: %4
+ CHECK: [[RESULT:%.*]] = graph_op "OneHot"(%0 : $TensorHandle<Int32>, [[A]] : $TensorHandle<Int32>, [[B]] : $TensorHandle<Float>, [[C]] : $TensorHandle<Float>) {axis: i64 1, T$dtype: i32 1, TI$dtype: i32 3, __device: "/job:localhost/replica:0/task:0/device:CPU:0"} : $TensorHandle<Float>
 CHECK: return [[RESULT]]
 */
diff --git a/test/TensorFlow/diagnostics.swift b/test/TensorFlow/diagnostics.swift
index 3d76f2574f87b..3d62908ad8d59 100644
--- a/test/TensorFlow/diagnostics.swift
+++ b/test/TensorFlow/diagnostics.swift
@@ -1,4 +1,4 @@
-// RUN: %target-swift-frontend -Xllvm -tf-dynamic-compilation=false -Xllvm -tf-dump-intermediates -Xllvm -tf-warn-send-recv -O -emit-sil -verify %s
+// RUN: %target-swift-frontend -Xllvm -tf-dynamic-compilation=false -Xllvm -tf-dump-intermediates -Xllvm -tf-warn-send-recv -O -emit-sil -verify-ignore-unknown %s

 import TensorFlow

@@ -98,7 +98,7 @@ public func scalarToAccelerator(x: Float) -> Tensor<Float> {
 // tf-warn-scalar-transfer=true.
diff --git a/test/TensorFlowRuntime/tensor.swift b/test/TensorFlowRuntime/tensor.swift
index 962ec3f401f82..f235179907d43 100644
--- a/test/TensorFlowRuntime/tensor.swift
+++ b/test/TensorFlowRuntime/tensor.swift
@@ -745,18 +745,18 @@ TensorTests.testAllBackends("ReshapeTensor") {
   expectEqual([1, 3, 1, 2, 1], result.shape)
 }
 
-TensorTests.testAllBackends("Unbroadcast1") {
+TensorTests.testAllBackends("Unbroadcasted1") {
   let x = Tensor<Float>(repeating: 1, shape: [2, 3, 4, 5])
   let y = Tensor<Float>(repeating: 1, shape: [4, 5])
-  let z = x.unbroadcast(like: y)
+  let z = x.unbroadcasted(like: y)
   expectEqual(ShapedArray<Float>(repeating: 6, shape: [4, 5]), z.array)
 }
 
-TensorTests.testAllBackends("Unbroadcast2") {
+TensorTests.testAllBackends("Unbroadcasted2") {
   let x = Tensor<Float>(repeating: 1, shape: [2, 3, 4, 5])
   let y = Tensor<Float>(repeating: 1, shape: [3, 1, 5])
-  let z = x.unbroadcast(like: y)
+  let z = x.unbroadcasted(like: y)
   expectEqual(ShapedArray<Float>(repeating: 8, shape: [3, 1, 5]), z.array)
 }
 
diff --git a/test/TensorFlowRuntime/tensor_api.swift b/test/TensorFlowRuntime/tensor_api.swift
index 5fd08138b1637..4f43fe1dc4b67 100644
--- a/test/TensorFlowRuntime/tensor_api.swift
+++ b/test/TensorFlowRuntime/tensor_api.swift
@@ -41,7 +41,7 @@ TensorNonTPUTests.testAllBackends("BroadcastTensor") {
   // 1 -> 2 x 3 x 4
   let one = Tensor<Float>(1)
   var target = Tensor<Float>(repeating: 0.0, shape: [2, 3, 4])
-  let broadcasted = one.broadcast(like: target)
+  let broadcasted = one.broadcasted(like: target)
   expectEqual(Tensor<Float>(repeating: 1, shape: [2, 3, 4]), broadcasted)
   target .= Tensor<Float>(repeating: 1, shape: [1, 3, 1])
   expectEqual(Tensor<Float>(repeating: 1, shape: [2, 3, 4]), target)
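The runtime tests track the rename of `broadcast(like:)` and `unbroadcast(like:)` to the past-participle forms `broadcasted(like:)` and `unbroadcasted(like:)`. A small sketch of the round trip, using the same shapes as `Unbroadcasted1` above:

```swift
import TensorFlow

// Round trip with the renamed APIs, using the shapes from Unbroadcasted1.
let small = Tensor<Float>(repeating: 1, shape: [4, 5])
let target = Tensor<Float>(repeating: 0, shape: [2, 3, 4, 5])

// Expand [4, 5] across the two leading dimensions.
let big = small.broadcasted(like: target)   // shape [2, 3, 4, 5]

// Reverse the broadcast by summing over those dimensions: each element
// becomes 2 * 3 = 6, matching the test's expected value.
let back = big.unbroadcasted(like: small)   // shape [4, 5]
print(back.shape)
```

The past-participle spelling follows the Swift API naming convention for non-mutating operations: both methods return a new tensor rather than modifying `self`.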
"${TENSORFLOW_SWIFT_BINDINGS}" && -d "${TENSORFLOW_SWIFT_BINDINGS_DIR}" ]] ; then - TENSORFLOW_SWIFT_BINDINGS="${TENSORFLOW_SWIFT_BINDINGS_DIR}/RawOpsGenerated.swift" + TENSORFLOW_SWIFT_BINDINGS="${TENSORFLOW_SWIFT_BINDINGS_DIR}" fi if [[ "${TENSORFLOW_SWIFT_BINDINGS}" ]] ; then cmake_options=( diff --git a/utils/build_swift/driver_arguments.py b/utils/build_swift/driver_arguments.py index 3c927ef0a2b22..be01ca8e29de4 100644 --- a/utils/build_swift/driver_arguments.py +++ b/utils/build_swift/driver_arguments.py @@ -974,8 +974,7 @@ def create_argument_parser(): 'Used for linking Swift programs.') option('--tensorflow-swift-bindings', store_path, default=None, - help='Path to a TensorFlow Swift bindings file ' - '(RawOpsGenerated.swift).') + help='Path to a TensorFlow Swift bindings repository.') option('--tensorflow-swift-apis', store_path, default=None, help='Path to a TensorFlow deep learning library repository.') diff --git a/utils/update_checkout/update-checkout-config.json b/utils/update_checkout/update-checkout-config.json index 295cb669791e2..df0e9b678117f 100644 --- a/utils/update_checkout/update-checkout-config.json +++ b/utils/update_checkout/update-checkout-config.json @@ -242,7 +242,7 @@ "icu": "release-61-1", "tensorflow": "447e512d332ab86172a3b13119900b4d021d0c65", "tensorflow-swift-bindings": "a7ccb727514414d31df9e403f34fa923bdf6a519", - "tensorflow-swift-apis": "cfefd63fa60a55d1da1e2a412ed561eb3448e691" + "tensorflow-swift-apis": "d2c78f4c323f223ea79e7bf2a035f71edcd42824" } } }