From de8fdbddab43b73bd6ac3c4f6b503afc455b13fc Mon Sep 17 00:00:00 2001
From: Anthony Platanios
Date: Fri, 19 Apr 2019 13:37:17 -0400
Subject: [PATCH 01/30] Moved a couple of tensor initializers to swift-apis.

---
 stdlib/public/TensorFlow/Ops.swift    | 8 --------
 stdlib/public/TensorFlow/Tensor.swift | 7 -------
 2 files changed, 15 deletions(-)

diff --git a/stdlib/public/TensorFlow/Ops.swift b/stdlib/public/TensorFlow/Ops.swift
index 9f542deb7018b..73a48294715ce 100644
--- a/stdlib/public/TensorFlow/Ops.swift
+++ b/stdlib/public/TensorFlow/Ops.swift
@@ -666,14 +666,6 @@ public extension Tensor {
 }
 
 public extension Tensor {
-  /// Returns a concatenated tensor of the given tensors.
-  /// - Precondition: The tensors must have the same dimensions, except for the
-  ///   specified axis.
-  /// - Precondition: The axis must be in the range `-rank..<rank`.
-  @inlinable @inline(__always)
-  init(concatenating tensors: [Tensor<Scalar>], alongAxis axis: Int = 0) {
-    self = Raw.concatV2(tensors, axis: Tensor<Int32>(Int32(axis)))
-  }
-
   /// Concatenates tensors along the specified axis.
   /// - Precondition: The tensors must have the same dimensions, except for the
   ///   specified axis.
diff --git a/stdlib/public/TensorFlow/Tensor.swift b/stdlib/public/TensorFlow/Tensor.swift
index f0e3972e7771f..2dede3b19eafc 100644
--- a/stdlib/public/TensorFlow/Tensor.swift
+++ b/stdlib/public/TensorFlow/Tensor.swift
@@ -240,13 +240,6 @@ internal extension Tensor where Scalar : TensorFlowFloatingPoint {
 }
 
 public extension Tensor {
-  /// Creates a tensor from an array of tensors (which may themselves be
-  /// scalars).
-  @inlinable @inline(__always)
-  init(_ elements: [Tensor]) {
-    self = Raw.pack(elements)
-  }
-
   /// Creates a 1D tensor from contiguous scalars.
   ///
   /// - Parameters:

From 06b96c00c6bf9984bfaff8d56e35854206f1b492 Mon Sep 17 00:00:00 2001
From: Anthony Platanios
Date: Fri, 19 Apr 2019 17:24:17 -0400
Subject: [PATCH 02/30] Moved the activation functions to swift-apis.

---
 stdlib/public/TensorFlow/CompositeMath.swift | 51 --------------------
 stdlib/public/TensorFlow/Gradients.swift     | 30 ------------
 stdlib/public/TensorFlow/Ops.swift           |  7 ---
 3 files changed, 88 deletions(-)
 delete mode 100644 stdlib/public/TensorFlow/CompositeMath.swift

diff --git a/stdlib/public/TensorFlow/CompositeMath.swift b/stdlib/public/TensorFlow/CompositeMath.swift
deleted file mode 100644
index 6a1541536eada..0000000000000
--- a/stdlib/public/TensorFlow/CompositeMath.swift
+++ /dev/null
@@ -1,51 +0,0 @@
-//===-- CompositeMath.swift -----------------------------------*- swift -*-===//
-//
-// This source file is part of the Swift.org open source project
-//
-// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
-// Licensed under Apache License v2.0 with Runtime Library Exception
-//
-// See https://swift.org/LICENSE.txt for license information
-// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains composite math functions. Functions in this file are
-// defined in terms of core ops that are differentiable, and therefore do not
-// need custom gradients.
-//
-//===----------------------------------------------------------------------===//
-
-/// Computes `sigmoid` of the specified tensor element-wise.
-/// Specifically, computes `1 / (1 + exp(-x))`.
-@inlinable @inline(__always)
-@differentiable(vjp: _vjpSigmoid(_:) where T : TensorFlowFloatingPoint)
-public func sigmoid<T : FloatingPoint>(_ x: Tensor<T>) -> Tensor<T> {
-  return Raw.sigmoid(x)
-}
-
-/// Computes `relu` of the specified tensor element-wise.
-/// Specifically, computes `max(0, x)`.
-@inlinable @inline(__always)
-@differentiable(vjp: _vjpRelu(_:) where T : TensorFlowFloatingPoint)
-public func relu<T : FloatingPoint>(_ x: Tensor<T>) -> Tensor<T> {
-  return max(0, x)
-}
-
-/// Computes the softmax of the specified tensor along the last axis.
-/// Specifically, computes `exp(x) / exp(x).sum(alongAxes: -1)`.
-@inlinable @inline(__always)
-@differentiable(vjp: _vjpSoftmax(_:) where T : TensorFlowFloatingPoint)
-public func softmax<T : FloatingPoint>(_ x: Tensor<T>) -> Tensor<T> {
-  return Raw.softmax(logits: x)
-}
-
-/// Computes the softmax of the specified tensor along the specified axis.
-/// Specifically, computes `exp(x) / exp(x).sum(alongAxes: axis)`.
-@inlinable @inline(__always)
-public func softmax<T : FloatingPoint>(
-  _ x: Tensor<T>, alongAxis axis: Int
-) -> Tensor<T> {
-  let expx = exp(x)
-  return expx / expx.sum(alongAxes: axis)
-}
diff --git a/stdlib/public/TensorFlow/Gradients.swift b/stdlib/public/TensorFlow/Gradients.swift
index 4430e04354462..d1b76b360ee6b 100644
--- a/stdlib/public/TensorFlow/Gradients.swift
+++ b/stdlib/public/TensorFlow/Gradients.swift
@@ -611,33 +611,3 @@ extension Tensor where Scalar : TensorFlowFloatingPoint {
     })
   }
 }
-
-//===----------------------------------------------------------------------===//
-// Composite math
-//===----------------------------------------------------------------------===//
-
-@inlinable
-func _vjpSigmoid<T : TensorFlowFloatingPoint>(
-  _ x: Tensor<T>
-) -> (Tensor<T>, (Tensor<T>) -> Tensor<T>) {
-  let value = sigmoid(x)
-  return (value, { v in Raw.sigmoidGrad(value, dy: v) })
-}
-
-@inlinable
-func _vjpSoftmax<T : TensorFlowFloatingPoint>(
-  _ x: Tensor<T>
-) -> (Tensor<T>, (Tensor<T>) -> Tensor<T>) {
-  let value = softmax(x)
-  return (value, { v in
-    let sumChannels = (v * value).sum(alongAxes: -1)
-    return (v - sumChannels) * value
-  })
-}
-
-@inlinable
-func _vjpRelu<T : TensorFlowFloatingPoint>(
-  _ x: Tensor<T>
-) -> (Tensor<T>, (Tensor<T>) -> Tensor<T>) {
-  return (relu(x), { v in Tensor(x .> 0) * v })
-}
diff --git a/stdlib/public/TensorFlow/Ops.swift b/stdlib/public/TensorFlow/Ops.swift
index 73a48294715ce..638d6098fcdb9 100644
--- a/stdlib/public/TensorFlow/Ops.swift
+++ b/stdlib/public/TensorFlow/Ops.swift
@@ -960,13 +960,6 @@ public extension Tensor where Scalar : Numeric {
   }
 }
 
-/// Computes the log-softmax of the specified tensor element-wise.
-@inlinable @inline(__always)
-@differentiable(vjp: _vjpLogSoftmax(_:) where T : TensorFlowFloatingPoint)
-public func logSoftmax<T : FloatingPoint>(_ x: Tensor<T>) -> Tensor<T> {
-  return Raw.logSoftmax(logits: x)
-}
-
 //===----------------------------------------------------------------------===//
 // Selection
 //===----------------------------------------------------------------------===//

From dcd46be41a2d0cd51bbb9af267c7dadcbd53ab2a Mon Sep 17 00:00:00 2001
From: Anthony Platanios
Date: Fri, 19 Apr 2019 17:24:50 -0400
Subject: [PATCH 03/30] Minor edit.
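
Removes CompositeMath.swift from the stdlib sources, now that the previous
patch moved its contents (sigmoid, relu, softmax, and their VJPs) to
swift-apis. As an illustrative sketch only (assuming a toolchain whose
`TensorFlow` module ships the swift-apis definitions; printed values are
approximate), the moved functions keep their documented semantics:

  import TensorFlow

  let x = Tensor<Float>([-1, 0, 1])

  // sigmoid(x) = 1 / (1 + exp(-x)), per the doc comment above.
  print(sigmoid(x))            // ≈ [0.2689, 0.5, 0.7311]
  print(1 / (1 + exp(-x)))     // same values, computed from the formula

  // relu(x) = max(0, x)
  print(relu(x))               // [0.0, 0.0, 1.0]

  // softmax normalizes along the last axis, so the result sums to 1.
  print(softmax(x).sum())      // ≈ 1.0

  // The sigmoid VJP implies d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)).
  print(gradient(at: x) { sigmoid($0).sum() })  // ≈ [0.1966, 0.25, 0.1966]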
--- stdlib/public/TensorFlow/CMakeLists.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/stdlib/public/TensorFlow/CMakeLists.txt b/stdlib/public/TensorFlow/CMakeLists.txt index e3bb11fff4ceb..161e3ad4e183a 100644 --- a/stdlib/public/TensorFlow/CMakeLists.txt +++ b/stdlib/public/TensorFlow/CMakeLists.txt @@ -33,7 +33,6 @@ list(APPEND swift_stdlib_compile_flags "-DCOMPILING_TENSORFLOW_MODULE") set(SOURCES CompilerRuntime.swift - CompositeMath.swift Dataset.swift DataTypes.swift Execution.swift From 7a957cc970af0cfb4335edd131ee6dda12cc5c43 Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Fri, 19 Apr 2019 17:31:04 -0400 Subject: [PATCH 04/30] Moved the log-softmax VJP to swift-apis. --- stdlib/public/TensorFlow/Gradients.swift | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/stdlib/public/TensorFlow/Gradients.swift b/stdlib/public/TensorFlow/Gradients.swift index d1b76b360ee6b..5a043e57df764 100644 --- a/stdlib/public/TensorFlow/Gradients.swift +++ b/stdlib/public/TensorFlow/Gradients.swift @@ -457,16 +457,6 @@ func _vjpRsqrt( return (value, { v in -v / 2 * value }) } -@inlinable -func _vjpLogSoftmax( - _ x: Tensor -) -> (Tensor, (Tensor) -> Tensor) { - let value = logSoftmax(x) - return (value, { v in - v - v.sum(alongAxes: -1) * exp(value) - }) -} - extension Tensor where Scalar : TensorFlowFloatingPoint { @inlinable func _vjpSquared() -> (Tensor, (Tensor) -> Tensor) { From 75f7039bfa56ad6531284cd63544efbd84f11140 Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Sat, 20 Apr 2019 12:10:15 -0400 Subject: [PATCH 05/30] Moved some tensor initializers to swift-apis. --- stdlib/public/TensorFlow/Gradients.swift | 1 - stdlib/public/TensorFlow/Ops.swift | 12 -- stdlib/public/TensorFlow/Tensor.swift | 157 ----------------------- 3 files changed, 170 deletions(-) diff --git a/stdlib/public/TensorFlow/Gradients.swift b/stdlib/public/TensorFlow/Gradients.swift index 5a043e57df764..9abbdf566d20f 100644 --- a/stdlib/public/TensorFlow/Gradients.swift +++ b/stdlib/public/TensorFlow/Gradients.swift @@ -38,7 +38,6 @@ //===----------------------------------------------------------------------===// infix operator .== : ComparisonPrecedence -infix operator .> : ComparisonPrecedence //===----------------------------------------------------------------------===// // Method-style differential operators diff --git a/stdlib/public/TensorFlow/Ops.swift b/stdlib/public/TensorFlow/Ops.swift index 638d6098fcdb9..183c07c774a8b 100644 --- a/stdlib/public/TensorFlow/Ops.swift +++ b/stdlib/public/TensorFlow/Ops.swift @@ -52,18 +52,6 @@ infix operator .= // - Consider explicit broadcasting for elementwise binary ops when // scalarization and rank getter are implemented. -//===----------------------------------------------------------------------===// -// Scalar type cast -//===----------------------------------------------------------------------===// - -public extension Tensor where Scalar : Numeric { - /// Perform an element-wise type conversion from a `Bool` tensor. 
- @inlinable @inline(__always) - init(_ other: Tensor) { - self = Raw.cast(other) - } -} - //===----------------------------------------------------------------------===// // Additive group //===----------------------------------------------------------------------===// diff --git a/stdlib/public/TensorFlow/Tensor.swift b/stdlib/public/TensorFlow/Tensor.swift index 2dede3b19eafc..801bd6f3a614b 100644 --- a/stdlib/public/TensorFlow/Tensor.swift +++ b/stdlib/public/TensorFlow/Tensor.swift @@ -203,26 +203,6 @@ public extension Tensor { // Initialization //===----------------------------------------------------------------------===// -public extension Tensor where Scalar : Numeric { - /// Perform an element-wise conversion from another `Tensor`. - @inlinable @inline(__always) - @differentiable( - vjp: _vjpCast where Scalar : TensorFlowFloatingPoint, - OtherScalar: TensorFlowFloatingPoint) - init(_ other: Tensor) { - self = Raw.cast(other) - } -} - -internal extension Tensor where Scalar : TensorFlowFloatingPoint { - @inlinable - static func _vjpCast( - _ other: Tensor - ) -> (Tensor, (Tensor) -> Tensor) { - return (Tensor(other), { v in Tensor(v) }) - } -} - public extension Tensor { /// Creates a tensor from a scalar value. @inlinable @inline(__always) @@ -341,62 +321,6 @@ public extension Tensor { } } -public extension Tensor { - /// Creates a tensor with the specified shape and a single, repeated scalar - /// value. - /// - /// - Parameters: - /// - shape: The dimensions of the tensor. - /// - repeatedValue: The scalar value to repeat. - @inlinable @inline(__always) - @available(*, deprecated, renamed: "init(repeating:shape:)") - init(shape: TensorShape, repeating repeatedValue: Scalar) { - self.init(repeating: repeatedValue, shape: shape) - } - - /// Creates a tensor with the specified shape and a single, repeated scalar value. - /// - /// - Parameters: - /// - repeatedValue: The scalar value to repeat. - /// - shape: The dimensions of the tensor. - @inlinable @inline(__always) - @differentiable(vjp: _vjpInit(repeating:shape:) - where Scalar : TensorFlowFloatingPoint) - init(repeating repeatedValue: Scalar, shape: TensorShape) { - self = Raw.fill(dims: Tensor(shape.dimensions.map(Int32.init)), - value: Tensor(repeatedValue)) - } -} - -internal extension Tensor where Scalar : TensorFlowFloatingPoint { - @inlinable - static func _vjpInit( - repeating repeatedValue: Scalar, - shape: TensorShape - ) -> (Tensor, (Tensor) -> Scalar) { - return (Tensor(repeating: repeatedValue, shape: shape), - { $0.sum().scalarized() }) - } -} - -public extension Tensor { - /// Creates a tensor by broadcasting the given scalar to a given rank with - /// all dimensions being 1. - @inlinable @inline(__always) - // @differentiable(where Scalar : TensorFlowFloatingPoint) - init(broadcasting scalar: Scalar, rank: Int) { - self = Tensor(scalar).reshaped(to: TensorShape(repeating: 1, count: rank)) - } - - /// Creates a tensor of shape `[4]` from a 4-tuple. - /// - Note: This is intended for internal use, for example, to initialize a - /// tensor attribute from `convolved2D`'s `strides` argument. 
- @inlinable @inline(__always) - internal init(_ scalars: (Scalar, Scalar, Scalar, Scalar)) { - self.init([scalars.0, scalars.1, scalars.2, scalars.3]) - } -} - //===----------------------------------------------------------------------===// // Initialization syntax //===----------------------------------------------------------------------===// @@ -560,87 +484,6 @@ public extension Tensor { } } -//===----------------------------------------------------------------------===// -// Numeric initialization -//===----------------------------------------------------------------------===// - -public extension Tensor where Scalar : Numeric { - /// Creates a tensor with all scalars set to zero. - /// - /// - Parameter shape: The dimensions of the tensor. - @inlinable @inline(__always) - init(zeros shape: TensorShape) { - self.init(repeating: 0, shape: shape) - } - - /// Creates a tensor with all scalars set to one. - /// - /// - Parameter shape: The dimensions of the tensor. - @inlinable @inline(__always) - init(ones shape: TensorShape) { - self.init(repeating: 1, shape: shape) - } - - /// Creates a 1-D tensor representing a sequence from a starting value to, but - /// not including, an end value, stepping by the specified amount. - /// - /// - Parameters: - /// - start: The starting value to use for the sequence. If the sequence - /// contains any values, the first one is `start`. - /// - end: An end value to limit the sequence. `end` is never an element of - /// the resulting sequence. - /// - stride: The amount to step by with each iteration. `stride` must be - /// positive. - /// - @inlinable @inline(__always) - init(rangeFrom start: Scalar, to end: Scalar, stride: Scalar) { - self = Raw.range( - start: Tensor(start), - limit: Tensor(end), - delta: Tensor(stride)) - } - - /// Creates a one-hot tensor at given indices. The locations represented by - /// `indices` take value `onValue` (`1` by default), while all other locations - /// take value `offValue` (`0` by default). If the input `indices` is rank - /// `n`, the new tensor will have rank `n+1`. The new axis is created at - /// dimension `axis` (by default, the new axis is appended at the end). - /// - /// If `indices` is a scalar, the new tensor's shape will be a vector of - /// length `depth`. - /// - /// If `indices` is a vector of length `features`, the output shape will be: - /// features x depth, if axis == -1 - /// depth x features, if axis == 0 - /// - /// If `indices` is a matrix (batch) with shape `[batch, features]`, the - /// output shape will be: - /// batch x features x depth, if axis == -1 - /// batch x depth x features, if axis == 1 - /// depth x batch x features, if axis == 0 - /// - /// - Parameters: - /// - indices: A `Tensor` of indices. - /// - depth: A scalar defining the depth of the one hot dimension. - /// - onValue: A scalar defining the value at the location referred to by - /// some index in `indices`. - /// - offValue: A scalar defining the value at a location that is not - /// referred to by any index in `indices`. - /// - axis: The axis to fill. The default is `-1`, a new inner-most axis. 
- /// - @inlinable @inline(__always) - init(oneHotAtIndices indices: Tensor, depth: Int, - onValue: Scalar = 1, offValue: Scalar = 0, axis: Int = -1) { - self = Raw.oneHot( - indices: indices, - depth: Tensor(Int32(depth)), - onValue: Tensor(onValue), - offValue: Tensor(offValue), - axis: Int64(axis) - ) - } -} - //===----------------------------------------------------------------------===// // Shape transformations //===----------------------------------------------------------------------===// From a89791824cf563e422ae951266e762aedd481738 Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Sat, 20 Apr 2019 12:27:18 -0400 Subject: [PATCH 06/30] Moved some more stuff to swift-apis. --- stdlib/public/TensorFlow/Gradients.swift | 34 ----- stdlib/public/TensorFlow/Tensor.swift | 185 +---------------------- 2 files changed, 5 insertions(+), 214 deletions(-) diff --git a/stdlib/public/TensorFlow/Gradients.swift b/stdlib/public/TensorFlow/Gradients.swift index 9abbdf566d20f..55866b6de6a9a 100644 --- a/stdlib/public/TensorFlow/Gradients.swift +++ b/stdlib/public/TensorFlow/Gradients.swift @@ -518,40 +518,6 @@ extension Tensor where Scalar : TensorFlowFloatingPoint { } } -//===----------------------------------------------------------------------===// -// Shape transformations -//===----------------------------------------------------------------------===// - -extension Tensor where Scalar : TensorFlowFloatingPoint { - @inlinable - func _vjpReshaped( - toShape newShape: Tensor - ) -> (Tensor, (Tensor) -> Tensor) { - let value = reshaped(toShape: newShape) - return (value, { [shape = shapeTensor] v in - v.reshaped(toShape: shape) - }) - } - - @inlinable - func _vjpSqueezingShape(at axes: [Int]) -> (Tensor, (Tensor) -> Tensor) { - let value = squeezingShape(at: axes) - return (value, { [shape = shapeTensor] v in - v.reshaped(toShape: shape) - }) - } - - @inlinable - func _vjpExpandingShape( - at shapeIndex: Int - ) -> (Tensor, (Tensor) -> Tensor) { - let value = expandingShape(at: shapeIndex) - return (value, { v in - v.squeezingShape(at: shapeIndex) - }) - } -} - //===----------------------------------------------------------------------===// // Reduction //===----------------------------------------------------------------------===// diff --git a/stdlib/public/TensorFlow/Tensor.swift b/stdlib/public/TensorFlow/Tensor.swift index 801bd6f3a614b..badfeda117a83 100644 --- a/stdlib/public/TensorFlow/Tensor.swift +++ b/stdlib/public/TensorFlow/Tensor.swift @@ -45,7 +45,7 @@ public struct Tensor : TensorProtocol { } //===----------------------------------------------------------------------===// -// Compiler intrinsics +// Compiler Intrinsics //===----------------------------------------------------------------------===// // // By default, when a `Tensor` value is implicitly passed between host and @@ -149,7 +149,7 @@ func _TFHoistable(_ fn: () -> TensorHandle) } //===----------------------------------------------------------------------===// -// Memory transfer markers +// Memory Transfer Markers //===----------------------------------------------------------------------===// public extension Tensor { @@ -322,7 +322,7 @@ public extension Tensor { } //===----------------------------------------------------------------------===// -// Initialization syntax +// Initialization Syntax //===----------------------------------------------------------------------===// // Background story on `TensorElementLiteral` and why it's necessary: @@ -485,94 +485,10 @@ public extension Tensor { } 
//===----------------------------------------------------------------------===// -// Shape transformations +// Shape Transformations //===----------------------------------------------------------------------===// -public extension TensorFlowScalar { - /// Convert to a tensor with the specified rank, with all dimensions equal to - /// 1. - @inlinable @inline(__always) - func makeTensor(rank: Int) -> Tensor { - return Tensor(repeating: self, shape: TensorShape(rank)) - } -} - public extension Tensor { - /// Reshape to the shape of the specified `Tensor`. - /// - Precondition: The number of scalars matches the new shape. - @inlinable @inline(__always) - @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint) - func reshaped(like other: Tensor) -> Tensor { - return reshaped(toShape: other.shapeTensor) - } - - /// Reshape to the specified shape. - /// - Precondition: The number of scalars matches the new shape. - @inlinable @inline(__always) - @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint) - func reshaped(to newShape: TensorShape) -> Tensor { - // TODO(TF-433): Remove workaround for differentiating `map`. - return reshaped(toShape: Tensor({newShape.dimensions.map(Int32.init)}())) - } - - /// Reshape to the specified `Tensor` representing a shape. - /// - Precondition: The number of scalars matches the new shape. - @inlinable @inline(__always) - @differentiable( - wrt: self, vjp: _vjpReshaped(toShape:) - where Scalar : TensorFlowFloatingPoint - ) - func reshaped(toShape newShape: Tensor) -> Tensor { - return Raw.reshape(self, shape: newShape) - } - - /// Return a copy of the tensor collapsed into a 1-D `Tensor`, in row-major - /// order. - @inlinable @inline(__always) - @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint) - func flattened() -> Tensor { - return reshaped(to: [-1]) - } - - /// Returns a rank-lifted `Tensor` with a leading dimension of 1. - @inlinable @inline(__always) - @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint) - func rankLifted() -> Tensor { - return expandingShape(at: 0) - } - - /// Returns a shape-expanded `Tensor`, with a dimension of 1 inserted at the - /// specified shape index. - @inlinable @inline(__always) - @differentiable( - wrt: self, vjp: _vjpExpandingShape(at:) - where Scalar : TensorFlowFloatingPoint - ) - func expandingShape(at shapeIndex: Int) -> Tensor { - return Raw.expandDims(self, dim: Tensor(Int32(shapeIndex))) - } - - /// Remove the specified dimensions of size 1 from the shape of a tensor. If - /// no dimensions are specified, then all dimensions of size 1 will be - /// removed. - @inlinable @inline(__always) - @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint) - func squeezingShape(at axes: Int...) -> Tensor { - return squeezingShape(at: axes) - } - - /// Remove the specified dimensions of size 1 from the shape of a tensor. If - /// no dimensions are specified, then all dimensions of size 1 will be - /// removed. - @inlinable @inline(__always) - @differentiable( - wrt: self, vjp: _vjpSqueezingShape(at:) - where Scalar : TensorFlowFloatingPoint - ) - func squeezingShape(at axes: [Int]) -> Tensor { - return Raw.squeeze(self, squeezeDims: axes.map(Int32.init)) - } - /// Reshape to scalar. /// - Precondition: The tensor has exactly one scalar. 
   @inlinable
   func scalarized() -> Scalar {
     return reshaped(to: []).scalar!
   }
 }
 
 public extension TensorFlowScalar {
   @inlinable @inline(__always)
   init?(_ tensor: Tensor<Self>) {
     guard let scalar = tensor.scalar else {
       return nil
     }
     self = scalar
   }
 }
 
 //===----------------------------------------------------------------------===//
-// Equality
-//===----------------------------------------------------------------------===//
-
-extension Tensor : Equatable where Scalar : Equatable {
-  @inlinable @inline(__always)
-  public static func == (lhs: Tensor, rhs: Tensor) -> Bool {
-    return (lhs .== rhs).all()
-  }
-
-  @inlinable @inline(__always)
-  public static func != (lhs: Tensor, rhs: Tensor) -> Bool {
-    return (lhs .!= rhs).any()
-  }
-}
-
-//===----------------------------------------------------------------------===//
-// Description and visualization
-//===----------------------------------------------------------------------===//
-
-// String conversion.
-extension Tensor : CustomStringConvertible {
-  /// A textual representation of the tensor.
-  ///
-  /// - Note: use `fullDescription` for a non-pretty-printed description showing
-  ///   all scalars.
-  public var description: String {
-    return array.description
-  }
-}
-
-public extension Tensor {
-  /// A textual representation of the tensor. Returns a summarized description
-  /// if `summarizing` is true and the element count exceeds twice the
-  /// `edgeElementCount`.
-  ///
-  /// - Parameters:
-  ///   - lineWidth: The max line width for printing. Used to determine number
-  ///     of scalars to print per line.
-  ///   - edgeElementCount: The maximum number of elements to print before and
-  ///     after summarization via ellipses (`...`).
-  ///   - summarizing: If true, summarize description if element count exceeds
-  ///     twice `edgeElementCount`.
-  func description(
-    lineWidth: Int = 80, edgeElementCount: Int = 3, summarizing: Bool = false
-  ) -> String {
-    return array.description(
-      lineWidth: lineWidth, edgeElementCount: edgeElementCount,
-      summarizing: summarizing)
-  }
-
-  /// A full, non-pretty-printed textual representation of the tensor, showing
-  /// all scalars.
-  var fullDescription: String {
-    return array.fullDescription
-  }
-}
-
-// Xcode Playground display conversion.
-extension Tensor : CustomPlaygroundDisplayConvertible {
-  public var playgroundDescription: Any {
-    return description
-  }
-}
-
-// Mirror representation, used by debugger/REPL.
-extension Tensor : CustomReflectable {
-  public var customMirror: Mirror {
-    return Mirror(self, children: [], displayStyle: .struct)
-  }
-}
-
 //===----------------------------------------------------------------------===//
-// Array conversion
+// Array Conversion
 //===----------------------------------------------------------------------===//
 
 public extension Tensor {
@@ -720,22 +564,3 @@ public extension Tensor {
     return array.scalars
   }
 }
-
-//===----------------------------------------------------------------------===//
-// Codable conformance
-//===----------------------------------------------------------------------===//
-
-extension Tensor : Codable where Scalar : Codable {
-  @inlinable
-  public func encode(to encoder: Encoder) throws {
-    var container = encoder.singleValueContainer()
-    try container.encode(array)
-  }
-
-  @inlinable
-  public init(from decoder: Decoder) throws {
-    let container = try decoder.singleValueContainer()
-    let array = try container.decode(ShapedArray<Scalar>.self)
-    self.init(array)
-  }
-}

From da184a8dda15982615553ba63f3d6cefae7e5912 Mon Sep 17 00:00:00 2001
From: Anthony Platanios
Date: Sat, 20 Apr 2019 12:57:16 -0400
Subject: [PATCH 07/30] Moved some more stuff to swift-apis.
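
Moves the additive-group and vector-space conformances, the scalar operator
overloads, the element-wise comparison and logical operators, and their VJPs.
As an illustrative sketch only (assuming a toolchain whose `TensorFlow`
module ships the swift-apis definitions), the moved operators keep their
broadcasting and differentiation behavior:

  import TensorFlow

  let m = Tensor<Float>(shape: [2, 2], scalars: [1, 2, 3, 4])

  print(m + 1)           // scalar is broadcast: [[2, 3], [4, 5]]
  print(2 * m)           // [[2, 4], [6, 8]]
  print(m .> 2)          // element-wise: [[false, false], [true, true]]
  print((m .> 2).any())  // true

  // The scalar-overload VJPs reduce the incoming gradient with sum(), so
  // each gradient keeps the shape of its argument.
  print(gradient(at: m) { (2 * $0 + 1).sum() })  // [[2.0, 2.0], [2.0, 2.0]]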
--- stdlib/public/TensorFlow/Gradients.swift | 107 ----- stdlib/public/TensorFlow/Ops.swift | 511 ----------------------- stdlib/public/TensorFlow/Tensor.swift | 2 - 3 files changed, 620 deletions(-) diff --git a/stdlib/public/TensorFlow/Gradients.swift b/stdlib/public/TensorFlow/Gradients.swift index 55866b6de6a9a..96fc13891dc24 100644 --- a/stdlib/public/TensorFlow/Gradients.swift +++ b/stdlib/public/TensorFlow/Gradients.swift @@ -202,113 +202,6 @@ public func gradient( // Elementwise binary //===----------------------------------------------------------------------===// -extension Tensor where Scalar : TensorFlowFloatingPoint { - @inlinable - static func _vjpAdd( - lhs: Tensor, rhs: Tensor - ) -> (Tensor, (Tensor) -> (Tensor, Tensor)) { - return (lhs + rhs, { - [lhsShape = lhs.shapeTensor, rhsShape = rhs.shapeTensor] v in - return (v.unbroadcast(toShape: lhsShape), v.unbroadcast(toShape: rhsShape)) - }) - } - - @inlinable - static func _vjpSubtract( - lhs: Tensor, rhs: Tensor - ) -> (Tensor, (Tensor) -> (Tensor, Tensor)) { - return (lhs - rhs, { - [lhsShape = lhs.shapeTensor, rhsShape = rhs.shapeTensor] v in - return (v.unbroadcast(toShape: lhsShape), - -v.unbroadcast(toShape: rhsShape)) - }) - } - - @inlinable - static func _vjpMultiply( - lhs: Tensor, rhs: Tensor - ) -> (Tensor, (Tensor) -> (Tensor, Tensor)) { - return (lhs * rhs, { - [lhsShape = lhs.shapeTensor, rhsShape = rhs.shapeTensor] v in - ((rhs * v).unbroadcast(toShape: lhsShape), - (lhs * v).unbroadcast(toShape: rhsShape)) - }) - } - - @inlinable - static func _vjpDivide( - lhs: Tensor, rhs: Tensor - ) -> (Tensor, (Tensor) -> (Tensor, Tensor)) { - return (lhs / rhs, { - [lhsShape = lhs.shapeTensor, rhsShape = rhs.shapeTensor] v in - ((v / rhs).unbroadcast(toShape: lhsShape), - ((-lhs) / rhs.squared() * v).unbroadcast(toShape: rhsShape)) - }) - } -} - -extension Tensor where Scalar : TensorFlowFloatingPoint { - @inlinable - static func _vjpAdd( - lhs: Tensor, rhs: Scalar - ) -> (Tensor, (Tensor) -> (Tensor, Scalar)) { - return (lhs + rhs, { v in (v, v.sum().scalarized()) }) - } - - @inlinable - static func _vjpAdd( - lhs: Scalar, rhs: Tensor - ) -> (Tensor, (Tensor) -> (Scalar, Tensor)) { - return (lhs + rhs, { v in (v.sum().scalarized(), v) }) - } - - @inlinable - static func _vjpSubtract( - lhs: Tensor, rhs: Scalar - ) -> (Tensor, (Tensor) -> (Tensor, Scalar)) { - return (lhs - rhs, { v in (v, 0 - v.sum().scalarized()) }) - } - - @inlinable - static func _vjpSubtract( - lhs: Scalar, rhs: Tensor - ) -> (Tensor, (Tensor) -> (Scalar, Tensor)) { - return (lhs - rhs, { v in (v.sum().scalarized(), 0 - v) }) - } - - @inlinable - static func _vjpMultiply( - lhs: Tensor, rhs: Scalar - ) -> (Tensor, (Tensor) -> (Tensor, Scalar)) { - return (lhs * rhs, { v in (v * rhs, (v * lhs).sum().scalarized()) }) - } - - @inlinable - static func _vjpMultiply( - lhs: Scalar, rhs: Tensor - ) -> (Tensor, (Tensor) -> (Scalar, Tensor)) { - return (lhs * rhs, { v in ((v * rhs).sum().scalarized(), v * lhs) }) - } - - @inlinable - static func _vjpDivide( - lhs: Tensor, rhs: Scalar - ) -> (Tensor, (Tensor) -> (Tensor, Scalar)) { - return (lhs / rhs, { v in - (v / rhs, (v * (0 - lhs) / Tensor(rhs).squared()).sum().scalarized()) - }) - } - - @inlinable - static func _vjpDivide( - lhs: Scalar, rhs: Tensor - ) -> (Tensor, (Tensor) -> (Scalar, Tensor)) { - return (lhs / rhs, { v in - ((v / rhs).sum().scalarized(), v * -lhs / rhs.squared()) - }) - } -} - @inlinable func _vjpMinMaxHelper( _ x: Tensor, _ y: Tensor, originalValue: Tensor, vector: Tensor diff --git 
a/stdlib/public/TensorFlow/Ops.swift b/stdlib/public/TensorFlow/Ops.swift index 183c07c774a8b..08324a91b7afe 100644 --- a/stdlib/public/TensorFlow/Ops.swift +++ b/stdlib/public/TensorFlow/Ops.swift @@ -48,264 +48,6 @@ infix operator .== : ComparisonPrecedence infix operator .!= : ComparisonPrecedence infix operator .= -// TODO: -// - Consider explicit broadcasting for elementwise binary ops when -// scalarization and rank getter are implemented. - -//===----------------------------------------------------------------------===// -// Additive group -//===----------------------------------------------------------------------===// - -extension Tensor : AdditiveArithmetic where Scalar : Numeric { - /// A scalar zero tensor. - @inlinable - public static var zero: Tensor { - @inline(__always) - get { - return Tensor(zeros: []) - } - } - - /// Adds two tensors and produces their sum. - /// - Note: `+` supports broadcasting. - @inlinable @inline(__always) - @differentiable( - vjp: _vjpAdd(lhs:rhs:) - where Scalar : TensorFlowFloatingPoint - ) - public static func + (lhs: Tensor, rhs: Tensor) -> Tensor { - return Raw.add(lhs, rhs) - } - - /// Subtracts one tensor from another and produces their difference. - /// - Note: `-` supports broadcasting. - @inlinable @inline(__always) - @differentiable( - vjp: _vjpSubtract(lhs:rhs:) - where Scalar : TensorFlowFloatingPoint - ) - public static func - (lhs: Tensor, rhs: Tensor) -> Tensor { - return Raw.sub(lhs, rhs) - } -} - -//===----------------------------------------------------------------------===// -// Vector space -//===----------------------------------------------------------------------===// - -extension Tensor : VectorNumeric where Scalar : Numeric { - /// Multiplies the scalar with every scalar of the tensor and produces the - /// product. - @inlinable @inline(__always) - @differentiable( - vjp: _vjpMultiply(lhs:rhs:) - where Scalar : TensorFlowFloatingPoint - ) - public static func * (lhs: Scalar, rhs: Tensor) -> Tensor { - return Tensor(lhs) * rhs - } -} - -extension Tensor : ShapedVectorNumeric where Scalar : Numeric {} - -extension Tensor : Differentiable where Scalar : TensorFlowFloatingPoint { - public typealias TangentVector = Tensor - public typealias CotangentVector = Tensor - public typealias AllDifferentiableVariables = Tensor - @inlinable @inline(__always) - public func tangentVector(from cotangent: CotangentVector) -> TangentVector { - return cotangent - } -} - -//===----------------------------------------------------------------------===// -// Additional element-wise operators -//===----------------------------------------------------------------------===// - -public extension Tensor where Scalar : Numeric { - /// Adds the scalar to every scalar of the tensor and produces the sum. - @inlinable @inline(__always) - @differentiable(vjp: _vjpAdd(lhs:rhs:) where Scalar : TensorFlowFloatingPoint) - static func + (lhs: Scalar, rhs: Tensor) -> Tensor { - return Tensor(lhs) + rhs - } - - /// Adds the scalar to every scalar of the tensor and produces the sum. - @inlinable @inline(__always) - @differentiable(vjp: _vjpAdd(lhs:rhs:) where Scalar : TensorFlowFloatingPoint) - static func + (lhs: Tensor, rhs: Scalar) -> Tensor { - return lhs + Tensor(rhs) - } - - /// Subtracts the scalar from every scalar of the tensor and produces the - /// difference. 
- @inlinable @inline(__always) - @differentiable( - vjp: _vjpSubtract(lhs:rhs:) - where Scalar : TensorFlowFloatingPoint - ) - static func - (lhs: Scalar, rhs: Tensor) -> Tensor { - return Tensor(lhs) - rhs - } - - /// Subtracts the scalar from every scalar of the tensor and produces the - /// difference. - @inlinable @inline(__always) - @differentiable( - vjp: _vjpSubtract(lhs:rhs:) - where Scalar : TensorFlowFloatingPoint - ) - static func - (lhs: Tensor, rhs: Scalar) -> Tensor { - return lhs - Tensor(rhs) - } - - /// Adds two tensors and stores the result in the left-hand-side variable. - /// - Note: `+=` supports broadcasting. - @inlinable @inline(__always) - static func += (lhs: inout Tensor, rhs: Tensor) { - lhs = lhs + rhs - } - - /// Adds the scalar to every scalar of the tensor and stores the result in the - /// left-hand-side variable. - @inlinable @inline(__always) - static func += (lhs: inout Tensor, rhs: Scalar) { - lhs = lhs + rhs - } - - /// Subtracts the second tensor from the first and stores the result in the - /// left-hand-side variable. - /// - Note: `-=` supports broadcasting. - @inlinable @inline(__always) - static func -= (lhs: inout Tensor, rhs: Tensor) { - lhs = lhs - rhs - } - - /// Subtracts the scalar from every scalar of the tensor and stores the result - /// in the left-hand-side variable. - @inlinable @inline(__always) - static func -= (lhs: inout Tensor, rhs: Scalar) { - lhs = lhs - rhs - } - - /// Multiplies two tensors and produces their product. - /// - Note: `*` supports broadcasting. - @inlinable @inline(__always) - @differentiable( - vjp: _vjpMultiply(lhs:rhs:) - where Scalar : TensorFlowFloatingPoint - ) - static func * (lhs: Tensor, rhs: Tensor) -> Tensor { - return Raw.mul(lhs, rhs) - } - - /// Multiplies the scalar with every scalar of the tensor and produces the - /// product. - @inlinable @inline(__always) - @differentiable( - vjp: _vjpMultiply(lhs:rhs:) - where Scalar : TensorFlowFloatingPoint - ) - static func * (lhs: Tensor, rhs: Scalar) -> Tensor { - return lhs * Tensor(rhs) - } - - /// Multiplies two tensors and stores the result in the left-hand-side - /// variable. - /// - Note: `*=` supports broadcasting. - @inlinable @inline(__always) - static func *= (lhs: inout Tensor, rhs: Tensor) { - lhs = lhs * rhs - } - - @inlinable @inline(__always) - static func *= (lhs: inout Tensor, rhs: Scalar) { - lhs = lhs * rhs - } - - /// Returns the quotient of dividing the first tensor by the second. - /// - Note: `/` supports broadcasting. - @inlinable @inline(__always) - @differentiable( - vjp: _vjpDivide(lhs:rhs:) - where Scalar : TensorFlowFloatingPoint - ) - static func / (lhs: Tensor, rhs: Tensor) -> Tensor { - return Raw.div(lhs, rhs) - } - - /// Returns the quotient of dividing the scalar by the tensor, broadcasting - /// the scalar. - @inlinable @inline(__always) - @differentiable( - vjp: _vjpDivide(lhs:rhs:) - where Scalar : TensorFlowFloatingPoint - ) - static func / (lhs: Scalar, rhs: Tensor) -> Tensor { - return Tensor(lhs) / rhs - } - - /// Returns the quotient of dividing the tensor by the scalar, broadcasting - /// the scalar. - @inlinable @inline(__always) - @differentiable( - vjp: _vjpDivide(lhs:rhs:) - where Scalar : TensorFlowFloatingPoint - ) - static func / (lhs: Tensor, rhs: Scalar) -> Tensor { - return lhs / Tensor(rhs) - } - - /// Divides the first tensor by the second and stores the quotient in the - /// left-hand-side variable. 
- @inlinable @inline(__always) - static func /= (lhs: inout Tensor, rhs: Tensor) { - lhs = lhs / rhs - } - - /// Divides the tensor by the scalar, broadcasting the scalar, and stores the - /// quotient in the left-hand-side variable. - @inlinable @inline(__always) - static func /= (lhs: inout Tensor, rhs: Scalar) { - lhs = lhs / rhs - } - - /// Returns the remainder of dividing the first tensor by the second. - /// - Note: `%` supports broadcasting. - @inlinable @inline(__always) - static func % (lhs: Tensor, rhs: Tensor) -> Tensor { - return Raw.mod(lhs, rhs) - } - - /// Returns the remainder of dividing the tensor by the scalar, broadcasting - /// the scalar. - @inlinable @inline(__always) - static func % (lhs: Tensor, rhs: Scalar) -> Tensor { - return lhs % Tensor(rhs) - } - - /// Returns the remainder of dividing the scalar by the tensor, broadcasting - /// the scalar. - @inlinable @inline(__always) - static func % (lhs: Scalar, rhs: Tensor) -> Tensor { - return Tensor(lhs) % rhs - } - - /// Divides the first tensor by the second and stores the remainder in the - /// left-hand-side variable. - @inlinable @inline(__always) - static func %= (lhs: inout Tensor, rhs: Tensor) { - lhs = lhs % rhs - } - - /// Divides the tensor by the scalar and stores the remainder in the - /// left-hand-side variable. - @inlinable @inline(__always) - static func %= (lhs: inout Tensor, rhs: Scalar) { - lhs = lhs % rhs - } -} - //===----------------------------------------------------------------------===// // Linear algebra //===----------------------------------------------------------------------===// @@ -345,259 +87,6 @@ public extension Tensor where Scalar : Numeric { } } -//===----------------------------------------------------------------------===// -// Element-wise binary comparison -//===----------------------------------------------------------------------===// - -public extension Tensor where Scalar : Numeric & Comparable { - /// Computes `lhs < rhs` element-wise and returns a `Tensor` of Boolean - /// scalars. - @inlinable @inline(__always) - static func .< (lhs: Tensor, rhs: Tensor) -> Tensor { - return Raw.less(lhs, rhs) - } - - /// Computes `lhs <= rhs` element-wise and returns a `Tensor` of Boolean - /// scalars. - @inlinable @inline(__always) - static func .<= (lhs: Tensor, rhs: Tensor) -> Tensor { - return Raw.lessEqual(lhs, rhs) - } - - /// Computes `lhs > rhs` element-wise and returns a `Tensor` of Boolean - /// scalars. - @inlinable @inline(__always) - static func .> (lhs: Tensor, rhs: Tensor) -> Tensor { - return Raw.greater(lhs, rhs) - } - - /// Computes `lhs >= rhs` element-wise and returns a `Tensor` of Boolean - /// scalars. - @inlinable @inline(__always) - static func .>= (lhs: Tensor, rhs: Tensor) -> Tensor { - return Raw.greaterEqual(lhs, rhs) - } - - /// Computes `lhs < rhs` element-wise and returns a `Tensor` of Boolean - /// scalars. - /// - Note: `.<` supports broadcasting. - @inlinable @inline(__always) - static func .< (lhs: Scalar, rhs: Tensor) -> Tensor { - return Raw.less(Tensor(lhs), rhs) - } - - /// Computes `lhs <= rhs` element-wise and returns a `Tensor` of Boolean - /// scalars. - /// - Note: `.<=` supports broadcasting. - @inlinable @inline(__always) - static func .<= (lhs: Scalar, rhs: Tensor) -> Tensor { - return Raw.lessEqual(Tensor(lhs), rhs) - } - - /// Computes `lhs > rhs` element-wise and returns a `Tensor` of Boolean - /// scalars. - /// - Note: `.>` supports broadcasting. 
- @inlinable @inline(__always) - static func .> (lhs: Scalar, rhs: Tensor) -> Tensor { - return Raw.greater(Tensor(lhs), rhs) - } - - /// Computes `lhs >= rhs` element-wise and returns a `Tensor` of Boolean - /// scalars. - /// - Note: `.>=` supports broadcasting. - @inlinable @inline(__always) - static func .>= (lhs: Scalar, rhs: Tensor) -> Tensor { - return Raw.greaterEqual(Tensor(lhs), rhs) - } - - /// Computes `lhs < rhs` element-wise and returns a `Tensor` of Boolean - /// scalars. - /// - Note: `.<` supports broadcasting. - @inlinable @inline(__always) - static func .< (lhs: Tensor, rhs: Scalar) -> Tensor { - return Raw.less(lhs, Tensor(rhs)) - } - - /// Computes `lhs <= rhs` element-wise and returns a `Tensor` of Boolean - /// scalars. - /// - Note: `.<=` supports broadcasting. - @inlinable @inline(__always) - static func .<= (lhs: Tensor, rhs: Scalar) -> Tensor { - return Raw.lessEqual(lhs, Tensor(rhs)) - } - - /// Computes `lhs > rhs` element-wise and returns a `Tensor` of Boolean - /// scalars. - /// - Note: `.>` supports broadcasting. - @inlinable @inline(__always) - static func .> (lhs: Tensor, rhs: Scalar) -> Tensor { - return Raw.greater(lhs, Tensor(rhs)) - } - - /// Computes `lhs >= rhs` element-wise and returns a `Tensor` of Boolean - /// scalars. - /// - Note: `.>=` supports broadcasting. - @inlinable @inline(__always) - static func .>= (lhs: Tensor, rhs: Scalar) -> Tensor { - return Raw.greaterEqual(lhs, Tensor(rhs)) - } -} - -extension Tensor : Comparable where Scalar : Numeric & Comparable { - /// Returns a Boolean value indicating whether the value of the first argument - /// is lexicographically less than that of the second argument. - @inlinable @inline(__always) - public static func < (lhs: Tensor, rhs: Tensor) -> Bool { - return (lhs .< rhs).all() - } - - /// Returns a Boolean value indicating whether the value of the first argument - /// is lexicographically less than or equal to that of the second argument. - @inlinable @inline(__always) - public static func <= (lhs: Tensor, rhs: Tensor) -> Bool { - return (lhs .<= rhs).all() - } - - /// Returns a Boolean value indicating whether the value of the first argument - /// is lexicographically greater than that of the second argument. - @inlinable @inline(__always) - public static func > (lhs: Tensor, rhs: Tensor) -> Bool { - return (lhs .> rhs).all() - } - - /// Returns a Boolean value indicating whether the value of the first argument - /// is lexicographically greater than or equal to that of the second argument. - @inlinable @inline(__always) - public static func >= (lhs: Tensor, rhs: Tensor) -> Bool { - return (lhs .>= rhs).all() - } -} - -public extension Tensor where Scalar : Numeric & Comparable { - /// Returns a Boolean value indicating whether the value of the first argument - /// is lexicographically less than that of the second argument. - @inlinable @inline(__always) - static func < (lhs: Tensor, rhs: Scalar) -> Bool { - return (lhs .< rhs).all() - } - - /// Returns a Boolean value indicating whether the value of the first argument - /// is lexicographically less than or equal to that of the second argument. - @inlinable @inline(__always) - static func <= (lhs: Tensor, rhs: Scalar) -> Bool { - return (lhs .<= rhs).all() - } - - /// Returns a Boolean value indicating whether the value of the first argument - /// is lexicographically greater than that of the second argument. 
-  @inlinable @inline(__always)
-  static func > (lhs: Tensor, rhs: Scalar) -> Bool {
-    return (lhs .> rhs).all()
-  }
-
-  /// Returns a Boolean value indicating whether the value of the first argument
-  /// is lexicographically greater than or equal to that of the second argument.
-  @inlinable @inline(__always)
-  static func >= (lhs: Tensor, rhs: Scalar) -> Bool {
-    return (lhs .>= rhs).all()
-  }
-}
-
-public extension Tensor where Scalar : Equatable {
-  /// Computes `lhs == rhs` element-wise and returns a `Tensor` of Boolean
-  /// scalars.
-  /// - Note: `.==` supports broadcasting.
-  @inlinable @inline(__always)
-  static func .==(lhs: Tensor, rhs: Tensor) -> Tensor<Bool> {
-    return Raw.equal(lhs, rhs)
-  }
-
-  /// Computes `lhs != rhs` element-wise and returns a `Tensor` of Boolean
-  /// scalars.
-  /// - Note: `.!=` supports broadcasting.
-  @inlinable @inline(__always)
-  static func .!=(lhs: Tensor, rhs: Tensor) -> Tensor<Bool> {
-    return Raw.notEqual(lhs, rhs)
-  }
-
-  /// Computes `lhs == rhs` element-wise and returns a `Tensor` of Boolean
-  /// scalars.
-  /// - Note: `.==` supports broadcasting.
-  @inlinable @inline(__always)
-  static func .==(lhs: Scalar, rhs: Tensor) -> Tensor<Bool> {
-    return Tensor(lhs) .== rhs
-  }
-
-  /// Computes `lhs != rhs` element-wise and returns a `Tensor` of Boolean
-  /// scalars.
-  /// - Note: `.!=` supports broadcasting.
-  @inlinable @inline(__always)
-  static func .!=(lhs: Scalar, rhs: Tensor) -> Tensor<Bool> {
-    return Tensor(lhs) .!= rhs
-  }
-
-  /// Computes `lhs == rhs` element-wise and returns a `Tensor` of Boolean
-  /// scalars.
-  /// - Note: `.==` supports broadcasting.
-  @inlinable @inline(__always)
-  static func .==(lhs: Tensor, rhs: Scalar) -> Tensor<Bool> {
-    return lhs .== Tensor(rhs)
-  }
-
-  /// Computes `lhs != rhs` element-wise and returns a `Tensor` of Boolean
-  /// scalars.
-  /// - Note: `.!=` supports broadcasting.
-  @inlinable @inline(__always)
-  static func .!=(lhs: Tensor, rhs: Scalar) -> Tensor<Bool> {
-    return lhs .!= Tensor(rhs)
-  }
-}
-
-infix operator ≈ : ComparisonPrecedence
-
-public extension Tensor where Scalar : FloatingPoint & Equatable {
-  /// Returns a `Tensor` of Boolean values indicating whether the elements of
-  /// `self` are approximately equal to those of `other`.
-  @inlinable @inline(__always)
-  func elementsApproximatelyEqual(_ other: Tensor,
-                                  tolerance: Double = 0.00001) -> Tensor<Bool> {
-    return Raw.approximateEqual(self, other, tolerance: tolerance)
-  }
-}
-
-public extension Tensor where Scalar == Bool {
-  /// Computes `!self` element-wise.
-  @inlinable @inline(__always)
-  func elementsLogicalNot() -> Tensor {
-    return Raw.logicalNot(self)
-  }
-
-  /// Computes `self && other` element-wise.
-  /// - Note: `&&` supports broadcasting.
-  @inlinable @inline(__always)
-  func elementsLogicalAnd(_ other: Tensor) -> Tensor {
-    return Raw.logicalAnd(self, other)
-  }
-
-  /// Computes `self && other` element-wise, broadcasting `other`.
-  @inlinable @inline(__always)
-  func elementsLogicalAnd(_ other: Scalar) -> Tensor {
-    return elementsLogicalAnd(Tensor(other))
-  }
-
-  /// Computes `self || other` element-wise.
-  @inlinable @inline(__always)
-  func elementsLogicalOr(_ other: Tensor) -> Tensor {
-    return Raw.logicalOr(self, other)
-  }
-
-  /// Computes `self || other` element-wise, broadcasting `other`.
- @inlinable @inline(__always) - func elementsLogicalOr(_ other: Scalar) -> Tensor { - return elementsLogicalOr(Tensor(other)) - } -} - //===----------------------------------------------------------------------===// // Transforms //===----------------------------------------------------------------------===// diff --git a/stdlib/public/TensorFlow/Tensor.swift b/stdlib/public/TensorFlow/Tensor.swift index badfeda117a83..d2a4cd064845d 100644 --- a/stdlib/public/TensorFlow/Tensor.swift +++ b/stdlib/public/TensorFlow/Tensor.swift @@ -23,8 +23,6 @@ import Glibc #endif import CTensorFlow -infix operator .== : ComparisonPrecedence - //===----------------------------------------------------------------------===// // Tensor //===----------------------------------------------------------------------===// From cde450e61f3638c39cce25bd7d2e5e5d77ac17f1 Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Sat, 20 Apr 2019 13:32:09 -0400 Subject: [PATCH 08/30] Moved some more stuff to swift-apis. --- stdlib/public/TensorFlow/Gradients.swift | 187 ----------- stdlib/public/TensorFlow/Ops.swift | 404 ----------------------- 2 files changed, 591 deletions(-) diff --git a/stdlib/public/TensorFlow/Gradients.swift b/stdlib/public/TensorFlow/Gradients.swift index 96fc13891dc24..a697533138071 100644 --- a/stdlib/public/TensorFlow/Gradients.swift +++ b/stdlib/public/TensorFlow/Gradients.swift @@ -198,164 +198,6 @@ public func gradient( return { x, y, z in gradient(at: x, y, z, in: f) } } -//===----------------------------------------------------------------------===// -// Elementwise binary -//===----------------------------------------------------------------------===// - -@inlinable -func _vjpMinMaxHelper( - _ x: Tensor, _ y: Tensor, originalValue: Tensor, vector: Tensor -) -> (Tensor, Tensor) { - let denom = 1 + Tensor(x .== y) - let dfdx = vector * Tensor(x .== originalValue) / denom - let dfdy = vector * Tensor(y .== originalValue) / denom - return (dfdx.unbroadcast(like: x), dfdy.unbroadcast(like: y)) -} - -@inlinable -func _vjpMax( - _ x: Tensor, _ y: Tensor -) -> (Tensor, (Tensor) -> (Tensor, Tensor)) { - let value = max(x, y) - return (value, - { v in _vjpMinMaxHelper(x, y, originalValue: value, vector: v) }) -} - -@inlinable -func _vjpMin( - _ x: Tensor, _ y: Tensor -) -> (Tensor, (Tensor) -> (Tensor, Tensor)) { - let value = min(x, y) - return (value, - { v in _vjpMinMaxHelper(x, y, originalValue: value, vector: v) }) -} - -@inlinable -func _vjpPow( - _ x: Tensor, _ y: Tensor -) -> (Tensor, (Tensor) -> (Tensor, Tensor)) { - let value = pow(x, y) - return (value, { v in - ((v * y * pow(x, y-1)).unbroadcast(like: x), - (v * log(x) * value).unbroadcast(like: y)) - }) -} - -//===----------------------------------------------------------------------===// -// Elementwise unary -//===----------------------------------------------------------------------===// - -extension Tensor where Scalar : TensorFlowFloatingPoint { - @inlinable - static func _vjpNegate(_ x: Tensor) -> (Tensor, (Tensor) -> Tensor) { - return (-x, { v in -v }) - } -} - -@inlinable -func _vjpAbs( - _ x: Tensor -) -> (Tensor, (Tensor) -> Tensor) { - let sign = Raw.sign(x) - return (abs(x), { v in v * sign }) -} - -@inlinable -func _vjpLog( - _ x: Tensor -) -> (Tensor, (Tensor) -> Tensor) { - return (log(x), { v in v / x }) -} - -@inlinable -func _vjpSin( - _ x: Tensor -) -> (Tensor, (Tensor) -> Tensor) { - return (sin(x), { v in v * cos(x) }) -} - -@inlinable -func _vjpCos( - _ x: Tensor -) -> (Tensor, (Tensor) -> Tensor) { - return 
(cos(x), { v in -v * sin(x) }) -} - -@inlinable -func _vjpTan( - _ x: Tensor -) -> (Tensor, (Tensor) -> Tensor) { - let value = tan(x) - return (value, { v in v * (1 + value.squared()) }) -} - -@inlinable -func _vjpSinh( - _ x: Tensor -) -> (Tensor, (Tensor) -> Tensor) { - return (sinh(x), { v in v * cosh(x) }) -} - -@inlinable -func _vjpCosh( - _ x: Tensor -) -> (Tensor, (Tensor) -> Tensor) { - return (cosh(x), { v in v * sinh(x) }) -} - -@inlinable -func _vjpTanh( - _ x: Tensor -) -> (Tensor, (Tensor) -> Tensor) { - let value = tanh(x) - return (value, { v in v * (1 - value.squared()) }) -} - -@inlinable -func _vjpExp( - _ x: Tensor -) -> (Tensor, (Tensor) -> Tensor) { - let value = exp(x) - return (value, { v in value * v }) -} - -@inlinable -func _vjpCeil( - _ x: Tensor -) -> (Tensor, (Tensor) -> Tensor) { - return (ceil(x), { _ in Tensor(0).broadcast(like: x) }) -} - -@inlinable -func _vjpFloor( - _ x: Tensor -) -> (Tensor, (Tensor) -> Tensor) { - return (floor(x), { _ in Tensor(0).broadcast(like: x) }) -} - -@inlinable -func _vjpSqrt( - _ x: Tensor -) -> (Tensor, (Tensor) -> Tensor) { - let value = sqrt(x) - return (value, { v in v / (2 * value) }) -} - -@inlinable -func _vjpRsqrt( - _ x: Tensor -) -> (Tensor, (Tensor) -> Tensor) { - let value = rsqrt(x) - return (value, { v in -v / 2 * value }) -} - -extension Tensor where Scalar : TensorFlowFloatingPoint { - @inlinable - func _vjpSquared() -> (Tensor, (Tensor) -> Tensor) { - return (squared(), { 2 * self * $0 }) - } -} - //===----------------------------------------------------------------------===// // Linear algebra //===----------------------------------------------------------------------===// @@ -380,35 +222,6 @@ extension Tensor where Scalar : TensorFlowFloatingPoint { ) -> (Tensor, (Tensor) -> (Tensor, Tensor)) { return _vjpMatmul(lhs, rhs) } - - @inlinable - func _vjpTransposed( - withPermutations permutations: Tensor - ) -> (Tensor, (Tensor) -> Tensor) { - let value = transposed(withPermutations: permutations) - return (value, { $0.transposed(withPermutations: permutations) }) - } - - @inlinable - func _vjpTransposed( - withPermutations permutations: [Int] - ) -> (Tensor, (Tensor) -> Tensor) { - let value = transposed(withPermutations: permutations) - return (value, { $0.transposed(withPermutations: permutations) }) - } - - @inlinable - func _vjpTransposed( - withPermutations permutations: Int... - ) -> (Tensor, (Tensor) -> Tensor) { - let value = transposed(withPermutations: permutations) - return (value, { $0.transposed(withPermutations: permutations) }) - } - - @inlinable - func _vjpTransposed() -> (Tensor, (Tensor) -> Tensor) { - return (transposed(), { $0.transposed() }) - } } //===----------------------------------------------------------------------===// diff --git a/stdlib/public/TensorFlow/Ops.swift b/stdlib/public/TensorFlow/Ops.swift index 08324a91b7afe..9827420244e27 100644 --- a/stdlib/public/TensorFlow/Ops.swift +++ b/stdlib/public/TensorFlow/Ops.swift @@ -87,410 +87,6 @@ public extension Tensor where Scalar : Numeric { } } -//===----------------------------------------------------------------------===// -// Transforms -//===----------------------------------------------------------------------===// - -public extension Tensor { - /// Returns a transposed tensor, with dimensions permuted in the specified - /// order. 
- @inlinable @inline(__always) - @differentiable( - wrt: self, vjp: _vjpTransposed(withPermutations:) - where Scalar : TensorFlowFloatingPoint - ) - func transposed( - withPermutations permutations: Tensor - ) -> Tensor { - return Raw.transpose(self, perm: permutations) - } - - /// Returns a transposed tensor, with dimensions permuted in the specified - /// order. - @inlinable @inline(__always) - @differentiable( - wrt: self, vjp: _vjpTransposed(withPermutations:) - where Scalar : TensorFlowFloatingPoint - ) - func transposed(withPermutations permutations: [Int]) -> Tensor { - let permutations = permutations.map(Int32.init) - return transposed(withPermutations: Tensor(permutations)) - } - - /// Returns a transposed tensor, with dimensions permuted in the specified - /// order. - @inlinable @inline(__always) - @differentiable( - wrt: self, vjp: _vjpTransposed(withPermutations:) - where Scalar : TensorFlowFloatingPoint - ) - func transposed(withPermutations permutations: Int...) -> Tensor { - return transposed(withPermutations: permutations) - } - - /// Returns a transposed tensor, with dimensions permuted in reverse order. - @inlinable @inline(__always) - @differentiable( - wrt: self, vjp: _vjpTransposed() - where Scalar : TensorFlowFloatingPoint - ) - func transposed() -> Tensor { - let defaultPermutations = rankTensor - 1 - Tensor( - rangeFrom: 0, to: Int32(rank), stride: 1 - ) - return transposed(withPermutations: Tensor(defaultPermutations)) - } -} - -public extension Tensor { - /// Concatenates tensors along the specified axis. - /// - Precondition: The tensors must have the same dimensions, except for the - /// specified axis. - /// - Precondition: The axis must be in the range `-rank.. Tensor { - return Raw.concatV2([self, other], axis: Tensor(Int32(axis))) - } - - /// Concatenation operator. - /// - Note: `++` is a custom operator that does not exist in Swift, but does - /// in Haskell/Scala. Its addition is not an insignificant language change - /// and may be controversial. The existence/naming of `++` will be discussed - /// during a later API design phase. - @inlinable @inline(__always) - @differentiable(where Scalar : TensorFlowFloatingPoint) - static func ++ (lhs: Tensor, rhs: Tensor) -> Tensor { - return lhs.concatenated(with: rhs) - } -} - -internal extension Tensor where Scalar : TensorFlowFloatingPoint { - @inlinable @inline(__always) - func _vjpConcatenated(with other: Tensor, alongAxis axis: Int) - -> (Tensor, (Tensor) -> (Tensor, Tensor)) { - let idx = axis < 0 ? axis + rank : axis - let splits = Tensor([shapeTensor[idx], other.shapeTensor[idx]]) - return (concatenated(with: other, alongAxis: axis), { result in - let ret: (TensorHandle, TensorHandle) = #tfop("SplitV", - result, - splits, - Tensor(Int32(axis)), - num_split: Int64(2), - T$dtype: Scalar.tensorFlowDataType, - Tlen$dtype: Int32.tensorFlowDataType) - return (Tensor(handle: ret.0), Tensor(handle: ret.1)) - }) - } -} - -//===----------------------------------------------------------------------===// -// Element-wise math functions -//===----------------------------------------------------------------------===// - -// Export Glibc/Darwin math functions. We should not require users to import -// Foundation/Darwin/Glibc in order to use scalar math functions. -// -#if os(macOS) || os(iOS) || os(watchOS) || os(tvOS) -@_exported import Darwin.C -#else -@_exported import Glibc -#endif -// -// FIXME(rxwei): Scoped imports are not yet supported in parseable module -// interfaces, so `@_exported import` won't work. 
When that becomes supported, -// switch to `@_exported import` by removing `import Darwin.C/Glibc` above and -// uncommenting the following lines. In the meantime, consider using indirect -// wrappers for each function so that random libc symbols won't be leaked to -// users' code completion. -// -// #if os(macOS) || os(iOS) || os(watchOS) || os(tvOS) -// @_exported import func Darwin.C.sin -// @_exported import func Darwin.C.cos -// @_exported import func Darwin.C.tan -// @_exported import func Darwin.C.sinf -// @_exported import func Darwin.C.cosf -// @_exported import func Darwin.C.tanf -// @_exported import func Darwin.C.sinh -// @_exported import func Darwin.C.cosh -// @_exported import func Darwin.C.tanh -// @_exported import func Darwin.C.sinhf -// @_exported import func Darwin.C.coshf -// @_exported import func Darwin.C.tanhf -// @_exported import func Darwin.C.log -// @_exported import func Darwin.C.logf -// @_exported import func Darwin.C.exp -// @_exported import func Darwin.C.expf -// @_exported import func Darwin.C.pow -// @_exported import func Darwin.C.powf -// #else -// @_exported import func Glibc.sin -// @_exported import func Glibc.cos -// @_exported import func Glibc.tan -// @_exported import func Glibc.sinf -// @_exported import func Glibc.cosf -// @_exported import func Glibc.tanf -// @_exported import func Glibc.sinh -// @_exported import func Glibc.cosh -// @_exported import func Glibc.tanh -// @_exported import func Glibc.sinhf -// @_exported import func Glibc.coshf -// @_exported import func Glibc.tanhf -// @_exported import func Glibc.log -// @_exported import func Glibc.logf -// @_exported import func Glibc.exp -// @_exported import func Glibc.expf -// @_exported import func Glibc.pow -// @_exported import func Glibc.powf -// #endif - -public extension Tensor where Scalar : SignedNumeric { - /// Computes the negation of the specified tensor element-wise. - @inlinable @inline(__always) - @differentiable( - vjp: _vjpNegate(_:) - where Scalar : TensorFlowFloatingPoint - ) - static prefix func - (rhs: Tensor) -> Tensor { - return Raw.neg(rhs) - } -} - -/// Computes the absolute value of the specified tensor element-wise. -@inlinable @inline(__always) -@differentiable(vjp: _vjpAbs(_:) where T : TensorFlowFloatingPoint) -public func abs(_ x: Tensor) -> Tensor { - return Raw.abs(x) -} - -/// Computes the natural logarithm of the specified tensor element-wise. -@inlinable @inline(__always) -@differentiable(vjp: _vjpLog(_:) where T : TensorFlowFloatingPoint) -public func log(_ x: Tensor) -> Tensor { - return Raw.log(x) -} - -/// Computes `sin` of the specified tensor element-wise. -@inlinable @inline(__always) -@differentiable(vjp: _vjpSin(_:) where T : TensorFlowFloatingPoint) -public func sin(_ x: Tensor) -> Tensor { - return Raw.sin(x) -} - -/// Computes `cos` of the specified tensor element-wise. -@inlinable @inline(__always) -@differentiable(vjp: _vjpCos(_:) where T : TensorFlowFloatingPoint) -public func cos(_ x: Tensor) -> Tensor { - return Raw.cos(x) -} - -/// Computes `tan` of the specified tensor element-wise. -@inlinable @inline(__always) -@differentiable(vjp: _vjpTan(_:) where T : TensorFlowFloatingPoint) -public func tan(_ x: Tensor) -> Tensor { - return Raw.tan(x) -} - -/// Computes `sinh` of the specified tensor element-wise. -@inlinable @inline(__always) -@differentiable(vjp: _vjpSinh(_:) where T : TensorFlowFloatingPoint) -public func sinh(_ x: Tensor) -> Tensor { - return Raw.sinh(x) -} - -/// Computes `cosh` of the specified tensor element-wise. 
-@inlinable @inline(__always) -@differentiable(vjp: _vjpCosh(_:) where T : TensorFlowFloatingPoint) -public func cosh(_ x: Tensor) -> Tensor { - return Raw.cosh(x) -} - -/// Computes `tanh` of the specified tensor element-wise. -@inlinable @inline(__always) -@differentiable(vjp: _vjpTanh(_:) where T : TensorFlowFloatingPoint) -public func tanh(_ x: Tensor) -> Tensor { - return Raw.tanh(x) -} - -/// Computes the square root of the specified tensor element-wise. -@inlinable @inline(__always) -@differentiable(vjp: _vjpSqrt(_:) where T : TensorFlowFloatingPoint) -public func sqrt(_ x: Tensor) -> Tensor { - return Raw.sqrt(x) -} - -/// Computes the inverse square root of the specified tensor element-wise. -@inlinable @inline(__always) -@differentiable(vjp: _vjpRsqrt(_:) where T : TensorFlowFloatingPoint) -public func rsqrt(_ x: Tensor) -> Tensor { - return Raw.rsqrt(x) -} - -/// Computes `exp` of the specified tensor element-wise. -@inlinable @inline(__always) -@differentiable(vjp: _vjpExp(_:) where T : TensorFlowFloatingPoint) -public func exp(_ x: Tensor) -> Tensor { - return Raw.exp(x) -} - -/// Computes the ceiling of the specified tensor element-wise. -@inlinable @inline(__always) -@differentiable(vjp: _vjpCeil(_:) where T : TensorFlowFloatingPoint) -public func ceil(_ x: Tensor) -> Tensor { - return Raw.ceil(x) -} - -/// Computes the floor of the specified tensor element-wise. -@inlinable @inline(__always) -@differentiable(vjp: _vjpFloor(_:) where T : TensorFlowFloatingPoint) -public func floor(_ x: Tensor) -> Tensor { - return Raw.floor(x) -} - -/// Computes the power of the first tensor to the second tensor. -@inlinable @inline(__always) -@differentiable(vjp: _vjpPow(_:_:) where T : TensorFlowFloatingPoint) -public func pow(_ lhs: Tensor, _ rhs: Tensor) -> Tensor - where T : FloatingPoint { - return Raw.pow(lhs, rhs) -} - -/// Computes the power of the scalar to the tensor, broadcasting the scalar. -@inlinable @inline(__always) -// @differentiable(where T : TensorFlowFloatingPoint) -public func pow(_ lhs: T, _ rhs: Tensor) -> Tensor - where T : FloatingPoint { - return pow(Tensor(lhs), rhs) -} - -/// Computes the power of the tensor to the scalar, broadcasting the scalar. -@inlinable @inline(__always) -// @differentiable(where T : TensorFlowFloatingPoint) -public func pow(_ lhs: Tensor, _ rhs: T) -> Tensor - where T : FloatingPoint { - return pow(lhs, Tensor(rhs)) -} - -/// Computes the element-wise maximum of two tensors. -/// - Note: `max` supports broadcasting. -@inlinable @inline(__always) -@differentiable(vjp: _vjpMax(_:_:) where T : TensorFlowFloatingPoint) -public func max(_ lhs: Tensor, _ rhs: Tensor) -> Tensor - where T : Numeric & Comparable { - return Raw.maximum(lhs, rhs) -} - -/// Computes the element-wise maximum of the scalar and the tensor, broadcasting -/// the scalar. -@inlinable @inline(__always) -//@differentiable(where T : TensorFlowFloatingPoint) -public func max(_ lhs: T, _ rhs: Tensor) -> Tensor - where T : Numeric & Comparable { - return max(Tensor(lhs), rhs) -} - -/// Computes the element-wise maximum of the scalar and the tensor, broadcasting -/// the scalar. -@inlinable @inline(__always) -// @differentiable(where T : TensorFlowFloatingPoint) -public func max(_ lhs: Tensor, _ rhs: T) -> Tensor - where T : Numeric & Comparable { - return max(lhs, Tensor(rhs)) -} - -/// Computes the element-wise minimum of two tensors. -/// - Note: `min` supports broadcasting. 
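The scalar overloads of `pow`, `max`, and `min` above all follow one pattern: lift the scalar into a tensor and defer to the tensor-tensor version, so the broadcasting logic lives in exactly one place. A runnable sketch on `[Double]` (with a hypothetical `minElements`, not part of this patch) illustrates the shape of that pattern:

```swift
// Tensor-tensor version: the single place that does the real work.
func minElements(_ lhs: [Double], _ rhs: [Double]) -> [Double] {
    precondition(lhs.count == rhs.count, "shape mismatch")
    return zip(lhs, rhs).map { Swift.min($0, $1) }
}

// Scalar overloads promote the scalar and defer, mirroring
// `min(_:Tensor)` and `min(Tensor:_)` above.
func minElements(_ lhs: Double, _ rhs: [Double]) -> [Double] {
    return minElements([Double](repeating: lhs, count: rhs.count), rhs)
}

func minElements(_ lhs: [Double], _ rhs: Double) -> [Double] {
    return minElements(lhs, [Double](repeating: rhs, count: lhs.count))
}

print(minElements(2, [1.0, 3.0]))  // [1.0, 2.0]
```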
-@inlinable @inline(__always) -@differentiable(vjp: _vjpMin(_:_:) where T : TensorFlowFloatingPoint) -public func min(_ lhs: Tensor, _ rhs: Tensor) -> Tensor - where T : Numeric & Comparable { - return Raw.minimum(lhs, rhs) -} - -/// Computes the element-wise minimum of the scalar and the tensor, broadcasting -/// the scalar. -@inlinable @inline(__always) -// @differentiable(where T : TensorFlowFloatingPoint) -public func min(_ lhs: T, _ rhs: Tensor) -> Tensor - where T : Numeric & Comparable { - return min(Tensor(lhs), rhs) -} - -/// Computes the element-wise minimum of the scalar and the tensor, broadcasting -/// the scalar. -@inlinable @inline(__always) -// @differentiable(where T : TensorFlowFloatingPoint) -public func min(_ lhs: Tensor, _ rhs: T) -> Tensor - where T : Numeric & Comparable { - return min(lhs, Tensor(rhs)) -} - -/// Computes the square of the tensor. -public extension Tensor where Scalar : Numeric { - @inlinable @inline(__always) - @differentiable( - wrt: self, vjp: _vjpSquared() - where Scalar : TensorFlowFloatingPoint - ) - func squared() -> Tensor { - return Raw.square(self) - } -} - -//===----------------------------------------------------------------------===// -// Selection -//===----------------------------------------------------------------------===// - -public extension Tensor where Scalar == Bool { - /// Returns a new tensor containing elements from either `left` or `right`, - /// depending on the elements of `self`. - /// - /// `self` acts as a mask that chooses, based on the value at each scalar, - /// whether the corresponding scalar in the output should be taken from - /// `left` (if `true`) or `right` (if `false`). - /// - /// - Precondition: `left` and `right` must have the same shape. If - /// `left` and `right` are scalar, then `self` must also be scalar. If - /// `left` and `right` have rank greater than or equal to 1, then `self` - /// must be either have the same shape as `left` or be a 1-D `Tensor` such - /// that `self.scalarCount == left[0]`. - @available(*, deprecated, message: "Use '.replacing(with:mask:)' instead") - @inlinable - func selecting(_ left: Tensor, _ right: Tensor) -> Tensor { - return left.replacing(with: right, where: self) - } -} - -public extension Tensor { - /// Replaces elements of this tensor with `other` in the lanes where `mask` is - /// `true`. - /// - /// - Precondition: `self` and `other` must have the same shape. If - /// `self` and `other` are scalar, then `mask` must also be scalar. If - /// `self` and `other` have rank greater than or equal to `1`, then `mask` - /// must be either have the same shape as `self` or be a 1-D `Tensor` such - /// that `mask.scalarCount == self.shape[0]`. 
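The pullback for masked selection (defined just below as `_vjpReplacing`) routes the upstream gradient to whichever operand produced each output lane, with zeros everywhere else. Here is a dependency-free sketch on flat arrays; `vjpReplacing` is a hypothetical stand-in, and it follows the `Raw.select(condition:t:e:)` call shown below, which yields `t` where the condition holds:

```swift
func vjpReplacing(
    _ a: [Double], _ b: [Double], where mask: [Bool]
) -> (value: [Double], pullback: ([Double]) -> ([Double], [Double])) {
    // Each output lane comes from `a` when the mask is true, else `b`.
    let value = zip(zip(a, b), mask).map { pair, m in m ? pair.0 : pair.1 }
    return (value, { v in
        (zip(v, mask).map { g, m in m ? g : 0 },   // gradient for `a`
         zip(v, mask).map { g, m in m ? 0 : g })   // gradient for `b`
    })
}
```

This is the same zero-masking that the tensor version expresses with `v.replacing(with: zeros, where: mask)` and `zeros.replacing(with: v, where: mask)`.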
- @inlinable - @differentiable(wrt: (self, other), - vjp: _vjpReplacing where Scalar : TensorFlowFloatingPoint) - func replacing(with other: Tensor, - where mask: Tensor) -> Tensor { - return Raw.select(condition: mask, t: self, e: other) - } -} - -public extension Tensor where Scalar : TensorFlowFloatingPoint { - @inlinable - internal func _vjpReplacing(with other: Tensor, where mask: Tensor) - -> (Tensor, (Tensor) -> (Tensor, Tensor)) { - return (replacing(with: other, where: mask), { v in - let zeros = Tensor(zeros: v.shape) - return (v.replacing(with: zeros, where: mask), - zeros.replacing(with: v, where: mask)) - }) - } -} - //===----------------------------------------------------------------------===// // Reduction //===----------------------------------------------------------------------===// From a878600ebb9ee3bb60efe0436c15cba6a87c3a66 Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Sat, 20 Apr 2019 14:11:43 -0400 Subject: [PATCH 09/30] Removed the now-redundant 'Ops.swift' file. --- stdlib/public/TensorFlow/CMakeLists.txt | 1 - stdlib/public/TensorFlow/Gradients.swift | 84 -- stdlib/public/TensorFlow/Ops.swift | 1073 ---------------------- 3 files changed, 1158 deletions(-) delete mode 100644 stdlib/public/TensorFlow/Ops.swift diff --git a/stdlib/public/TensorFlow/CMakeLists.txt b/stdlib/public/TensorFlow/CMakeLists.txt index 161e3ad4e183a..0b2d2b77fc794 100644 --- a/stdlib/public/TensorFlow/CMakeLists.txt +++ b/stdlib/public/TensorFlow/CMakeLists.txt @@ -37,7 +37,6 @@ set(SOURCES DataTypes.swift Execution.swift Gradients.swift - Ops.swift ShapedArray.swift StringOps.swift StringTensor.swift diff --git a/stdlib/public/TensorFlow/Gradients.swift b/stdlib/public/TensorFlow/Gradients.swift index a697533138071..7ae57f46402bf 100644 --- a/stdlib/public/TensorFlow/Gradients.swift +++ b/stdlib/public/TensorFlow/Gradients.swift @@ -28,17 +28,8 @@ // automatic differentiation pass identifies these VJPs and chains them // together to produce arbitrary differentiable programs. // -// NOTE: -// - Currently, we do not want to expose VJP functions to users. The name of -// each VJP function should start with an underscore. -// -// TODO: -// - Fix VJPs for broadcasting ops (need to perform reduction). -// //===----------------------------------------------------------------------===// -infix operator .== : ComparisonPrecedence - //===----------------------------------------------------------------------===// // Method-style differential operators //===----------------------------------------------------------------------===// @@ -197,78 +188,3 @@ public func gradient( R : TensorFlowFloatingPoint { return { x, y, z in gradient(at: x, y, z, in: f) } } - -//===----------------------------------------------------------------------===// -// Linear algebra -//===----------------------------------------------------------------------===// - -@inlinable -func _vjpMatmul( - _ lhs: Tensor, _ rhs: Tensor -) -> (Tensor, (Tensor) -> (Tensor, Tensor)) { - let value = matmul(lhs, rhs) - return (value, { v in - return (matmul(v, rhs.transposed()), matmul(lhs.transposed(), v)) - }) -} - -// TODO: We have to define a custom VJP on • because AD can't yet -// differentiate generic methods. After AD can differentiate generic methods, -// remove the custom VJP. 
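The matmul pullback above has a compact closed form: for C = A • B, an upstream gradient V maps to (V • Bᵀ, Aᵀ • V), which is exactly what the two `matmul` calls in `_vjpMatmul` compute. A self-contained sketch with tiny nested-array matrices (the `Matrix` alias and `vjpMatmul` are hypothetical stand-ins, not part of this patch):

```swift
typealias Matrix = [[Double]]

func matmul(_ a: Matrix, _ b: Matrix) -> Matrix {
    let (n, k, m) = (a.count, b.count, b[0].count)
    var c = Matrix(repeating: [Double](repeating: 0, count: m), count: n)
    for i in 0..<n {
        for j in 0..<m {
            for p in 0..<k { c[i][j] += a[i][p] * b[p][j] }
        }
    }
    return c
}

func transposed(_ a: Matrix) -> Matrix {
    return (0..<a[0].count).map { j in a.map { $0[j] } }
}

// For C = A • B, the pullback maps an upstream gradient V to
// (V • Bᵀ, Aᵀ • V), matching the two calls in `_vjpMatmul`.
func vjpMatmul(
    _ a: Matrix, _ b: Matrix
) -> (value: Matrix, pullback: (Matrix) -> (Matrix, Matrix)) {
    let value = matmul(a, b)
    return (value, { v in (matmul(v, transposed(b)), matmul(transposed(a), v)) })
}
```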
-extension Tensor where Scalar : TensorFlowFloatingPoint { - @inlinable - static func _vjpMatmulOperator( - lhs: Tensor, rhs: Tensor - ) -> (Tensor, (Tensor) -> (Tensor, Tensor)) { - return _vjpMatmul(lhs, rhs) - } -} - -//===----------------------------------------------------------------------===// -// Reduction -//===----------------------------------------------------------------------===// - -extension Tensor where Scalar : TensorFlowFloatingPoint { - @inlinable - func _vjpSum(alongAxes axes: Tensor) -> (Tensor, (Tensor) -> Tensor) { - let value = sum(alongAxes: axes) - return (value, { [shape = shapeTensor] in $0.broadcast(toShape: shape) }) - } - - @inlinable - func _vjpSum( - squeezingAxes axes: Tensor - ) -> (Tensor, (Tensor) -> Tensor) { - let value = sum(squeezingAxes: axes) - return (value, { [shape = shapeTensor] in $0.broadcast(toShape: shape) }) - } - - @inlinable - func _vjpMean(alongAxes axes: Tensor) -> (Tensor, (Tensor) -> Tensor) { - let value = mean(alongAxes: axes) - let count = Raw.gather(params: shapeTensor, indices: axes).product() - return (value, { [shape = shapeTensor] in - $0.broadcast(toShape: shape) / Tensor(count) - }) - } - - @inlinable - func _vjpMean(squeezingAxes axes: [Int]) -> (Tensor, (Tensor) -> Tensor) { - let value = mean(squeezingAxes: axes) - return (value, { [shape = shapeTensor, - count = axes.map { shape[$0] }.reduce(1, *)] in - $0.broadcast(toShape: shape) / Tensor(Scalar(count)) - }) - } - - @inlinable - func _vjpMean( - squeezingAxes axes: Tensor - ) -> (Tensor, (Tensor) -> Tensor) { - let value = mean(squeezingAxes: axes) - let count = Raw.gather(params: shapeTensor, indices: axes).product() - return (value, { [shape = shapeTensor] in - $0.broadcast(toShape: shape) / Tensor(count) - }) - } -} diff --git a/stdlib/public/TensorFlow/Ops.swift b/stdlib/public/TensorFlow/Ops.swift deleted file mode 100644 index 9827420244e27..0000000000000 --- a/stdlib/public/TensorFlow/Ops.swift +++ /dev/null @@ -1,1073 +0,0 @@ -//===-- Ops.swift ------------------------------------------*- swift -*-===// -// -// This source file is part of the Swift.org open source project -// -// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors -// Licensed under Apache License v2.0 with Runtime Library Exception -// -// See https://swift.org/LICENSE.txt for license information -// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors -// -//===----------------------------------------------------------------------===// -// -// This file contains definitions of most tensor operations. -// -//===----------------------------------------------------------------------===// - -//===----------------------------------------------------------------------===// -// Ops and Convenience Methods -//===----------------------------------------------------------------------===// -// -// The majority of the Tensor API is implemented in terms of 'ops' that are -// partitioned out to the TensorFlow graph when the compiler runs. These -// ops are intentially designed to reflect TensorFlow ops, but provide nicer -// Swift syntax for accessing them. In addition to the core ops themselves, -// we also define some helper function wrappers, e.g. to make things symmetric -// and generally feel nice to use. -// -// The ops themselves are defined by the primitive #tfop(...) 
syntax, here are -// some examples: -// result = #tfop("Add", lhs, rhs) -// result = #tfop("Const", dtype: Float.self, value$tensor: 4.0) -// -// The first parameter to this syntax is the TensorFlow op name as a string. -// After that, the inputs are specified, and then attributes are specified -// with their name as the keyword argument. -// -// Inputs and outputs must be of TensorHandle, ResourceHandle, or VariantHandle -// type. These are magic types known to the compiler. -// - -infix operator ++ : AdditionPrecedence - -infix operator .< : ComparisonPrecedence -infix operator .<= : ComparisonPrecedence -infix operator .>= : ComparisonPrecedence -infix operator .> : ComparisonPrecedence -infix operator .== : ComparisonPrecedence -infix operator .!= : ComparisonPrecedence -infix operator .= - -//===----------------------------------------------------------------------===// -// Linear algebra -//===----------------------------------------------------------------------===// - -/// Performs matrix multiplication with another tensor and produces the -/// result. -@inlinable @inline(__always) -@differentiable( - vjp: _vjpMatmul(_:_:) - where Scalar : TensorFlowFloatingPoint -) -public func matmul( - _ lhs: Tensor, _ rhs: Tensor -) -> Tensor { - // Default arguments specified explicitly to avoid "external declarations of - // SILFunctions with shared visibility is not allowed" SILVerifier error in - // "tests/AutoDiff/tensor_autodiff_runtime.swift". - return Raw.matMul(lhs, rhs, transposeA: false, transposeB: false) -} - -infix operator • : MultiplicationPrecedence - -public extension Tensor where Scalar : Numeric { - // TODO: We have to define a custom VJP on • because AD can't yet - // differentiate generic methods. After AD can differentiate generic methods, - // remove the custom VJP. - - /// Performs matrix multiplication between two tensors and produces the - /// result. - @inlinable @inline(__always) - @differentiable( - vjp: _vjpMatmulOperator(lhs:rhs:) - where Scalar : TensorFlowFloatingPoint - ) - static func • (lhs: Tensor, rhs: Tensor) -> Tensor { - return matmul(lhs, rhs) - } -} - -//===----------------------------------------------------------------------===// -// Reduction -//===----------------------------------------------------------------------===// - -public extension Tensor where Scalar == Bool { - /// Returns `true` if all scalars are equal to `true`. Otherwise, returns - /// `false`. - // NOTE: This overload is necessary, otherwise `all()` would refer - // to the variadic method `all(squeezingAxes:)` with zero indices. - @inlinable @inline(__always) - func all() -> Bool { - let axes = Tensor(rangeFrom: 0, to: Int32(rank), stride: 1) - return _TFGetScalarOrDie(Raw.all(self, reductionIndices: axes).handle) - } - - /// Returns `true` if any scalars are equal to `true`. Otherwise, returns - /// `false`. - // NOTE: This overload is necessary, otherwise `any()` would refer - // to the variadic method `any(squeezingAxes:)` with zero indices. - @inlinable @inline(__always) - func any() -> Bool { - let axes = Tensor(rangeFrom: 0, to: Int32(rank), stride: 1) - return _TFGetScalarOrDie(Raw.any(self, reductionIndices: axes).handle) - } - - /// Performs a logical AND operation along the specified axes. The reduced - /// dimensions are removed. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. 
Tensor { - let axes = axes.map(Int32.init) - return Raw.all(self, reductionIndices: Tensor(axes), keepDims: false) - } - - /// Performs a logical AND operation along the specified axes. The reduced - /// dimensions are removed. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - let axes = axes.map(Int32.init) - return Raw.any(self, reductionIndices: Tensor(axes), keepDims: false) - } - - /// Performs a logical AND operation along the specified axes. The reduced - /// dimensions are retained with value 1. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - let axes = axes.map(Int32.init) - return Raw.all(self, reductionIndices: Tensor(axes), keepDims: true) - } - - /// Performs a logical OR operation along the specified axes. The reduced - /// dimensions are retained with value 1. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - let axes = axes.map(Int32.init) - return Raw.any(self, reductionIndices: Tensor(axes), keepDims: true) - } -} - -public extension Tensor where Scalar : Numeric & Comparable { - // NOTE: This overload is necessary, otherwise `min()` would refer - // to the variadic method `min(squeezingAxes:)` with zero indices. - @inlinable @inline(__always) - func min() -> Tensor { - let axes = Tensor(rangeFrom: 0, to: Int32(rank), stride: 1) - return Raw.min(self, reductionIndices: axes) - } - - // NOTE: This overload is necessary, otherwise `max()` would refer - // to the variadic method `max(squeezingAxes:)` with zero indices. - @inlinable @inline(__always) - func max() -> Tensor { - let axes = Tensor(rangeFrom: 0, to: Int32(rank), stride: 1) - return Raw.max(self, reductionIndices: axes) - } - - /// Returns the maximum values along the specified axes. The reduced - /// dimensions are removed. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - let axes = axes.map(Int32.init) - return Raw.max(self, reductionIndices: Tensor(axes), keepDims: false) - } - - /// Returns the maximum values along the specified axes. The reduced - /// dimensions are removed. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - return max(squeezingAxes: axes) - } - - /// Returns the minimum values along the specified axes. The reduced - /// dimensions are removed. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - let axes = axes.map(Int32.init) - return Raw.min(self, reductionIndices: Tensor(axes), keepDims: false) - } - - /// Returns the minimum values along the specified axes. The reduced - /// dimensions are removed. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - return min(squeezingAxes: axes) - } - - /// Returns the indices of the maximum values along the specified axes. The - /// reduced dimensions are removed. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - return Raw.argMax(self, dimension: Tensor(Int32(axis))) - } - - /// Returns the indices of the minimum values along the specified axes. The - /// reduced dimensions are removed. 
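Throughout these reductions the `squeezingAxes` variants drop the reduced dimension while the `alongAxes` variants keep it with size 1 (TensorFlow's `keepDims: true`). A runnable illustration on a `[[Double]]` stand-in shows why the kept dimension matters:

```swift
let x: [[Double]] = [[1, 2, 3], [4, 5, 6]]          // shape [2, 3]

let sumSqueezingAxis1 = x.map { $0.reduce(0, +) }   // shape [2]:    [6.0, 15.0]
let sumAlongAxis1 = x.map { [$0.reduce(0, +)] }     // shape [2, 1]: [[6.0], [15.0]]

// The size-1 dimension kept by the `alongAxes` form is what lets
// expressions like `x - x.mean(alongAxes: 1)` broadcast back against
// the original operand.
```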
- /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - return Raw.argMin(self, dimension: Tensor(Int32(axis))) - } - - /// Returns the minimum along the specified axes. The reduced dimensions are - /// retained with value 1. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - let axes = axes.map(Int32.init) - return Raw.min(self, reductionIndices: Tensor(axes), keepDims: true) - } - - /// Returns the minimum along the specified axes. The reduced dimensions are - /// retained with value 1. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - return min(alongAxes: axes) - } - - /// Returns the minimum along the specified axes. The reduced dimensions are - /// retained with value 1. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - let axes = axes.map(Int32.init) - return Raw.max(self, reductionIndices: Tensor(axes), keepDims: true) - } - - /// Returns the minimum along the specified axes. The reduced dimensions are - /// retained with value 1. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - return max(alongAxes: axes) - } - - /// Returns the index of the maximum value of the flattened scalars. - @inlinable @inline(__always) - func argmax() -> Tensor { - return flattened().argmax(squeezingAxis: 0) - } - - /// Returns the index of the minimum value of the flattened scalars. - @inlinable @inline(__always) - func argmin() -> Tensor { - return flattened().argmin(squeezingAxis: 0) - } -} - -// MARK: - Numeric reduction - -public extension Tensor where Scalar : Numeric { - // MARK: - Sum - - /// Returns the sum along the specified axes. The reduced dimensions are - /// removed. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank...rank`. - @inlinable @inline(__always) - @differentiable( - wrt: self, vjp: _vjpSum(squeezingAxes:) - where Scalar : TensorFlowFloatingPoint - ) - func sum(squeezingAxes axes: Tensor) -> Tensor { - return Raw.sum(self, reductionIndices: Tensor(axes), keepDims: false) - } - - /// Returns the sum along the specified axes. The reduced dimensions are - /// removed. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank...rank`. - @inlinable @inline(__always) - @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint) - func sum(squeezingAxes axes: [Int]) -> Tensor { - // TODO(TF-433): Remove workaround for differentiating `map`. - let axes = {axes.map(Int32.init)}() - return sum(squeezingAxes: Tensor(axes)) - } - - /// Returns the sum along the specified axes. The reduced dimensions are - /// removed. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank...rank`. - @inlinable @inline(__always) - @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint) - func sum(squeezingAxes axes: Int...) -> Tensor { - return sum(squeezingAxes: axes) - } - - @inlinable @inline(__always) - @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint) - func sum() -> Tensor { - return flattened().sum(squeezingAxes: 0) - } - - /// Returns the sum along the specified axes. 
The reduced dimensions are - /// retained with value 1. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank..) -> Tensor { - return Raw.sum(self, reductionIndices: axes, keepDims: true) - } - - /// Returns the sum along the specified axes. The reduced dimensions are - /// retained with value 1. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - // TODO(TF-433): Remove workaround for differentiating `map`. - let axes = {axes.map(Int32.init)}() - return sum(alongAxes: Tensor(axes)) - } - - /// Returns the sum along the specified axes. The reduced dimensions are - /// retained with value 1. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - return sum(alongAxes: axes) - } - - // MARK: - Product - - /// Returns the product along the specified axes. The reduced dimensions are - /// removed. - /// - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank...rank`. - // TODO: Make this @differentiable. - @inlinable @inline(__always) - func product(squeezingAxes axes: Tensor) -> Tensor { - return Raw.prod(self, reductionIndices: axes, keepDims: false) - } - - /// Returns the product along the specified axes. The reduced dimensions are - /// removed. - /// - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank...rank`. - @inlinable @inline(__always) - func product(squeezingAxes axes: [Int]) -> Tensor { - // TODO(TF-433): Remove workaround for differentiating `map`. - let axes = {axes.map(Int32.init)}() - return product(squeezingAxes: Tensor(axes)) - } - - /// Returns the product along the specified axes. The reduced dimensions are - /// removed. - /// - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank...rank`. - @inlinable @inline(__always) - func product(squeezingAxes axes: Int...) -> Tensor { - return product(squeezingAxes: axes) - } - - @inlinable @inline(__always) - func product() -> Tensor { - return flattened().product(squeezingAxes: 0) - } - - /// Returns the product along the specified axes. The reduced dimensions are - /// retained with value 1. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank..) -> Tensor { - return Raw.prod(self, reductionIndices: axes, keepDims: true) - } - - /// Returns the product along the specified axes. The reduced dimensions are - /// retained with value 1. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - // TODO(TF-433): Remove workaround for differentiating `map`. - let axes = {axes.map(Int32.init)}() - return product(alongAxes: Tensor(axes)) - } - - /// Returns the product along the specified axes. The reduced dimensions are - /// retained with value 1. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - return product(alongAxes: axes) - } - - // MARK: - Mean - - /// Returns the arithmetic mean along the specified axes. The reduced - /// dimensions are removed. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank...rank`. 
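The mean pullback registered below (`_vjpMean`) broadcasts the upstream gradient back to the input shape and divides by the number of reduced elements, since each input scalar contributes 1/count to the mean. A minimal sketch on a flat array (`vjpMean` is a hypothetical stand-in, not part of this patch):

```swift
func vjpMean(_ x: [Double]) -> (value: Double, pullback: (Double) -> [Double]) {
    let count = Double(x.count)
    let value = x.reduce(0, +) / count
    // Each input contributes 1/count, so the gradient is uniform.
    return (value, { v in [Double](repeating: v / count, count: x.count) })
}

let (m, pb) = vjpMean([1, 2, 3, 4])
// m == 2.5; pb(1) == [0.25, 0.25, 0.25, 0.25]
```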
- @inlinable @inline(__always) - @differentiable( - wrt: self, vjp: _vjpMean(squeezingAxes:) - where Scalar : TensorFlowFloatingPoint - ) - func mean(squeezingAxes axes: Tensor) -> Tensor { - return Raw.mean(self, reductionIndices: axes, keepDims: false) - } - - /// Returns the arithmetic mean along the specified axes. The reduced - /// dimensions are removed. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank...rank`. - @inlinable @inline(__always) - @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint) - func mean(squeezingAxes axes: [Int]) -> Tensor { - // TODO(TF-433): Remove workaround for differentiating `map`. - let axes = {axes.map(Int32.init)}() - return mean(squeezingAxes: Tensor(axes)) - } - - /// Returns the arithmetic mean along the specified axes. The reduced - /// dimensions are removed. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank...rank`. - @inlinable @inline(__always) - @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint) - func mean(squeezingAxes axes: Int...) -> Tensor { - return mean(squeezingAxes: axes) - } - - @inlinable @inline(__always) - @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint) - func mean() -> Tensor { - return flattened().mean(squeezingAxes: [0]) - } - - /// Returns the arithmetic mean along the specified axes. The reduced - /// dimensions are retained with value 1. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank..) -> Tensor { - return Raw.mean(self, reductionIndices: axes, keepDims: true) - } - - /// Returns the arithmetic mean along the specified axes. The reduced - /// dimensions are retained with value 1. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - // TODO(TF-433): Remove workaround for differentiating `map`. - let axes = {axes.map(Int32.init)}() - return mean(alongAxes: Tensor(axes)) - } - - /// Returns the arithmetic mean along the specified axes. The reduced - /// dimensions are retained with value 1. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - return mean(alongAxes: axes) - } - - // MARK: - Variance - - /// Returns the variance along the specified axes. The reduced dimensions are - /// removed. Does not apply Bessel's correction. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank..) -> Tensor { - let squaredDiff = (self - mean(alongAxes: axes)).squared() - return squaredDiff.mean(squeezingAxes: axes) - } - - /// Returns the variance along the specified axes. The reduced dimensions are - /// removed. Does not apply Bessel's correction. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - // TODO(TF-433): Remove workaround for differentiating `map`. - let axes = {axes.map(Int32.init)}() - return variance(squeezingAxes: Tensor(axes)) - } - - /// Returns the variance along the specified axes. The reduced dimensions are - /// retained with value 1. Does not apply Bessel's correction. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. 
Tensor { - return variance(squeezingAxes: axes) - } - - @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint) - @inlinable @inline(__always) - func variance() -> Tensor { - let mean = self.mean() - let squaredDiff = (self - mean).squared() - return squaredDiff.mean() - } - - /// Returns the variance along the specified axes. The reduced dimensions are - /// retained with value 1. Does not apply Bessel's correction. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank..) -> Tensor { - let squaredDiff = (self - mean(alongAxes: axes)).squared() - return squaredDiff.mean(alongAxes: axes) - } - - /// Returns the variance along the specified axes. The reduced dimensions are - /// retained with value 1. Does not apply Bessel's correction. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - // TODO(TF-433): Remove workaround for differentiating `map`. - let axes = {axes.map(Int32.init)}() - return variance(alongAxes: Tensor(axes)) - } - - /// Returns the variance along the specified axes. The reduced dimensions are - /// retained with value 1. Does not apply Bessel's correction. - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - return variance(alongAxes: axes) - } -} - -// TODO: Consider making the return type be generic over `FloatingPoint` types -// so that `self`'s scalar type can be any `Numeric` type. -public extension Tensor where Scalar : TensorFlowFloatingPoint { - /// Returns the standard deviation of the elements along the specified axes. - /// The reduced dimensions are retained with value `1`. Does not apply - /// Bessel's correction. - /// - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank..) -> Tensor { - return sqrt(variance(squeezingAxes: axes)) - } - - /// Returns the standard deviation of the elements along the specified axes. - /// The reduced dimensions are retained with value `1`. Does not apply - /// Bessel's correction. - /// - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - return sqrt(variance(squeezingAxes: axes)) - } - - /// Returns the standard deviation of the elements along the specified axes. - /// The reduced dimensions are retained with value `1`. Does not apply - /// Bessel's correction. - /// - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - return standardDeviation(squeezingAxes: axes) - } - - /// Returns the standard deviation of the elements along the specified axes. - /// The reduced dimensions are retained with value `1`. Does not apply - /// Bessel's correction. - /// - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - // Reduce along all dimensions. - return standardDeviation(squeezingAxes: Array(0..) -> Tensor { - return sqrt(variance(alongAxes: axes)) - } - - /// Returns the standard deviation of the elements along the specified axes. - /// The reduced dimensions are retained with value `1`. Does not apply - /// Bessel's correction. - /// - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. 
Tensor { - // TODO(TF-433): Remove workaround for differentiating `map`. - let axes = {axes.map(Int32.init)}() - return standardDeviation(alongAxes: Tensor(axes)) - } - - /// Returns the standard deviation of the elements along the specified axes. - /// The reduced dimensions are retained with value `1`. Does not apply - /// Bessel's correction. - /// - /// - Parameter axes: The dimensions to reduce. - /// - Precondition: Each value in `axes` must be in the range `-rank.. Tensor { - return sqrt(variance(alongAxes: axes)) - } -} - -//===----------------------------------------------------------------------===// -// Tensor properties -//===----------------------------------------------------------------------===// - -public extension Tensor { - /// The rank of the tensor, represented as a `Tensor`. - @inlinable - var rankTensor: Tensor { - @inline(__always) - get { - return Raw.rank(self) - } - } - - /// The dimensions of the tensor, represented as a `Tensor`. - @inlinable - var shapeTensor: Tensor { - @inline(__always) - get { - return Raw.shape(self) - } - } - - /// The number of scalars in the tensor, represented as a `Tensor`. - @inlinable - var scalarCountTensor: Tensor { - @inline(__always) - get { - return Raw.size(self) - } - } -} - -//===----------------------------------------------------------------------===// -// Broadcasting -//===----------------------------------------------------------------------===// - -public extension Tensor { - @inlinable @inline(__always) - func broadcast(toShape shape: Tensor) -> Tensor { - return Raw.broadcastTo(self, shape: shape) - } - - @inlinable @inline(__always) - func broadcast(to shape: TensorShape) -> Tensor { - return broadcast(toShape: Tensor(shape.dimensions.map(Int32.init))) - } - - /// Broadcast to the same shape as the specified `Tensor`. - /// - Precondition: The specified shape must be compatible for broadcasting. - @inlinable @inline(__always) - func broadcast(like other: Tensor) -> Tensor { - return broadcast(toShape: other.shapeTensor) - } -} - -public extension Tensor where Scalar : Numeric { - @inlinable - func unbroadcast(toShape otherShape: Tensor) -> Tensor { - let rankDiff = (rankTensor - otherShape.scalarCountTensor).rankLifted() - let ones: Tensor = Raw.fill(dims: rankDiff, value: Tensor(1)) - let paddedShape = ones ++ otherShape - let nonEqualIndices = paddedShape .!= shapeTensor - let broadcastIndices = Raw.where_(nonEqualIndices).flattened() - let unbroadcasted: Tensor = Raw.sum( - self, reductionIndices: Tensor(broadcastIndices), keepDims: false) - return Raw.reshape(unbroadcasted, shape: otherShape) - } - - @inlinable @inline(__always) - func unbroadcast(like other: Tensor) -> Tensor { - return unbroadcast(toShape: other.shapeTensor) - } - - @inlinable @inline(__always) - func unbroadcast(to shape: TensorShape) -> Tensor { - return unbroadcast(toShape: Tensor(shape.dimensions.map(Int32.init))) - } - - @inlinable @inline(__always) - static func .= (lhs: inout Tensor, rhs: Tensor) { - lhs = rhs.broadcast(like: lhs) - } -} - -//===----------------------------------------------------------------------===// -// Padding -//===----------------------------------------------------------------------===// - -public extension Tensor where Scalar : Numeric { - /// Returns a padded tensor according to the specified padding sizes. 
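The `unbroadcast(toShape:)` helper above inverts a broadcast by summing over the axes that were introduced or stretched, then reshaping; this is the reduction that the earlier TODO about broadcasting-op VJPs refers to. In scalar terms, summing a `[2, 3]` gradient back down to the `[3]` shape of the original operand looks like this:

```swift
// Gradient with respect to the broadcast value, shape [2, 3].
let grad: [[Double]] = [[1, 2, 3], [10, 20, 30]]

// Sum over the stretched leading axis to recover the [3]-shaped gradient.
var unbroadcast = [Double](repeating: 0, count: 3)
for row in grad {
    for (j, g) in row.enumerated() { unbroadcast[j] += g }
}
// unbroadcast == [11.0, 22.0, 33.0]
```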
- @inlinable - func padded( - forSizes sizes: [(before: Int, after: Int)], - with value: Scalar = 0 - ) -> Tensor { - let paddings = Tensor( - shape: [sizes.count, 2], - scalars: sizes.flatMap { [Int32($0.before), Int32($0.after)] } - ) - return Raw.padV2(self, paddings: paddings, constantValues: Tensor(value)) - } -} - -//===----------------------------------------------------------------------===// -// Indexing and slicing -//===----------------------------------------------------------------------===// - -// TODO: Negative indexing and strides syntax. - -public extension Tensor { - /// Extracts a slice from the tensor defined by lower and upper bounds for - /// each dimension. - /// - /// - Parameter lowerBounds: The lower bounds at each dimension. - /// - Parameter upperBounds: The upper bounds at each dimension. - @inlinable - @differentiable(wrt: self) - func slice(lowerBounds: [Int], upperBounds: [Int]) -> Tensor { - // TODO: Precondition `lowerBounds.count == upperBounds.count`, - // preferably in graph. - // TODO: Differentiating control flow is not supported yet, thus the thunks. - let lowerBoundsTensor = Tensor({lowerBounds.map(Int32.init)}()) - let upperBoundsTensor = Tensor({upperBounds.map(Int32.init)}()) - return slice( - lowerBounds: lowerBoundsTensor, - sizes: upperBoundsTensor - lowerBoundsTensor) - } - - @inlinable - @differentiable(wrt: self, vjp: _vjpSlice) - func slice(lowerBounds: Tensor, sizes: Tensor) -> Tensor { - return Raw.slice(self, begin: lowerBounds, size: sizes) - } - - @inlinable - internal func _vjpSlice( - lowerBounds: Tensor, - sizes: Tensor - ) -> (Tensor, (Tensor) -> Tensor) { - let value = slice(lowerBounds: lowerBounds, sizes: sizes) - let afterPaddings = shapeTensor - value.shapeTensor - lowerBounds - return (value, { [after = afterPaddings] v in - let beforePaddings = lowerBounds.expandingShape(at: 1) - let afterPaddings = after.expandingShape(at: 1) - let paddings = Tensor( - concatenating: [beforePaddings, afterPaddings], alongAxis: 1) - return Raw.pad(v, paddings: paddings) - }) - } -} - -public enum TensorRange : TensorRangeExpression { - case ellipsis - case newAxis - case squeezeAxis - case index(Int) - case range(Range, stride: Int) - case closedRange(ClosedRange, stride: Int) - case partialRangeFrom(PartialRangeFrom, stride: Int) - case partialRangeUpTo(PartialRangeUpTo, stride: Int) - case partialRangeThrough(PartialRangeThrough, stride: Int) - - public var tensorRange: TensorRange { return self } -} - -extension TensorRange : Equatable { - public static func == (lhs: TensorRange, rhs: TensorRange) -> Bool { - switch (lhs, rhs) { - case (.ellipsis, .ellipsis), - (.newAxis, .newAxis), - (.squeezeAxis, .squeezeAxis): - return true - case (let .index(i1), let .index(i2)): return i1 == i2 - case (let .range(r1, s1), let .range(r2, s2)): return r1 == r2 && s1 == s2 - case (let .closedRange(r1, s1), let .closedRange(r2, s2)): - return r1 == r2 && s1 == s2 - case (let .partialRangeFrom(r1, s1), let .partialRangeFrom(r2, s2)): - return r1.lowerBound == r2.lowerBound && s1 == s2 - case (let .partialRangeUpTo(r1, s1), let .partialRangeUpTo(r2, s2)): - return r1.upperBound == r2.upperBound && s1 == s2 - case (let .partialRangeThrough(r1, s1), let .partialRangeThrough(r2, s2)): - return r1.upperBound == r2.upperBound && s1 == s2 - default: return false - } - } -} - -public protocol TensorRangeExpression { - var tensorRange: TensorRange { get } -} - -// TODO: Cannot extend non-nominal type 'UnboundedRange'. 
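The `_vjpSlice` pullback above is the mirror image of the slice itself: the gradient of a slice is the upstream gradient padded with zeros back out to the original extent, which is what the `Raw.pad` call computes in tensor form. A runnable sketch on a flat array (`vjpSlice` is a hypothetical stand-in):

```swift
func vjpSlice(
    _ x: [Double], lowerBound: Int, size: Int
) -> (value: [Double], pullback: ([Double]) -> [Double]) {
    let value = Array(x[lowerBound..<(lowerBound + size)])
    return (value, { v in
        // Pad the incoming gradient with zeros on both sides.
        [Double](repeating: 0, count: lowerBound)
            + v
            + [Double](repeating: 0, count: x.count - lowerBound - size)
    })
}

let (s, pb) = vjpSlice([1, 2, 3, 4, 5], lowerBound: 1, size: 2)
// s == [2.0, 3.0]; pb([1, 1]) == [0.0, 1.0, 1.0, 0.0, 0.0]
```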
-// extension UnboundedRange : TensorRangeExpression { -// public var tensorRange: TensorRange { return .ellipsis } -// } - -extension Int : TensorRangeExpression { - public var tensorRange: TensorRange { return .index(self) } -} - -extension Range : TensorRangeExpression where Bound == Int { - public var tensorRange: TensorRange { - return .range(self, stride: 1) - } -} - -extension ClosedRange : TensorRangeExpression where Bound == Int { - public var tensorRange: TensorRange { - return .closedRange(self, stride: 1) - } -} - -extension PartialRangeFrom : TensorRangeExpression where Bound == Int { - public var tensorRange: TensorRange { - return .partialRangeFrom(self, stride: 1) - } -} - -extension PartialRangeUpTo : TensorRangeExpression where Bound == Int { - public var tensorRange: TensorRange { - return .partialRangeUpTo(self, stride: 1) - } -} - -extension PartialRangeThrough : TensorRangeExpression where Bound == Int { - public var tensorRange: TensorRange { - return .partialRangeThrough(self, stride: 1) - } -} - -infix operator .. : StridedRangeFormationPrecedence -precedencegroup StridedRangeFormationPrecedence { - associativity: left - higherThan: CastingPrecedence - lowerThan: RangeFormationPrecedence -} - -public extension Range where Bound == Int { - static func .. (range: Range, stride: Int) -> TensorRange { - return .range(range, stride: stride) - } -} - -public extension ClosedRange where Bound == Int { - static func .. (range: ClosedRange, stride: Int) -> TensorRange { - return .closedRange(range, stride: stride) - } -} - -public extension PartialRangeFrom where Bound == Int { - static func .. (range: PartialRangeFrom, stride: Int) -> TensorRange { - return .partialRangeFrom(range, stride: stride) - } -} - -public extension PartialRangeUpTo where Bound == Int { - static func .. (range: PartialRangeUpTo, stride: Int) -> TensorRange { - return .partialRangeUpTo(range, stride: stride) - } -} - -public extension PartialRangeThrough where Bound == Int { - static func .. 
(range: PartialRangeThrough, stride: Int) -> TensorRange { - return .partialRangeThrough(range, stride: stride) - } -} - -public extension Tensor { - @_fixed_layout @usableFromInline - internal struct IndexPath { - @usableFromInline - let begin, end, strides: Tensor - - @usableFromInline - let beginMask, endMask, ellipsisMask, newAxisMask, squeezeAxisMask: Int64 - - @inlinable - public init( - begin: Tensor, end: Tensor, strides: Tensor, - beginMask: Int64, endMask: Int64, ellipsisMask: Int64, newAxisMask: Int64, - squeezeAxisMask: Int64 - ) { - self.begin = begin - self.end = end - self.strides = strides - self.beginMask = beginMask - self.endMask = endMask - self.ellipsisMask = ellipsisMask - self.newAxisMask = newAxisMask - self.squeezeAxisMask = squeezeAxisMask - } - } - - @inlinable - @differentiable(wrt: self, vjp: _vjpSubscript) - internal subscript(_ indexPath: IndexPath) -> Tensor { - get { - return Raw.stridedSlice( - self, begin: indexPath.begin, end: indexPath.end, - strides: indexPath.strides, beginMask: indexPath.beginMask, - endMask: indexPath.endMask, ellipsisMask: indexPath.ellipsisMask, - newAxisMask: indexPath.newAxisMask, - shrinkAxisMask: indexPath.squeezeAxisMask) - } - set { - self = Raw.tensorStridedSliceUpdate( - self, begin: indexPath.begin, end: indexPath.end, - strides: indexPath.strides, value: newValue, - beginMask: indexPath.beginMask, endMask: indexPath.endMask, - ellipsisMask: indexPath.ellipsisMask, - newAxisMask: indexPath.newAxisMask, - shrinkAxisMask: indexPath.squeezeAxisMask) - } - } - - @inlinable - // TODO: @differentiable(wrt: self) - subscript(_ ranges: TensorRangeExpression...) -> Tensor { - get { - return self[IndexPath(ranges.map { $0.tensorRange })] - } - set { - self[IndexPath(ranges.map { $0.tensorRange })] = newValue - } - } - - @usableFromInline - internal func _vjpSubscript( - _ indexPath: IndexPath - ) -> (Tensor, (Tensor) -> Tensor) { - return (self[indexPath], { [shape = shapeTensor] v in - Raw.stridedSliceGrad( - shape: shape, begin: indexPath.begin, end: indexPath.end, - strides: indexPath.strides, dy: v, beginMask: indexPath.beginMask, - endMask: indexPath.endMask, ellipsisMask: indexPath.ellipsisMask, - newAxisMask: indexPath.newAxisMask, - shrinkAxisMask: indexPath.squeezeAxisMask) - }) - } -} - -internal extension Tensor.IndexPath { - @inlinable - init(_ ranges: [TensorRange]) { - precondition(!ranges.isEmpty, "The tensor range collection cannot be empty.") - precondition(ranges.count { $0 == TensorRange.ellipsis } < 2, - "Only one ellipsis is allowed per tensor range collection.") - - var begin = [Int32](repeating: 0, count: ranges.count) - var end = [Int32](repeating: 0, count: ranges.count) - var strides = [Int32](repeating: 1, count: ranges.count) - var beginMask: Int64 = 0 - var endMask: Int64 = 0 - var ellipsisMask: Int64 = 0 - var newAxisMask: Int64 = 0 - var squeezeAxisMask: Int64 = 0 - for (i, index) in ranges.enumerated() { - switch index { - case .ellipsis: ellipsisMask |= 1 << i - case .newAxis: newAxisMask |= 1 << i - case .squeezeAxis: squeezeAxisMask |= 1 << i - case .index(let index): - begin[i] = Int32(index) - end[i] = Int32(index) + 1 - squeezeAxisMask |= 1 << i - case .range(let range, let stride): - begin[i] = Int32(range.lowerBound) - end[i] = Int32(range.upperBound) - strides[i] = Int32(stride) - case .closedRange(let range, let stride): - begin[i] = Int32(range.lowerBound) - switch Int32(range.upperBound) { - case -1: endMask |= 1 << i - case let u: end[i] = u + 1 - } - strides[i] = Int32(stride) - case 
.partialRangeFrom(let range, let stride): - begin[i] = Int32(range.lowerBound) - strides[i] = Int32(stride) - endMask |= 1 << i - case .partialRangeUpTo(let range, let stride): - end[i] = Int32(range.upperBound) - strides[i] = Int32(stride) - beginMask |= 1 << i - case .partialRangeThrough(let range, let stride): - end[i] = Int32(range.upperBound) + 1 - strides[i] = Int32(stride) - beginMask |= 1 << i - } - } - - self.begin = Tensor(begin) - self.end = Tensor(end) - self.strides = Tensor(strides) - self.beginMask = beginMask - self.endMask = endMask - self.ellipsisMask = ellipsisMask - self.newAxisMask = newAxisMask - self.squeezeAxisMask = squeezeAxisMask - } -} From e790f783536bf861bc9fcd50082be46d56479233 Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Sat, 20 Apr 2019 14:18:16 -0400 Subject: [PATCH 10/30] Moved the gradient helper methods to swift-apis. --- stdlib/public/TensorFlow/CMakeLists.txt | 1 - stdlib/public/TensorFlow/Gradients.swift | 190 ----------------------- 2 files changed, 191 deletions(-) delete mode 100644 stdlib/public/TensorFlow/Gradients.swift diff --git a/stdlib/public/TensorFlow/CMakeLists.txt b/stdlib/public/TensorFlow/CMakeLists.txt index 0b2d2b77fc794..21cd8f6683067 100644 --- a/stdlib/public/TensorFlow/CMakeLists.txt +++ b/stdlib/public/TensorFlow/CMakeLists.txt @@ -36,7 +36,6 @@ set(SOURCES Dataset.swift DataTypes.swift Execution.swift - Gradients.swift ShapedArray.swift StringOps.swift StringTensor.swift diff --git a/stdlib/public/TensorFlow/Gradients.swift b/stdlib/public/TensorFlow/Gradients.swift deleted file mode 100644 index 7ae57f46402bf..0000000000000 --- a/stdlib/public/TensorFlow/Gradients.swift +++ /dev/null @@ -1,190 +0,0 @@ -//===-- Gradients.swift ---------------------------------------*- swift -*-===// -// -// This source file is part of the Swift.org open source project -// -// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors -// Licensed under Apache License v2.0 with Runtime Library Exception -// -// See https://swift.org/LICENSE.txt for license information -// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors -// -//===----------------------------------------------------------------------===// -// -// This file contains vector-Jacobian product (VJP) definitions for Tensor ops. -// -// Terminology: -// - originalValue (f): The function being differentiated, or the result of that -// function. -// - VJP (f'): The function as the result of differentiation, computing -// the vector-Jacobian products with respect to all arguments, or the result -// of that function. -// -// For more information, visit: -// https://en.wikipedia.org/wiki/Automatic_differentiation -// -// Every function in this file is the VJP of some corresponding function -// defined in Ops.swift, with respect to all arguments. The attribute -// '@differentiable(vjp: ...)' is used to register a function's VJP. The -// automatic differentiation pass identifies these VJPs and chains them -// together to produce arbitrary differentiable programs. 
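A tiny, dependency-free sketch of the chaining just described: given VJPs for two functions, the VJP of their composition runs the primals forward and then threads the upstream gradient backwards through each pullback in turn. The `vjpSquare`/`vjpSine` names are hypothetical stand-ins:

```swift
import Foundation

func vjpSquare(_ x: Double) -> (Double, (Double) -> Double) {
    return (x * x, { v in v * 2 * x })
}

func vjpSine(_ x: Double) -> (Double, (Double) -> Double) {
    return (sin(x), { v in v * cos(x) })
}

// d/dx sin(x^2) = cos(x^2) * 2x, recovered purely by composing pullbacks.
func vjpSineOfSquare(_ x: Double) -> (Double, (Double) -> Double) {
    let (y, pbSquare) = vjpSquare(x)
    let (z, pbSine) = vjpSine(y)
    return (z, { v in pbSquare(pbSine(v)) })
}
```

This composition is what the automatic differentiation pass performs mechanically for every registered VJP.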
-// -//===----------------------------------------------------------------------===// - -//===----------------------------------------------------------------------===// -// Method-style differential operators -//===----------------------------------------------------------------------===// - -public extension Differentiable { - @inlinable - func gradient( - in f: @differentiable (Self) -> Tensor - ) -> CotangentVector { - return self.pullback(in: f)(Tensor(1)) - } - - @inlinable - func valueWithGradient( - in f: @differentiable (Self) -> Tensor - ) -> (value: Tensor, gradient: CotangentVector) { - let (y, pb) = self.valueWithPullback(in: f) - return (y, pb(Tensor(1))) - } - - @inlinable - func gradient( - at x: T, in f: @differentiable (Self, T) -> Tensor - ) -> (CotangentVector, T.CotangentVector) { - return self.pullback(at: x, in: f)(Tensor(1)) - } - - @inlinable - func valueWithGradient( - at x: T, in f: @differentiable (Self, T) -> Tensor - ) -> (value: Tensor, gradient: (CotangentVector, T.CotangentVector)) { - let (y, pb) = self.valueWithPullback(at: x, in: f) - return (y, pb(Tensor(1))) - } -} - -//===----------------------------------------------------------------------===// -// Free-function-style differential operators -//===----------------------------------------------------------------------===// - -// Value with gradient - -@inlinable -public func valueWithGradient( - at x: T, in f: @differentiable (T) -> Tensor -) -> (value: Tensor, gradient: T.CotangentVector) -where T : Differentiable, R : TensorFlowFloatingPoint { - let (y, pullback) = valueWithPullback(at: x, in: f) - return (y, pullback(Tensor(1))) -} - -@inlinable -public func valueWithGradient( - at x: T, _ y: U, in f: @differentiable (T, U) -> Tensor -) -> (value: Tensor, gradient: (T.CotangentVector, U.CotangentVector)) - where T : Differentiable, U : Differentiable, - R : TensorFlowFloatingPoint { - let (y, pullback) = valueWithPullback(at: x, y, in: f) - return (y, pullback(Tensor(1))) -} - -@inlinable -public func valueWithGradient( - at x: T, _ y: U, _ z: V, in f: @differentiable (T, U, V) -> Tensor -) -> (value: Tensor, - gradient: (T.CotangentVector, U.CotangentVector, V.CotangentVector)) - where T : Differentiable, U : Differentiable, V : Differentiable, - R : TensorFlowFloatingPoint { - let (y, pullback) = valueWithPullback(at: x, y, z, in: f) - return (y, pullback(Tensor(1))) -} - -// Value with gradient (curried) - -@inlinable -public func valueWithGradient( - of f: @escaping @differentiable (T) -> Tensor -) -> (T) -> (value: Tensor, gradient: T.CotangentVector) - where T : Differentiable, R : TensorFlowFloatingPoint { - return { x in valueWithGradient(at: x, in: f) } -} - -@inlinable -public func valueWithGradient( - of f: @escaping @differentiable (T, U) -> Tensor -) -> (T, U) - -> (value: Tensor, gradient: (T.CotangentVector, U.CotangentVector)) - where T : Differentiable, U : Differentiable, - R : TensorFlowFloatingPoint { - return { x, y in valueWithGradient(at: x, y, in: f) } -} - -@inlinable -public func valueWithGradient( - of f: @escaping @differentiable (T, U, V) -> Tensor -) -> (T, U, V) - -> (value: Tensor, - gradient: (T.CotangentVector, U.CotangentVector, V.CotangentVector)) - where T : Differentiable, U : Differentiable, V : Differentiable, - R : TensorFlowFloatingPoint { - return { x, y, z in valueWithGradient(at: x, y, z, in: f) } -} - -// Gradient - -@inlinable -public func gradient( - at x: T, in f: @differentiable (T) -> Tensor -) -> T.CotangentVector - where T : Differentiable, R : 
TensorFlowFloatingPoint { - return pullback(at: x, in: f)(Tensor(1)) -} - -@inlinable -public func gradient( - at x: T, _ y: U, in f: @differentiable (T, U) -> Tensor -) -> (T.CotangentVector, U.CotangentVector) - where T : Differentiable, U : Differentiable, - R : TensorFlowFloatingPoint { - return pullback(at: x, y, in: f)(Tensor(1)) -} - -@inlinable -public func gradient( - at x: T, _ y: U, _ z: V, in f: @differentiable (T, U, V) -> Tensor -) -> (T.CotangentVector, U.CotangentVector, V.CotangentVector) - where T : Differentiable, U : Differentiable, V : Differentiable, - R : TensorFlowFloatingPoint { - return pullback(at: x, y, z, in: f)(Tensor(1)) -} - -// Gradient (curried) - -@inlinable -public func gradient( - of f: @escaping @differentiable (T) -> Tensor -) -> (T) -> T.CotangentVector - where T : Differentiable, R : TensorFlowFloatingPoint { - return { x in gradient(at: x, in: f) } -} - -@inlinable -public func gradient( - of f: @escaping @differentiable (T, U) -> Tensor -) -> (T, U) -> (T.CotangentVector, U.CotangentVector) - where T : Differentiable, U : Differentiable, - R : TensorFlowFloatingPoint { - return { x, y in gradient(at: x, y, in: f) } -} - -@inlinable -public func gradient( - of f: @escaping @differentiable (T, U, V) -> Tensor -) -> (T, U, V) -> (T.CotangentVector, U.CotangentVector, V.CotangentVector) - where T : Differentiable, U : Differentiable, V : Differentiable, - R : TensorFlowFloatingPoint { - return { x, y, z in gradient(at: x, y, z, in: f) } -} From 93041e0e6a58a3d7227c00bf22065116b9916b65 Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Sat, 20 Apr 2019 16:37:24 -0400 Subject: [PATCH 11/30] Moved the tensor tests to swift-apis. --- stdlib/public/TensorFlow/CMakeLists.txt | 4 +- .../public/TensorFlow/PythonConversion.swift | 172 ---- test/TensorFlowRuntime/tensor.swift | 757 ------------------ test/TensorFlowRuntime/tensor_api.swift | 50 -- 4 files changed, 1 insertion(+), 982 deletions(-) delete mode 100644 stdlib/public/TensorFlow/PythonConversion.swift delete mode 100644 test/TensorFlowRuntime/tensor_api.swift diff --git a/stdlib/public/TensorFlow/CMakeLists.txt b/stdlib/public/TensorFlow/CMakeLists.txt index 21cd8f6683067..53e1cbda8c6dd 100644 --- a/stdlib/public/TensorFlow/CMakeLists.txt +++ b/stdlib/public/TensorFlow/CMakeLists.txt @@ -47,9 +47,7 @@ set(SOURCES Utilities.swift ArrayOps.swift Threading.swift - ExecuteOp.swift.gyb - # NumPy bridging for `ShapedArray` and `Tensor`. - PythonConversion.swift) + ExecuteOp.swift.gyb) # Copy TensorFlow bindings file, if it exists. if (TENSORFLOW_SWIFT_BINDINGS) diff --git a/stdlib/public/TensorFlow/PythonConversion.swift b/stdlib/public/TensorFlow/PythonConversion.swift deleted file mode 100644 index 7677c9d97496e..0000000000000 --- a/stdlib/public/TensorFlow/PythonConversion.swift +++ /dev/null @@ -1,172 +0,0 @@ -//===-- PythonConversion.swift --------------------------------*- swift -*-===// -// -// This source file is part of the Swift.org open source project -// -// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors -// Licensed under Apache License v2.0 with Runtime Library Exception -// -// See https://swift.org/LICENSE.txt for license information -// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors -// -//===----------------------------------------------------------------------===// -// -// This file defines conversions between Python types & custom TensorFlow types. 
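A usage sketch for the conversions defined below. It assumes a Swift for TensorFlow toolchain where the `TensorFlow` and `Python` modules are available and the `numpy` package is installed; `Tensor.init?(numpy:)` and `makeNumpyArray()` are the entry points declared in this file:

```swift
import TensorFlow
import Python

let np = Python.import("numpy")
let ndarray = np.array([[1.0, 2.0], [3.0, 4.0]], dtype: np.float64)

// `init?(numpy:)` is failable: it returns `nil` on an incompatible dtype.
if let tensor = Tensor<Double>(numpy: ndarray) {
    let roundTripped = tensor.makeNumpyArray()
    print(np.array_equal(ndarray, roundTripped))  // True
}
```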
-// -//===----------------------------------------------------------------------===// - -#if canImport(Python) -import Python - -/// The `numpy` Python module. -/// Note: Global variables are lazy, so the following declaration won't produce -// a Python import error until it is first used. -private let np = Python.import("numpy") - -private func debugLogNumpyError(_ message: String) { - debugLog("NumPy conversion error: " + message) -} - -extension ShapedArray : ConvertibleFromNumpyArray - where Scalar : NumpyScalarCompatible { - /// Creates a `ShapedArray` with the same shape and scalars as the specified - /// `numpy.ndarray` instance. - /// - /// - Parameter numpyArray: The `numpy.ndarray` instance to convert. - /// - Precondition: The `numpy` Python package must be installed. - /// - Precondition: `numpyArray` must have a compatible scalar `dtype`. - public init?(numpy numpyArray: PythonObject) { - // Check if input is a `numpy.ndarray` instance. - guard Python.isinstance(numpyArray, np.ndarray) == true else { - debugLogNumpyError(""" - PythonObject input has type '\(Python.type(numpyArray))' and is not \ - an instance of 'numpy.ndarray'. - """) - return nil - } - // Check if the dtype of the `ndarray` is compatible with the `Scalar` - // type. - guard Scalar.numpyScalarTypes.contains(numpyArray.dtype) else { - debugLogNumpyError(""" - 'numpy.ndarray' dtype '\(numpyArray.dtype)' is incompatible with \ - Swift type '\(Scalar.self)'. - """) - return nil - } - - let pyShape = numpyArray.__array_interface__["shape"] - guard let shape = [Int](pyShape) else { - debugLogNumpyError("cannot access shape of 'numpy.ndarray' instance.") - return nil - } - - // Make sure that the array is contiguous in memory. This does a copy if - // the array is not already contiguous in memory. - let contiguousNumpyArray = np.ascontiguousarray(numpyArray) - - guard let ptrVal = - UInt(contiguousNumpyArray.__array_interface__["data"].tuple2.0) else { - debugLogNumpyError("cannot access data of 'numpy.ndarray' instance.") - return nil - } - // Note: `ptr` is not nil even if the `ndarray` is empty (i.e. has a shape - // of `(0,)`). - guard let ptr = UnsafePointer(bitPattern: ptrVal) else { - fatalError("'numpy.ndarray' data pointer was nil") - } - // This code avoids calling `init(shape: [Int], scalars: S)`, - // which inefficiently copies scalars one by one. Instead, - // `init(shape: [Int], scalars: [Scalar])` is called, which efficiently - // does a `memcpy` of the entire `scalars` array. - // Unecessary copying is minimized. - let dummyPointer = UnsafeMutablePointer.allocate(capacity: 1) - let scalarCount = shape.reduce(1, *) - var scalars: [Scalar] = Array(repeating: dummyPointer.move(), - count: scalarCount) - dummyPointer.deallocate() - scalars.withUnsafeMutableBufferPointer { buffPtr in - buffPtr.baseAddress!.assign(from: ptr, count: scalarCount) - } - self.init(shape: shape, scalars: scalars) - } -} - -extension Tensor : ConvertibleFromNumpyArray - where Scalar : NumpyScalarCompatible { - /// Creates a tensor with the same shape and scalars as the specified - /// `numpy.ndarray` instance. - /// - /// - Parameter numpyArray: The `numpy.ndarray` instance to convert. - /// - Precondition: The `numpy` Python package must be installed. - /// - Returns: `numpyArray` converted to an `Array`. Returns `nil` if - /// `numpyArray` does not have a compatible scalar `dtype`. - public init?(numpy numpyArray: PythonObject) { - // Check if input is a `numpy.ndarray` instance. 
- guard Python.isinstance(numpyArray, np.ndarray) == true else { - debugLogNumpyError(""" - PythonObject input has type '\(Python.type(numpyArray))' and is not \ - an instance of 'numpy.ndarray'. - """) - return nil - } - // Check if the dtype of the `ndarray` is compatible with the `Scalar` - // type. - guard Scalar.numpyScalarTypes.contains(numpyArray.dtype) else { - debugLogNumpyError(""" - 'numpy.ndarray' dtype '\(numpyArray.dtype)' is incompatible with \ - Swift type '\(Scalar.self)'. - """) - return nil - } - - let pyShape = numpyArray.__array_interface__["shape"] - guard let dimensions = [Int](pyShape) else { - debugLogNumpyError("cannot access shape of 'numpy.ndarray' instance.") - return nil - } - let shape = TensorShape(dimensions) - - // Make sure that the array is contiguous in memory. This does a copy if - // the array is not already contiguous in memory. - let contiguousNumpyArray = np.ascontiguousarray(numpyArray) - - guard let ptrVal = - UInt(contiguousNumpyArray.__array_interface__["data"].tuple2.0) else { - debugLogNumpyError("cannot access data of 'numpy.ndarray' instance.") - return nil - } - // Note: `ptr` is not nil even if the `ndarray` is empty (i.e. has a shape - // of `(0,)`). - guard let ptr = UnsafePointer(bitPattern: ptrVal) else { - fatalError("'numpy.ndarray' data pointer was nil") - } - let buffPtr = UnsafeBufferPointer(start: ptr, - count: Int(shape.contiguousSize)) - self.init(shape: shape, scalars: buffPtr) - } -} - -extension ShapedArray where Scalar : NumpyScalarCompatible { - /// Creates a `numpy.ndarray` instance with the same shape and scalars as - /// this `ShapedArray`. - /// - /// - Precondition: The `numpy` Python package must be installed. - public func makeNumpyArray() -> PythonObject { - return scalars.makeNumpyArray().reshape(shape) - } -} - -extension Tensor where Scalar : NumpyScalarCompatible { - /// Creates a `numpy.ndarray` instance with the same shape and scalars as - /// this tensor. - /// - /// - Precondition: The `numpy` Python package must be installed. 
- public func makeNumpyArray() -> PythonObject { return array.makeNumpyArray() } -} - -extension TensorShape : PythonConvertible { - public var pythonObject: PythonObject { - return dimensions.pythonObject - } -} - -#endif // canImport(Python) diff --git a/test/TensorFlowRuntime/tensor.swift b/test/TensorFlowRuntime/tensor.swift index e67a2c3364d8f..1a1e802961a0e 100644 --- a/test/TensorFlowRuntime/tensor.swift +++ b/test/TensorFlowRuntime/tensor.swift @@ -19,536 +19,6 @@ import TensorFlowUnittest #endif import StdlibUnittest -var TensorTests = TestSuite("Tensor") - -TensorTests.testAllBackends("Initializers") { - let scalar = Tensor(1) - let matrix: Tensor = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]] - let broadcastScalar = Tensor(broadcasting: 10, rank: 3) - let some4d = Tensor(shape: [2, 1, 2, 1], - scalars: AnyRandomAccessCollection([2, 3, 4, 5])) - expectEqual(ShapedArray(shape: [2, 1, 2, 1], scalars: [2, 3, 4, 5]), - some4d.array) - expectEqual(ShapedArray(shape: [], scalars: [1]), scalar.array) - expectEqual(ShapedArray(shape: [2, 3], scalars: [1, 2, 3, 4, 5, 6]), - matrix.array) - expectEqual(ShapedArray(shape: [1, 1, 1], scalars: [10]), - broadcastScalar.array) -} - -TensorTests.testAllBackends("FactoryInitializers") { - let x = Tensor(ones: [1, 10]) - expectEqual(ShapedArray(repeating: 1, shape: [1, 10]), x.array) -} - -TensorTests.testAllBackends("NumericInitializers") { - let x = Tensor(oneHotAtIndices: [0, 2, -1, 1], depth: 3) - expectEqual(ShapedArray(shape: [4, 3], scalars: [1, 0, 0, - 0, 0, 1, - 0, 0, 0, - 0, 1, 0]), - x.array) -} - -TensorTests.testAllBackends("ScalarToTensorConversion") { - let tensor = Tensor(broadcasting: 42, rank: 4) - expectEqual([1, 1, 1, 1], tensor.shape) - expectEqual([42], tensor.scalars) -} - -TensorTests.testAllBackends("ArrayConversion") { - let array3D = ShapedArray(repeating: 1.0, shape: [2, 3, 4]) - let tensor3D = Tensor(array3D) - expectEqual(array3D, tensor3D.array) -} - -TensorTests.testAllBackends("DataTypeCast_NonTPU") { - // TPU does not support Int8 or 16 casting. - guard !_RuntimeConfig.executionMode.isTPU else { return } - - let x = Tensor(ones: [5, 5]) - let ints = Tensor(x) - let floats = Tensor(x) - let i8s = Tensor(floats) - expectEqual(ShapedArray(repeating: 1, shape: [5, 5]), ints.array) - expectEqual(ShapedArray(repeating: 1, shape: [5, 5]), floats.array) - expectEqual(ShapedArray(repeating: 1, shape: [5, 5]), i8s.array) -} - -TensorTests.testAllBackends("DataTypeCast_TPU") { - // Non-TPU mode (e.g. eager) does not support Uint32 casting. - guard _RuntimeConfig.executionMode.isTPU else { return } - - let x = Tensor(ones: [5, 5]) - let ints = Tensor(x) - let floats = Tensor(x) - let u32s = Tensor(floats) - expectEqual(ShapedArray(repeating: 1, shape: [5, 5]), ints.array) - expectEqual(ShapedArray(repeating: 1, shape: [5, 5]), floats.array) - expectEqual(ShapedArray(repeating: 1, shape: [5, 5]), u32s.array) -} - -TensorTests.testAllBackends("BoolToNumericCast_NonTPU") { - // TPU does not support Int8 or 16 casting. - // - // When changing to UInt32, got another TPU/XLA compilation error when - // converting from bools to Uint32 (different from missing kernel error). 
- if _RuntimeConfig.executionMode.isTPU { return } - - let bools = Tensor(shape: [2, 2], scalars: [true, false, true, false]) - let ints = Tensor(bools) - let floats = Tensor(bools) - let i8s = Tensor(bools) - expectEqual(ShapedArray(shape: [2, 2], scalars: [1, 0, 1, 0]), ints.array) - expectEqual(ShapedArray(shape: [2, 2], scalars: [1, 0, 1, 0]), floats.array) - expectEqual(ShapedArray(shape: [2, 2], scalars: [1, 0, 1, 0]), i8s.array) -} - -TensorTests.testAllBackends("ElementIndexing") { - // NOTE: cannot test multiple `Tensor.shape` or `Tensor.scalars` directly - // until send and receive are implemented (without writing a bunch of mini - // tests). Instead, `Tensor.array` is called to make a ShapedArray host copy - // and the ShapedArray is tested. - let tensor3D = Tensor(shape: [3, 4, 5], - scalars: Array(stride(from: 0.0, to: 60, by: 1))) - let element2D = tensor3D[2] - let element1D = tensor3D[1][3] - let element0D = tensor3D[2][0][3] - - let array2D = element2D.array - let array1D = element1D.array - let array0D = element0D.array - - /// Test shapes - expectEqual([4, 5], array2D.shape) - expectEqual([5], array1D.shape) - expectEqual([], array0D.shape) - - /// Test scalars - expectEqual(Array(stride(from: 40.0, to: 60, by: 1)), array2D.scalars) - expectEqual(Array(stride(from: 35.0, to: 40, by: 1)), array1D.scalars) - expectEqual([43], array0D.scalars) -} - -TensorTests.testAllBackends("ElementIndexingAssignment") { - // NOTE: cannot test multiple `Tensor.shape` or `Tensor.scalars` directly - // until send and receive are implemented (without writing a bunch of mini - // tests). Instead, `Tensor.array` is called to make a ShapedArray host copy - // and the ShapedArray is tested. - var tensor3D = Tensor(shape: [3, 4, 5], - scalars: Array(stride(from: 0.0, to: 60, by: 1))) - tensor3D[2] = Tensor(shape: [4, 5], - scalars: Array(stride(from: 20.0, to: 40, by: 1))) - let element2D = tensor3D[2] - let element1D = tensor3D[1][3] - let element0D = tensor3D[2][0][3] - - let array2D = element2D.array - let array1D = element1D.array - let array0D = element0D.array - - /// Test shapes - expectEqual([4, 5], array2D.shape) - expectEqual([5], array1D.shape) - expectEqual([], array0D.shape) - - /// Test scalars - expectEqual(Array(stride(from: 20.0, to: 40, by: 1)), array2D.scalars) - expectEqual(Array(stride(from: 35.0, to: 40, by: 1)), array1D.scalars) - expectEqual([23], array0D.scalars) -} - -TensorTests.testAllBackends("NestedElementIndexing") { - // NOTE: This test could use a clearer name, along with other "indexing" - // tests. Note to update corresponding test names in other files - // (shaped_array.test) as well. - let tensor3D = Tensor(shape: [3, 4, 5], - scalars: Array(stride(from: 0.0, to: 60, by: 1))) - let element1D = tensor3D[1, 3] - let element0D = tensor3D[2, 0, 3] - - let array1D = element1D.array - let array0D = element0D.array - - /// Test shapes - expectEqual([5], array1D.shape) - expectEqual([], array0D.shape) - - /// Test scalars - expectEqual(Array(stride(from: 35.0, to: 40, by: 1)), array1D.scalars) - expectEqual([43], array0D.scalars) -} - -TensorTests.testAllBackends("SliceIndexing") { - // NOTE: cannot test `Tensor.shape` or `Tensor.scalars` directly until send - // and receive are implemented (without writing a bunch of mini tests). - // Instead, `Tensor.array` is called to make a ShapedArray host copy and the - // ShapedArray is tested instead. - let tensor3D = Tensor(shape: [3, 4, 5], - scalars: Array(stride(from: 0.0, to: 60, by: 1))) - let slice3D = tensor3D[2...] 
- let slice2D = tensor3D[1][0..<2] - let slice1D = tensor3D[0][0][3..<5] - - let array3D = slice3D.array - let array2D = slice2D.array - let array1D = slice1D.array - - /// Test shapes - expectEqual([1, 4, 5], array3D.shape) - expectEqual([2, 5], array2D.shape) - expectEqual([2], array1D.shape) - - /// Test scalars - expectEqual(Array(stride(from: 40.0, to: 60, by: 1)), array3D.scalars) - expectEqual(Array(stride(from: 20.0, to: 30, by: 1)), array2D.scalars) - expectEqual(Array(stride(from: 3.0, to: 5, by: 1)), array1D.scalars) -} - -TensorTests.testAllBackends("SliceIndexingAssignment") { - // NOTE: cannot test `Tensor.shape` or `Tensor.scalars` directly until send - // and receive are implemented (without writing a bunch of mini tests). - // Instead, `Tensor.array` is called to make a ShapedArray host copy and the - // ShapedArray is tested instead. - var tensor3D = Tensor( - shape: [3, 4, 5], scalars: Array(stride(from: 0.0, to: 60, by: 1))) - tensor3D[2, 0..<5, 0..<6] = Tensor( - shape: [4, 5], scalars: Array(stride(from: 20.0, to: 40, by: 1))) - let slice3D = tensor3D[2...] - let slice2D = tensor3D[1][0..<2] - let slice1D = tensor3D[0][0][3..<5] - - let array3D = slice3D.array - let array2D = slice2D.array - let array1D = slice1D.array - - /// Test shapes - expectEqual([1, 4, 5], array3D.shape) - expectEqual([2, 5], array2D.shape) - expectEqual([2], array1D.shape) - - /// Test scalars - expectEqual(Array(stride(from: 20.0, to: 40, by: 1)), array3D.scalars) - expectEqual(Array(stride(from: 20.0, to: 30, by: 1)), array2D.scalars) - expectEqual(Array(stride(from: 3.0, to: 5, by: 1)), array1D.scalars) -} - -TensorTests.testAllBackends("EllipsisIndexing") { - // NOTE: cannot test `Tensor.shape` or `Tensor.scalars` directly until send - // and receive are implemented (without writing a bunch of mini tests). - // Instead, `Tensor.array` is called to make a ShapedArray host copy and the - // ShapedArray is tested instead. - var tensor3D = Tensor( - shape: [3, 4, 5], scalars: Array(stride(from: 0.0, to: 60, by: 1))) - tensor3D[2, TensorRange.ellipsis] = Tensor( - shape: [4, 5], scalars: Array(stride(from: 20.0, to: 40, by: 1))) - let slice3D = tensor3D[2..., TensorRange.ellipsis] - let slice2D = tensor3D[1][0..<2] - let slice1D = tensor3D[0][0][3..<5] - - let array3D = slice3D.array - let array2D = slice2D.array - let array1D = slice1D.array - - /// Test shapes - expectEqual([1, 4, 5], array3D.shape) - expectEqual([2, 5], array2D.shape) - expectEqual([2], array1D.shape) - - /// Test scalars - expectEqual(Array(stride(from: 20.0, to: 40, by: 1)), array3D.scalars) - expectEqual(Array(stride(from: 20.0, to: 30, by: 1)), array2D.scalars) - expectEqual(Array(stride(from: 3.0, to: 5, by: 1)), array1D.scalars) -} - -TensorTests.testAllBackends("NewAxisIndexing") { - // NOTE: cannot test `Tensor.shape` or `Tensor.scalars` directly until send - // and receive are implemented (without writing a bunch of mini tests). - // Instead, `Tensor.array` is called to make a ShapedArray host copy and the - // ShapedArray is tested instead. 
- let tensor3D = Tensor( - shape: [3, 4, 5], scalars: Array(stride(from: 0.0, to: 60, by: 1))) - let newAxis = TensorRange.newAxis - let ellipsis = TensorRange.ellipsis - let slice3D = tensor3D[2..., newAxis, ellipsis] - let slice2D = tensor3D[1, newAxis][0..<1, 0..<2] - let slice1D = tensor3D[0][newAxis, 0][0..<1, 3..<5, newAxis] - - let array3D = slice3D.array - let array2D = slice2D.array - let array1D = slice1D.array - - /// Test shapes - expectEqual([1, 1, 4, 5], array3D.shape) - expectEqual([1, 2, 5], array2D.shape) - expectEqual([1, 2, 1], array1D.shape) - - /// Test scalars - expectEqual(Array(stride(from: 40.0, to: 60, by: 1)), array3D.scalars) - expectEqual(Array(stride(from: 20.0, to: 30, by: 1)), array2D.scalars) - expectEqual(Array(stride(from: 3.0, to: 5, by: 1)), array1D.scalars) -} - -TensorTests.testAllBackends("SqueezeAxisIndexing") { - // NOTE: cannot test `Tensor.shape` or `Tensor.scalars` directly until send - // and receive are implemented (without writing a bunch of mini tests). - // Instead, `Tensor.array` is called to make a ShapedArray host copy and the - // ShapedArray is tested instead. - let tensor3D = Tensor( - shape: [3, 4, 5], scalars: Array(stride(from: 0.0, to: 60, by: 1))) - let newAxis = TensorRange.newAxis - let ellipsis = TensorRange.ellipsis - let squeezeAxis = TensorRange.squeezeAxis - let slice3D = tensor3D[2..., newAxis, ellipsis][squeezeAxis, squeezeAxis] - let slice2D = tensor3D[1, newAxis][squeezeAxis, 0..<2] - let slice1D = tensor3D[0..<1, 0, 3..<5, newAxis][ - squeezeAxis, ellipsis, squeezeAxis] - - let array3D = slice3D.array - let array2D = slice2D.array - let array1D = slice1D.array - - /// Test shapes - expectEqual([4, 5], array3D.shape) - expectEqual([2, 5], array2D.shape) - expectEqual([2], array1D.shape) - - /// Test scalars - expectEqual(Array(stride(from: 40.0, to: 60, by: 1)), array3D.scalars) - expectEqual(Array(stride(from: 20.0, to: 30, by: 1)), array2D.scalars) - expectEqual(Array(stride(from: 3.0, to: 5, by: 1)), array1D.scalars) -} - -TensorTests.testAllBackends("StridedSliceIndexing") { - // NOTE: cannot test `Tensor.shape` or `Tensor.scalars` directly until send - // and receive are implemented (without writing a bunch of mini tests). - // Instead, `Tensor.array` is called to make a ShapedArray host copy and the - // ShapedArray is tested instead. - let tensor3D = Tensor( - shape: [3, 4, 5], scalars: Array(stride(from: 0.0, to: 60, by: 1))) - let slice3D = tensor3D[2...] - let slice2D = tensor3D[1][0..<3..2] - let slice1D = tensor3D[0][0][1..<5..2] - - let array3D = slice3D.array - let array2D = slice2D.array - let array1D = slice1D.array - - /// Test shapes - expectEqual([1, 4, 5], array3D.shape) - expectEqual([2, 5], array2D.shape) - expectEqual([2], array1D.shape) - - /// Test scalars - expectEqual(Array(stride(from: 40.0, to: 60, by: 1)), array3D.scalars) - expectEqual( - Array(stride(from: 20.0, to: 25, by: 1)) + - Array(stride(from: 30.0, to: 35, by: 1)), array2D.scalars) - expectEqual(Array(stride(from: 1.0, to: 5, by: 2)), array1D.scalars) -} - -TensorTests.testAllBackends("StridedSliceIndexingAssignment") { - // NOTE: cannot test `Tensor.shape` or `Tensor.scalars` directly until send - // and receive are implemented (without writing a bunch of mini tests). - // Instead, `Tensor.array` is called to make a ShapedArray host copy and the - // ShapedArray is tested instead. 
- var tensor3D = Tensor( - shape: [3, 4, 5], scalars: Array(stride(from: 0.0, to: 60, by: 1))) - tensor3D[2, 0..<5..2, 0..<6] = Tensor( - shape: [2, 5], scalars: Array(stride(from: 20.0, to: 40, by: 2))) - let slice3D = tensor3D[2...] - let slice2D = tensor3D[1][0..<2] - let slice1D = tensor3D[0][0][3..<5] - - let array3D = slice3D.array - let array2D = slice2D.array - let array1D = slice1D.array - - /// Test shapes - expectEqual([1, 4, 5], array3D.shape) - expectEqual([2, 5], array2D.shape) - expectEqual([2], array1D.shape) - - /// Test scalars - expectEqual( - Array(stride(from: 20.0, to: 30, by: 2)) + - Array(stride(from: 45.0, to: 50, by: 1)) + - Array(stride(from: 30.0, to: 40, by: 2)) + - Array(stride(from: 55.0, to: 60, by: 1)), array3D.scalars) - expectEqual(Array(stride(from: 20.0, to: 30, by: 1)), array2D.scalars) - expectEqual(Array(stride(from: 3.0, to: 5, by: 1)), array1D.scalars) -} - -TensorTests.test("WholeTensorSlicing") { - let t: Tensor = [[[1, 1, 1], [2, 2, 2]], - [[3, 3, 3], [4, 4, 4]], - [[5, 5, 5], [6, 6, 6]]] - let slice2 = t.slice(lowerBounds: [1, 0, 0], upperBounds: [2, 1, 3]) - expectEqual(ShapedArray(shape: [1, 1, 3], scalars: [3, 3, 3]), - slice2.array) -} - -TensorTests.testAllBackends("AdvancedIndexing") { - // NOTE: cannot test multiple `Tensor.shape` or `Tensor.scalars` directly - // until send and receive are implemented (without writing a bunch of mini - // tests). Instead, `Tensor.array` is called to make a ShapedArray host copy - // and the ShapedArray is tested. - let tensor3D = Tensor(shape: [3, 4, 5], - scalars: Array(stride(from: 0.0, to: 60, by: 1))) - let element2D = tensor3D[1..<3, 0, 3...] - let array2D = element2D.array - - // Test shape - expectEqual([2, 2], array2D.shape) - - // Test scalars - expectEqual(Array([23.0, 24.0, 43.0, 44.0]), array2D.scalars) -} - -TensorTests.testAllBackends("Reduction") { - // TODO(b/111815968): triage and fix this TPU issue - #if !TPU - // 2 x 5 - let x = Tensor([[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]) - expectEqual(Tensor(30), x.sum().toHost(shape: [])) - expectEqual(Tensor(shape: [5], scalars: [2, 4, 6, 8, 10]), - x.sum(squeezingAxes: 0).toHost(shape: [])) - expectEqual(Tensor(shape: [1, 5], scalars: [2, 4, 6, 8, 10]), - x.sum(alongAxes: 0).toHost(shape: [])) - - expectEqual(Tensor(14400), x.product().toHost(shape: [])) - expectEqual(Tensor(shape: [5], scalars: [1, 4, 9, 16, 25]), - x.product(squeezingAxes: 0).toHost(shape: [])) - expectEqual(Tensor(shape: [1, 5], scalars: [1, 4, 9, 16, 25]), - x.product(alongAxes: 0).toHost(shape: [])) - - expectEqual(Tensor(3), x.mean().toHost(shape: [])) - expectEqual(Tensor(shape: [5], scalars: [1, 2, 3, 4, 5]), - x.mean(squeezingAxes: 0).toHost(shape: [])) - expectEqual(Tensor(shape: [5], scalars: [1, 2, 3, 4, 5]), - x.mean(alongAxes: 0).toHost(shape: [])) - expectEqual(Tensor(shape: [2], scalars: [3, 3]), - x.mean(squeezingAxes: 1).toHost(shape: [])) - expectEqual(Tensor(shape: [1, 2], scalars: [3, 3]), - x.mean(alongAxes: 1).toHost(shape: [])) - - expectEqual(Tensor(2), x.variance().toHost(shape: [])) - expectEqual(Tensor(shape: [5], scalars: [0, 0, 0, 0, 0]), - x.variance(squeezingAxes: 0).toHost(shape: [])) - expectEqual(Tensor(shape: [5], scalars: [0, 0, 0, 0, 0]), - x.variance(alongAxes: 0).toHost(shape: [])) - expectEqual(Tensor(shape: [2], scalars: [2, 2]), - x.variance(squeezingAxes: 1).toHost(shape: [])) - expectEqual(Tensor(shape: [1, 2], scalars: [2, 2]), - x.variance(alongAxes: 1).toHost(shape: [])) - #endif // !TPU -} - 
-TensorTests.testAllBackends("Concatenation") { - // 2 x 3 - let t1 = Tensor([[0, 1, 2], [3, 4, 5]]) - // 2 x 3 - let t2 = Tensor([[6, 7, 8], [9, 10, 11]]) - let concatenated = t1 ++ t2 - let concatenated0 = t1.concatenated(with: t2) - let concatenated1 = t1.concatenated(with: t2, alongAxis: 1) - expectEqual(ShapedArray(shape: [4, 3], scalars: Array(0..<12)), - concatenated.array) - expectEqual(ShapedArray(shape: [4, 3], scalars: Array(0..<12)), - concatenated0.array) - expectEqual(ShapedArray(shape: [2, 6], - scalars: [0, 1, 2, 6, 7, 8, 3, 4, 5, 9, 10, 11]), - concatenated1.array) -} - -TensorTests.testAllBackends("VJPConcatenation") { - let a1 = Tensor([1,2,3,4]) - let b1 = Tensor([5,6,7,8,9,10]) - - let a2 = Tensor([1,1,1,1]) - let b2 = Tensor([1,1,1,1,1,1]) - - let grads = gradient(at: a2, b2) { a, b in - return ((a1 * a) ++ (b1 * b)).sum() - } - - expectEqual(a1, grads.0) - expectEqual(b1, grads.1) -} - -TensorTests.testAllBackends("VJPConcatenationNegativeAxis") { - let a1 = Tensor([1,2,3,4]) - let b1 = Tensor([5,6,7,8,9,10]) - - let a2 = Tensor([1,1,1,1]) - let b2 = Tensor([1,1,1,1,1,1]) - - let grads = gradient(at: a2, b2) { a, b in - return (a1 * a).concatenated(with: b1 * b, alongAxis: -1).sum() - } - - expectEqual(a1, grads.0) - expectEqual(b1, grads.1) -} - -TensorTests.test("EwiseComparison") { - let x = Tensor([0, 1, 2]) - let y = Tensor([2, 1, 3]) - expectEqual((x .< y).scalars, [true, false, true]) -} - -TensorTests.test("LexicographicalComparison") { - let x = Tensor([0, 1, 2, 3, 4]) - let y = Tensor([2, 3, 4, 5, 6]) - expectTrue(x < y) -} - -TensorTests.testAllBackends("ArgMax") { - // 2 x 3 - let x = Tensor([[0, 1, 2], [3, 4, 5]]) - let argmax0 = x.argmax(squeezingAxis: 0) - let argmax1 = x.argmax(squeezingAxis: 1) - let scalarsArgmax = x.argmax() - expectEqual(ShapedArray(shape: [3], scalars: [1, 1, 1]), argmax0.array) - expectEqual(ShapedArray(shape: [2], scalars: [2, 2]), argmax1.array) - expectEqual(ShapedArray(shape: [], scalars: [5]), scalarsArgmax.array) -} - -TensorTests.testAllBackends("CeilFloor") { - let x = Tensor([-1.3, -0.4, 0.5, 1.6]) - let xFloor = floor(x) - let xCeil = ceil(x) - expectEqual(ShapedArray(shape: [4], scalars: [-2, -1, 0, 1]), xFloor.array) - expectEqual(ShapedArray(shape: [4], scalars: [-1, 0, 1, 2]), xCeil.array) -} - -TensorTests.testAllBackends("SimpleMath") { - let x = Tensor([1.2, 1.2]) - let y = tanh(x) - let array = y.array - expectEqual([2], array.shape) - expectPointwiseNearlyEqual([0.833655, 0.833655], array.scalars, - byError: 0.0001) -} - -TensorTests.testAllBackends("StandardDeviation") { - expectEqual(Tensor(0), Tensor([1]).standardDeviation()) - expectEqual(Tensor(0.5), Tensor([0, 1]).standardDeviation(alongAxes: 0)) - expectEqual(Tensor(0.5), Tensor([0, 1]).standardDeviation()) - expectNearlyEqual( - 2.87228132, - Tensor(rangeFrom: 0, to: 10, stride: 1).standardDeviation().scalarized(), - byError: 0.001) - let matrix = Tensor(rangeFrom: 0, to: 10, stride: 1).reshaped(to: [2, 5]) - expectNearlyEqual(2.87228132, - matrix.standardDeviation().scalarized(), - byError: 0.001) - expectPointwiseNearlyEqual( - [1.4142, 1.4142], - matrix.standardDeviation(alongAxes: 1).array.scalars, - byError: 0.001) -} - TensorTests.testAllBackends("ReductionToScalar") { let _: Tensor = [1, 2, 3, 4, 5] // expectEqual(x.mean(), 3) @@ -561,233 +31,6 @@ TensorTests.testAllBackends("ReductionToScalar") { _hostOp(extra) } -TensorTests.testAllBackends("3Adds") { - let a = Tensor([1]) - let b = Tensor([2]) - let c = Tensor([3]) - - let o = a + b + c - 
expectEqual([6], o.scalars) -} - -TensorTests.testAllBackends("MultiOpMath") { - let x = Tensor([1.2, 1.2]) - let y = Tensor([2.4, 2.4]) - let t1 = x + y - let t2 = t1 * t1 - let t3 = sqrt(t2) - - let array1 = t1.array - let array2 = t2.array - let array3 = t3.array - expectEqual([2], array1.shape) - expectEqual([2], array2.shape) - expectEqual([2], array3.shape) - expectPointwiseNearlyEqual([3.6, 3.6], array1.scalars) - expectPointwiseNearlyEqual([12.96, 12.96], array2.scalars) - expectPointwiseNearlyEqual([3.6, 3.6], array3.scalars) -} - -TensorTests.testAllBackends("XWPlusB") { - // Shape: 1 x 4 - let x = Tensor([[1.0, 2.0, 2.0, 1.0]]) - // Shape: 4 x 2 - let w = Tensor([[1.0, 0.0], [3.0, 0.0], [2.0, 3.0], [1.0, 0.0]]) - // Shape: 2 - let b = Tensor([0.5, 0.5]) - // Shape: 1 x 2 (broadcasted) - let result = matmul(x, w) + b - expectEqual([1, 2], result.shape) - expectEqual([12.5, 6.5], result.scalars) -} - -TensorTests.testAllBackends("Transpose") { - // 3 x 2 -> 2 x 3 - let xT = Tensor([[1, 2], [3, 4], [5, 6]]).transposed() - let xTArray = xT.array - expectEqual(2, xTArray.rank) - expectEqual([2, 3], xTArray.shape) - expectEqual([1, 3, 5, 2, 4, 6], xTArray.scalars) -} - -TensorTests.testAllBackends("SimpleCond") { - func selectValue(_ pred: Bool) -> Tensor { - let a = Tensor(0) - let b = Tensor(1) - if pred { - return a - } - return b - } - - expectEqual(0, selectValue(true).scalar) -} - -@inline(never) -func testXORInference() { - func xor(_ x: Float, _ y: Float) -> Float { - let x = Tensor([x, y]).reshaped(to: [1, 2]) - - // FIXME: If params are declared outside of `xor`, it would crash. - // 2 x 4 - let w1 = Tensor( - [[-1.83586664, -0.20809225, 0.47667537, 1.90780607], - [-1.83523219, -0.51167348, 0.15490439, 1.91018065]]) - // 1 x 4 - let b1 = Tensor( - [[2.54353216, 0.25132703, -0.16503136, -0.85754058]]) - // 4 x 1 - let w2 = Tensor( - [[3.04350065], [0.35590511], [-0.3252157], [3.49349223]]) - // 1 x 1 - let b2 = Tensor([[-0.74635993]]) - - let o1 = tanh(matmul(x, w1) + b1) - let y = tanh(matmul(o1, w2) + b2) - return y.array.scalars[0] // TODO: use better scalar getter - } - expectNearlyEqual(0.0, xor(0.0, 0.0), byError: 0.1) - expectNearlyEqual(1.0, xor(0.0, 1.0), byError: 0.1) - expectNearlyEqual(1.0, xor(1.0, 0.0), byError: 0.1) - expectNearlyEqual(0.0, xor(1.0, 1.0), byError: 0.1) -} -TensorTests.testAllBackends("XORInference", testXORInference) - -TensorTests.testAllBackends("MLPClassifierStruct") { - struct MLPClassifier { - // 2 x 4 - var w1 = Tensor([[1.0, 0.8, 0.4, 0.4], - [0.4, 0.3, 0.2, 0.1]]) - // 4 x 1 - var w2 = Tensor([[0.4], [0.4], [0.3], [0.9]]) - var b1 = Tensor(zeros: [1, 4]) - var b2 = Tensor(zeros: [1, 1]) - - func prediction(for x: Tensor) -> Tensor { - let o1 = tanh(matmul(x, w1) + b1) - return tanh(matmul(o1, w2) + b2) - } - } - let input = Tensor([[1, 0.5]]) - let classifier = MLPClassifier() - let prediction = classifier.prediction(for: input) - expectPointwiseNearlyEqual([0.816997], prediction.scalars) -} - -TensorTests.testAllBackends("Reshape") { - // 2 x 3 -> 1 x 3 x 1 x 2 x 1 - let matrix = Tensor([[0, 1, 2], [3, 4, 5]]) - let reshaped = matrix.reshaped(to: [1, 3, 1, 2, 1]) - - expectEqual([1, 3, 1, 2, 1], reshaped.shape) - expectEqual(Array(0..<6), reshaped.scalars) -} - -TensorTests.testAllBackends("Flatten") { - // 2 x 3 -> 6 - let matrix = Tensor([[0, 1, 2], [3, 4, 5]]) - let flattened = matrix.flattened() - - expectEqual([6], flattened.shape) - expectEqual(Array(0..<6), flattened.scalars) -} - -TensorTests.testAllBackends("Flatten0D") { - 
let scalar = Tensor(5) - let flattened = scalar.flattened() - expectEqual([1], flattened.shape) - expectEqual([5], flattened.scalars) -} - -TensorTests.testAllBackends("ReshapeToScalar") { - // 1 x 1 -> scalar - let z = Tensor([[10]]).reshaped(to: []) - expectEqual([], z.shape) -} - -TensorTests.testAllBackends("ReshapeTensor") { - // 2 x 3 -> 1 x 3 x 1 x 2 x 1 - let x = Tensor(repeating: 0.0, shape: [2, 3]) - let y = Tensor(repeating: 0.0, shape: [1, 3, 1, 2, 1]) - let result = x.reshaped(like: y) - expectEqual([1, 3, 1, 2, 1], result.shape) -} - -TensorTests.testAllBackends("Unbroadcast1") { - let x = Tensor(repeating: 1, shape: [2, 3, 4, 5]) - let y = Tensor(repeating: 1, shape: [4, 5]) - let z = x.unbroadcast(like: y) - expectEqual(ShapedArray(repeating: 6, shape: [4, 5]), - z.array) -} - -TensorTests.testAllBackends("Unbroadcast2") { - let x = Tensor(repeating: 1, shape: [2, 3, 4, 5]) - let y = Tensor(repeating: 1, shape: [3, 1, 5]) - let z = x.unbroadcast(like: y) - expectEqual(ShapedArray(repeating: 8, shape: [3, 1, 5]), - z.array) -} - -// TODO: Merge all rank/shape getter tests into one when we support code motion -// to avoid sends. - -@inline(never) -func testRankGetter() { - let tensor = Tensor(shape: [3, 4, 5], scalars: Array(0..<60)) - expectEqual(3, tensor.rank) -} -TensorTests.testAllBackends("RankGetter", testRankGetter) - -@inline(never) -func testRankGetter2() { - let vector = Tensor([1]) - expectEqual(1, vector.rank) -} -TensorTests.testAllBackends("RankGetter2", testRankGetter2) - -@inline(never) -func testRankGetter3() { - let matrix = Tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) - expectEqual(2, matrix.rank) -} -TensorTests.testAllBackends("RankGetter3", testRankGetter3) - -@inline(never) -func testRankGetter4() { - let ones = Tensor(ones: [1, 2, 2, 2, 2, 2, 1]) - expectEqual(7, ones.rank) -} -TensorTests.testAllBackends("RankGetter4", testRankGetter4) - -@inline(never) -func testShapeGetter() { - let tensor = Tensor(shape: [3, 4, 5], scalars: Array(0..<60)) - expectEqual([3, 4, 5], tensor.shape) -} -TensorTests.testAllBackends("ShapeGetter", testShapeGetter) - -@inline(never) -func testShapeGetter2() { - let vector = Tensor([1]) - expectEqual([1], vector.shape) -} -TensorTests.testAllBackends("ShapeGetter2", testShapeGetter2) - -@inline(never) -func testShapeGetter3() { - let matrix = Tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) - expectEqual([2, 3], matrix.shape) -} -TensorTests.testAllBackends("ShapeGetter3", testShapeGetter3) - -@inline(never) -func testShapeGetter4() { - let ones = Tensor(ones: [1, 2, 2, 2, 2, 2, 1]) - expectEqual([1, 2, 2, 2, 2, 2, 1], ones.shape) -} -TensorTests.testAllBackends("ShapeGetter4", testShapeGetter4) - // For now it is sufficient to run remote tests with test cases in this file // only. When creating new test files, consider simply calling runAllTests(). #if CUDA diff --git a/test/TensorFlowRuntime/tensor_api.swift b/test/TensorFlowRuntime/tensor_api.swift deleted file mode 100644 index 5fd08138b1637..0000000000000 --- a/test/TensorFlowRuntime/tensor_api.swift +++ /dev/null @@ -1,50 +0,0 @@ -// RUN: %target-run-eager-swift %swift-tensorflow-test-run-extra-options - -// SR-9737: hanging tests in GPE GPU mode -// UN: %target-run-gpe-swift %swift-tensorflow-test-run-extra-options - -// REQUIRES: executable_test -// REQUIRES: swift_test_mode_optimize -// -// This test suite is for tensor API and has been created because tensor.swift -// has static shape restrictions for TPU send/receive. 
Until the restriction is
-resolved, API tests that incur send/receive should reside here.
-import TensorFlow
-#if TPU
-import TensorFlowUnittestTPU
-#else
-import TensorFlowUnittest
-#endif
-import StdlibUnittest
-
-var TensorNonTPUTests = TestSuite("TensorNonTPU")
-
-TensorNonTPUTests.testAllBackends("SliceUpdate") {
-  var t1 = Tensor([[1, 2, 3], [4, 5, 6]])
-  t1[0] = Tensor(zeros: [3])
-  expectEqual(ShapedArray(shape:[2, 3], scalars: [0, 0, 0, 4, 5, 6]), t1.array)
-  var t2 = t1
-  t2[0][2] = Tensor(3)
-  expectEqual(ShapedArray(shape:[2, 3], scalars: [0, 0, 3, 4, 5, 6]), t2.array)
-  var t3 = Tensor([[true, true, true], [false, false, false]])
-  t3[0][1] = Tensor(false)
-  expectEqual(ShapedArray(shape:[2, 3],
-                          scalars: [true, false, true, false, false, false]),
-              t3.array)
-  var t4 = Tensor([[true, true, true], [false, false, false]])
-  t4[0] = Tensor(repeating: false, shape: [3])
-  expectEqual(ShapedArray(repeating: false, shape: [2, 3]), t4.array)
-}
-
-TensorNonTPUTests.testAllBackends("BroadcastTensor") {
-  // 1 -> 2 x 3 x 4
-  let one = Tensor(1)
-  var target = Tensor(repeating: 0.0, shape: [2, 3, 4])
-  let broadcasted = one.broadcast(like: target)
-  expectEqual(Tensor(repeating: 1, shape: [2, 3, 4]), broadcasted)
-  target .= Tensor(repeating: 1, shape: [1, 3, 1])
-  expectEqual(Tensor(repeating: 1, shape: [2, 3, 4]), target)
-}
-
-runAllTests()

From d98808405f4dbae48ca899c95f18378f5381b005 Mon Sep 17 00:00:00 2001
From: Anthony Platanios
Date: Sat, 20 Apr 2019 17:39:22 -0400
Subject: [PATCH 12/30] Brought back the tensor API tests.

---
 test/TensorFlowRuntime/tensor.swift     | 757 ++++++++++++++++++++++++
 test/TensorFlowRuntime/tensor_api.swift |  47 ++
 2 files changed, 804 insertions(+)
 create mode 100644 test/TensorFlowRuntime/tensor_api.swift

diff --git a/test/TensorFlowRuntime/tensor.swift b/test/TensorFlowRuntime/tensor.swift
index 1a1e802961a0e..e67a2c3364d8f 100644
--- a/test/TensorFlowRuntime/tensor.swift
+++ b/test/TensorFlowRuntime/tensor.swift
@@ -19,6 +19,536 @@ import TensorFlowUnittest
 #endif
 import StdlibUnittest
 
+var TensorTests = TestSuite("Tensor")
+
+TensorTests.testAllBackends("Initializers") {
+  let scalar = Tensor(1)
+  let matrix: Tensor = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]
+  let broadcastScalar = Tensor(broadcasting: 10, rank: 3)
+  let some4d = Tensor(shape: [2, 1, 2, 1],
+                      scalars: AnyRandomAccessCollection([2, 3, 4, 5]))
+  expectEqual(ShapedArray(shape: [2, 1, 2, 1], scalars: [2, 3, 4, 5]),
+              some4d.array)
+  expectEqual(ShapedArray(shape: [], scalars: [1]), scalar.array)
+  expectEqual(ShapedArray(shape: [2, 3], scalars: [1, 2, 3, 4, 5, 6]),
+              matrix.array)
+  expectEqual(ShapedArray(shape: [1, 1, 1], scalars: [10]),
+              broadcastScalar.array)
+}
+
+TensorTests.testAllBackends("FactoryInitializers") {
+  let x = Tensor(ones: [1, 10])
+  expectEqual(ShapedArray(repeating: 1, shape: [1, 10]), x.array)
+}
+
+TensorTests.testAllBackends("NumericInitializers") {
+  let x = Tensor(oneHotAtIndices: [0, 2, -1, 1], depth: 3)
+  expectEqual(ShapedArray(shape: [4, 3], scalars: [1, 0, 0,
+                                                   0, 0, 1,
+                                                   0, 0, 0,
+                                                   0, 1, 0]),
+              x.array)
+}
+
+TensorTests.testAllBackends("ScalarToTensorConversion") {
+  let tensor = Tensor(broadcasting: 42, rank: 4)
+  expectEqual([1, 1, 1, 1], tensor.shape)
+  expectEqual([42], tensor.scalars)
+}
+
+TensorTests.testAllBackends("ArrayConversion") {
+  let array3D = ShapedArray(repeating: 1.0, shape: [2, 3, 4])
+  let tensor3D = Tensor(array3D)
+  expectEqual(array3D, tensor3D.array)
+}
+
+TensorTests.testAllBackends("DataTypeCast_NonTPU") {
+  // TPU does not
support Int8 or 16 casting. + guard !_RuntimeConfig.executionMode.isTPU else { return } + + let x = Tensor(ones: [5, 5]) + let ints = Tensor(x) + let floats = Tensor(x) + let i8s = Tensor(floats) + expectEqual(ShapedArray(repeating: 1, shape: [5, 5]), ints.array) + expectEqual(ShapedArray(repeating: 1, shape: [5, 5]), floats.array) + expectEqual(ShapedArray(repeating: 1, shape: [5, 5]), i8s.array) +} + +TensorTests.testAllBackends("DataTypeCast_TPU") { + // Non-TPU mode (e.g. eager) does not support Uint32 casting. + guard _RuntimeConfig.executionMode.isTPU else { return } + + let x = Tensor(ones: [5, 5]) + let ints = Tensor(x) + let floats = Tensor(x) + let u32s = Tensor(floats) + expectEqual(ShapedArray(repeating: 1, shape: [5, 5]), ints.array) + expectEqual(ShapedArray(repeating: 1, shape: [5, 5]), floats.array) + expectEqual(ShapedArray(repeating: 1, shape: [5, 5]), u32s.array) +} + +TensorTests.testAllBackends("BoolToNumericCast_NonTPU") { + // TPU does not support Int8 or 16 casting. + // + // When changing to UInt32, got another TPU/XLA compilation error when + // converting from bools to Uint32 (different from missing kernel error). + if _RuntimeConfig.executionMode.isTPU { return } + + let bools = Tensor(shape: [2, 2], scalars: [true, false, true, false]) + let ints = Tensor(bools) + let floats = Tensor(bools) + let i8s = Tensor(bools) + expectEqual(ShapedArray(shape: [2, 2], scalars: [1, 0, 1, 0]), ints.array) + expectEqual(ShapedArray(shape: [2, 2], scalars: [1, 0, 1, 0]), floats.array) + expectEqual(ShapedArray(shape: [2, 2], scalars: [1, 0, 1, 0]), i8s.array) +} + +TensorTests.testAllBackends("ElementIndexing") { + // NOTE: cannot test multiple `Tensor.shape` or `Tensor.scalars` directly + // until send and receive are implemented (without writing a bunch of mini + // tests). Instead, `Tensor.array` is called to make a ShapedArray host copy + // and the ShapedArray is tested. + let tensor3D = Tensor(shape: [3, 4, 5], + scalars: Array(stride(from: 0.0, to: 60, by: 1))) + let element2D = tensor3D[2] + let element1D = tensor3D[1][3] + let element0D = tensor3D[2][0][3] + + let array2D = element2D.array + let array1D = element1D.array + let array0D = element0D.array + + /// Test shapes + expectEqual([4, 5], array2D.shape) + expectEqual([5], array1D.shape) + expectEqual([], array0D.shape) + + /// Test scalars + expectEqual(Array(stride(from: 40.0, to: 60, by: 1)), array2D.scalars) + expectEqual(Array(stride(from: 35.0, to: 40, by: 1)), array1D.scalars) + expectEqual([43], array0D.scalars) +} + +TensorTests.testAllBackends("ElementIndexingAssignment") { + // NOTE: cannot test multiple `Tensor.shape` or `Tensor.scalars` directly + // until send and receive are implemented (without writing a bunch of mini + // tests). Instead, `Tensor.array` is called to make a ShapedArray host copy + // and the ShapedArray is tested. 
+ var tensor3D = Tensor(shape: [3, 4, 5], + scalars: Array(stride(from: 0.0, to: 60, by: 1))) + tensor3D[2] = Tensor(shape: [4, 5], + scalars: Array(stride(from: 20.0, to: 40, by: 1))) + let element2D = tensor3D[2] + let element1D = tensor3D[1][3] + let element0D = tensor3D[2][0][3] + + let array2D = element2D.array + let array1D = element1D.array + let array0D = element0D.array + + /// Test shapes + expectEqual([4, 5], array2D.shape) + expectEqual([5], array1D.shape) + expectEqual([], array0D.shape) + + /// Test scalars + expectEqual(Array(stride(from: 20.0, to: 40, by: 1)), array2D.scalars) + expectEqual(Array(stride(from: 35.0, to: 40, by: 1)), array1D.scalars) + expectEqual([23], array0D.scalars) +} + +TensorTests.testAllBackends("NestedElementIndexing") { + // NOTE: This test could use a clearer name, along with other "indexing" + // tests. Note to update corresponding test names in other files + // (shaped_array.test) as well. + let tensor3D = Tensor(shape: [3, 4, 5], + scalars: Array(stride(from: 0.0, to: 60, by: 1))) + let element1D = tensor3D[1, 3] + let element0D = tensor3D[2, 0, 3] + + let array1D = element1D.array + let array0D = element0D.array + + /// Test shapes + expectEqual([5], array1D.shape) + expectEqual([], array0D.shape) + + /// Test scalars + expectEqual(Array(stride(from: 35.0, to: 40, by: 1)), array1D.scalars) + expectEqual([43], array0D.scalars) +} + +TensorTests.testAllBackends("SliceIndexing") { + // NOTE: cannot test `Tensor.shape` or `Tensor.scalars` directly until send + // and receive are implemented (without writing a bunch of mini tests). + // Instead, `Tensor.array` is called to make a ShapedArray host copy and the + // ShapedArray is tested instead. + let tensor3D = Tensor(shape: [3, 4, 5], + scalars: Array(stride(from: 0.0, to: 60, by: 1))) + let slice3D = tensor3D[2...] + let slice2D = tensor3D[1][0..<2] + let slice1D = tensor3D[0][0][3..<5] + + let array3D = slice3D.array + let array2D = slice2D.array + let array1D = slice1D.array + + /// Test shapes + expectEqual([1, 4, 5], array3D.shape) + expectEqual([2, 5], array2D.shape) + expectEqual([2], array1D.shape) + + /// Test scalars + expectEqual(Array(stride(from: 40.0, to: 60, by: 1)), array3D.scalars) + expectEqual(Array(stride(from: 20.0, to: 30, by: 1)), array2D.scalars) + expectEqual(Array(stride(from: 3.0, to: 5, by: 1)), array1D.scalars) +} + +TensorTests.testAllBackends("SliceIndexingAssignment") { + // NOTE: cannot test `Tensor.shape` or `Tensor.scalars` directly until send + // and receive are implemented (without writing a bunch of mini tests). + // Instead, `Tensor.array` is called to make a ShapedArray host copy and the + // ShapedArray is tested instead. + var tensor3D = Tensor( + shape: [3, 4, 5], scalars: Array(stride(from: 0.0, to: 60, by: 1))) + tensor3D[2, 0..<5, 0..<6] = Tensor( + shape: [4, 5], scalars: Array(stride(from: 20.0, to: 40, by: 1))) + let slice3D = tensor3D[2...] 
+ let slice2D = tensor3D[1][0..<2] + let slice1D = tensor3D[0][0][3..<5] + + let array3D = slice3D.array + let array2D = slice2D.array + let array1D = slice1D.array + + /// Test shapes + expectEqual([1, 4, 5], array3D.shape) + expectEqual([2, 5], array2D.shape) + expectEqual([2], array1D.shape) + + /// Test scalars + expectEqual(Array(stride(from: 20.0, to: 40, by: 1)), array3D.scalars) + expectEqual(Array(stride(from: 20.0, to: 30, by: 1)), array2D.scalars) + expectEqual(Array(stride(from: 3.0, to: 5, by: 1)), array1D.scalars) +} + +TensorTests.testAllBackends("EllipsisIndexing") { + // NOTE: cannot test `Tensor.shape` or `Tensor.scalars` directly until send + // and receive are implemented (without writing a bunch of mini tests). + // Instead, `Tensor.array` is called to make a ShapedArray host copy and the + // ShapedArray is tested instead. + var tensor3D = Tensor( + shape: [3, 4, 5], scalars: Array(stride(from: 0.0, to: 60, by: 1))) + tensor3D[2, TensorRange.ellipsis] = Tensor( + shape: [4, 5], scalars: Array(stride(from: 20.0, to: 40, by: 1))) + let slice3D = tensor3D[2..., TensorRange.ellipsis] + let slice2D = tensor3D[1][0..<2] + let slice1D = tensor3D[0][0][3..<5] + + let array3D = slice3D.array + let array2D = slice2D.array + let array1D = slice1D.array + + /// Test shapes + expectEqual([1, 4, 5], array3D.shape) + expectEqual([2, 5], array2D.shape) + expectEqual([2], array1D.shape) + + /// Test scalars + expectEqual(Array(stride(from: 20.0, to: 40, by: 1)), array3D.scalars) + expectEqual(Array(stride(from: 20.0, to: 30, by: 1)), array2D.scalars) + expectEqual(Array(stride(from: 3.0, to: 5, by: 1)), array1D.scalars) +} + +TensorTests.testAllBackends("NewAxisIndexing") { + // NOTE: cannot test `Tensor.shape` or `Tensor.scalars` directly until send + // and receive are implemented (without writing a bunch of mini tests). + // Instead, `Tensor.array` is called to make a ShapedArray host copy and the + // ShapedArray is tested instead. + let tensor3D = Tensor( + shape: [3, 4, 5], scalars: Array(stride(from: 0.0, to: 60, by: 1))) + let newAxis = TensorRange.newAxis + let ellipsis = TensorRange.ellipsis + let slice3D = tensor3D[2..., newAxis, ellipsis] + let slice2D = tensor3D[1, newAxis][0..<1, 0..<2] + let slice1D = tensor3D[0][newAxis, 0][0..<1, 3..<5, newAxis] + + let array3D = slice3D.array + let array2D = slice2D.array + let array1D = slice1D.array + + /// Test shapes + expectEqual([1, 1, 4, 5], array3D.shape) + expectEqual([1, 2, 5], array2D.shape) + expectEqual([1, 2, 1], array1D.shape) + + /// Test scalars + expectEqual(Array(stride(from: 40.0, to: 60, by: 1)), array3D.scalars) + expectEqual(Array(stride(from: 20.0, to: 30, by: 1)), array2D.scalars) + expectEqual(Array(stride(from: 3.0, to: 5, by: 1)), array1D.scalars) +} + +TensorTests.testAllBackends("SqueezeAxisIndexing") { + // NOTE: cannot test `Tensor.shape` or `Tensor.scalars` directly until send + // and receive are implemented (without writing a bunch of mini tests). + // Instead, `Tensor.array` is called to make a ShapedArray host copy and the + // ShapedArray is tested instead. 
+ let tensor3D = Tensor( + shape: [3, 4, 5], scalars: Array(stride(from: 0.0, to: 60, by: 1))) + let newAxis = TensorRange.newAxis + let ellipsis = TensorRange.ellipsis + let squeezeAxis = TensorRange.squeezeAxis + let slice3D = tensor3D[2..., newAxis, ellipsis][squeezeAxis, squeezeAxis] + let slice2D = tensor3D[1, newAxis][squeezeAxis, 0..<2] + let slice1D = tensor3D[0..<1, 0, 3..<5, newAxis][ + squeezeAxis, ellipsis, squeezeAxis] + + let array3D = slice3D.array + let array2D = slice2D.array + let array1D = slice1D.array + + /// Test shapes + expectEqual([4, 5], array3D.shape) + expectEqual([2, 5], array2D.shape) + expectEqual([2], array1D.shape) + + /// Test scalars + expectEqual(Array(stride(from: 40.0, to: 60, by: 1)), array3D.scalars) + expectEqual(Array(stride(from: 20.0, to: 30, by: 1)), array2D.scalars) + expectEqual(Array(stride(from: 3.0, to: 5, by: 1)), array1D.scalars) +} + +TensorTests.testAllBackends("StridedSliceIndexing") { + // NOTE: cannot test `Tensor.shape` or `Tensor.scalars` directly until send + // and receive are implemented (without writing a bunch of mini tests). + // Instead, `Tensor.array` is called to make a ShapedArray host copy and the + // ShapedArray is tested instead. + let tensor3D = Tensor( + shape: [3, 4, 5], scalars: Array(stride(from: 0.0, to: 60, by: 1))) + let slice3D = tensor3D[2...] + let slice2D = tensor3D[1][0..<3..2] + let slice1D = tensor3D[0][0][1..<5..2] + + let array3D = slice3D.array + let array2D = slice2D.array + let array1D = slice1D.array + + /// Test shapes + expectEqual([1, 4, 5], array3D.shape) + expectEqual([2, 5], array2D.shape) + expectEqual([2], array1D.shape) + + /// Test scalars + expectEqual(Array(stride(from: 40.0, to: 60, by: 1)), array3D.scalars) + expectEqual( + Array(stride(from: 20.0, to: 25, by: 1)) + + Array(stride(from: 30.0, to: 35, by: 1)), array2D.scalars) + expectEqual(Array(stride(from: 1.0, to: 5, by: 2)), array1D.scalars) +} + +TensorTests.testAllBackends("StridedSliceIndexingAssignment") { + // NOTE: cannot test `Tensor.shape` or `Tensor.scalars` directly until send + // and receive are implemented (without writing a bunch of mini tests). + // Instead, `Tensor.array` is called to make a ShapedArray host copy and the + // ShapedArray is tested instead. + var tensor3D = Tensor( + shape: [3, 4, 5], scalars: Array(stride(from: 0.0, to: 60, by: 1))) + tensor3D[2, 0..<5..2, 0..<6] = Tensor( + shape: [2, 5], scalars: Array(stride(from: 20.0, to: 40, by: 2))) + let slice3D = tensor3D[2...] 
+ let slice2D = tensor3D[1][0..<2] + let slice1D = tensor3D[0][0][3..<5] + + let array3D = slice3D.array + let array2D = slice2D.array + let array1D = slice1D.array + + /// Test shapes + expectEqual([1, 4, 5], array3D.shape) + expectEqual([2, 5], array2D.shape) + expectEqual([2], array1D.shape) + + /// Test scalars + expectEqual( + Array(stride(from: 20.0, to: 30, by: 2)) + + Array(stride(from: 45.0, to: 50, by: 1)) + + Array(stride(from: 30.0, to: 40, by: 2)) + + Array(stride(from: 55.0, to: 60, by: 1)), array3D.scalars) + expectEqual(Array(stride(from: 20.0, to: 30, by: 1)), array2D.scalars) + expectEqual(Array(stride(from: 3.0, to: 5, by: 1)), array1D.scalars) +} + +TensorTests.test("WholeTensorSlicing") { + let t: Tensor = [[[1, 1, 1], [2, 2, 2]], + [[3, 3, 3], [4, 4, 4]], + [[5, 5, 5], [6, 6, 6]]] + let slice2 = t.slice(lowerBounds: [1, 0, 0], upperBounds: [2, 1, 3]) + expectEqual(ShapedArray(shape: [1, 1, 3], scalars: [3, 3, 3]), + slice2.array) +} + +TensorTests.testAllBackends("AdvancedIndexing") { + // NOTE: cannot test multiple `Tensor.shape` or `Tensor.scalars` directly + // until send and receive are implemented (without writing a bunch of mini + // tests). Instead, `Tensor.array` is called to make a ShapedArray host copy + // and the ShapedArray is tested. + let tensor3D = Tensor(shape: [3, 4, 5], + scalars: Array(stride(from: 0.0, to: 60, by: 1))) + let element2D = tensor3D[1..<3, 0, 3...] + let array2D = element2D.array + + // Test shape + expectEqual([2, 2], array2D.shape) + + // Test scalars + expectEqual(Array([23.0, 24.0, 43.0, 44.0]), array2D.scalars) +} + +TensorTests.testAllBackends("Reduction") { + // TODO(b/111815968): triage and fix this TPU issue + #if !TPU + // 2 x 5 + let x = Tensor([[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]) + expectEqual(Tensor(30), x.sum().toHost(shape: [])) + expectEqual(Tensor(shape: [5], scalars: [2, 4, 6, 8, 10]), + x.sum(squeezingAxes: 0).toHost(shape: [])) + expectEqual(Tensor(shape: [1, 5], scalars: [2, 4, 6, 8, 10]), + x.sum(alongAxes: 0).toHost(shape: [])) + + expectEqual(Tensor(14400), x.product().toHost(shape: [])) + expectEqual(Tensor(shape: [5], scalars: [1, 4, 9, 16, 25]), + x.product(squeezingAxes: 0).toHost(shape: [])) + expectEqual(Tensor(shape: [1, 5], scalars: [1, 4, 9, 16, 25]), + x.product(alongAxes: 0).toHost(shape: [])) + + expectEqual(Tensor(3), x.mean().toHost(shape: [])) + expectEqual(Tensor(shape: [5], scalars: [1, 2, 3, 4, 5]), + x.mean(squeezingAxes: 0).toHost(shape: [])) + expectEqual(Tensor(shape: [5], scalars: [1, 2, 3, 4, 5]), + x.mean(alongAxes: 0).toHost(shape: [])) + expectEqual(Tensor(shape: [2], scalars: [3, 3]), + x.mean(squeezingAxes: 1).toHost(shape: [])) + expectEqual(Tensor(shape: [1, 2], scalars: [3, 3]), + x.mean(alongAxes: 1).toHost(shape: [])) + + expectEqual(Tensor(2), x.variance().toHost(shape: [])) + expectEqual(Tensor(shape: [5], scalars: [0, 0, 0, 0, 0]), + x.variance(squeezingAxes: 0).toHost(shape: [])) + expectEqual(Tensor(shape: [5], scalars: [0, 0, 0, 0, 0]), + x.variance(alongAxes: 0).toHost(shape: [])) + expectEqual(Tensor(shape: [2], scalars: [2, 2]), + x.variance(squeezingAxes: 1).toHost(shape: [])) + expectEqual(Tensor(shape: [1, 2], scalars: [2, 2]), + x.variance(alongAxes: 1).toHost(shape: [])) + #endif // !TPU +} + +TensorTests.testAllBackends("Concatenation") { + // 2 x 3 + let t1 = Tensor([[0, 1, 2], [3, 4, 5]]) + // 2 x 3 + let t2 = Tensor([[6, 7, 8], [9, 10, 11]]) + let concatenated = t1 ++ t2 + let concatenated0 = t1.concatenated(with: t2) + let concatenated1 = t1.concatenated(with: 
t2, alongAxis: 1) + expectEqual(ShapedArray(shape: [4, 3], scalars: Array(0..<12)), + concatenated.array) + expectEqual(ShapedArray(shape: [4, 3], scalars: Array(0..<12)), + concatenated0.array) + expectEqual(ShapedArray(shape: [2, 6], + scalars: [0, 1, 2, 6, 7, 8, 3, 4, 5, 9, 10, 11]), + concatenated1.array) +} + +TensorTests.testAllBackends("VJPConcatenation") { + let a1 = Tensor([1,2,3,4]) + let b1 = Tensor([5,6,7,8,9,10]) + + let a2 = Tensor([1,1,1,1]) + let b2 = Tensor([1,1,1,1,1,1]) + + let grads = gradient(at: a2, b2) { a, b in + return ((a1 * a) ++ (b1 * b)).sum() + } + + expectEqual(a1, grads.0) + expectEqual(b1, grads.1) +} + +TensorTests.testAllBackends("VJPConcatenationNegativeAxis") { + let a1 = Tensor([1,2,3,4]) + let b1 = Tensor([5,6,7,8,9,10]) + + let a2 = Tensor([1,1,1,1]) + let b2 = Tensor([1,1,1,1,1,1]) + + let grads = gradient(at: a2, b2) { a, b in + return (a1 * a).concatenated(with: b1 * b, alongAxis: -1).sum() + } + + expectEqual(a1, grads.0) + expectEqual(b1, grads.1) +} + +TensorTests.test("EwiseComparison") { + let x = Tensor([0, 1, 2]) + let y = Tensor([2, 1, 3]) + expectEqual((x .< y).scalars, [true, false, true]) +} + +TensorTests.test("LexicographicalComparison") { + let x = Tensor([0, 1, 2, 3, 4]) + let y = Tensor([2, 3, 4, 5, 6]) + expectTrue(x < y) +} + +TensorTests.testAllBackends("ArgMax") { + // 2 x 3 + let x = Tensor([[0, 1, 2], [3, 4, 5]]) + let argmax0 = x.argmax(squeezingAxis: 0) + let argmax1 = x.argmax(squeezingAxis: 1) + let scalarsArgmax = x.argmax() + expectEqual(ShapedArray(shape: [3], scalars: [1, 1, 1]), argmax0.array) + expectEqual(ShapedArray(shape: [2], scalars: [2, 2]), argmax1.array) + expectEqual(ShapedArray(shape: [], scalars: [5]), scalarsArgmax.array) +} + +TensorTests.testAllBackends("CeilFloor") { + let x = Tensor([-1.3, -0.4, 0.5, 1.6]) + let xFloor = floor(x) + let xCeil = ceil(x) + expectEqual(ShapedArray(shape: [4], scalars: [-2, -1, 0, 1]), xFloor.array) + expectEqual(ShapedArray(shape: [4], scalars: [-1, 0, 1, 2]), xCeil.array) +} + +TensorTests.testAllBackends("SimpleMath") { + let x = Tensor([1.2, 1.2]) + let y = tanh(x) + let array = y.array + expectEqual([2], array.shape) + expectPointwiseNearlyEqual([0.833655, 0.833655], array.scalars, + byError: 0.0001) +} + +TensorTests.testAllBackends("StandardDeviation") { + expectEqual(Tensor(0), Tensor([1]).standardDeviation()) + expectEqual(Tensor(0.5), Tensor([0, 1]).standardDeviation(alongAxes: 0)) + expectEqual(Tensor(0.5), Tensor([0, 1]).standardDeviation()) + expectNearlyEqual( + 2.87228132, + Tensor(rangeFrom: 0, to: 10, stride: 1).standardDeviation().scalarized(), + byError: 0.001) + let matrix = Tensor(rangeFrom: 0, to: 10, stride: 1).reshaped(to: [2, 5]) + expectNearlyEqual(2.87228132, + matrix.standardDeviation().scalarized(), + byError: 0.001) + expectPointwiseNearlyEqual( + [1.4142, 1.4142], + matrix.standardDeviation(alongAxes: 1).array.scalars, + byError: 0.001) +} + TensorTests.testAllBackends("ReductionToScalar") { let _: Tensor = [1, 2, 3, 4, 5] // expectEqual(x.mean(), 3) @@ -31,6 +561,233 @@ TensorTests.testAllBackends("ReductionToScalar") { _hostOp(extra) } +TensorTests.testAllBackends("3Adds") { + let a = Tensor([1]) + let b = Tensor([2]) + let c = Tensor([3]) + + let o = a + b + c + expectEqual([6], o.scalars) +} + +TensorTests.testAllBackends("MultiOpMath") { + let x = Tensor([1.2, 1.2]) + let y = Tensor([2.4, 2.4]) + let t1 = x + y + let t2 = t1 * t1 + let t3 = sqrt(t2) + + let array1 = t1.array + let array2 = t2.array + let array3 = t3.array + 
expectEqual([2], array1.shape) + expectEqual([2], array2.shape) + expectEqual([2], array3.shape) + expectPointwiseNearlyEqual([3.6, 3.6], array1.scalars) + expectPointwiseNearlyEqual([12.96, 12.96], array2.scalars) + expectPointwiseNearlyEqual([3.6, 3.6], array3.scalars) +} + +TensorTests.testAllBackends("XWPlusB") { + // Shape: 1 x 4 + let x = Tensor([[1.0, 2.0, 2.0, 1.0]]) + // Shape: 4 x 2 + let w = Tensor([[1.0, 0.0], [3.0, 0.0], [2.0, 3.0], [1.0, 0.0]]) + // Shape: 2 + let b = Tensor([0.5, 0.5]) + // Shape: 1 x 2 (broadcasted) + let result = matmul(x, w) + b + expectEqual([1, 2], result.shape) + expectEqual([12.5, 6.5], result.scalars) +} + +TensorTests.testAllBackends("Transpose") { + // 3 x 2 -> 2 x 3 + let xT = Tensor([[1, 2], [3, 4], [5, 6]]).transposed() + let xTArray = xT.array + expectEqual(2, xTArray.rank) + expectEqual([2, 3], xTArray.shape) + expectEqual([1, 3, 5, 2, 4, 6], xTArray.scalars) +} + +TensorTests.testAllBackends("SimpleCond") { + func selectValue(_ pred: Bool) -> Tensor { + let a = Tensor(0) + let b = Tensor(1) + if pred { + return a + } + return b + } + + expectEqual(0, selectValue(true).scalar) +} + +@inline(never) +func testXORInference() { + func xor(_ x: Float, _ y: Float) -> Float { + let x = Tensor([x, y]).reshaped(to: [1, 2]) + + // FIXME: If params are declared outside of `xor`, it would crash. + // 2 x 4 + let w1 = Tensor( + [[-1.83586664, -0.20809225, 0.47667537, 1.90780607], + [-1.83523219, -0.51167348, 0.15490439, 1.91018065]]) + // 1 x 4 + let b1 = Tensor( + [[2.54353216, 0.25132703, -0.16503136, -0.85754058]]) + // 4 x 1 + let w2 = Tensor( + [[3.04350065], [0.35590511], [-0.3252157], [3.49349223]]) + // 1 x 1 + let b2 = Tensor([[-0.74635993]]) + + let o1 = tanh(matmul(x, w1) + b1) + let y = tanh(matmul(o1, w2) + b2) + return y.array.scalars[0] // TODO: use better scalar getter + } + expectNearlyEqual(0.0, xor(0.0, 0.0), byError: 0.1) + expectNearlyEqual(1.0, xor(0.0, 1.0), byError: 0.1) + expectNearlyEqual(1.0, xor(1.0, 0.0), byError: 0.1) + expectNearlyEqual(0.0, xor(1.0, 1.0), byError: 0.1) +} +TensorTests.testAllBackends("XORInference", testXORInference) + +TensorTests.testAllBackends("MLPClassifierStruct") { + struct MLPClassifier { + // 2 x 4 + var w1 = Tensor([[1.0, 0.8, 0.4, 0.4], + [0.4, 0.3, 0.2, 0.1]]) + // 4 x 1 + var w2 = Tensor([[0.4], [0.4], [0.3], [0.9]]) + var b1 = Tensor(zeros: [1, 4]) + var b2 = Tensor(zeros: [1, 1]) + + func prediction(for x: Tensor) -> Tensor { + let o1 = tanh(matmul(x, w1) + b1) + return tanh(matmul(o1, w2) + b2) + } + } + let input = Tensor([[1, 0.5]]) + let classifier = MLPClassifier() + let prediction = classifier.prediction(for: input) + expectPointwiseNearlyEqual([0.816997], prediction.scalars) +} + +TensorTests.testAllBackends("Reshape") { + // 2 x 3 -> 1 x 3 x 1 x 2 x 1 + let matrix = Tensor([[0, 1, 2], [3, 4, 5]]) + let reshaped = matrix.reshaped(to: [1, 3, 1, 2, 1]) + + expectEqual([1, 3, 1, 2, 1], reshaped.shape) + expectEqual(Array(0..<6), reshaped.scalars) +} + +TensorTests.testAllBackends("Flatten") { + // 2 x 3 -> 6 + let matrix = Tensor([[0, 1, 2], [3, 4, 5]]) + let flattened = matrix.flattened() + + expectEqual([6], flattened.shape) + expectEqual(Array(0..<6), flattened.scalars) +} + +TensorTests.testAllBackends("Flatten0D") { + let scalar = Tensor(5) + let flattened = scalar.flattened() + expectEqual([1], flattened.shape) + expectEqual([5], flattened.scalars) +} + +TensorTests.testAllBackends("ReshapeToScalar") { + // 1 x 1 -> scalar + let z = Tensor([[10]]).reshaped(to: []) + expectEqual([], 
z.shape) +} + +TensorTests.testAllBackends("ReshapeTensor") { + // 2 x 3 -> 1 x 3 x 1 x 2 x 1 + let x = Tensor(repeating: 0.0, shape: [2, 3]) + let y = Tensor(repeating: 0.0, shape: [1, 3, 1, 2, 1]) + let result = x.reshaped(like: y) + expectEqual([1, 3, 1, 2, 1], result.shape) +} + +TensorTests.testAllBackends("Unbroadcast1") { + let x = Tensor(repeating: 1, shape: [2, 3, 4, 5]) + let y = Tensor(repeating: 1, shape: [4, 5]) + let z = x.unbroadcast(like: y) + expectEqual(ShapedArray(repeating: 6, shape: [4, 5]), + z.array) +} + +TensorTests.testAllBackends("Unbroadcast2") { + let x = Tensor(repeating: 1, shape: [2, 3, 4, 5]) + let y = Tensor(repeating: 1, shape: [3, 1, 5]) + let z = x.unbroadcast(like: y) + expectEqual(ShapedArray(repeating: 8, shape: [3, 1, 5]), + z.array) +} + +// TODO: Merge all rank/shape getter tests into one when we support code motion +// to avoid sends. + +@inline(never) +func testRankGetter() { + let tensor = Tensor(shape: [3, 4, 5], scalars: Array(0..<60)) + expectEqual(3, tensor.rank) +} +TensorTests.testAllBackends("RankGetter", testRankGetter) + +@inline(never) +func testRankGetter2() { + let vector = Tensor([1]) + expectEqual(1, vector.rank) +} +TensorTests.testAllBackends("RankGetter2", testRankGetter2) + +@inline(never) +func testRankGetter3() { + let matrix = Tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) + expectEqual(2, matrix.rank) +} +TensorTests.testAllBackends("RankGetter3", testRankGetter3) + +@inline(never) +func testRankGetter4() { + let ones = Tensor(ones: [1, 2, 2, 2, 2, 2, 1]) + expectEqual(7, ones.rank) +} +TensorTests.testAllBackends("RankGetter4", testRankGetter4) + +@inline(never) +func testShapeGetter() { + let tensor = Tensor(shape: [3, 4, 5], scalars: Array(0..<60)) + expectEqual([3, 4, 5], tensor.shape) +} +TensorTests.testAllBackends("ShapeGetter", testShapeGetter) + +@inline(never) +func testShapeGetter2() { + let vector = Tensor([1]) + expectEqual([1], vector.shape) +} +TensorTests.testAllBackends("ShapeGetter2", testShapeGetter2) + +@inline(never) +func testShapeGetter3() { + let matrix = Tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) + expectEqual([2, 3], matrix.shape) +} +TensorTests.testAllBackends("ShapeGetter3", testShapeGetter3) + +@inline(never) +func testShapeGetter4() { + let ones = Tensor(ones: [1, 2, 2, 2, 2, 2, 1]) + expectEqual([1, 2, 2, 2, 2, 2, 1], ones.shape) +} +TensorTests.testAllBackends("ShapeGetter4", testShapeGetter4) + // For now it is sufficient to run remote tests with test cases in this file // only. When creating new test files, consider simply calling runAllTests(). #if CUDA diff --git a/test/TensorFlowRuntime/tensor_api.swift b/test/TensorFlowRuntime/tensor_api.swift new file mode 100644 index 0000000000000..be77a0075cfbd --- /dev/null +++ b/test/TensorFlowRuntime/tensor_api.swift @@ -0,0 +1,47 @@ +// RUN: %target-run-eager-swift %swift-tensorflow-test-run-extra-options +// SR-9737: hanging tests in GPE GPU mode +// UN: %target-run-gpe-swift %swift-tensorflow-test-run-extra-options +// REQUIRES: executable_test +// REQUIRES: swift_test_mode_optimize +// +// This test suite is for tensor API and has been created because tensor.swift +// has static shape restrictions for TPU send/receive. 
Until the restriction is
+// resolved, API tests that incur send/receive should reside here.
+import TensorFlow
+#if TPU
+import TensorFlowUnittestTPU
+#else
+import TensorFlowUnittest
+#endif
+import StdlibUnittest
+
+var TensorNonTPUTests = TestSuite("TensorNonTPU")
+
+TensorNonTPUTests.testAllBackends("SliceUpdate") {
+  var t1 = Tensor<Float>([[1, 2, 3], [4, 5, 6]])
+  t1[0] = Tensor(zeros: [3])
+  expectEqual(ShapedArray(shape: [2, 3], scalars: [0, 0, 0, 4, 5, 6]), t1.array)
+  var t2 = t1
+  t2[0][2] = Tensor(3)
+  expectEqual(ShapedArray(shape: [2, 3], scalars: [0, 0, 3, 4, 5, 6]), t2.array)
+  var t3 = Tensor<Bool>([[true, true, true], [false, false, false]])
+  t3[0][1] = Tensor(false)
+  expectEqual(ShapedArray(shape: [2, 3],
+                          scalars: [true, false, true, false, false, false]),
+              t3.array)
+  var t4 = Tensor<Bool>([[true, true, true], [false, false, false]])
+  t4[0] = Tensor(repeating: false, shape: [3])
+  expectEqual(ShapedArray(repeating: false, shape: [2, 3]), t4.array)
+}
+
+TensorNonTPUTests.testAllBackends("BroadcastTensor") {
+  // 1 -> 2 x 3 x 4
+  let one = Tensor<Float>(1)
+  var target = Tensor<Float>(repeating: 0.0, shape: [2, 3, 4])
+  let broadcasted = one.broadcast(like: target)
+  expectEqual(Tensor(repeating: 1, shape: [2, 3, 4]), broadcasted)
+  target .= Tensor(repeating: 1, shape: [1, 3, 1])
+  expectEqual(Tensor(repeating: 1, shape: [2, 3, 4]), target)
+}
+
+runAllTests()
\ No newline at end of file
From a238b6e96d0a115704eb81143cca5980fa1c27a8 Mon Sep 17 00:00:00 2001
From: Anthony Platanios
Date: Sat, 20 Apr 2019 19:00:23 -0400
Subject: [PATCH 13/30] Added support for the 'Unpack' TensorFlow op.

---
 stdlib/public/TensorFlow/ArrayOps.swift | 39 +++++++++++++++++++++++++
 1 file changed, 39 insertions(+)

diff --git a/stdlib/public/TensorFlow/ArrayOps.swift b/stdlib/public/TensorFlow/ArrayOps.swift
index 619d2ad539171..daa87ef4f1646 100644
--- a/stdlib/public/TensorFlow/ArrayOps.swift
+++ b/stdlib/public/TensorFlow/ArrayOps.swift
@@ -196,4 +196,43 @@ public extension Raw {
     }
     return out
   }
+
+  /// Unpacks a tensor into `numSplit` tensors along one dimension.
+  ///
+  /// - Parameters:
+  ///   - value: The tensor to unpack.
+  ///   - num: The number of tensors to unpack.
+  ///   - axis: The dimension along which to unpack. Must be in the range
+  ///     `[-rank(value), rank(value))`.
+  ///
+  /// - Returns: Tensors whose shape matches that of `value` without the `axis` dimension.
+  @inlinable @inline(__always)
+  static func unpack<T : TensorFlowScalar>(
+    value: Tensor<T>,
+    num: Int64,
+    axis: Int64
+  ) -> [Tensor<T>] {
+    let s: CTFStatus = TF_NewStatus()
+    defer { TF_DeleteStatus(s) }
+    let op: CTFEOp = TFE_NewOp(_ExecutionContext.global.eagerContext, "Unpack", s)
+    defer { TFE_DeleteOp(op) }
+    let _ = _TFCOpAddInputFromTensorGroup(op, value, s)
+    TFE_OpSetAttrInt(op, "num", num)
+    TFE_OpSetAttrType(op, "T", T.tensorFlowDataType._cDataType)
+    TFE_OpSetAttrType(op, "axis", axis)
+    var count: Int32 = Int32(num)
+    let buffer: UnsafeMutablePointer<CTensorHandle> =
+      UnsafeMutablePointer.allocate(capacity: Int(count))
+    defer { buffer.deallocate() }
+    _TFCEagerExecute(op, UnsafeMutablePointer(buffer), &count, s)
+    checkOk(s)
+
+    var out: [Tensor<T>] = []
+    var cursor = buffer
+    for _ in 0..<num {
+      out.append(Tensor<T>(handle: TensorHandle(_owning: cursor.pointee)))
+      cursor = cursor.advanced(by: 1)
+    }
+    return out
+  }
 }
From d759e24ca40c4436b51fc94ce63b08bdad04e640 Mon Sep 17 00:00:00 2001
From: Anthony Platanios
Date: Sat, 20 Apr 2019 19:46:16 -0400
Subject: [PATCH 14/30] Bug fix.
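
`axis` is an integer-valued attribute of the `Unpack` op, so it must be
registered with `TFE_OpSetAttrInt`; the previous revision passed it to
`TFE_OpSetAttrType`, which interprets the value as a `TF_DataType` and
misconfigures the op.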
--- stdlib/public/TensorFlow/ArrayOps.swift | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stdlib/public/TensorFlow/ArrayOps.swift b/stdlib/public/TensorFlow/ArrayOps.swift index daa87ef4f1646..41794f67f4b7e 100644 --- a/stdlib/public/TensorFlow/ArrayOps.swift +++ b/stdlib/public/TensorFlow/ArrayOps.swift @@ -219,7 +219,7 @@ public extension Raw { let _ = _TFCOpAddInputFromTensorGroup(op, value, s) TFE_OpSetAttrInt(op, "num", num) TFE_OpSetAttrType(op, "T", T.tensorFlowDataType._cDataType) - TFE_OpSetAttrType(op, "axis", axis) + TFE_OpSetAttrInt(op, "axis", axis) var count: Int32 = Int32(num) let buffer: UnsafeMutablePointer = UnsafeMutablePointer.allocate(capacity: Int(count)) From 944d7f6c22d412f854ac76a428f5329e3c92b904 Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Sat, 20 Apr 2019 19:53:51 -0400 Subject: [PATCH 15/30] Bug fix. --- stdlib/public/TensorFlow/ArrayOps.swift | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stdlib/public/TensorFlow/ArrayOps.swift b/stdlib/public/TensorFlow/ArrayOps.swift index 41794f67f4b7e..28db3f4d150e0 100644 --- a/stdlib/public/TensorFlow/ArrayOps.swift +++ b/stdlib/public/TensorFlow/ArrayOps.swift @@ -229,7 +229,7 @@ public extension Raw { var out: [Tensor] = [] var cursor = buffer - for _ in 0..(handle: TensorHandle(_owning: cursor.pointee))) cursor = cursor.advanced(by: 1) } From 12ec483229f4e19588640d48ea8eaf2bbd538f2c Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Sat, 20 Apr 2019 22:59:52 -0400 Subject: [PATCH 16/30] Updated the swift-apis dependency. --- utils/update_checkout/update-checkout-config.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/update_checkout/update-checkout-config.json b/utils/update_checkout/update-checkout-config.json index 48499c141edec..17e2753231972 100644 --- a/utils/update_checkout/update-checkout-config.json +++ b/utils/update_checkout/update-checkout-config.json @@ -242,7 +242,7 @@ "icu": "release-61-1", "tensorflow": "447e512d332ab86172a3b13119900b4d021d0c65", "tensorflow-swift-bindings": "a7ccb727514414d31df9e403f34fa923bdf6a519", - "tensorflow-swift-apis": "cc4abe206dce7f47fabebfcf28effdcc2ad5992b" + "tensorflow-swift-apis": "d2c78f4c323f223ea79e7bf2a035f71edcd42824" } } } From 993c9724e409636b31b88edf7e30038178aa51fb Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Sun, 21 Apr 2019 10:34:42 -0400 Subject: [PATCH 17/30] Minor edit. --- test/TensorFlowRuntime/tensor_api.swift | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/test/TensorFlowRuntime/tensor_api.swift b/test/TensorFlowRuntime/tensor_api.swift index be77a0075cfbd..5fd08138b1637 100644 --- a/test/TensorFlowRuntime/tensor_api.swift +++ b/test/TensorFlowRuntime/tensor_api.swift @@ -1,12 +1,15 @@ // RUN: %target-run-eager-swift %swift-tensorflow-test-run-extra-options + // SR-9737: hanging tests in GPE GPU mode // UN: %target-run-gpe-swift %swift-tensorflow-test-run-extra-options + // REQUIRES: executable_test // REQUIRES: swift_test_mode_optimize // // This test suite is for tensor API and has been created because tensor.swift // has static shape restrictions for TPU send/receive. 
Until the restriction is
 // resolved, API tests that incur send/receive should reside here.
+
 import TensorFlow
 #if TPU
 import TensorFlowUnittestTPU
 #else
 import TensorFlowUnittest
 #endif
 import StdlibUnittest
@@ -44,4 +47,4 @@ TensorNonTPUTests.testAllBackends("BroadcastTensor") {
   expectEqual(Tensor(repeating: 1, shape: [2, 3, 4]), target)
 }
 
-runAllTests()
\ No newline at end of file
+runAllTests()
From aa72c700319960b9e8801fdc0a48b4ea569b5731 Mon Sep 17 00:00:00 2001
From: Anthony Platanios
Date: Sun, 21 Apr 2019 18:08:27 -0400
Subject: [PATCH 18/30] Added support for 'Dataset.repeated(count:)' since
 '#tfop' does not currently work from outside the stdlib.

---
 stdlib/public/TensorFlow/Dataset.swift | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/stdlib/public/TensorFlow/Dataset.swift b/stdlib/public/TensorFlow/Dataset.swift
index 568f386d00f42..5560f4a64814d 100644
--- a/stdlib/public/TensorFlow/Dataset.swift
+++ b/stdlib/public/TensorFlow/Dataset.swift
@@ -154,6 +154,15 @@ public extension Dataset {
       )
     )
   }
+
+  @inlinable @inline(__always)
+  func repeated(count: Int? = nil) -> Dataset {
+    return Dataset(
+      _handle: #tfop(
+        "RepeatDataset", _handle, Tensor(Int64(count ?? -1)),
+        output_types$dtype: Element._typeList,
+        output_shapes: Element._unknownShapeList))
+  }
 }
 
 /// The type that allows iteration over a dataset's elements.
From 4f8c2ca8b1fae0c4aef1c750b9a77d622fe1c8eb Mon Sep 17 00:00:00 2001
From: Anthony Platanios
Date: Mon, 22 Apr 2019 10:28:13 -0400
Subject: [PATCH 19/30] Added support for prefetched datasets.

---
 stdlib/public/TensorFlow/Dataset.swift | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/stdlib/public/TensorFlow/Dataset.swift b/stdlib/public/TensorFlow/Dataset.swift
index 5560f4a64814d..97a0b5aea5b00 100644
--- a/stdlib/public/TensorFlow/Dataset.swift
+++ b/stdlib/public/TensorFlow/Dataset.swift
@@ -130,6 +130,17 @@ public extension Dataset {
 }
 
 public extension Dataset {
+  @inlinable @inline(__always)
+  func prefetched(count: Int) -> Dataset {
+    return Dataset(
+      _handle: #tfop(
+        "PrefetchDataset", _handle, Tensor(Int64(count)),
+        output_types$dtype: Element._typeList,
+        output_shapes: Element._unknownShapeList
+      )
+    )
+  }
+
   @inlinable @inline(__always)
   func shuffled(
     sampleCount: Int, randomSeed: Int64
From 98e5704f719e137d1f62490996ba39e57fd47e37 Mon Sep 17 00:00:00 2001
From: Anthony Platanios
Date: Tue, 23 Apr 2019 17:02:55 -0400
Subject: [PATCH 20/30] Moved the dataset ops to swift-apis.

---
 stdlib/public/TensorFlow/Dataset.swift | 231 -------------------------
 1 file changed, 231 deletions(-)
 delete mode 100644 stdlib/public/TensorFlow/Dataset.swift

diff --git a/stdlib/public/TensorFlow/Dataset.swift b/stdlib/public/TensorFlow/Dataset.swift
deleted file mode 100644
index 97a0b5aea5b00..0000000000000
--- a/stdlib/public/TensorFlow/Dataset.swift
+++ /dev/null
@@ -1,231 +0,0 @@
-//===-- Dataset.swift -----------------------------------------*- swift -*-===//
-//
-// This source file is part of the Swift.org open source project
-//
-// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
-// Licensed under Apache License v2.0 with Runtime Library Exception
-//
-// See https://swift.org/LICENSE.txt for license information
-// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
-//
-//===----------------------------------------------------------------------===//
-//
-// The dataset API.
-//
-//===----------------------------------------------------------------------===//
-
-/// The default graph seed.
-/// -/// - Note: See TensorFlow's `python.framework.random_seed.DEFAULT_GRAPH_SEED`. -@usableFromInline let _defaultGraphSeed: Int64 = 87654321 - -/// Returns the local seeds an operation should use given an op-specific seed. -/// -/// Given operation-specific seed, `seed`, this helper function returns two -/// seeds derived from graph-level and op-level seeds. Many random operations -/// internally use the two seeds to allow user to change the seed globally for a -/// graph, or for only specific operations. -/// -/// - Note: See TensorFlow's `python.framework.random_seed.get_seed`. -/// -// TODO: There's no support for TF's "global seed" yet, so we always use the -// default graph seed as the first seed. Need to investigate the best way to -// model TF's "global seed". -@usableFromInline @inline(__always) -func _tensorSeeds(_ seed: Tensor) -> (Tensor, Tensor) { - return (Tensor(_defaultGraphSeed), seed) -} - -//===----------------------------------------------------------------------===// -// Single value dataset -//===----------------------------------------------------------------------===// - -/// Represents a potentially large set of elements. -/// -/// A `Dataset` can be used to represent an input pipeline as a collection of -/// element tensors. -@_fixed_layout -public struct Dataset { - public let _handle: VariantHandle - - @inlinable @inline(__always) - public init(_handle: VariantHandle) { - self._handle = _handle - } -} - -public extension Dataset { - @inlinable @inline(__always) - init(randomSeed: Int64) { - let (seed1, seed2) = _tensorSeeds(Tensor(randomSeed)) - self.init( - _handle: #tfop("RandomDataset", seed1, seed2, - output_types$dtype: Element._typeList, - output_shapes: Element._unknownShapeList) - ) - } -} - -public extension Dataset { - /// Creates a dataset from a batch of elements as a tensor. - @inlinable @inline(__always) - init(elements: Element) { - // A dataset creation op only runs on TF CPU. - self.init( - _handle: #tfop( - "TensorSliceDataset", [elements], - Toutput_types$dtype: Element._typeList, - output_shapes: Element._unknownShapeList - ) - ) - } -} - -extension Dataset : Sequence { - public typealias Iterator = DatasetIterator - - /// Returns an iterator over the elements of this dataset. - @inlinable @inline(__always) - public func makeIterator() -> DatasetIterator { - let resource: ResourceHandle = - #tfop("AnonymousIterator", output_types$dtype: Element._typeList, - output_shapes: Element._unknownShapeList) - #tfop("MakeIterator", _handle, resource) as Void - return DatasetIterator(_handle: resource) - } -} - -public extension Dataset { - // Note that this Dataset API implementation uses an experimental tracing - // feature, which is not robust and does not have great diagnostics yet. 
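// A minimal usage sketch of the combinators defined in this file, assuming
// some `dataset: Dataset<Tensor<Float>>` (the names and constants are made up):
//
//   let pipeline = dataset.map { $0 * 2 }
//                         .shuffled(sampleCount: 1000, randomSeed: 42)
//                         .batched(32)
//                         .prefetched(count: 2)
//                         .repeated(count: 10)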
- @inlinable @inline(__always) - func map( - _ transform: (Element) -> ResultElement - ) -> Dataset { - return Dataset( - _handle: #tfop( - "MapDataset", _handle, [Tensor(0)], - f$func: _tffunc(transform), - Targuments$dtype: [Int32.tensorFlowDataType], - output_types$dtype: ResultElement._typeList, - output_shapes: ResultElement._unknownShapeList - ) - ) - } - - @inlinable @inline(__always) - func filter( - _ isIncluded: (Element) -> Tensor - ) -> Dataset { - return Dataset( - _handle: #tfop( - "FilterDataset", _handle, [Tensor(0)], - predicate$func: _tffunc(isIncluded), - Targuments$dtype: [Int32.tensorFlowDataType], - output_types$dtype: Element._typeList, - output_shapes: Element._unknownShapeList - ) - ) - } -} - -public extension Dataset { - @inlinable @inline(__always) - func prefetched(count: Int) -> Dataset { - return Dataset( - _handle: #tfop( - "PrefetchDataset", _handle, Tensor(Int64(count)), - output_types$dtype: Element._typeList, - output_shapes: Element._unknownShapeList - ) - ) - } - - @inlinable @inline(__always) - func shuffled( - sampleCount: Int, randomSeed: Int64 - ) -> Dataset { - let (seed1, seed2) = _tensorSeeds(Tensor(randomSeed)) - return Dataset( - _handle: #tfop( - "ShuffleDataset", _handle, Tensor(Int64(sampleCount)), seed1, seed2, - output_types$dtype: Element._typeList, - output_shapes: Element._unknownShapeList - ) - ) - } - - @inlinable @inline(__always) - func batched(_ batchSize: Int) -> Dataset { - return Dataset( - _handle: #tfop( - "BatchDataset", _handle, Tensor(Int64(batchSize)), - output_types$dtype: Element._typeList, - output_shapes: Element._unknownShapeList - ) - ) - } - - @inlinable @inline(__always) - func repeated(count: Int? = nil) -> Dataset { - return Dataset( - _handle: #tfop( - "RepeatDataset", _handle, Tensor(Int64(count ?? -1)), - output_types$dtype: Element._typeList, - output_shapes: Element._unknownShapeList)) - } -} - -/// The type that allows iteration over a dataset's elements. -@_fixed_layout -public struct DatasetIterator { - @usableFromInline let _handle: ResourceHandle - - @usableFromInline @inline(__always) - internal init(_handle: ResourceHandle) { - self._handle = _handle - } -} - -extension DatasetIterator : IteratorProtocol { - /// Advances to the next element and returns it, or `nil` if no next element - /// exists. - @inlinable @inline(__always) - public mutating func next() -> Element? { - let optional: VariantHandle = - #tfop("IteratorGetNextAsOptional", _handle, - output_types$dtype: Element._typeList, - output_shapes: Element._unknownShapeList) - guard _TFGetScalarOrDie(#tfop("OptionalHasValue", optional)) else { - return nil - } - return #tfop("OptionalGetValue", optional, - output_types$dtype: Element._typeList, - output_shapes: Element._unknownShapeList) as Element - } -} - -/// A 2-tuple-like struct that conforms to TensorGroup that represents a tuple -/// of 2 types conforming to TensorGroup. -@_fixed_layout -public struct Zip2TensorGroup : TensorGroup { - public var first: T - public var second: U - - public init(_ first: T, _ second: U) { - self.first = first - self.second = second - } -} - -// TODO(SR-9156): This does not work in graph mode. 
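// A minimal sketch of `zip` (declared below), assuming two datasets of
// equal length:
//
//   let pairs = zip(inputs, labels)
//   for pair in pairs {
//     // pair.first is an element of `inputs`, pair.second of `labels`.
//   }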
-@inlinable @inline(__always) -public func zip( - _ dataset1: Dataset, _ dataset2: Dataset -) -> Dataset> { - let handle: VariantHandle = #tfop( - "ZipDataset", Zip2TensorGroup(dataset1._handle, dataset2._handle), - output_types$dtype: Zip2TensorGroup._typeList, - output_shapes: Zip2TensorGroup._unknownShapeList) - return Dataset(_handle: handle) -} From df0ec40ee29941e34bf25d1b672ddf4dc16acdce Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Tue, 23 Apr 2019 17:05:08 -0400 Subject: [PATCH 21/30] Removed the now-redundant 'ArrayOps.swift' file. --- stdlib/public/TensorFlow/ArrayOps.swift | 238 ------------------------ stdlib/public/TensorFlow/CMakeLists.txt | 7 +- 2 files changed, 4 insertions(+), 241 deletions(-) delete mode 100644 stdlib/public/TensorFlow/ArrayOps.swift diff --git a/stdlib/public/TensorFlow/ArrayOps.swift b/stdlib/public/TensorFlow/ArrayOps.swift deleted file mode 100644 index 28db3f4d150e0..0000000000000 --- a/stdlib/public/TensorFlow/ArrayOps.swift +++ /dev/null @@ -1,238 +0,0 @@ -//===-- ArrayOps.swift ----------------------------------------*- swift -*-===// -// -// This source file is part of the Swift.org open source project -// -// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors -// Licensed under Apache License v2.0 with Runtime Library Exception -// -// See https://swift.org/LICENSE.txt for license information -// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors -// -//===----------------------------------------------------------------------===// -// -// This file contains some Array ops that cannot be properly handled by #tfop. -// -// TODO: These should be deleted once we can properly generate raw ops for these. -// -//===----------------------------------------------------------------------===// - -import CTensorFlow - -public extension Raw { - /// Saves tensors in V2 checkpoint format. - /// - /// By default, saves the named tensors in full. If the caller wishes to save - /// specific slices of full tensors, "shape_and_slices" should be non-empty strings - /// and correspondingly well-formed. - /// - /// - Parameters: - /// - prefix: Must have a single element. The prefix of the V2 checkpoint to which we - /// write the tensors. - /// - tensor_names: shape {N}. The names of the tensors to be saved. - /// - shape_and_slices: shape {N}. The slice specs of the tensors to be saved. - /// Empty strings indicate that they are non-partitioned tensors. - /// - tensors: `N` tensors to save. - @inlinable @inline(__always) - static func saveV2( - prefix: StringTensor, - tensorNames: StringTensor, - shapeAndSlices: StringTensor, - tensors: [AnyTensor] - ) { - let s: CTFStatus = TF_NewStatus() - defer { TF_DeleteStatus(s) } - let op: CTFEOp = TFE_NewOp(_ExecutionContext.global.eagerContext, "SaveV2", s) - defer { TFE_DeleteOp(op) } - let _ = _TFCOpAddInputFromTensorGroup(op, prefix, s) - let _ = _TFCOpAddInputFromTensorGroup(op, tensorNames, s) - let _ = _TFCOpAddInputFromTensorGroup(op, shapeAndSlices, s) - let _ = _TFCOpAddInputFromAnyTensors(op, tensors, s) - let _ = _TFCOpSetAttrTypeArray(op, "dtypes", tensors.map { $0._tensorFlowDataType }) - return _TFCExecuteOp(op, s) - } - - /// Restores tensors from a V2 checkpoint. 
- /// - /// For backward compatibility with the V1 format, this Op currently allows - /// restoring from a V1 checkpoint as well: - /// - This Op first attempts to find the V2 index file pointed to by "prefix", and - /// if found proceed to read it as a V2 checkpoint; - /// - Otherwise the V1 read path is invoked. - /// Relying on this behavior is not recommended, as the ability to fall back to read - /// V1 might be deprecated and eventually removed. - /// - /// By default, restores the named tensors in full. If the caller wishes to restore - /// specific slices of stored tensors, "shape_and_slices" should be non-empty - /// strings and correspondingly well-formed. - /// - /// Callers must ensure all the named tensors are indeed stored in the checkpoint. - /// - /// - Parameters: - /// - prefix: Must have a single element. The prefix of a V2 checkpoint. - /// - tensor_names: shape {N}. The names of the tensors to be restored. - /// - shape_and_slices: shape {N}. The slice specs of the tensors to be restored. - /// Empty strings indicate that they are non-partitioned tensors. - /// - /// - Attr dtypes: shape {N}. The list of expected dtype for the tensors. Must match - /// those stored in the checkpoint. - /// - /// - Output tensors: shape {N}. The restored tensors, whose shapes are read from the - /// checkpoint directly. - @inlinable @inline(__always) - static func restoreV2( - prefix: StringTensor, - tensorNames: StringTensor, - shapeAndSlices: StringTensor, - dtypes: [TensorDataType] - ) -> [AnyTensor] { - let s: CTFStatus = TF_NewStatus() - defer { TF_DeleteStatus(s) } - let op: CTFEOp = TFE_NewOp(_ExecutionContext.global.eagerContext, "RestoreV2", s) - defer { TFE_DeleteOp(op) } - let _ = _TFCOpAddInputFromTensorGroup(op, prefix, s) - let _ = _TFCOpAddInputFromTensorGroup(op, tensorNames, s) - let _ = _TFCOpAddInputFromTensorGroup(op, shapeAndSlices, s) - let _ = _TFCOpSetAttrTypeArray(op, "dtypes", dtypes) - - var count: Int32 = Int32(dtypes.count) - let buffer: UnsafeMutablePointer = - UnsafeMutablePointer.allocate(capacity: Int(count)) - defer { buffer.deallocate() } - _TFCEagerExecute(op, UnsafeMutablePointer(buffer), &count, s) - checkOk(s) - - var out: [AnyTensor] = [] - var cursor = buffer - for type in dtypes { - out.append(makeTensor(dataType: type, owning: cursor.pointee)) - cursor = cursor.advanced(by: 1) - } - return out - } - - /// Splits a tensor into `numSplit` tensors along one dimension. - /// - /// - Parameters: - /// - splitDim: 0-D. The dimension along which to split. Must be in the range - /// `[-rank(value), rank(value))`. - /// - value: The tensor to split. - /// - numSplit: The number of splits to create. - /// - /// - Returns: Tensors whose shape matches that of `value` - /// except along `axis`, where their sizes are - /// `value.shape[axis] / numSplit`. 
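// A minimal usage sketch (shapes assumed): split a [6, 2] tensor along
// axis 0 into three tensors of shape [2, 2].
//
//   let parts = Raw.split(splitDim: Tensor<Int32>(0),
//                         value: Tensor<Float>(ones: [6, 2]),
//                         numSplit: 3)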
- @inlinable @inline(__always) - static func split( - splitDim: Tensor, - value: Tensor, - numSplit: Int64 - ) -> [Tensor] { - let s: CTFStatus = TF_NewStatus() - defer { TF_DeleteStatus(s) } - let op: CTFEOp = TFE_NewOp(_ExecutionContext.global.eagerContext, "Split", s) - defer { TFE_DeleteOp(op) } - let _ = _TFCOpAddInputFromTensorGroup(op, splitDim, s) - let _ = _TFCOpAddInputFromTensorGroup(op, value, s) - TFE_OpSetAttrInt(op, "num_split", numSplit) - TFE_OpSetAttrType(op, "T", T.tensorFlowDataType._cDataType) - var count: Int32 = Int32(numSplit) - let buffer: UnsafeMutablePointer = - UnsafeMutablePointer.allocate(capacity: Int(count)) - defer { buffer.deallocate() } - _TFCEagerExecute(op, UnsafeMutablePointer(buffer), &count, s) - checkOk(s) - - var out: [Tensor] = [] - var cursor = buffer - for _ in 0..(handle: TensorHandle(_owning: cursor.pointee))) - cursor = cursor.advanced(by: 1) - } - return out - } - - /// Splits a tensor into `numSplit` tensors along one dimension. - /// - /// - Parameters: - /// - value: The tensor to split. - /// - sizeSplits: list containing the sizes of each output tensor along the split - /// dimension. Must sum to the dimension of value along split_dim. - /// Can contain one -1 indicating that dimension is to be inferred. - /// - splitDim: 0-D. The dimension along which to split. Must be in the range - /// `[-rank(value), rank(value))`. - /// - /// - Returns: Tensors whose shape matches that of `value` - /// except along `axis`, where their sizes are - /// `size_splits[i]`. - @inlinable @inline(__always) - static func splitV( - value: Tensor, - sizeSplits: Tensor, - splitDim: Tensor, - numSplit: Int64 - ) -> [Tensor] { - let s: CTFStatus = TF_NewStatus() - defer { TF_DeleteStatus(s) } - let op: CTFEOp = TFE_NewOp(_ExecutionContext.global.eagerContext, "SplitV", s) - defer { TFE_DeleteOp(op) } - let _ = _TFCOpAddInputFromTensorGroup(op, value, s) - let _ = _TFCOpAddInputFromTensorGroup(op, sizeSplits, s) - let _ = _TFCOpAddInputFromTensorGroup(op, splitDim, s) - TFE_OpSetAttrInt(op, "num_split", numSplit) - TFE_OpSetAttrType(op, "T", T.tensorFlowDataType._cDataType) - TFE_OpSetAttrType(op, "Tlen", Tlen.tensorFlowDataType._cDataType) - var count: Int32 = Int32(numSplit) - let buffer: UnsafeMutablePointer = - UnsafeMutablePointer.allocate(capacity: Int(count)) - defer { buffer.deallocate() } - _TFCEagerExecute(op, UnsafeMutablePointer(buffer), &count, s) - checkOk(s) - - var out: [Tensor] = [] - var cursor = buffer - for _ in 0..(handle: TensorHandle(_owning: cursor.pointee))) - cursor = cursor.advanced(by: 1) - } - return out - } - - /// Unpacks a tensor into `numSplit` tensors along one dimension. - /// - /// - Parameters: - /// - value: The tensor to unpack. - /// - num: The number of tensors to unpack. - /// - axis: The dimension along which to unpack. Must be in the range - /// `[-rank(value), rank(value))`. - /// - /// - Returns: Tensors whose shape matches that of `value` without the `axis` dimension. 
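// A minimal usage sketch (shapes assumed): unpack a [3, 4] tensor along
// axis 0 into three tensors of shape [4].
//
//   let rows = Raw.unpack(value: Tensor<Float>(ones: [3, 4]), num: 3, axis: 0)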
- @inlinable @inline(__always) - static func unpack( - value: Tensor, - num: Int64, - axis: Int64 - ) -> [Tensor] { - let s: CTFStatus = TF_NewStatus() - defer { TF_DeleteStatus(s) } - let op: CTFEOp = TFE_NewOp(_ExecutionContext.global.eagerContext, "Unpack", s) - defer { TFE_DeleteOp(op) } - let _ = _TFCOpAddInputFromTensorGroup(op, value, s) - TFE_OpSetAttrInt(op, "num", num) - TFE_OpSetAttrType(op, "T", T.tensorFlowDataType._cDataType) - TFE_OpSetAttrInt(op, "axis", axis) - var count: Int32 = Int32(num) - let buffer: UnsafeMutablePointer = - UnsafeMutablePointer.allocate(capacity: Int(count)) - defer { buffer.deallocate() } - _TFCEagerExecute(op, UnsafeMutablePointer(buffer), &count, s) - checkOk(s) - - var out: [Tensor] = [] - var cursor = buffer - for _ in 0..(handle: TensorHandle(_owning: cursor.pointee))) - cursor = cursor.advanced(by: 1) - } - return out - } -} diff --git a/stdlib/public/TensorFlow/CMakeLists.txt b/stdlib/public/TensorFlow/CMakeLists.txt index 53e1cbda8c6dd..678bcdf540d56 100644 --- a/stdlib/public/TensorFlow/CMakeLists.txt +++ b/stdlib/public/TensorFlow/CMakeLists.txt @@ -28,12 +28,12 @@ list(APPEND swift_stdlib_compile_flags "-Xllvm" "-sil-partial-specialization") list(APPEND swift_stdlib_compile_flags "-Xfrontend" "-enable-sil-ownership") list(APPEND swift_stdlib_compile_flags "-force-single-frontend-invocation") # FIXME(SR-7972): Some tests fail when TensorFlow is optimized. +# list(APPEND swift_stdlib_compile_flags "-O" "-whole-module-optimization") list(APPEND swift_stdlib_compile_flags "-Onone") list(APPEND swift_stdlib_compile_flags "-DCOMPILING_TENSORFLOW_MODULE") set(SOURCES CompilerRuntime.swift - Dataset.swift DataTypes.swift Execution.swift ShapedArray.swift @@ -45,13 +45,14 @@ set(SOURCES TensorProtocol.swift TensorShape.swift Utilities.swift - ArrayOps.swift Threading.swift ExecuteOp.swift.gyb) # Copy TensorFlow bindings file, if it exists. if (TENSORFLOW_SWIFT_BINDINGS) - list(APPEND SOURCES "${TENSORFLOW_SWIFT_BINDINGS}") + file(GLOB_RECURSE TENSORFLOW_SWIFT_BINDINGS_SOURCES + "${TENSORFLOW_SWIFT_BINDINGS}/*.swift") + list(APPEND SOURCES "${TENSORFLOW_SWIFT_BINDINGS_SOURCES}") endif() # Copy TensorFlow high-level API sources, if they exist. From ce4dfd31dc180c86bbfd476610203a43a9847f86 Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Tue, 23 Apr 2019 17:13:43 -0400 Subject: [PATCH 22/30] Changes to support the new swift-bindings. --- stdlib/public/TensorFlow/StringOps.swift | 3 +-- stdlib/public/TensorFlow/Tensor.swift | 10 ++-------- stdlib/public/TensorFlow/TensorGroup.swift | 16 +++++++++++++++- test/TensorFlowRuntime/tracer.swift | 12 ++++++++++++ utils/build-script-impl | 4 ++-- utils/build_swift/driver_arguments.py | 3 +-- 6 files changed, 33 insertions(+), 15 deletions(-) diff --git a/stdlib/public/TensorFlow/StringOps.swift b/stdlib/public/TensorFlow/StringOps.swift index 7c2ff9715cfb4..907f7de106533 100644 --- a/stdlib/public/TensorFlow/StringOps.swift +++ b/stdlib/public/TensorFlow/StringOps.swift @@ -23,7 +23,6 @@ public extension StringTensor { /// - Note: `elementsEqual` supports broadcasting. 
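   // A minimal usage sketch (values and the array initializer assumed):
   //   let a = StringTensor(["cat", "dog"])
   //   let b = StringTensor(["cat", "cow"])
   //   let mask = a.elementsEqual(b)  // Tensor<Bool> with values [true, false]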
@inlinable @inline(__always) func elementsEqual(_ other: StringTensor) -> Tensor { - return #tfop("Equal", self.handle, other.handle, - T$dtype: String.tensorFlowDataType) + return Raw.equal(self, other) } } diff --git a/stdlib/public/TensorFlow/Tensor.swift b/stdlib/public/TensorFlow/Tensor.swift index d2a4cd064845d..af0ff6f0fa27c 100644 --- a/stdlib/public/TensorFlow/Tensor.swift +++ b/stdlib/public/TensorFlow/Tensor.swift @@ -416,12 +416,7 @@ extension _TensorElementLiteral : ExpressibleByArrayLiteral { public typealias ArrayLiteralElement = _TensorElementLiteral @inlinable @inline(__always) public init(arrayLiteral elements: _TensorElementLiteral...) { - // Attr T (non-optional in the op definition) need not be specified when we - // run the op as part of a graph function, but need to be specified when we - // run it via eager C API. - let handle: TensorHandle = #tfop("Pack", elements, - T$dtype: Scalar.tensorFlowDataType) - tensor = Tensor(handle: handle) + tensor = Raw.pack(elements.map { $0.tensor }) } } @@ -436,8 +431,7 @@ extension Tensor : ExpressibleByArrayLiteral { internal init( _tensorElementLiterals elements: [_TensorElementLiteral] ) { - self.init(handle: #tfop("Pack", elements, - T$dtype: Scalar.tensorFlowDataType)) + self = Raw.pack(elements.map { $0.tensor }) } /// Creates a tensor initialized with the given elements. diff --git a/stdlib/public/TensorFlow/TensorGroup.swift b/stdlib/public/TensorFlow/TensorGroup.swift index af2b4f0dbe457..b763f54e38c98 100644 --- a/stdlib/public/TensorFlow/TensorGroup.swift +++ b/stdlib/public/TensorFlow/TensorGroup.swift @@ -29,6 +29,8 @@ public protocol TensorArrayProtocol { func _unpackTensorHandles(into address: UnsafeMutablePointer?) var _tensorHandleCount: Int32 { get } + + init(_owning tensorHandles: UnsafePointer?, count: Int) } /// A protocol representing types that can be mapped to and from @@ -67,6 +69,11 @@ public extension TensorGroup { static var _unknownShapeList: [TensorShape?] { return Array(repeating: nil, count: _typeList.count) } + + init(_owning tensorHandles: UnsafePointer?, count: Int) { + precondition(count == Self._typeList.count) + self.init(_owning: tensorHandles) + } } //===----------------------------------------------------------------------===// @@ -199,7 +206,7 @@ extension StringTensor : TensorGroup { } } -extension Array : TensorArrayProtocol where Element : TensorArrayProtocol { +extension Array : TensorArrayProtocol where Element : TensorGroup { public func _unpackTensorHandles(into address: UnsafeMutablePointer?) { var ptr = address for elem in self { @@ -213,4 +220,11 @@ extension Array : TensorArrayProtocol where Element : TensorArrayProtocol { for elem in self { count += elem._tensorHandleCount } return count } + + public init(_owning tensorHandles: UnsafePointer?, count: Int) { + let size = count / Int(Element._tensorHandleCount) + self = Array((0..(1.0), Tensor(2.0)] var optimizer: Optimizer = [Tensor(1.0), Tensor(2.0)] + public init() {} + + public init(_owning tensorHandles: UnsafePointer?, count: Int) { + self.model = [ + Tensor(_owning: tensorHandles), + Tensor(_owning: tensorHandles?.advanced(by: 1))] + self.optimizer = [ + Tensor(_owning: tensorHandles?.advanced(by: 2)), + Tensor(_owning: tensorHandles?.advanced(by: 3))] + } + public func _unpackTensorHandles(into address: UnsafeMutablePointer?) 
{ print("Calling State._unpackTensorHandles().") var ptr = address @@ -175,6 +186,7 @@ TracerTests.testAllBackends("Advanced") { ptr = ptr!.advanced(by: Int(model._tensorHandleCount)) optimizer._unpackTensorHandles(into: ptr) } + public var _tensorHandleCount: Int32 { return model._tensorHandleCount + optimizer._tensorHandleCount } diff --git a/utils/build-script-impl b/utils/build-script-impl index 4bf140f028519..414f8cb611d77 100755 --- a/utils/build-script-impl +++ b/utils/build-script-impl @@ -280,7 +280,7 @@ KNOWN_SETTINGS=( tensorflow-host-include-dir "" "Path to host TensorFlow headers" tensorflow-target-include-dir "" "Path to target Tensorflow headers" tensorflow-target-lib-dir "" "Path to target TensorFlow libraries" - tensorflow-swift-bindings "" "Path to TensorFlow Swift bindings file" + tensorflow-swift-bindings "" "Path to TensorFlow Swift bindings repository" tensorflow-swift-apis "" "Path to TensorFlow deep learning library repository" ) @@ -2476,7 +2476,7 @@ for host in "${ALL_HOSTS[@]}"; do # Handle TensorFlow Swift bindings file. if [[ ! "${TENSORFLOW_SWIFT_BINDINGS}" && -d "${TENSORFLOW_SWIFT_BINDINGS_DIR}" ]] ; then - TENSORFLOW_SWIFT_BINDINGS="${TENSORFLOW_SWIFT_BINDINGS_DIR}/RawOpsGenerated.swift" + TENSORFLOW_SWIFT_BINDINGS="${TENSORFLOW_SWIFT_BINDINGS_DIR}" fi if [[ "${TENSORFLOW_SWIFT_BINDINGS}" ]] ; then cmake_options=( diff --git a/utils/build_swift/driver_arguments.py b/utils/build_swift/driver_arguments.py index 3c927ef0a2b22..be01ca8e29de4 100644 --- a/utils/build_swift/driver_arguments.py +++ b/utils/build_swift/driver_arguments.py @@ -974,8 +974,7 @@ def create_argument_parser(): 'Used for linking Swift programs.') option('--tensorflow-swift-bindings', store_path, default=None, - help='Path to a TensorFlow Swift bindings file ' - '(RawOpsGenerated.swift).') + help='Path to a TensorFlow Swift bindings repository.') option('--tensorflow-swift-apis', store_path, default=None, help='Path to a TensorFlow deep learning library repository.') From 701e31d4cfb130ded87c85a8dea98db36ab057b8 Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Tue, 23 Apr 2019 17:31:03 -0400 Subject: [PATCH 23/30] Updated the 'TensorArrayProtocol' and its automatic derivation implementation. --- .../DerivedConformanceTensorArrayProtocol.cpp | 256 +++++++++++++++++- lib/Sema/DerivedConformances.cpp | 7 + stdlib/public/TensorFlow/TensorGroup.swift | 7 + .../tensor_array_protocol.swift | 6 +- 4 files changed, 269 insertions(+), 7 deletions(-) diff --git a/lib/Sema/DerivedConformanceTensorArrayProtocol.cpp b/lib/Sema/DerivedConformanceTensorArrayProtocol.cpp index 990d65c5532a1..f9998b896a3f6 100644 --- a/lib/Sema/DerivedConformanceTensorArrayProtocol.cpp +++ b/lib/Sema/DerivedConformanceTensorArrayProtocol.cpp @@ -37,17 +37,17 @@ bool DerivedConformance::canDeriveTensorArrayProtocol(NominalTypeDecl *nominal, auto *structDecl = dyn_cast(nominal); if (!structDecl) return false; - // All stored properties must conform to `TensorArrayProtocol`. + // All stored properties must conform to `TensorGroup`. 
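  // For example (a hedged sketch on the Swift side), derivation succeeds for
  //   struct Parameters : TensorArrayProtocol { var w, b: Tensor<Float> }
  // and is rejected if any stored property does not conform to `TensorGroup`.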
auto &C = nominal->getASTContext(); - auto *tensorArrayProto = - C.getProtocol(KnownProtocolKind::TensorArrayProtocol); + auto *tensorGroupProto = + C.getProtocol(KnownProtocolKind::TensorGroup); return llvm::all_of(structDecl->getStoredProperties(), [&](VarDecl *v) { if (!v->hasInterfaceType()) C.getLazyResolver()->resolveDeclSignature(v); if (!v->hasInterfaceType()) return false; auto varType = DC->mapTypeIntoContext(v->getValueInterfaceType()); - return (bool)TypeChecker::conformsToProtocol(varType, tensorArrayProto, DC, + return (bool)TypeChecker::conformsToProtocol(varType, tensorGroupProto, DC, ConformanceCheckFlags::Used); }); } @@ -66,6 +66,20 @@ static ValueDecl *getProtocolRequirement(ProtocolDecl *proto, Identifier name) { return lookup.front(); } +// Return the protocol requirement with the specified name. +static ValueDecl *getProtocolRequirement(ProtocolDecl *proto, DeclName name) { + auto lookup = proto->lookupDirect(name); + lookup.erase(std::remove_if(lookup.begin(), lookup.end(), + [](ValueDecl *v) { + return !isa( + v->getDeclContext()) || + !v->isProtocolRequirement(); + }), + lookup.end()); + assert(lookup.size() == 1 && "Ambiguous protocol requirement"); + return lookup.front(); +} + // Synthesize body for `_unpackTensorHandles(into:)`. static void deriveBodyTensorArrayProtocol_unpackTensorHandles( @@ -349,12 +363,246 @@ static ValueDecl *deriveTensorArrayProtocol_tensorHandleCount( return tensorHandleCountDecl; } +// Synthesize body for `init(_owning:count:)`. +static void +deriveBodyTensorArrayProtocol_init(AbstractFunctionDecl *funcDecl) { + auto *parentDC = funcDecl->getParent(); + auto *nominal = parentDC->getSelfNominalTypeDecl(); + auto &C = nominal->getASTContext(); + + // Obtain the address type. + auto cTensorHandleType = C.getOpaquePointerDecl()->getDeclaredType(); + auto baseAddressType = BoundGenericType::get( + C.getUnsafePointerDecl(), Type(), {cTensorHandleType}); + auto addressType = BoundGenericType::get( + C.getOptionalDecl(), Type(), {baseAddressType}); + auto *addressTE = TypeExpr::createImplicit(addressType, C); + + // Get references to `self` and parameter declarations. + auto *selfDecl = funcDecl->getImplicitSelfDecl(); + auto *selfDRE = new (C) + DeclRefExpr(selfDecl, DeclNameLoc(), /*Implicit*/ true); + auto *paramDecl = funcDecl->getParameters()->get(0); + auto *paramDRE = new (C) + DeclRefExpr(paramDecl, DeclNameLoc(), /*Implicit*/ true); + + // Create an `if var` statement for the current address. + VarDecl *currAddressDecl = new (C) VarDecl( + /*IsStatic*/ false, VarDecl::Specifier::Var, /*IsCaptureList*/ false, + SourceLoc(), C.getIdentifier("currentAddress"), funcDecl); + currAddressDecl->setImplicit(); + currAddressDecl->setHasNonPatternBindingInit(true); + currAddressDecl->setInterfaceType(baseAddressType); + currAddressDecl->setValidationToChecked(); + + Pattern *currAddressPat = new (C) + NamedPattern(currAddressDecl, /*implicit*/ true); + currAddressPat = new (C) + VarPattern(SourceLoc(), /*isLet*/ false, currAddressPat, + /*implicit*/ true); + currAddressPat = new (C) + OptionalSomePattern(currAddressPat, currAddressPat->getEndLoc(), + /*implicit*/ true); + StmtConditionElement cond[] = { + StmtConditionElement(SourceLoc(), currAddressPat, /*Init*/ paramDRE)}; + + // Get the necessary protocol requirements. 
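  // (Namely the member initializer requirement and `_tensorHandleCount`,
  // which the synthesized body uses to advance the buffer cursor between
  // stored properties.)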
+ auto *tensorGroupProto = C.getProtocol(KnownProtocolKind::TensorGroup); + auto *tensorArrayProto = C.getProtocol( + KnownProtocolKind::TensorArrayProtocol); + auto initName = DeclName( + C, DeclBaseName::createConstructor(), + {C.getIdentifier("_owning"), C.getIdentifier("count")}); + auto *initReq = getProtocolRequirement(tensorArrayProto, initName); + auto *tensorHandleCountReq = getProtocolRequirement( + tensorArrayProto, C.Id_tensorHandleCount); + + Type intType = C.getIntDecl()->getDeclaredType(); + TypeExpr *intTE = TypeExpr::createImplicit(intType, C); + + // Goes through the member TensorGroups and call + // `self.t = T(_owning:count:)`. + llvm::SmallVector thenMemberExprs; + llvm::SmallVector elseMemberExprs; + for (auto member : nominal->getStoredProperties()) { + auto memberType = parentDC->mapTypeIntoContext( + member->getValueInterfaceType()); + auto *memberTypeExpr = TypeExpr::createImplicit(memberType, C); + auto module = nominal->getModuleContext(); + auto confRef = module->lookupConformance( + memberType, tensorGroupProto); + assert(confRef && "Member does not conform to `TensorGroup`"); + + // Get member type's constructor, e.g. `MemberType.init(_owning:)`. + // Use protocol requirement declaration for the method by default: this + // will be dynamically dispatched. + ValueDecl *memberInitDecl = initReq; + // If conformance reference is concrete, then use concrete witness + // declaration for the constructor. + if (confRef->isConcrete()) + memberInitDecl = confRef->getConcrete()->getWitnessDecl( + initReq, C.getLazyResolver()); + assert(memberInitDecl && "Member constructor declaration must exist"); + auto memberInitDRE = new (C) DeclRefExpr( + memberInitDecl, DeclNameLoc(), /*implicit*/ true); + memberInitDRE->setFunctionRefKind(FunctionRefKind::SingleApply); + + // Create reference to member constructor: `MemberType.init(_owning:)`. + auto *memberInitExpr = new (C) ConstructorRefCallExpr( + memberInitDRE, memberTypeExpr); + + auto *addressDRE = new (C) DeclRefExpr( + currAddressDecl, DeclNameLoc(), /*implicit*/ true); + auto *loadExpr = new (C) LoadExpr(addressDRE, baseAddressType); + + // Initialize the member using its TensorGroup constructor. + // Note that, initialization is dependent on the branch of the + // if-statement taken. + auto *thenInitExpr = new (C) InjectIntoOptionalExpr(loadExpr, addressType); + auto *thenInitCallExpr = CallExpr::createImplicit( + C, memberInitExpr, {thenInitExpr}, {C.getIdentifier("_owning")}); + + // Create a nil expression with type UnsafePointer? for the + // `else` branch. + auto *nilDecl = C.getOptionalNoneDecl(); + auto *nilDRE = new (C) DeclRefExpr( + nilDecl, DeclNameLoc(), /*implicit*/ true); + auto *elseInitExpr = new (C) DotSyntaxCallExpr( + nilDRE, SourceLoc(), addressTE); + auto *elseInitCallExpr = CallExpr::createImplicit( + C, memberInitExpr, {elseInitExpr}, {C.getIdentifier("_owning")}); + + // Assign the current member to the result of the initializer call. + auto *memberDRE = new (C) MemberRefExpr( + selfDRE, SourceLoc(), member, DeclNameLoc(), /*Implicit*/ true); + + auto *thenAssignMemberExpr = new (C) AssignExpr( + memberDRE, SourceLoc(), thenInitCallExpr, /*Implicit*/ true); + auto *elseAssignMemberExpr = new (C) AssignExpr( + memberDRE, SourceLoc(), elseInitCallExpr, /*Implicit*/ true); + + thenMemberExprs.push_back(thenAssignMemberExpr); + elseMemberExprs.push_back(elseAssignMemberExpr); + + // Advance the current address. 
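  // In Swift terms, the statement synthesized below is roughly (a sketch):
  //   currentAddress = currentAddress.advanced(by: Int(member._tensorHandleCount))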
+ DeclName advancedName(C, C.getIdentifier("advanced"), + {C.getIdentifier("by")}); + auto *advancedMethodExpr = + new (C) UnresolvedDotExpr(addressDRE, SourceLoc(), + advancedName, DeclNameLoc(), + /*Implicit*/ true); + + // Obtain `MemberType._tensorHandleCount`. + auto *memberCountMRE = new (C) MemberRefExpr( + memberDRE, SourceLoc(), tensorHandleCountReq, DeclNameLoc(), + /*Implicit*/ true); + + // Cast the tensor handle count to Int. + auto intInitName = DeclName(C, DeclBaseName::createConstructor(), + {Identifier()}); + auto *intInitExpr = + new (C) UnresolvedDotExpr(intTE, SourceLoc(), intInitName, + DeclNameLoc(), /*Implicit*/ true); + auto *intInitCallExpr = CallExpr::createImplicit( + C, intInitExpr, {memberCountMRE}, {Identifier()}); + + // Assign the new address. + auto *assignAddrCallExpr = CallExpr::createImplicit( + C, advancedMethodExpr, {intInitCallExpr}, {C.getIdentifier("by")}); + auto *assignAddrExpr = new (C) AssignExpr(addressDRE, SourceLoc(), + assignAddrCallExpr, + /*Implicit*/ true); + + thenMemberExprs.push_back(assignAddrExpr); + } + + auto *thenBody = BraceStmt::create( + C, SourceLoc(), C.AllocateCopy(thenMemberExprs), SourceLoc(), + /*implicit*/ true); + + auto *elseBody = BraceStmt::create( + C, SourceLoc(), C.AllocateCopy(elseMemberExprs), SourceLoc(), + /*implicit*/ true); + + auto *ifStmt = new (C) + IfStmt(LabeledStmtInfo(), /*IfLoc*/ SourceLoc(), + /*Cond*/ C.AllocateCopy(cond), /*Then*/ thenBody, + /*ElseLoc*/ SourceLoc(), /*Else*/ elseBody, /*implicit*/ true); + + funcDecl->setBody(BraceStmt::create(C, SourceLoc(), {ifStmt}, SourceLoc(), + /*implicit*/ true)); +} + +// Synthesize a constructor declaration for a `TensorArrayProtocol` +// method requirement. +static ValueDecl *deriveTensorArrayProtocol_constructor( + DerivedConformance &derived, Identifier argument1Name, + Identifier parameter1Name, Type parameter1Type, + Identifier parameter2Name, Type parameter2Type, Type returnType, + AbstractFunctionDecl::BodySynthesizer bodySynthesizer) { + auto nominal = derived.Nominal; + auto &C = derived.TC.Context; + auto parentDC = derived.getConformanceContext(); + + auto *param1 = + new (C) ParamDecl(VarDecl::Specifier::Default, SourceLoc(), SourceLoc(), + argument1Name, SourceLoc(), parameter1Name, parentDC); + param1->setInterfaceType(parameter1Type); + auto *param2 = + new (C) ParamDecl(VarDecl::Specifier::Default, SourceLoc(), SourceLoc(), + parameter2Name, SourceLoc(), parameter2Name, parentDC); + param2->setInterfaceType(parameter2Type); + ParameterList *params = ParameterList::create(C, {param1, param2}); + + DeclName name(C, DeclBaseName::createConstructor(), params); + auto *initDecl = + new (C) ConstructorDecl(name, SourceLoc(), OTK_None, SourceLoc(), + /*Throws*/ false, SourceLoc(), params, + /*GenericParams*/ nullptr, parentDC); + initDecl->setImplicit(); + initDecl->setSynthesized(); + initDecl->setBodySynthesizer(bodySynthesizer); + + if (auto env = parentDC->getGenericEnvironmentOfContext()) + initDecl->setGenericEnvironment(env); + initDecl->computeType(AnyFunctionType::ExtInfo().withThrows(false)); + initDecl->copyFormalAccessFrom(nominal, /*sourceIsParentContext*/ true); + initDecl->setValidationToChecked(); + + derived.addMembersToConformanceContext({initDecl}); + C.addSynthesizedDecl(initDecl); + + return initDecl; +} + +// Synthesize the `init(_owning:count:)` function declaration. +static ValueDecl +*deriveTensorArrayProtocol_init(DerivedConformance &derived) { + auto &C = derived.TC.Context; + + // Obtain the address type. 
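  // That is, `UnsafePointer<OpaquePointer>?`, the lowered spelling of the
  // `UnsafePointer<CTensorHandle>?` parameter in the protocol requirement.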
+ auto cTensorHandleType = C.getOpaquePointerDecl()->getDeclaredType(); + Type baseAddressType = BoundGenericType::get( + C.getUnsafePointerDecl(), Type(), {cTensorHandleType}); + Type addressType = BoundGenericType::get( + C.getOptionalDecl(), Type(), {baseAddressType}); + Type intType = C.getIntDecl()->getDeclaredType(); + Type voidType = C.getVoidDecl()->getDeclaredInterfaceType(); + + return deriveTensorArrayProtocol_constructor( + derived, C.getIdentifier("_owning"), C.getIdentifier("tensorHandles"), + addressType, C.getIdentifier("count"), intType, voidType, + deriveBodyTensorArrayProtocol_init); +} + ValueDecl *DerivedConformance::deriveTensorArrayProtocol( ValueDecl *requirement) { if (requirement->getBaseName() == TC.Context.Id_unpackTensorHandles) return deriveTensorArrayProtocol_unpackTensorHandles(*this); if (requirement->getBaseName() == TC.Context.Id_tensorHandleCount) return deriveTensorArrayProtocol_tensorHandleCount(*this); + if (requirement->getBaseName() == DeclBaseName::createConstructor()) + return deriveTensorArrayProtocol_init(*this); TC.diagnose(requirement->getLoc(), diag::broken_tensor_array_protocol_requirement); return nullptr; diff --git a/lib/Sema/DerivedConformances.cpp b/lib/Sema/DerivedConformances.cpp index 9f16124c59127..8fdecb20a0397 100644 --- a/lib/Sema/DerivedConformances.cpp +++ b/lib/Sema/DerivedConformances.cpp @@ -340,6 +340,13 @@ ValueDecl *DerivedConformance::getDerivableRequirement(TypeChecker &tc, if (argumentNames[0] == ctx.getIdentifier("_owning")) { return getRequirement(KnownProtocolKind::TensorGroup); } + } else if (argumentNames.size() == 2) { + // SWIFT_ENABLE_TENSORFLOW + // TensorArrayProtocol.init(_owning:count) + if (argumentNames[0] == ctx.getIdentifier("_owning") && + argumentNames[0] == ctx.getIdentifier("count")) { + return getRequirement(KnownProtocolKind::TensorArrayProtocol); + } } return nullptr; diff --git a/stdlib/public/TensorFlow/TensorGroup.swift b/stdlib/public/TensorFlow/TensorGroup.swift index b763f54e38c98..0982a3d765f56 100644 --- a/stdlib/public/TensorFlow/TensorGroup.swift +++ b/stdlib/public/TensorFlow/TensorGroup.swift @@ -21,6 +21,13 @@ import CTensorFlow /// This protocol is defined separately from `TensorGroup` in order for the /// number of tensors to be determined at runtime. For example, /// `[Tensor]` may have an unknown number of elements at compile time. +/// +/// This protocol can be derived automatically for structs whose stored +/// properties all conform to the `TensorGroup` protocol. It cannot be derived +/// automatically for structs whose properties all conform to +/// `TensorArrayProtocol` due to the constructor requirement (i.e., in such +/// cases it would be impossible to know how to break down `count` among the +/// stored properties). public protocol TensorArrayProtocol { /// Writes the tensor handles to `address`, which must be allocated /// with enough capacity to hold `_tensorHandleCount` handles. 
The tensor diff --git a/test/TensorFlowRuntime/tensor_array_protocol.swift b/test/TensorFlowRuntime/tensor_array_protocol.swift index 15cf6014d0a56..9669c27b05051 100644 --- a/test/TensorFlowRuntime/tensor_array_protocol.swift +++ b/test/TensorFlowRuntime/tensor_array_protocol.swift @@ -11,9 +11,9 @@ import StdlibUnittest var TensorArrayProtocolTests = TestSuite("TensorArrayProtocol") -struct Empty : TensorArrayProtocol {} +struct Empty : TensorGroup {} -struct Simple : TensorArrayProtocol { +struct Simple : TensorGroup { var w, b: Tensor } @@ -32,7 +32,7 @@ struct Nested : TensorArrayProtocol { var mixed: Mixed } -struct Generic : TensorArrayProtocol { +struct Generic : TensorArrayProtocol { var t: T var u: U } From a5fcdc2ac9a97c534f7b3826608c0120761fff60 Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Tue, 23 Apr 2019 21:15:59 -0400 Subject: [PATCH 24/30] Bug fixes. --- lib/Sema/DerivedConformanceTensorArrayProtocol.cpp | 5 ++--- lib/Sema/DerivedConformances.cpp | 2 +- test/TensorFlowRuntime/tensor_array_protocol.swift | 10 +++++----- 3 files changed, 8 insertions(+), 9 deletions(-) diff --git a/lib/Sema/DerivedConformanceTensorArrayProtocol.cpp b/lib/Sema/DerivedConformanceTensorArrayProtocol.cpp index f9998b896a3f6..f92bb4cc320c0 100644 --- a/lib/Sema/DerivedConformanceTensorArrayProtocol.cpp +++ b/lib/Sema/DerivedConformanceTensorArrayProtocol.cpp @@ -411,9 +411,8 @@ deriveBodyTensorArrayProtocol_init(AbstractFunctionDecl *funcDecl) { auto *tensorArrayProto = C.getProtocol( KnownProtocolKind::TensorArrayProtocol); auto initName = DeclName( - C, DeclBaseName::createConstructor(), - {C.getIdentifier("_owning"), C.getIdentifier("count")}); - auto *initReq = getProtocolRequirement(tensorArrayProto, initName); + C, DeclBaseName::createConstructor(), {C.getIdentifier("_owning")}); + auto *initReq = getProtocolRequirement(tensorGroupProto, initName); auto *tensorHandleCountReq = getProtocolRequirement( tensorArrayProto, C.Id_tensorHandleCount); diff --git a/lib/Sema/DerivedConformances.cpp b/lib/Sema/DerivedConformances.cpp index 8fdecb20a0397..78d821285aa0c 100644 --- a/lib/Sema/DerivedConformances.cpp +++ b/lib/Sema/DerivedConformances.cpp @@ -344,7 +344,7 @@ ValueDecl *DerivedConformance::getDerivableRequirement(TypeChecker &tc, // SWIFT_ENABLE_TENSORFLOW // TensorArrayProtocol.init(_owning:count) if (argumentNames[0] == ctx.getIdentifier("_owning") && - argumentNames[0] == ctx.getIdentifier("count")) { + argumentNames[1] == ctx.getIdentifier("count")) { return getRequirement(KnownProtocolKind::TensorArrayProtocol); } } diff --git a/test/TensorFlowRuntime/tensor_array_protocol.swift b/test/TensorFlowRuntime/tensor_array_protocol.swift index 9669c27b05051..64200dcabc524 100644 --- a/test/TensorFlowRuntime/tensor_array_protocol.swift +++ b/test/TensorFlowRuntime/tensor_array_protocol.swift @@ -17,7 +17,7 @@ struct Simple : TensorGroup { var w, b: Tensor } -struct Mixed : TensorArrayProtocol { +struct Mixed : TensorGroup { // Mutable. var string: StringTensor var float: Tensor @@ -25,14 +25,14 @@ struct Mixed : TensorArrayProtocol { let int: Tensor } -struct Nested : TensorArrayProtocol { +struct Nested : TensorGroup { // Immutable. let simple: Simple // Mutable. 
var mixed: Mixed } -struct Generic : TensorArrayProtocol { +struct Generic : TensorGroup { var t: T var u: U } @@ -157,7 +157,7 @@ TensorArrayProtocolTests.test("GenericUnpackTensorHandles") { TensorArrayProtocolTests.test("NestedGenericTensorHandleCount") { struct NestedGeneric { func function() { - struct UltraNested : TensorArrayProtocol { + struct UltraNested : TensorArrayProtocol { var a: Generic var b: Generic } @@ -181,7 +181,7 @@ TensorArrayProtocolTests.test("NestedGenericTensorHandleCount") { TensorArrayProtocolTests.test("NestedGenericUnpackTensorHandles") { struct NestedGeneric { func function() { - struct UltraNested : TensorArrayProtocol { + struct UltraNested : TensorArrayProtocol { var a: Generic var b: Generic } From 22fed1799be138cec69d8d45a3ce7f3cf0b602c8 Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Wed, 24 Apr 2019 00:08:30 -0400 Subject: [PATCH 25/30] Minor edits. --- README.md | 4 ++-- test/TensorFlowRuntime/dynamic_compilation_tensor_group.swift | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 4e664a610411b..84c7792e43385 100644 --- a/README.md +++ b/README.md @@ -152,8 +152,8 @@ Below is more information about TensorFlow-related build arguments. * Default value: None. * `tensorflow-swift-apis`: A path to the [tensorflow/swift-apis](https://github.com/tensorflow/swift-apis) deep learning library repository. * Default value: `tensorflow-swift-apis` if the [tensorflow/swift-apis](https://github.com/tensorflow/swift-apis) repository is cloned. Otherwise, none. -* `tensorflow-swift-bindings`: A generated TensorFlow Swift bindings file (`RawOpsGenerated.swift`) obtained from [tensorflow/swift-bindings](https://github.com/tensorflow/swift-bindings). - * Default value: `tensorflow-swift-bindings/RawOpsGenerated.swift` if the [tensorflow/swift-bindings](https://github.com/tensorflow/swift-bindings) repository is cloned. Otherwise, none. +* `tensorflow-swift-bindings`: A path to the [tensorflow/swift-bindings](https://github.com/tensorflow/swift-bindings) repository. + * Default value: `tensorflow-swift-bindings` if the [tensorflow/swift-bindings](https://github.com/tensorflow/swift-bindings) repository is cloned. Otherwise, none. ### Build systems diff --git a/test/TensorFlowRuntime/dynamic_compilation_tensor_group.swift b/test/TensorFlowRuntime/dynamic_compilation_tensor_group.swift index e16ff52b94479..59939eccdd04c 100644 --- a/test/TensorFlowRuntime/dynamic_compilation_tensor_group.swift +++ b/test/TensorFlowRuntime/dynamic_compilation_tensor_group.swift @@ -142,7 +142,7 @@ func some_tf_op(n : Int) { } let actual: Tensor = #tfop("Pack", - [arr], T$dtype: Float.tensorFlowDataType, axis: Int64(0)) + arr, T$dtype: Float.tensorFlowDataType, axis: Int64(0)) let expected = ShapedArray(shape: [n, 3], scalars: arr_exp) expectEqual(expected, actual.array) } From 2de2d2b29750ddfb2e1221d7555d6dc6c159e789 Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Wed, 24 Apr 2019 13:35:31 -0400 Subject: [PATCH 26/30] Addressed Dan's comments regarding the 'TensorArrayProtocol' derived conformance. 
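
Inlined the generic `deriveTensorArrayProtocol_constructor` helper into
`deriveTensorArrayProtocol_init`, so that the `_owning:` and `count:`
parameters are built directly where they are used, and simplified the
comment on the member-initialization loop.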
--- .../DerivedConformanceTensorArrayProtocol.cpp | 62 +++++++------------ 1 file changed, 24 insertions(+), 38 deletions(-) diff --git a/lib/Sema/DerivedConformanceTensorArrayProtocol.cpp b/lib/Sema/DerivedConformanceTensorArrayProtocol.cpp index f92bb4cc320c0..304efd5795c47 100644 --- a/lib/Sema/DerivedConformanceTensorArrayProtocol.cpp +++ b/lib/Sema/DerivedConformanceTensorArrayProtocol.cpp @@ -419,8 +419,7 @@ deriveBodyTensorArrayProtocol_init(AbstractFunctionDecl *funcDecl) { Type intType = C.getIntDecl()->getDeclaredType(); TypeExpr *intTE = TypeExpr::createImplicit(intType, C); - // Goes through the member TensorGroups and call - // `self.t = T(_owning:count:)`. + // Iterate over members and call `self.t = T(_owning:)`. llvm::SmallVector thenMemberExprs; llvm::SmallVector elseMemberExprs; for (auto member : nominal->getStoredProperties()) { @@ -532,25 +531,32 @@ deriveBodyTensorArrayProtocol_init(AbstractFunctionDecl *funcDecl) { /*implicit*/ true)); } -// Synthesize a constructor declaration for a `TensorArrayProtocol` -// method requirement. -static ValueDecl *deriveTensorArrayProtocol_constructor( - DerivedConformance &derived, Identifier argument1Name, - Identifier parameter1Name, Type parameter1Type, - Identifier parameter2Name, Type parameter2Type, Type returnType, - AbstractFunctionDecl::BodySynthesizer bodySynthesizer) { +// Synthesize the `init(_owning:count:)` function declaration. +static ValueDecl +*deriveTensorArrayProtocol_init(DerivedConformance &derived) { + auto &C = derived.TC.Context; + + // Obtain the address type. + auto cTensorHandleType = C.getOpaquePointerDecl()->getDeclaredType(); + Type baseAddressType = BoundGenericType::get( + C.getUnsafePointerDecl(), Type(), {cTensorHandleType}); + Type addressType = BoundGenericType::get( + C.getOptionalDecl(), Type(), {baseAddressType}); + Type intType = C.getIntDecl()->getDeclaredType(); + auto nominal = derived.Nominal; auto &C = derived.TC.Context; auto parentDC = derived.getConformanceContext(); - auto *param1 = - new (C) ParamDecl(VarDecl::Specifier::Default, SourceLoc(), SourceLoc(), - argument1Name, SourceLoc(), parameter1Name, parentDC); - param1->setInterfaceType(parameter1Type); - auto *param2 = - new (C) ParamDecl(VarDecl::Specifier::Default, SourceLoc(), SourceLoc(), - parameter2Name, SourceLoc(), parameter2Name, parentDC); - param2->setInterfaceType(parameter2Type); + auto *param1 = new (C) ParamDecl( + VarDecl::Specifier::Default, SourceLoc(), SourceLoc(), + C.getIdentifier("_owning"), SourceLoc(), C.getIdentifier("tensorHandles"), + parentDC); + param1->setInterfaceType(addressType); + auto *param2 = new (C) ParamDecl( + VarDecl::Specifier::Default, SourceLoc(), SourceLoc(), + C.getIdentifier("count"), SourceLoc(), C.getIdentifier("count"), parentDC); + param2->setInterfaceType(intType); ParameterList *params = ParameterList::create(C, {param1, param2}); DeclName name(C, DeclBaseName::createConstructor(), params); @@ -560,7 +566,7 @@ static ValueDecl *deriveTensorArrayProtocol_constructor( /*GenericParams*/ nullptr, parentDC); initDecl->setImplicit(); initDecl->setSynthesized(); - initDecl->setBodySynthesizer(bodySynthesizer); + initDecl->setBodySynthesizer(deriveBodyTensorArrayProtocol_init); if (auto env = parentDC->getGenericEnvironmentOfContext()) initDecl->setGenericEnvironment(env); @@ -574,26 +580,6 @@ static ValueDecl *deriveTensorArrayProtocol_constructor( return initDecl; } -// Synthesize the `init(_owning:count:)` function declaration. 
-static ValueDecl -*deriveTensorArrayProtocol_init(DerivedConformance &derived) { - auto &C = derived.TC.Context; - - // Obtain the address type. - auto cTensorHandleType = C.getOpaquePointerDecl()->getDeclaredType(); - Type baseAddressType = BoundGenericType::get( - C.getUnsafePointerDecl(), Type(), {cTensorHandleType}); - Type addressType = BoundGenericType::get( - C.getOptionalDecl(), Type(), {baseAddressType}); - Type intType = C.getIntDecl()->getDeclaredType(); - Type voidType = C.getVoidDecl()->getDeclaredInterfaceType(); - - return deriveTensorArrayProtocol_constructor( - derived, C.getIdentifier("_owning"), C.getIdentifier("tensorHandles"), - addressType, C.getIdentifier("count"), intType, voidType, - deriveBodyTensorArrayProtocol_init); -} - ValueDecl *DerivedConformance::deriveTensorArrayProtocol( ValueDecl *requirement) { if (requirement->getBaseName() == TC.Context.Id_unpackTensorHandles) From 93e335abb9b3e52c08d8233ace08d407dd36c43a Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Wed, 24 Apr 2019 13:41:08 -0400 Subject: [PATCH 27/30] Minor bug fix. --- lib/Sema/DerivedConformanceTensorArrayProtocol.cpp | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/lib/Sema/DerivedConformanceTensorArrayProtocol.cpp b/lib/Sema/DerivedConformanceTensorArrayProtocol.cpp index 304efd5795c47..ef7a79d188739 100644 --- a/lib/Sema/DerivedConformanceTensorArrayProtocol.cpp +++ b/lib/Sema/DerivedConformanceTensorArrayProtocol.cpp @@ -535,6 +535,8 @@ deriveBodyTensorArrayProtocol_init(AbstractFunctionDecl *funcDecl) { static ValueDecl *deriveTensorArrayProtocol_init(DerivedConformance &derived) { auto &C = derived.TC.Context; + auto nominal = derived.Nominal; + auto parentDC = derived.getConformanceContext(); // Obtain the address type. auto cTensorHandleType = C.getOpaquePointerDecl()->getDeclaredType(); @@ -544,10 +546,6 @@ static ValueDecl C.getOptionalDecl(), Type(), {baseAddressType}); Type intType = C.getIntDecl()->getDeclaredType(); - auto nominal = derived.Nominal; - auto &C = derived.TC.Context; - auto parentDC = derived.getConformanceContext(); - auto *param1 = new (C) ParamDecl( VarDecl::Specifier::Default, SourceLoc(), SourceLoc(), C.getIdentifier("_owning"), SourceLoc(), C.getIdentifier("tensorHandles"), From 6a79ac8252d257768d8b45d60403d47c3b483f62 Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Wed, 24 Apr 2019 13:58:20 -0400 Subject: [PATCH 28/30] Minor edit. 
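At the Swift level, the two `ParamDecl`s assembled in PATCH 26 and corrected in PATCH 27 give the synthesized initializer the declaration `init(_owning tensorHandles: UnsafePointer<CTensorHandle>?, count: Int)`, with `_owning` and `count` as argument labels and `tensorHandles` as the internal name of the first parameter. Conceptually, the synthesized body initializes each stored property at a running offset into the handle buffer. A sketch using the `Simple` and `Mixed` test structs from earlier in the series; the nil branch mirrors the then/else expression lists built in `deriveBodyTensorArrayProtocol_init`, and the exact offset arithmetic is an assumption:

struct Pair {
  var a: Simple  // `Simple` and `Mixed` as in tensor_array_protocol.swift above.
  var b: Mixed

  // Roughly what the derived `init(_owning:count:)` amounts to for this layout.
  init(_owning tensorHandles: UnsafePointer<CTensorHandle>?, count: Int) {
    if let base = tensorHandles {
      // Each member consumes its own handles, starting at a running offset.
      self.a = Simple(_owning: base)
      self.b = Mixed(_owning: base.advanced(by: Int(Simple._tensorHandleCount)))
    } else {
      self.a = Simple(_owning: nil)
      self.b = Mixed(_owning: nil)
    }
  }
}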
--- .../DerivedConformanceTensorArrayProtocol.cpp | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/lib/Sema/DerivedConformanceTensorArrayProtocol.cpp b/lib/Sema/DerivedConformanceTensorArrayProtocol.cpp index ef7a79d188739..751ea6640d0d1 100644 --- a/lib/Sema/DerivedConformanceTensorArrayProtocol.cpp +++ b/lib/Sema/DerivedConformanceTensorArrayProtocol.cpp @@ -515,12 +515,12 @@ deriveBodyTensorArrayProtocol_init(AbstractFunctionDecl *funcDecl) { } auto *thenBody = BraceStmt::create( - C, SourceLoc(), C.AllocateCopy(thenMemberExprs), SourceLoc(), - /*implicit*/ true); + C, SourceLoc(), C.AllocateCopy(thenMemberExprs), SourceLoc(), + /*implicit*/ true); auto *elseBody = BraceStmt::create( - C, SourceLoc(), C.AllocateCopy(elseMemberExprs), SourceLoc(), - /*implicit*/ true); + C, SourceLoc(), C.AllocateCopy(elseMemberExprs), SourceLoc(), + /*implicit*/ true); auto *ifStmt = new (C) IfStmt(LabeledStmtInfo(), /*IfLoc*/ SourceLoc(), @@ -547,13 +547,13 @@ static ValueDecl Type intType = C.getIntDecl()->getDeclaredType(); auto *param1 = new (C) ParamDecl( - VarDecl::Specifier::Default, SourceLoc(), SourceLoc(), - C.getIdentifier("_owning"), SourceLoc(), C.getIdentifier("tensorHandles"), - parentDC); + VarDecl::Specifier::Default, SourceLoc(), SourceLoc(), + C.getIdentifier("_owning"), SourceLoc(), C.getIdentifier("tensorHandles"), + parentDC); param1->setInterfaceType(addressType); auto *param2 = new (C) ParamDecl( - VarDecl::Specifier::Default, SourceLoc(), SourceLoc(), - C.getIdentifier("count"), SourceLoc(), C.getIdentifier("count"), parentDC); + VarDecl::Specifier::Default, SourceLoc(), SourceLoc(), + C.getIdentifier("count"), SourceLoc(), C.getIdentifier("count"), parentDC); param2->setInterfaceType(intType); ParameterList *params = ParameterList::create(C, {param1, param2}); From 589013341a72a8bca2da083db52f498fc0031d30 Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Wed, 24 Apr 2019 18:55:13 -0400 Subject: [PATCH 29/30] Enhancements to 'TensorArrayProtocol'. --- .../DerivedConformanceTensorArrayProtocol.cpp | 85 +++++++++++++++++++ lib/Sema/DerivedConformances.cpp | 5 ++ test/TensorFlowRuntime/tracer.swift | 4 + 3 files changed, 94 insertions(+) diff --git a/lib/Sema/DerivedConformanceTensorArrayProtocol.cpp b/lib/Sema/DerivedConformanceTensorArrayProtocol.cpp index 751ea6640d0d1..7447a4aa59f1e 100644 --- a/lib/Sema/DerivedConformanceTensorArrayProtocol.cpp +++ b/lib/Sema/DerivedConformanceTensorArrayProtocol.cpp @@ -363,6 +363,89 @@ static ValueDecl *deriveTensorArrayProtocol_tensorHandleCount( return tensorHandleCountDecl; } + +/// Derive the body for the '_typeList' getter. +static void +deriveBodyTensorArrayProtocol_typeList(AbstractFunctionDecl *funcDecl) { + auto *parentDC = funcDecl->getParent(); + auto *nominal = funcDecl->getDeclContext()->getSelfNominalTypeDecl(); + auto &C = nominal->getASTContext(); + + auto *tensorGroupProto = C.getProtocol(KnownProtocolKind::TensorGroup); + auto *typeListReq = getProtocolRequirement(tensorGroupProto, C.Id_typeList); + + // Concatenate all member `_typeList` arrays. 
+ Type arrayType = BoundGenericType::get( + C.getArrayDecl(), Type(), + {C.getTensorDataTypeDecl()->getDeclaredInterfaceType()}); + auto *arrayTypeExpr = TypeExpr::createImplicit(arrayType, C); + auto plusOpLookup = C.getArrayDecl()->lookupDirect(C.getIdentifier("+")); + assert(plusOpLookup.size() == 1 && "Ambiguous 'Array.+' operator."); + ValueDecl *plusOpDecl = plusOpLookup.front(); + auto plusOpDRE = new (C) + DeclRefExpr(plusOpDecl, DeclNameLoc(), /*Implicit*/ true); + auto plusOpExpr = new (C) + DotSyntaxCallExpr(plusOpDRE, SourceLoc(), arrayTypeExpr); + Expr *typeListExpr = ArrayExpr::create(C, SourceLoc(), {}, {}, SourceLoc()); + for (auto member : nominal->getStoredProperties()) { + auto memberType = + parentDC->mapTypeIntoContext(member->getValueInterfaceType()); + auto *memberTypeExpr = TypeExpr::createImplicit(memberType, C); + auto *memberTypeListExpr = new (C) + MemberRefExpr(memberTypeExpr, SourceLoc(), typeListReq, + DeclNameLoc(), /*Implicit*/ true); + // Create expression `lhsArg + rhsArg`. + auto *plusOpArgs = + TupleExpr::create(C, SourceLoc(), {typeListExpr, memberTypeListExpr}, + {}, {}, SourceLoc(), /*HasTrailingClosure*/ false, + /*Implicit*/ true); + typeListExpr = new (C) BinaryExpr(plusOpExpr, plusOpArgs, + /*Implicit*/ true); + } + + // Return the resulting data types array. + auto *returnStmt = new (C) ReturnStmt(SourceLoc(), typeListExpr); + auto *body = BraceStmt::create(C, SourceLoc(), {returnStmt}, SourceLoc(), + /*Implicit*/ true); + funcDecl->setBody(BraceStmt::create(C, SourceLoc(), {body}, SourceLoc(), + /*Implicit*/ true)); +} + +/// Derive a '_typeList' implementation. +static ValueDecl *deriveTensorArrayProtocol_typeList( + DerivedConformance &derived) { + auto nominal = derived.Nominal; + auto &TC = derived.TC; + ASTContext &C = TC.Context; + + auto parentDC = derived.getConformanceContext(); + Type dataTypeArrayType = BoundGenericType::get( + C.getArrayDecl(), Type(), + {C.getTensorDataTypeDecl()->getDeclaredInterfaceType()}); + auto returnType = parentDC->mapTypeIntoContext(dataTypeArrayType); + + // Create `_typeList` property declaration. + VarDecl *typeListDecl; + PatternBindingDecl *patDecl; + std::tie(typeListDecl, patDecl) = derived.declareDerivedProperty( + C.Id_typeList, returnType, returnType, /*isStatic*/ false, + /*isFinal*/ false); + + // Add `@inlinable` to the `_typeList` declaration. + if (nominal->getEffectiveAccess() > AccessLevel::Internal) + typeListDecl->getAttrs().add(new (C) InlinableAttr(/*implicit*/ true)); + + // Create `_typeList` getter. + auto *getterDecl = derived.declareDerivedPropertyGetter( + TC, typeListDecl, returnType); + getterDecl->setBodySynthesizer(deriveBodyTensorArrayProtocol_typeList); + typeListDecl->setAccessors(StorageImplInfo::getImmutableComputed(), + SourceLoc(), {getterDecl}, SourceLoc()); + derived.addMembersToConformanceContext({getterDecl, typeListDecl, patDecl}); + + return typeListDecl; +} + // Synthesize body for `init(_owning:count:)`. 
static void deriveBodyTensorArrayProtocol_init(AbstractFunctionDecl *funcDecl) { @@ -584,6 +667,8 @@ ValueDecl *DerivedConformance::deriveTensorArrayProtocol( return deriveTensorArrayProtocol_unpackTensorHandles(*this); if (requirement->getBaseName() == TC.Context.Id_tensorHandleCount) return deriveTensorArrayProtocol_tensorHandleCount(*this); + if (requirement->getBaseName() == TC.Context.Id_typeList) + return deriveTensorArrayProtocol_typeList(*this); if (requirement->getBaseName() == DeclBaseName::createConstructor()) return deriveTensorArrayProtocol_init(*this); TC.diagnose(requirement->getLoc(), diff --git a/lib/Sema/DerivedConformances.cpp b/lib/Sema/DerivedConformances.cpp index 78d821285aa0c..9aac23e0e4298 100644 --- a/lib/Sema/DerivedConformances.cpp +++ b/lib/Sema/DerivedConformances.cpp @@ -230,6 +230,11 @@ ValueDecl *DerivedConformance::getDerivableRequirement(TypeChecker &tc, // TensorArrayProtocol._tensorHandleCount if (name.isSimpleName(ctx.Id_tensorHandleCount)) return getRequirement(KnownProtocolKind::TensorArrayProtocol); + + // SWIFT_ENABLE_TENSORFLOW + // TensorArrayProtocol._typeList + if (name.isSimpleName(ctx.Id_typeList) && !requirement->isStatic()) + return getRequirement(KnownProtocolKind::TensorArrayProtocol); // SWIFT_ENABLE_TENSORFLOW // TensorGroup._typeList diff --git a/test/TensorFlowRuntime/tracer.swift b/test/TensorFlowRuntime/tracer.swift index 3aa5ba4aae8e3..d5b6c1b30a3e6 100644 --- a/test/TensorFlowRuntime/tracer.swift +++ b/test/TensorFlowRuntime/tracer.swift @@ -191,6 +191,10 @@ TracerTests.testAllBackends("Advanced") { return model._tensorHandleCount + optimizer._tensorHandleCount } + public var _typeList: [TensorDataType] { + return model._typeList + optimizer._typeList + } + func _makeInstance(owning inputs: C) -> State where C.Element == CTensorHandle { assert(inputs.count == 4) From baed6e33881dd7851adc52c40b01698d31c44c26 Mon Sep 17 00:00:00 2001 From: Anthony Platanios Date: Wed, 24 Apr 2019 21:28:43 -0400 Subject: [PATCH 30/30] TensorFlow/TensorFlowCore refactoring. 
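PATCH 29 above makes `_typeList` an instance requirement on `TensorArrayProtocol` and derives it by folding the members' lists together with `Array`'s `+` operator; the tracer test shows the hand-written equivalent, `model._typeList + optimizer._typeList`. For a struct of plain tensors, the derived getter reduces to a concatenation like the following sketch (the exact contents of `Tensor._typeList` are an assumption about the `TensorGroup` conformance):

import TensorFlow

// For `struct TwoTensors { var x: Tensor<Float>; var y: Tensor<Int32> }`,
// the derived instance `_typeList` concatenates the members' lists in
// stored-property order:
let derivedTypeList: [TensorDataType] =
    Tensor<Float>._typeList + Tensor<Int32>._typeList
// Expected: the dtypes for Float and Int32, in that order.
print(derivedTypeList)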
--- cmake/modules/SwiftSource.cmake | 3 +- include/swift/AST/ASTContext.h | 12 +- include/swift/AST/KnownIdentifiers.def | 2 +- lib/AST/ASTContext.cpp | 22 +- lib/IRGen/IRGenSIL.cpp | 2 +- .../Mandatory/TFDeabstraction.cpp | 2 +- lib/SILOptimizer/Mandatory/TFPartition.cpp | 2 +- lib/SILOptimizer/PassManager/Passes.cpp | 1 - lib/Sema/CSApply.cpp | 4 +- stdlib/public/CMakeLists.txt | 1 + stdlib/public/TensorFlow/CMakeLists.txt | 24 +- stdlib/public/TensorFlow/ExecuteOp.swift.gyb | 48 ---- stdlib/public/TensorFlow/StringOps.swift | 28 -- stdlib/public/TensorFlowCore/CMakeLists.txt | 75 +++++ .../CompilerRuntime.swift | 0 .../DataTypes.swift | 0 .../Execution.swift | 0 .../ShapedArray.swift | 24 +- .../StringTensor.swift | 0 .../Tensor.swift | 263 ++++++++++++++---- .../TensorGroup.swift | 16 +- .../TensorHandle.swift | 8 +- .../TensorProtocol.swift | 0 .../TensorShape.swift | 0 .../Threading.swift | 0 .../Utilities.swift | 0 test/TensorFlow/deabstraction_finished.swift | 4 +- test/TensorFlowRuntime/accelerator_only.swift | 2 +- 28 files changed, 341 insertions(+), 202 deletions(-) delete mode 100644 stdlib/public/TensorFlow/ExecuteOp.swift.gyb delete mode 100644 stdlib/public/TensorFlow/StringOps.swift create mode 100644 stdlib/public/TensorFlowCore/CMakeLists.txt rename stdlib/public/{TensorFlow => TensorFlowCore}/CompilerRuntime.swift (100%) rename stdlib/public/{TensorFlow => TensorFlowCore}/DataTypes.swift (100%) rename stdlib/public/{TensorFlow => TensorFlowCore}/Execution.swift (100%) rename stdlib/public/{TensorFlow => TensorFlowCore}/ShapedArray.swift (99%) rename stdlib/public/{TensorFlow => TensorFlowCore}/StringTensor.swift (100%) rename stdlib/public/{TensorFlow => TensorFlowCore}/Tensor.swift (77%) rename stdlib/public/{TensorFlow => TensorFlowCore}/TensorGroup.swift (94%) rename stdlib/public/{TensorFlow => TensorFlowCore}/TensorHandle.swift (98%) rename stdlib/public/{TensorFlow => TensorFlowCore}/TensorProtocol.swift (100%) rename stdlib/public/{TensorFlow => TensorFlowCore}/TensorShape.swift (100%) rename stdlib/public/{TensorFlow => TensorFlowCore}/Threading.swift (100%) rename stdlib/public/{TensorFlow => TensorFlowCore}/Utilities.swift (100%) diff --git a/cmake/modules/SwiftSource.cmake b/cmake/modules/SwiftSource.cmake index a7fd7ec02eac1..208ab905b2bbc 100644 --- a/cmake/modules/SwiftSource.cmake +++ b/cmake/modules/SwiftSource.cmake @@ -240,7 +240,8 @@ function(_compile_swift_files # Also, disable it for DifferentiationUnittest because resilience changes # the AD code # that gets generated (leading to additional leaks) # (see: TF-328) - if(NOT "${SWIFTFILE_MODULE_NAME}" STREQUAL "TensorFlow" AND + if(NOT "${SWIFTFILE_MODULE_NAME}" STREQUAL "TensorFlowCore" AND + NOT "${SWIFTFILE_MODULE_NAME}" STREQUAL "TensorFlow" AND NOT "${SWIFTFILE_MODULE_NAME}" STREQUAL "DifferentiationUnittest") list(APPEND swift_flags "-Xfrontend" "-enable-resilience") endif() diff --git a/include/swift/AST/ASTContext.h b/include/swift/AST/ASTContext.h index 8ad2d6f4ab31b..403c21518da4e 100644 --- a/include/swift/AST/ASTContext.h +++ b/include/swift/AST/ASTContext.h @@ -481,16 +481,16 @@ class ASTContext final { CanType getAnyObjectType() const; // SWIFT_ENABLE_TENSORFLOW - /// Retrieve the decl for TensorFlow.TensorHandle iff the TensorFlow module - /// has been imported. Otherwise, this returns null. + /// Retrieve the decl for TensorFlowCore.TensorHandle iff the TensorFlowCore + /// module has been imported. Otherwise, this returns null. 
ClassDecl *getTensorHandleDecl() const; - /// Retrieve the decl for TensorFlow.TensorShape iff the TensorFlow module - /// has been imported. Otherwise, this returns null. + /// Retrieve the decl for TensorFlowCore.TensorShape iff the TensorFlowCore + /// module has been imported. Otherwise, this returns null. StructDecl *getTensorShapeDecl() const; - /// Retrieve the decl for TensorFlow.TensorDataType iff the TensorFlow module - /// has been imported. Otherwise, this returns null. + /// Retrieve the decl for TensorFlowCore.TensorDataType iff the TensorFlowCore + /// module has been imported. Otherwise, this returns null. StructDecl *getTensorDataTypeDecl() const; /// Retrieve the type for Swift._AutoDiffTape. diff --git a/include/swift/AST/KnownIdentifiers.def b/include/swift/AST/KnownIdentifiers.def index 949e9f55281ba..26d00cee61d79 100644 --- a/include/swift/AST/KnownIdentifiers.def +++ b/include/swift/AST/KnownIdentifiers.def @@ -121,7 +121,7 @@ IDENTIFIER(withArguments) IDENTIFIER(withKeywordArguments) // SWIFT_ENABLE_TENSORFLOW -IDENTIFIER(TensorFlow) +IDENTIFIER(TensorFlowCore) // KeyPathIterable IDENTIFIER(AllKeyPaths) IDENTIFIER(allKeyPaths) diff --git a/lib/AST/ASTContext.cpp b/lib/AST/ASTContext.cpp index 5f90215d026e1..4590484cfefec 100644 --- a/lib/AST/ASTContext.cpp +++ b/lib/AST/ASTContext.cpp @@ -821,14 +821,14 @@ CanType ASTContext::getAnyObjectType() const { } // SWIFT_ENABLE_TENSORFLOW -/// Retrieve the decl for TensorFlow.TensorHandle iff the TensorFlow module has -/// been imported. Otherwise, this returns null. +/// Retrieve the decl for TensorFlowCore.TensorHandle iff the TensorFlow module +/// has been imported. Otherwise, this returns null. ClassDecl *ASTContext::getTensorHandleDecl() const { if (getImpl().TensorHandleDecl) return getImpl().TensorHandleDecl; // See if the TensorFlow module was imported. If not, return null. - auto tfModule = getLoadedModule(Id_TensorFlow); + auto tfModule = getLoadedModule(Id_TensorFlowCore); if (!tfModule) return nullptr; @@ -842,14 +842,14 @@ ClassDecl *ASTContext::getTensorHandleDecl() const { return nullptr; } -/// Retrieve the decl for TensorFlow.TensorShape iff the TensorFlow module has -/// been imported. Otherwise, this returns null. +/// Retrieve the decl for TensorFlowCore.TensorShape iff the TensorFlow module +/// has been imported. Otherwise, this returns null. StructDecl *ASTContext::getTensorShapeDecl() const { if (getImpl().TensorShapeDecl) return getImpl().TensorShapeDecl; // See if the TensorFlow module was imported. If not, return null. - auto tfModule = getLoadedModule(Id_TensorFlow); + auto tfModule = getLoadedModule(Id_TensorFlowCore); if (!tfModule) return nullptr; @@ -863,14 +863,14 @@ StructDecl *ASTContext::getTensorShapeDecl() const { return nullptr; } -/// Retrieve the decl for TensorFlow.TensorDataType iff the TensorFlow module has -/// been imported. Otherwise, this returns null. +/// Retrieve the decl for TensorFlowCore.TensorDataType iff the TensorFlow +/// module has been imported. Otherwise, this returns null. StructDecl *ASTContext::getTensorDataTypeDecl() const { if (getImpl().TensorDataTypeDecl) return getImpl().TensorDataTypeDecl; // See if the TensorFlow module was imported. If not, return null. 
- auto tfModule = getLoadedModule(Id_TensorFlow); + auto tfModule = getLoadedModule(Id_TensorFlowCore); if (!tfModule) return nullptr; @@ -987,7 +987,7 @@ ProtocolDecl *ASTContext::getProtocol(KnownProtocolKind kind) const { case KnownProtocolKind::TensorFlowDataTypeCompatible: case KnownProtocolKind::TensorSendableReceivable: case KnownProtocolKind::TensorProtocol: - M = getLoadedModule(Id_TensorFlow); + M = getLoadedModule(Id_TensorFlowCore); break; default: M = getStdlibModule(); @@ -1886,7 +1886,7 @@ ASTContext::getModule(ArrayRef> ModulePath) { (ModulePath[0].first == StdlibModuleName || ModulePath[0].first == Id_Foundation || // SWIFT_ENABLE_TENSORFLOW - ModulePath[0].first == Id_TensorFlow)) + ModulePath[0].first == Id_TensorFlowCore)) recordKnownProtocols(M); return M; } diff --git a/lib/IRGen/IRGenSIL.cpp b/lib/IRGen/IRGenSIL.cpp index eca280d5a25f1..21432012fca76 100644 --- a/lib/IRGen/IRGenSIL.cpp +++ b/lib/IRGen/IRGenSIL.cpp @@ -2007,7 +2007,7 @@ void IRGenSILFunction::visitGraphOperationInst(GraphOperationInst *i) { tf::GraphOperationInfo opInfo(i); // TODO: As an optimization, do this lookup once per CurSILFn - auto tfModule = astCtx.getLoadedModule(astCtx.Id_TensorFlow); + auto tfModule = astCtx.getLoadedModule(astCtx.Id_TensorFlowCore); assert(tfModule && "could not find TensorFlow module"); auto inputTensorGroupProto = astCtx.getProtocol(KnownProtocolKind::TensorArrayProtocol); diff --git a/lib/SILOptimizer/Mandatory/TFDeabstraction.cpp b/lib/SILOptimizer/Mandatory/TFDeabstraction.cpp index 1a57601fd4487..b40099fc80aef 100644 --- a/lib/SILOptimizer/Mandatory/TFDeabstraction.cpp +++ b/lib/SILOptimizer/Mandatory/TFDeabstraction.cpp @@ -2603,7 +2603,7 @@ void TFDeabstractionPass::run() { // If the TensorFlow module hasn't been imported by the program, don't do // anything. This avoids impacting compile time for non-TensorFlow using // Swift programs by doing extraneous analysis. - auto tfModule = ctx.getLoadedModule(ctx.Id_TensorFlow); + auto tfModule = ctx.getLoadedModule(ctx.Id_TensorFlowCore); if (!tfModule) return; diff --git a/lib/SILOptimizer/Mandatory/TFPartition.cpp b/lib/SILOptimizer/Mandatory/TFPartition.cpp index 526d1edb26550..e41335a43781c 100644 --- a/lib/SILOptimizer/Mandatory/TFPartition.cpp +++ b/lib/SILOptimizer/Mandatory/TFPartition.cpp @@ -4478,7 +4478,7 @@ void TFPartition::run() { // If the TensorFlow module hasn't been imported by the program, don't do // anything. This avoids impacting compile time for non-TensorFlow using // Swift programs by doing extraneous analysis. - tfModule = ctx.getLoadedModule(ctx.Id_TensorFlow); + tfModule = ctx.getLoadedModule(ctx.Id_TensorFlowCore); if (!tfModule) return; diff --git a/lib/SILOptimizer/PassManager/Passes.cpp b/lib/SILOptimizer/PassManager/Passes.cpp index af9dae2571d6e..4788da8f2d44f 100644 --- a/lib/SILOptimizer/PassManager/Passes.cpp +++ b/lib/SILOptimizer/PassManager/Passes.cpp @@ -144,7 +144,6 @@ void swift::runSILTFPartitionPass(SILModule &Module) { // Verify the module, if required. if (Module.getOptions().VerifyAll) Module.verify(); - } void swift::runSILOptimizationPassesWithFileSpecification(SILModule &M, diff --git a/lib/Sema/CSApply.cpp b/lib/Sema/CSApply.cpp index 9587140ce8670..4ba7e12dca696 100644 --- a/lib/Sema/CSApply.cpp +++ b/lib/Sema/CSApply.cpp @@ -2424,8 +2424,8 @@ namespace { // The result type must conform to TensorGroup or be a tuple of types that // conform to TensorGroup. 
- auto tfModule = ctx.getLoadedModule(ctx.Id_TensorFlow); - assert(tfModule && "could not find TensorFlow module"); + auto tfModule = ctx.getLoadedModule(ctx.Id_TensorFlowCore); + assert(tfModule && "could not find TensorFlowCore module"); auto tensorGroupProto = ctx.getProtocol(KnownProtocolKind::TensorGroup); assert(tensorGroupProto && "could not find TensorGroup protocol"); diff --git a/stdlib/public/CMakeLists.txt b/stdlib/public/CMakeLists.txt index 76ed423f4b6d7..b7e287a30b17f 100644 --- a/stdlib/public/CMakeLists.txt +++ b/stdlib/public/CMakeLists.txt @@ -69,6 +69,7 @@ endif() if(SWIFT_BUILD_STDLIB AND SWIFT_ENABLE_TENSORFLOW) # TODO: Add TensorFlow support for iOS/Raspberry Pi. add_subdirectory(CTensorFlow) + add_subdirectory(TensorFlowCore) add_subdirectory(TensorFlow) endif() diff --git a/stdlib/public/TensorFlow/CMakeLists.txt b/stdlib/public/TensorFlow/CMakeLists.txt index 678bcdf540d56..89710ab889d13 100644 --- a/stdlib/public/TensorFlow/CMakeLists.txt +++ b/stdlib/public/TensorFlow/CMakeLists.txt @@ -32,28 +32,7 @@ list(APPEND swift_stdlib_compile_flags "-force-single-frontend-invocation") list(APPEND swift_stdlib_compile_flags "-Onone") list(APPEND swift_stdlib_compile_flags "-DCOMPILING_TENSORFLOW_MODULE") -set(SOURCES - CompilerRuntime.swift - DataTypes.swift - Execution.swift - ShapedArray.swift - StringOps.swift - StringTensor.swift - Tensor.swift - TensorGroup.swift - TensorHandle.swift - TensorProtocol.swift - TensorShape.swift - Utilities.swift - Threading.swift - ExecuteOp.swift.gyb) - -# Copy TensorFlow bindings file, if it exists. -if (TENSORFLOW_SWIFT_BINDINGS) - file(GLOB_RECURSE TENSORFLOW_SWIFT_BINDINGS_SOURCES - "${TENSORFLOW_SWIFT_BINDINGS}/*.swift") - list(APPEND SOURCES "${TENSORFLOW_SWIFT_BINDINGS_SOURCES}") -endif() +set(SOURCES "") # Copy TensorFlow high-level API sources, if they exist. if (TENSORFLOW_SWIFT_APIS) @@ -76,6 +55,7 @@ add_swift_target_library(swiftTensorFlow ${SWIFT_STDLIB_LIBRARY_BUILD_TYPES} IS_ INCORPORATE_OBJECT_LIBRARIES swiftCTensorFlow TARGET_SDKS OSX LINUX PRIVATE_LINK_LIBRARIES "${TF_LIBRARIES}" + SWIFT_MODULE_DEPENDS TensorFlowCore SWIFT_MODULE_DEPENDS SwiftOnoneSupport SWIFT_MODULE_DEPENDS_IOS Darwin SWIFT_MODULE_DEPENDS_OSX Darwin diff --git a/stdlib/public/TensorFlow/ExecuteOp.swift.gyb b/stdlib/public/TensorFlow/ExecuteOp.swift.gyb deleted file mode 100644 index 92e04ed5319f9..0000000000000 --- a/stdlib/public/TensorFlow/ExecuteOp.swift.gyb +++ /dev/null @@ -1,48 +0,0 @@ -//===-- ExecuteOp.swift.gyb -----------------------------------*- swift -*-===// -// -// This source file is part of the Swift.org open source project -// -// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors -// Licensed under Apache License v2.0 with Runtime Library Exception -// -// See https://swift.org/LICENSE.txt for license information -// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors -// -//===----------------------------------------------------------------------===// -// -// This file contains _TFCExecuteOp which allows dispatching an op and -// returning an arbitrary set of tensor-groups. -// -// TODO: A nice wrapper for TFEOp could possibly make this simpler to use. This -// may need to be extended in order to work with multiple tfops. -// -//===----------------------------------------------------------------------===// - -@usableFromInline -func _TFCExecuteOp(_ op: CTFEOp, _ s: CTFStatus) { - var count: Int32 = 0 - var unused: CTensorHandle? 
- _TFCEagerExecute(op, &unused, &count, s) - checkOk(s) -} - -%for n in range(1, 11): -// Calls _TFCEagerExecute under the hood and unpacks into TensorGroup conforming -// types. -@usableFromInline -func _TFCExecuteOp<${", ".join(["T" + str(i) + " : TensorGroup" for i in range(n)])}> - (_ op: CTFEOp, _ s: CTFStatus) - -> (${", ".join(["T" + str(i) for i in range(n)])}) { - - var count: Int32 = ${" + ".join(["T" + str(i) + "._tensorHandleCount" for i in range(n)])} - let buffer: UnsafeMutablePointer = - UnsafeMutablePointer.allocate(capacity: Int(count)) - defer { buffer.deallocate() } - _TFCEagerExecute(op, UnsafeMutablePointer(buffer), &count, s) - checkOk(s) -%for i in range(n): -let off${i}: Int32 = ${"0" if i == 0 else "off" + str(i - 1) + " + T" + str(i - 1) + "._tensorHandleCount"} -%end - return (${", ".join(["T" + str(i) + ".init(_owning: buffer.advanced(by: Int(off" + str(i) + ")))" for i in range(n)])}) -} -%end diff --git a/stdlib/public/TensorFlow/StringOps.swift b/stdlib/public/TensorFlow/StringOps.swift deleted file mode 100644 index 907f7de106533..0000000000000 --- a/stdlib/public/TensorFlow/StringOps.swift +++ /dev/null @@ -1,28 +0,0 @@ -//===-- StringOps.swift --------------------------------------*- swift -*-===// -// -// This source file is part of the Swift.org open source project -// -// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors -// Licensed under Apache License v2.0 with Runtime Library Exception -// -// See https://swift.org/LICENSE.txt for license information -// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors -// -//===----------------------------------------------------------------------===// -// -// This file contains definitions of most string tensor operations. -// -//===----------------------------------------------------------------------===// - -//===----------------------------------------------------------------------===// -// Element-wise binary comparison -//===----------------------------------------------------------------------===// - -public extension StringTensor { - /// Computes `self == other` element-wise. - /// - Note: `elementsEqual` supports broadcasting. - @inlinable @inline(__always) - func elementsEqual(_ other: StringTensor) -> Tensor { - return Raw.equal(self, other) - } -} diff --git a/stdlib/public/TensorFlowCore/CMakeLists.txt b/stdlib/public/TensorFlowCore/CMakeLists.txt new file mode 100644 index 0000000000000..4f4b72d291be4 --- /dev/null +++ b/stdlib/public/TensorFlowCore/CMakeLists.txt @@ -0,0 +1,75 @@ +#===--- CMakeLists.txt - Build the TensorFlow support library ------------===# +# +# This source file is part of the Swift.org open source project +# +# Copyright (c) 2014 - 2017 Apple Inc. 
and the Swift project authors +# Licensed under Apache License v2.0 with Runtime Library Exception +# +# See https://swift.org/LICENSE.txt for license information +# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +# +#===----------------------------------------------------------------------===# +# +# SWIFT_ENABLE_TENSORFLOW +# +#===----------------------------------------------------------------------===# + +if(NOT SWIFT_ENABLE_TENSORFLOW) + return() +endif() + +find_package(TensorFlow REQUIRED) +message(STATUS "Building TensorFlowCore.") + +set(CMAKE_BUILD_WITH_INSTALL_RPATH TRUE) +set(swift_stdlib_compile_flags "${SWIFT_RUNTIME_SWIFT_COMPILE_FLAGS}") +list(APPEND swift_stdlib_compile_flags "-Xllvm" "-sil-inline-generics") +list(APPEND swift_stdlib_compile_flags "-Xllvm" "-sil-partial-specialization") +list(APPEND swift_stdlib_compile_flags "-Xfrontend" "-enable-sil-ownership") +list(APPEND swift_stdlib_compile_flags "-force-single-frontend-invocation") +# FIXME(SR-7972): Some tests fail when TensorFlow is optimized. +# list(APPEND swift_stdlib_compile_flags "-O" "-whole-module-optimization") +list(APPEND swift_stdlib_compile_flags "-Onone") +list(APPEND swift_stdlib_compile_flags "-DCOMPILING_TENSORFLOW_MODULE") + +set(SOURCES + CompilerRuntime.swift + DataTypes.swift + Execution.swift + ShapedArray.swift + StringTensor.swift + Tensor.swift + TensorGroup.swift + TensorHandle.swift + TensorProtocol.swift + TensorShape.swift + Utilities.swift + Threading.swift) + +# Copy TensorFlow bindings file, if it exists. +if (TENSORFLOW_SWIFT_BINDINGS) + file(GLOB_RECURSE TENSORFLOW_SWIFT_BINDINGS_SOURCES + "${TENSORFLOW_SWIFT_BINDINGS}/*.swift") + list(APPEND SOURCES "${TENSORFLOW_SWIFT_BINDINGS_SOURCES}") +endif() + +add_swift_target_library(swiftTensorFlowCore ${SWIFT_STDLIB_LIBRARY_BUILD_TYPES} IS_STDLIB + "${SOURCES}" + + INCORPORATE_OBJECT_LIBRARIES swiftCTensorFlow + TARGET_SDKS OSX LINUX + PRIVATE_LINK_LIBRARIES "${TF_LIBRARIES}" + SWIFT_MODULE_DEPENDS SwiftOnoneSupport + SWIFT_MODULE_DEPENDS_IOS Darwin + SWIFT_MODULE_DEPENDS_OSX Darwin + SWIFT_MODULE_DEPENDS_TVOS Darwin + SWIFT_MODULE_DEPENDS_WATCHOS Darwin + SWIFT_MODULE_DEPENDS_LINUX Glibc + SWIFT_MODULE_DEPENDS_FREEBSD Glibc + SWIFT_MODULE_DEPENDS_CYGWIN Glibc + SWIFT_MODULE_DEPENDS_HAIKU Glibc + ${TENSORFLOW_DEPENDS_PYTHON} + SWIFT_COMPILE_FLAGS "${swift_stdlib_compile_flags}" + LINK_FLAGS "${SWIFT_RUNTIME_SWIFT_LINK_FLAGS}" + INSTALL_IN_COMPONENT stdlib + EXTRA_RPATHS "${SWIFT_TENSORFLOW_TARGET_LIB_DIR}") diff --git a/stdlib/public/TensorFlow/CompilerRuntime.swift b/stdlib/public/TensorFlowCore/CompilerRuntime.swift similarity index 100% rename from stdlib/public/TensorFlow/CompilerRuntime.swift rename to stdlib/public/TensorFlowCore/CompilerRuntime.swift diff --git a/stdlib/public/TensorFlow/DataTypes.swift b/stdlib/public/TensorFlowCore/DataTypes.swift similarity index 100% rename from stdlib/public/TensorFlow/DataTypes.swift rename to stdlib/public/TensorFlowCore/DataTypes.swift diff --git a/stdlib/public/TensorFlow/Execution.swift b/stdlib/public/TensorFlowCore/Execution.swift similarity index 100% rename from stdlib/public/TensorFlow/Execution.swift rename to stdlib/public/TensorFlowCore/Execution.swift diff --git a/stdlib/public/TensorFlow/ShapedArray.swift b/stdlib/public/TensorFlowCore/ShapedArray.swift similarity index 99% rename from stdlib/public/TensorFlow/ShapedArray.swift rename to stdlib/public/TensorFlowCore/ShapedArray.swift index 43de753d4acf4..fe6b7232caf0f 100644 --- 
a/stdlib/public/TensorFlow/ShapedArray.swift +++ b/stdlib/public/TensorFlowCore/ShapedArray.swift @@ -436,16 +436,6 @@ public extension _ShapedArrayProtocol } } -fileprivate extension _ShapedArrayProtocol where Scalar : Equatable { - func _isEqual(to other: Self) -> Bool { - return shape == other.shape && withUnsafeBufferPointer { selfBuf in - other.withUnsafeBufferPointer { otherBuf in - selfBuf.elementsEqual(otherBuf) - } - } - } -} - //===----------------------------------------------------------------------===// // ShapedArray //===----------------------------------------------------------------------===// @@ -796,7 +786,7 @@ extension ShapedArray : CustomReflectable { } // Codable conformance. -extension ShapedArray : Codable where Scalar : Codable { +extension ShapedArray : Codable where Scalar: Codable { private enum CodingKeys: String, CodingKey { case shape case scalars @@ -1181,7 +1171,7 @@ extension ShapedArraySlice : CustomReflectable { // Codable conformance. extension ShapedArraySlice : Codable where Scalar : Codable { - private enum CodingKeys : String, CodingKey { + private enum CodingKeys: String, CodingKey { case shape case scalars } @@ -1199,3 +1189,13 @@ extension ShapedArraySlice : Codable where Scalar : Codable { self.init(shape: shape, scalars: scalars) } } + +fileprivate extension _ShapedArrayProtocol where Scalar : Equatable { + func _isEqual(to other: Self) -> Bool { + return shape == other.shape && withUnsafeBufferPointer { selfBuf in + other.withUnsafeBufferPointer { otherBuf in + selfBuf.elementsEqual(otherBuf) + } + } + } +} diff --git a/stdlib/public/TensorFlow/StringTensor.swift b/stdlib/public/TensorFlowCore/StringTensor.swift similarity index 100% rename from stdlib/public/TensorFlow/StringTensor.swift rename to stdlib/public/TensorFlowCore/StringTensor.swift diff --git a/stdlib/public/TensorFlow/Tensor.swift b/stdlib/public/TensorFlowCore/Tensor.swift similarity index 77% rename from stdlib/public/TensorFlow/Tensor.swift rename to stdlib/public/TensorFlowCore/Tensor.swift index af0ff6f0fa27c..330923c201d4f 100644 --- a/stdlib/public/TensorFlow/Tensor.swift +++ b/stdlib/public/TensorFlowCore/Tensor.swift @@ -56,7 +56,7 @@ public struct Tensor : TensorProtocol { @usableFromInline @inline(never) @_silgen_name("__tf_to_accel") -func _TFToAcclerator(_ handle: TensorHandle) -> TensorHandle { +func _TFToAccelerator(_ handle: TensorHandle) -> TensorHandle { return handle } @@ -172,7 +172,7 @@ public extension Tensor { /// Mark memory transfer to accelerator. @inlinable @inline(__always) func toAccelerator() -> Tensor { - return Tensor(handle: _TFToAcclerator(handle)) + return Tensor(handle: _TFToAccelerator(handle)) } /// Mark memory transfer to host. @@ -416,7 +416,10 @@ extension _TensorElementLiteral : ExpressibleByArrayLiteral { public typealias ArrayLiteralElement = _TensorElementLiteral @inlinable @inline(__always) public init(arrayLiteral elements: _TensorElementLiteral...) { - tensor = Raw.pack(elements.map { $0.tensor }) + // FIXME: We cannot use Raw.pack here because _TensorElementLiteral does not + // conform to tensor group and if we do, several partitioning tests fail. 
+ tensor = Tensor(handle: #tfop( + "Pack", elements, T$dtype: Scalar.tensorFlowDataType)) } } @@ -431,7 +434,10 @@ extension Tensor : ExpressibleByArrayLiteral { internal init( _tensorElementLiterals elements: [_TensorElementLiteral] ) { - self = Raw.pack(elements.map { $0.tensor }) + // FIXME: We cannot use Raw.pack here because _TensorElementLiteral does not + // conform to tensor group and if we do, several partitioning tests fail. + self.init(handle: #tfop( + "Pack", elements, T$dtype: Scalar.tensorFlowDataType)) } /// Creates a tensor initialized with the given elements. @@ -442,7 +448,84 @@ extension Tensor : ExpressibleByArrayLiteral { } //===----------------------------------------------------------------------===// -// Properties +// Scalar Conversion +//===----------------------------------------------------------------------===// + +public extension Tensor { + /// Returns `true` if `rank` is equal to 0 and `false` otherwise. + @inlinable + var isScalar: Bool { + @inline(__always) + get { + return rank == 0 + } + } + + /// Returns the single scalar element if `rank` is equal to 0 and `nil` + /// otherwise. + @inlinable + var scalar: Scalar? { + @inline(__always) + get { + return Scalar(self) + } + } + + /// Reshape to scalar. + /// - Precondition: The tensor has exactly one scalar. + @inlinable + @differentiable( + wrt: self, + vjp: _vjpScalarized where Scalar : TensorFlowFloatingPoint) + func scalarized() -> Scalar { + return _TFGetScalarOrDie(Raw.reshape( + self, shape: Tensor(handle: _TFTensorFromScalars1D([]))).handle) + } +} + +internal extension Tensor where Scalar : TensorFlowFloatingPoint { + @inlinable + func _vjpScalarized() -> (Scalar, (Scalar) -> Tensor) { + return (scalarized(), { v in Tensor(v) }) + } +} + +public extension TensorFlowScalar { + @inlinable @inline(__always) + init?(_ tensor: Tensor) { + guard let scalar = _TFGetScalar(tensor.handle) else { + return nil + } + self = scalar + } +} + +//===----------------------------------------------------------------------===// +// Array Conversion +//===----------------------------------------------------------------------===// + +public extension Tensor { + @inlinable + var array: ShapedArray { + @inline(__always) + get { + debugLog("Returning a host copy of array.") + internalConsistencyCheck(toHost().handle.isConcrete) + + // This is considered to be a well known way to produce a copy to the + // host, so an "implicit copy to host" warning should not be produced. + return toHost().handle.makeHostCopy() + } + } + + @inlinable + var scalars: [Scalar] { + return array.scalars + } +} + +//===----------------------------------------------------------------------===// +// Tensor Properties //===----------------------------------------------------------------------===// public extension Tensor { @@ -474,85 +557,153 @@ public extension Tensor { return Int(_TFGetScalarOrDie(scalarCountTensor.handle)) } } -} -//===----------------------------------------------------------------------===// -// Shape Transformations -//===----------------------------------------------------------------------===// + /// The rank of the tensor, represented as a `Tensor`. + @inlinable + var rankTensor: Tensor { + @_semantics("autodiff.nonvarying") + get { + return Raw.rank(self) + } + } -public extension Tensor { - /// Reshape to scalar. - /// - Precondition: The tensor has exactly one scalar. + /// The dimensions of the tensor, represented as a `Tensor`. 
@inlinable - @differentiable(wrt: self, - vjp: _vjpScalarized where Scalar : TensorFlowFloatingPoint) - func scalarized() -> Scalar { - return _TFGetScalarOrDie(reshaped(to: []).handle) + var shapeTensor: Tensor { + @_semantics("autodiff.nonvarying") + get { + return Raw.shape(self) + } } -} -extension Tensor where Scalar : TensorFlowFloatingPoint { + /// The number of scalars in the tensor, represented as a `Tensor`. @inlinable - func _vjpScalarized() -> (Scalar, (Scalar) -> Tensor) { - return (scalarized(), { v in Tensor(v) }) + var scalarCountTensor: Tensor { + @_semantics("autodiff.nonvarying") + get { + return Raw.size(self) + } } } //===----------------------------------------------------------------------===// -// Scalar conversion +// Equatable //===----------------------------------------------------------------------===// -public extension Tensor { - /// Returns `true` if `rank` is equal to 0 and `false` otherwise. +extension Tensor : Equatable where Scalar : Equatable { @inlinable - var isScalar: Bool { - @inline(__always) - get { - return rank == 0 - } + public static func == (lhs: Tensor, rhs: Tensor) -> Bool { + let equal = Raw.equal(lhs, rhs) + let axes = Raw.range( + start: Tensor(0), + limit: Tensor(Int32(equal.rank)), + delta: Tensor(1)) + return Raw.all(equal, reductionIndices: axes).scalarized() } - /// Returns the single scalar element if `rank` is equal to 0 and `nil` - /// otherwise. @inlinable - var scalar: Scalar? { - @inline(__always) - get { - return Scalar(self) - } + public static func != (lhs: Tensor, rhs: Tensor) -> Bool { + let equal = Raw.equal(lhs, rhs) + let axes = Raw.range( + start: Tensor(0), + limit: Tensor(Int32(equal.rank)), + delta: Tensor(1)) + return Raw.any(equal, reductionIndices: axes).scalarized() } } -public extension TensorFlowScalar { - @inlinable @inline(__always) - init?(_ tensor: Tensor) { - guard let scalar = _TFGetScalar(tensor.handle) else { - return nil - } - self = scalar - } +//===----------------------------------------------------------------------===// +// Broadcasting +//===----------------------------------------------------------------------===// + +// The following operators are placed in `TensorFlowCore` instead of +// `TensorFlow` in order to allow `Tensor` to conform to `AdditiveArithmetic` +// and `Differentiable`. + +internal extension Tensor where Scalar : Numeric { + @inlinable + func unbroadcast(toShape otherShape: Tensor) -> Tensor { + let rankDiff = Raw.expandDims( + rankTensor - otherShape.scalarCountTensor, dim: Tensor(0)) + let ones = Raw.fill(dims: rankDiff, value: Tensor(1)) + let paddedShape = Raw.concatV2([ones, otherShape], axis: Tensor(0)) + let broadcastIndices = Raw.where_(Raw.notEqual(paddedShape, shapeTensor)) + let reductionIndices = Raw.reshape( + broadcastIndices, shape: Tensor([-1])) + let unbroadcasted = Raw.sum( + self, reductionIndices: reductionIndices, keepDims: false) + return Raw.reshape(unbroadcasted, shape: otherShape) + } } //===----------------------------------------------------------------------===// -// Array Conversion +// Additive Group //===----------------------------------------------------------------------===// -public extension Tensor { +extension Tensor : AdditiveArithmetic where Scalar : Numeric { + /// A scalar zero tensor. 
@inlinable - var array: ShapedArray { - @inline(__always) - get { - debugLog("Returning a host copy of array.") - internalConsistencyCheck(toHost().handle.isConcrete) + public static var zero: Tensor { + return Tensor(0) + } - // This is considered to be a well known way to produce a copy to the - // host, so an "implicit copy to host" warning should not be produced. - return toHost().handle.makeHostCopy() - } + /// Adds two tensors and produces their sum. + /// - Note: `+` supports broadcasting. + @inlinable @inline(__always) + @differentiable( + vjp: _vjpAdd(lhs:rhs:) where Scalar : TensorFlowFloatingPoint) + public static func + (lhs: Tensor, rhs: Tensor) -> Tensor { + return Raw.add(lhs, rhs) } + /// Subtracts one tensor from another and produces their difference. + /// - Note: `-` supports broadcasting. + @inlinable @inline(__always) + @differentiable( + vjp: _vjpSubtract(lhs:rhs:) where Scalar : TensorFlowFloatingPoint) + public static func - (lhs: Tensor, rhs: Tensor) -> Tensor { + return Raw.sub(lhs, rhs) + } +} + +internal extension Tensor where Scalar : TensorFlowFloatingPoint { @inlinable - var scalars: [Scalar] { - return array.scalars + static func _vjpAdd( + lhs: Tensor, + rhs: Tensor + ) -> (Tensor, (Tensor) -> (Tensor, Tensor)) { + return (lhs + rhs, { + [lhsShape = lhs.shapeTensor, rhsShape = rhs.shapeTensor] v in + (v.unbroadcast(toShape: lhsShape), v.unbroadcast(toShape: rhsShape)) + }) + } + + @inlinable + static func _vjpSubtract( + lhs: Tensor, + rhs: Tensor + ) -> (Tensor, (Tensor) -> (Tensor, Tensor)) { + return (lhs - rhs, { + [lhsShape = lhs.shapeTensor, rhsShape = rhs.shapeTensor] v in + (v.unbroadcast(toShape: lhsShape), + Raw.neg(v).unbroadcast(toShape: rhsShape)) + }) } } + +//===----------------------------------------------------------------------===// +// Differentiable +//===----------------------------------------------------------------------===// + +extension Tensor : Differentiable where Scalar : TensorFlowFloatingPoint { + public typealias TangentVector = Tensor + public typealias CotangentVector = Tensor + public typealias AllDifferentiableVariables = Tensor + + @inlinable + public func tangentVector( + from cotangent: CotangentVector + ) -> TangentVector { + return cotangent + } +} diff --git a/stdlib/public/TensorFlow/TensorGroup.swift b/stdlib/public/TensorFlowCore/TensorGroup.swift similarity index 94% rename from stdlib/public/TensorFlow/TensorGroup.swift rename to stdlib/public/TensorFlowCore/TensorGroup.swift index 0982a3d765f56..02577eb73d8c4 100644 --- a/stdlib/public/TensorFlow/TensorGroup.swift +++ b/stdlib/public/TensorFlowCore/TensorGroup.swift @@ -36,6 +36,7 @@ public protocol TensorArrayProtocol { func _unpackTensorHandles(into address: UnsafeMutablePointer?) var _tensorHandleCount: Int32 { get } + var _typeList: [TensorDataType] { get } init(_owning tensorHandles: UnsafePointer?, count: Int) } @@ -69,13 +70,16 @@ public protocol TensorGroup : TensorArrayProtocol { public extension TensorGroup { /// The number of tensor fields in this type. static var _tensorHandleCount: Int32 { return Int32(Self._typeList.count) } - var _tensorHandleCount: Int32 { return Int32(Self._typeList.count) } /// An array of `nil`s with the same number of elements as `_outputTypeList`. /// The `nil` represents unknown shape. static var _unknownShapeList: [TensorShape?] { return Array(repeating: nil, count: _typeList.count) } + + // The following instance properties are from `TensorArrayProtocol`. 
+ var _tensorHandleCount: Int32 { return Int32(Self._typeList.count) } + var _typeList: [TensorDataType] { return Self._typeList } init(_owning tensorHandles: UnsafePointer?, count: Int) { precondition(count == Self._typeList.count) @@ -223,9 +227,13 @@ extension Array : TensorArrayProtocol where Element : TensorGroup { } public var _tensorHandleCount: Int32 { - var count: Int32 = 0 - for elem in self { count += elem._tensorHandleCount } - return count + return Element._tensorHandleCount * Int32(count) + } + + public var _typeList: [TensorDataType] { + return Array([[TensorDataType]]( + repeating: Element._typeList, + count: Int(Element._tensorHandleCount)).joined()) } public init(_owning tensorHandles: UnsafePointer?, count: Int) { diff --git a/stdlib/public/TensorFlow/TensorHandle.swift b/stdlib/public/TensorFlowCore/TensorHandle.swift similarity index 98% rename from stdlib/public/TensorFlow/TensorHandle.swift rename to stdlib/public/TensorFlowCore/TensorHandle.swift index c3014e7488c34..5c650ea2b9f1a 100644 --- a/stdlib/public/TensorFlow/TensorHandle.swift +++ b/stdlib/public/TensorFlowCore/TensorHandle.swift @@ -111,10 +111,10 @@ extension TensorHandle where Scalar : TensorFlowScalar { ) { let contiguousSize = shape.reduce(1, *) let byteCount = contiguousSize * MemoryLayout.stride - self.init(shape: shape, byteCount: byteCount) { buffer in - scalarsInitializer(buffer.bindMemory(to: Scalar.self, - capacity: contiguousSize)) - } + self.init(shape: shape, byteCount: byteCount, bufferInitializer: { buffer in + let pointer = buffer.bindMemory(to: Scalar.self, capacity: contiguousSize) + scalarsInitializer(pointer) + }) } } diff --git a/stdlib/public/TensorFlow/TensorProtocol.swift b/stdlib/public/TensorFlowCore/TensorProtocol.swift similarity index 100% rename from stdlib/public/TensorFlow/TensorProtocol.swift rename to stdlib/public/TensorFlowCore/TensorProtocol.swift diff --git a/stdlib/public/TensorFlow/TensorShape.swift b/stdlib/public/TensorFlowCore/TensorShape.swift similarity index 100% rename from stdlib/public/TensorFlow/TensorShape.swift rename to stdlib/public/TensorFlowCore/TensorShape.swift diff --git a/stdlib/public/TensorFlow/Threading.swift b/stdlib/public/TensorFlowCore/Threading.swift similarity index 100% rename from stdlib/public/TensorFlow/Threading.swift rename to stdlib/public/TensorFlowCore/Threading.swift diff --git a/stdlib/public/TensorFlow/Utilities.swift b/stdlib/public/TensorFlowCore/Utilities.swift similarity index 100% rename from stdlib/public/TensorFlow/Utilities.swift rename to stdlib/public/TensorFlowCore/Utilities.swift diff --git a/test/TensorFlow/deabstraction_finished.swift b/test/TensorFlow/deabstraction_finished.swift index 5d257156e604f..fb6bc536e85c5 100644 --- a/test/TensorFlow/deabstraction_finished.swift +++ b/test/TensorFlow/deabstraction_finished.swift @@ -42,8 +42,8 @@ public func constexprCall(a: Tensor, idx: Tensor) -> Tensor CHECK-LABEL: --- TFPartition Accelerator Result: {{.*}}constexprCall CHECK: [[A:%.*]] = graph_op "Const"() {dtype$dtype: i32 3, value$tensor: i32 0 CHECK: [[B:%.*]] = graph_op "Const" - CHECK: [[C:%.*]] = graph_op "Const" - CHECK: [[RESULT:%.*]] = graph_op "OneHot"(%0 : $TensorHandle, [[A]] : $TensorHandle, [[B]] : $TensorHandle, [[C]] : $TensorHandle) {T$dtype: i32 1, TI$dtype: i32 3, axis: i64 1, __device: "/job:localhost/replica:0/task:0/device:CPU:0"} : $TensorHandle + CHECK: [[C:%.*]] = graph_op "Const"() {dtype$dtype: i32 1, value$tensor: f32 0x0 /* 0 */, __device: "ALL_DEVICES"} : $TensorHandle // user: %4 + 
CHECK: [[RESULT:%.*]] = graph_op "OneHot"(%0 : $TensorHandle, [[A]] : $TensorHandle, [[B]] : $TensorHandle, [[C]] : $TensorHandle) {axis: i64 1, T$dtype: i32 1, TI$dtype: i32 3, __device: "/job:localhost/replica:0/task:0/device:CPU:0"} : $TensorHandle
 CHECK: return [[RESULT]]
*/

diff --git a/test/TensorFlowRuntime/accelerator_only.swift b/test/TensorFlowRuntime/accelerator_only.swift
index 78af15e457509..861a3731a63d4 100644
--- a/test/TensorFlowRuntime/accelerator_only.swift
+++ b/test/TensorFlowRuntime/accelerator_only.swift
@@ -17,7 +17,7 @@ func add_constant(_ a: Tensor<Float>) -> Tensor<Float> {
 }
 
 // CHECK-LABEL: @{{.*}}accelerator_only14matmul_and_add{{.*}}
-// CHECK: graph_op "s16accelerator_only14matmul_and_addy10TensorFlow0F0VySfGAF_AFtF.tf_only{{.*}}
+// CHECK: graph_op "s16accelerator_only14matmul_and_addy14TensorFlowCore0F0VySfGAF_AFtF.tf_only{{.*}}
 // CHECK: end sil function {{.*}}accelerator_only14matmul_and_add{{.*}}
 @TensorFlowGraph
 @inline(never)
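A closing note on the `Equatable` conformance that PATCH 30 adds to `Tensor`: `==` reduces an element-wise `equal` over all axes with `all`, so `!=` should be its negation, a reduction with `any` over the element-wise `notEqual`. (As written in the diff, `!=` reduces `any` over `equal`, which instead answers whether any element is equal.) A hand-written sketch of both reductions, using only `Raw` calls that appear elsewhere in this patch:

import TensorFlow

// All-axes index tensor for a reduction, as in the `==` implementation above.
func allAxes(_ t: Tensor<Bool>) -> Tensor<Int32> {
  return Raw.range(
    start: Tensor<Int32>(0),
    limit: Tensor<Int32>(Int32(t.rank)),
    delta: Tensor<Int32>(1))
}

// `lhs == rhs`: every element compares equal.
func tensorsEqual(_ lhs: Tensor<Float>, _ rhs: Tensor<Float>) -> Bool {
  let equal = Raw.equal(lhs, rhs)
  return Raw.all(equal, reductionIndices: allAxes(equal)).scalarized()
}

// `lhs != rhs`: at least one element differs.
func tensorsNotEqual(_ lhs: Tensor<Float>, _ rhs: Tensor<Float>) -> Bool {
  let notEqual = Raw.notEqual(lhs, rhs)
  return Raw.any(notEqual, reductionIndices: allAxes(notEqual)).scalarized()
}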