diff --git a/stdlib/public/TensorFlow/CompilerRuntime.swift b/stdlib/public/TensorFlow/CompilerRuntime.swift
index 1219a0ee32d7c..7a4cf1012a143 100644
--- a/stdlib/public/TensorFlow/CompilerRuntime.swift
+++ b/stdlib/public/TensorFlow/CompilerRuntime.swift
@@ -1910,7 +1910,7 @@ func _TFCOpSetAttrTensorShapeArray(_ op: CTFEOp,
                                    _ value: Array<TensorShape>,
                                    _ status: CTFStatus) {
   let flattenedDims = value.flatMap { $0.dimensions.map(Int64.init) }
-  let ranks = value.map { $0.rank }
+  let ranks = value.map { Int32($0.rank) }
   setAttrShapeList(op: op, attrName: attrName, flattenedDims: flattenedDims,
                    ranks: ranks, status: status)
 }
@@ -1927,12 +1927,7 @@ func _TFCOpSetAttrOptionalTensorShapeArray(_ op: CTFEOp,
     }
     return []
   }
-  let ranks = value.map { tensorShapeOpt -> Int32 in
-    if let tensorShape = tensorShapeOpt {
-      return tensorShape.rank
-    }
-    return -1
-  }
+  let ranks = value.map { shape in (shape?.rank).map(Int32.init) ?? -1 }
   setAttrShapeList(op: op, attrName: attrName, flattenedDims: flattenedDims,
                    ranks: ranks, status: status)
 }
diff --git a/stdlib/public/TensorFlow/CompositeMath.swift b/stdlib/public/TensorFlow/CompositeMath.swift
index 95356ce67ba30..6a1541536eada 100644
--- a/stdlib/public/TensorFlow/CompositeMath.swift
+++ b/stdlib/public/TensorFlow/CompositeMath.swift
@@ -44,7 +44,7 @@ public func softmax<T : TensorFlowFloatingPoint>(_ x: Tensor<T>) -> Tensor<T> {
 /// Specifically, computes `exp(x) / exp(x).sum(alongAxes: axis)`.
 @inlinable @inline(__always)
 public func softmax<T : TensorFlowFloatingPoint>(
-  _ x: Tensor<T>, alongAxis axis: Int32
+  _ x: Tensor<T>, alongAxis axis: Int
 ) -> Tensor<T> {
   let expx = exp(x)
   return expx / expx.sum(alongAxes: axis)
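For illustration, here is how the reworked `softmax(_:alongAxis:)` reads at a call site — a minimal sketch with made-up values, assuming a toolchain built from this branch; integer-literal axes are source-compatible since they now infer as `Int`:

```swift
import TensorFlow

let logits = Tensor<Float>(shape: [2, 3], scalars: [1, 2, 3, 1, 2, 3])
// Axis is now a plain Int; each row of the result sums to 1.
let probs = softmax(logits, alongAxis: 1)
// Equivalent by the documented definition above:
let manual = exp(logits) / exp(logits).sum(alongAxes: 1)
```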
diff --git a/stdlib/public/TensorFlow/Gradients.swift b/stdlib/public/TensorFlow/Gradients.swift
index 238996afd1578..4430e04354462 100644
--- a/stdlib/public/TensorFlow/Gradients.swift
+++ b/stdlib/public/TensorFlow/Gradients.swift
@@ -509,7 +509,7 @@ extension Tensor where Scalar : TensorFlowFloatingPoint {
   @inlinable
   func _vjpTransposed(
-    withPermutations permutations: [Int32]
+    withPermutations permutations: [Int]
   ) -> (Tensor, (Tensor) -> Tensor) {
     let value = transposed(withPermutations: permutations)
     return (value, { $0.transposed(withPermutations: permutations) })
   }
@@ -517,7 +517,7 @@ extension Tensor where Scalar : TensorFlowFloatingPoint {
   @inlinable
   func _vjpTransposed(
-    withPermutations permutations: Int32...
+    withPermutations permutations: Int...
   ) -> (Tensor, (Tensor) -> Tensor) {
     let value = transposed(withPermutations: permutations)
     return (value, { $0.transposed(withPermutations: permutations) })
@@ -545,7 +545,7 @@ extension Tensor where Scalar : TensorFlowFloatingPoint {
   }
 
   @inlinable
-  func _vjpSqueezingShape(at axes: [Int32]) -> (Tensor, (Tensor) -> Tensor) {
+  func _vjpSqueezingShape(at axes: [Int]) -> (Tensor, (Tensor) -> Tensor) {
     let value = squeezingShape(at: axes)
     return (value, { [shape = shapeTensor] v in
       v.reshaped(toShape: shape)
@@ -554,7 +554,7 @@ extension Tensor where Scalar : TensorFlowFloatingPoint {
 
   @inlinable
   func _vjpExpandingShape(
-    at shapeIndex: Int32
+    at shapeIndex: Int
   ) -> (Tensor, (Tensor) -> Tensor) {
     let value = expandingShape(at: shapeIndex)
     return (value, { v in
@@ -591,6 +591,15 @@ extension Tensor where Scalar : TensorFlowFloatingPoint {
     })
   }
 
+  @inlinable
+  func _vjpMean(squeezingAxes axes: [Int]) -> (Tensor, (Tensor) -> Tensor) {
+    let value = mean(squeezingAxes: axes)
+    return (value, { [shape = shapeTensor,
+                      count = axes.map { shape[$0] }.reduce(1, *)] in
+      $0.broadcast(toShape: shape) / Tensor(Scalar(count))
+    })
+  }
+
   @inlinable
   func _vjpMean(
     squeezingAxes axes: Tensor<Int32>
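The new `_vjpMean(squeezingAxes:)` pullback broadcasts the incoming gradient back to the input's shape and divides by the number of reduced elements, which it captures eagerly in the closure's capture list. A sketch of what this implies at a call site — hypothetical values, and it assumes the standard `gradient(at:in:)` entry point from the AutoDiff library:

```swift
import TensorFlow

let x = Tensor<Float>(shape: [2, 3], scalars: [1, 2, 3, 4, 5, 6])
// mean(squeezingAxes: [1]) reduces 3 elements per row, so each input
// element should receive 1/3 of the upstream gradient.
let dx = gradient(at: x) { $0.mean(squeezingAxes: [1]).sum() }
// Expected: a [2, 3] tensor filled with 0.3333.
```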
diff --git a/stdlib/public/TensorFlow/Ops.swift b/stdlib/public/TensorFlow/Ops.swift
index 2aa7b78371cae..c4bd54b0b6175 100644
--- a/stdlib/public/TensorFlow/Ops.swift
+++ b/stdlib/public/TensorFlow/Ops.swift
@@ -635,7 +635,8 @@ public extension Tensor {
     wrt: self, vjp: _vjpTransposed(withPermutations:)
     where Scalar : TensorFlowFloatingPoint
   )
-  func transposed(withPermutations permutations: [Int32]) -> Tensor {
+  func transposed(withPermutations permutations: [Int]) -> Tensor {
+    let permutations = permutations.map(Int32.init)
     return transposed(withPermutations: Tensor(permutations))
   }
 
@@ -646,7 +647,7 @@ public extension Tensor {
     wrt: self, vjp: _vjpTransposed(withPermutations:)
     where Scalar : TensorFlowFloatingPoint
   )
-  func transposed(withPermutations permutations: Int32...) -> Tensor {
+  func transposed(withPermutations permutations: Int...) -> Tensor {
     return transposed(withPermutations: permutations)
   }
 
@@ -658,21 +659,29 @@ public extension Tensor {
   )
   func transposed() -> Tensor {
     let defaultPermutations = rankTensor - 1 - Tensor<Int32>(
-      rangeFrom: 0, to: rank, stride: 1
+      rangeFrom: 0, to: Int32(rank), stride: 1
     )
     return transposed(withPermutations: Tensor(defaultPermutations))
   }
 }
 
 public extension Tensor {
+  /// Returns a concatenated tensor of the given tensors.
+  /// - Precondition: The tensors must have the same dimensions, except for the
+  ///   specified axis.
+  /// - Precondition: The axis must be in the range `-rank..<rank`.
+  @inlinable @inline(__always)
+  init(concatenating tensors: [Tensor<Scalar>], alongAxis axis: Int = 0) {
+    self = Raw.concatV2(tensors, axis: Tensor(Int32(axis)))
+  }
+
   /// Concatenates tensors along the specified axis.
   /// - Precondition: The tensors must have the same dimensions, except for the
   ///   specified axis.
   /// - Precondition: The axis must be in the range `-rank..<rank`.
   @inlinable @inline(__always)
-  func concatenated(with other: Tensor, alongAxis axis: Int32 = 0) -> Tensor {
-    return Raw.concatV2([self, other], axis: Tensor(axis))
+  func concatenated(with other: Tensor, alongAxis axis: Int = 0) -> Tensor {
+    return Raw.concatV2([self, other], axis: Tensor(Int32(axis)))
   }
 
   /// Concatenation operator.
@@ -685,27 +694,19 @@ public extension Tensor {
   static func ++ (lhs: Tensor, rhs: Tensor) -> Tensor {
     return lhs.concatenated(with: rhs)
   }
-
-  /// Returns a concatenated tensor of the given tensors.
-  /// - Precondition: The tensors must have the same dimensions, except for the
-  ///   specified axis.
-  /// - Precondition: The axis must be in the range `-rank..<rank`.
-  @inlinable @inline(__always)
-  init(concatenating tensors: [Tensor<Scalar>], alongAxis axis: Int32 = 0) {
-    self = Raw.concatV2(tensors, axis: Tensor(axis))
-  }
 }
 
 internal extension Tensor where Scalar : TensorFlowFloatingPoint {
   @inlinable @inline(__always)
-  func _vjpConcatenated(with other: Tensor, alongAxis axis: Int32)
+  func _vjpConcatenated(with other: Tensor, alongAxis axis: Int)
     -> (Tensor, (Tensor) -> (Tensor, Tensor)) {
     let idx = axis < 0 ? axis + rank : axis
     let splits = Tensor([shapeTensor[idx], other.shapeTensor[idx]])
-    return (Raw.concatV2([self, other], axis: Tensor(axis)), { result in
+    return (concatenated(with: other, alongAxis: axis), { result in
       let ret: (TensorHandle<Scalar>, TensorHandle<Scalar>) = #tfop("SplitV",
         result,
         splits,
-        Tensor(axis),
+        Tensor(Int32(axis)),
         num_split: Int64(2),
         T$dtype: Scalar.tensorFlowDataType,
         Tlen$dtype: Int32.tensorFlowDataType)
@@ -1039,7 +1040,7 @@ public extension Tensor where Scalar == Bool {
   // to the variadic method `all(squeezingAxes:)` with zero indices.
   @inlinable @inline(__always)
   func all() -> Bool {
-    let axes = Tensor<Int32>(rangeFrom: 0, to: rank, stride: 1)
+    let axes = Tensor<Int32>(rangeFrom: 0, to: Int32(rank), stride: 1)
     return _TFGetScalarOrDie(Raw.all(self, reductionIndices: axes).handle)
   }
 
@@ -1049,7 +1050,7 @@ public extension Tensor where Scalar == Bool {
   // to the variadic method `any(squeezingAxes:)` with zero indices.
   @inlinable @inline(__always)
   func any() -> Bool {
-    let axes = Tensor<Int32>(rangeFrom: 0, to: rank, stride: 1)
+    let axes = Tensor<Int32>(rangeFrom: 0, to: Int32(rank), stride: 1)
    return _TFGetScalarOrDie(Raw.any(self, reductionIndices: axes).handle)
   }
 
@@ -1058,7 +1059,8 @@ public extension Tensor where Scalar == Bool {
   /// - Parameter axes: The dimensions to reduce.
   /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
   @inlinable @inline(__always)
-  func all(squeezingAxes axes: Int32...) -> Tensor {
+  func all(squeezingAxes axes: Int...) -> Tensor {
+    let axes = axes.map(Int32.init)
     return Raw.all(self, reductionIndices: Tensor(axes), keepDims: false)
   }
 
@@ -1067,7 +1069,8 @@ public extension Tensor where Scalar == Bool {
   /// - Parameter axes: The dimensions to reduce.
   /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
   @inlinable @inline(__always)
-  func any(squeezingAxes axes: Int32...) -> Tensor {
+  func any(squeezingAxes axes: Int...) -> Tensor {
+    let axes = axes.map(Int32.init)
     return Raw.any(self, reductionIndices: Tensor(axes), keepDims: false)
   }
 
@@ -1076,7 +1079,8 @@ public extension Tensor where Scalar == Bool {
   /// - Parameter axes: The dimensions to reduce.
   /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
   @inlinable @inline(__always)
-  func all(alongAxes axes: Int32...) -> Tensor {
+  func all(alongAxes axes: Int...) -> Tensor {
+    let axes = axes.map(Int32.init)
     return Raw.all(self, reductionIndices: Tensor(axes), keepDims: true)
   }
 
@@ -1085,7 +1089,8 @@ public extension Tensor where Scalar == Bool {
   /// - Parameter axes: The dimensions to reduce.
   /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
   @inlinable @inline(__always)
-  func any(alongAxes axes: Int32...) -> Tensor {
+  func any(alongAxes axes: Int...) -> Tensor {
+    let axes = axes.map(Int32.init)
     return Raw.any(self, reductionIndices: Tensor(axes), keepDims: true)
   }
 }
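Two of the reworked APIs above in use — the relocated `init(concatenating:alongAxis:)` plus `++`, and the Bool reductions — as a sketch with hypothetical values:

```swift
import TensorFlow

let a = Tensor<Float>(shape: [2, 2], scalars: [1, 2, 3, 4])
let b = Tensor<Float>(shape: [2, 2], scalars: [5, 6, 7, 8])
let stacked = Tensor(concatenating: [a, b], alongAxis: 0)  // shape [4, 2]
let sideBySide = a.concatenated(with: b, alongAxis: 1)     // shape [2, 4]
let defaulted = a ++ b                                     // axis 0 by default

let mask = Tensor<Bool>([true, false, true])
let anyTrue = mask.any()  // true
let allTrue = mask.all()  // false
```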
@@ -1095,7 +1100,7 @@ public extension Tensor where Scalar : Numeric & Comparable {
   // to the variadic method `min(squeezingAxes:)` with zero indices.
   @inlinable @inline(__always)
   func min() -> Tensor {
-    let axes = Tensor<Int32>(rangeFrom: 0, to: rank, stride: 1)
+    let axes = Tensor<Int32>(rangeFrom: 0, to: Int32(rank), stride: 1)
     return Raw.min(self, reductionIndices: axes)
   }
 
@@ -1103,7 +1108,7 @@ public extension Tensor where Scalar : Numeric & Comparable {
   // to the variadic method `max(squeezingAxes:)` with zero indices.
   @inlinable @inline(__always)
   func max() -> Tensor {
-    let axes = Tensor<Int32>(rangeFrom: 0, to: rank, stride: 1)
+    let axes = Tensor<Int32>(rangeFrom: 0, to: Int32(rank), stride: 1)
     return Raw.max(self, reductionIndices: axes)
   }
 
@@ -1112,7 +1117,8 @@ public extension Tensor where Scalar : Numeric & Comparable {
   /// - Parameter axes: The dimensions to reduce.
   /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
   @inlinable @inline(__always)
-  func max(squeezingAxes axes: [Int32]) -> Tensor {
+  func max(squeezingAxes axes: [Int]) -> Tensor {
+    let axes = axes.map(Int32.init)
     return Raw.max(self, reductionIndices: Tensor(axes), keepDims: false)
   }
 
@@ -1121,7 +1127,7 @@ public extension Tensor where Scalar : Numeric & Comparable {
   /// - Parameter axes: The dimensions to reduce.
   /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
   @inlinable @inline(__always)
-  func max(squeezingAxes axes: Int32...) -> Tensor {
+  func max(squeezingAxes axes: Int...) -> Tensor {
     return max(squeezingAxes: axes)
   }
 
@@ -1130,7 +1136,8 @@ public extension Tensor where Scalar : Numeric & Comparable {
   /// - Parameter axes: The dimensions to reduce.
   /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
   @inlinable @inline(__always)
-  func min(squeezingAxes axes: [Int32]) -> Tensor {
+  func min(squeezingAxes axes: [Int]) -> Tensor {
+    let axes = axes.map(Int32.init)
     return Raw.min(self, reductionIndices: Tensor(axes), keepDims: false)
   }
 
@@ -1139,7 +1146,7 @@ public extension Tensor where Scalar : Numeric & Comparable {
   /// - Parameter axes: The dimensions to reduce.
   /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
   @inlinable @inline(__always)
-  func min(squeezingAxes axes: Int32...) -> Tensor {
+  func min(squeezingAxes axes: Int...) -> Tensor {
     return min(squeezingAxes: axes)
   }
 
@@ -1148,8 +1155,8 @@ public extension Tensor where Scalar : Numeric & Comparable {
   /// - Parameter axes: The dimensions to reduce.
   /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
   @inlinable @inline(__always)
-  func argmax(squeezingAxis axis: Int32) -> Tensor<Int32> {
-    return Raw.argMax(self, dimension: Tensor(axis))
+  func argmax(squeezingAxis axis: Int) -> Tensor<Int32> {
+    return Raw.argMax(self, dimension: Tensor(Int32(axis)))
   }
 
   /// Returns the indices of the minimum values along the specified axes. The
@@ -1157,8 +1164,8 @@ public extension Tensor where Scalar : Numeric & Comparable {
   /// - Parameter axes: The dimensions to reduce.
   /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
   @inlinable @inline(__always)
-  func argmin(squeezingAxis axis: Int32) -> Tensor<Int32> {
-    return Raw.argMin(self, dimension: Tensor(axis))
+  func argmin(squeezingAxis axis: Int) -> Tensor<Int32> {
+    return Raw.argMin(self, dimension: Tensor(Int32(axis)))
   }
 
   /// Returns the minimum along the specified axes. The reduced dimensions are
@@ -1166,7 +1173,8 @@ public extension Tensor where Scalar : Numeric & Comparable {
   /// retained with value 1.
   /// - Parameter axes: The dimensions to reduce.
   /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
   @inlinable @inline(__always)
-  func min(alongAxes axes: [Int32]) -> Tensor {
+  func min(alongAxes axes: [Int]) -> Tensor {
+    let axes = axes.map(Int32.init)
     return Raw.min(self, reductionIndices: Tensor(axes), keepDims: true)
   }
 
@@ -1175,7 +1183,7 @@ public extension Tensor where Scalar : Numeric & Comparable {
   /// - Parameter axes: The dimensions to reduce.
   /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
   @inlinable @inline(__always)
-  func min(alongAxes axes: Int32...) -> Tensor {
+  func min(alongAxes axes: Int...) -> Tensor {
     return min(alongAxes: axes)
   }
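A sketch of the `Int`-based reduction and arg-reduction entry points above (hypothetical values; note that `argmax`/`argmin` still return `Tensor<Int32>` index tensors — only the axis parameter changed to `Int`):

```swift
import TensorFlow

let m = Tensor<Float>(shape: [2, 3], scalars: [3, 1, 4, 1, 5, 9])
let rowMax = m.max(squeezingAxes: 1)   // [4, 9], axis squeezed away
let colMin = m.min(alongAxes: 0)       // shape [1, 3], axis retained
let hot = m.argmax(squeezingAxis: 1)   // Tensor<Int32>: [2, 2]
```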
@@ -1184,7 +1192,8 @@ public extension Tensor where Scalar : Numeric & Comparable {
   /// retained with value 1.
   /// - Parameter axes: The dimensions to reduce.
   /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
   @inlinable @inline(__always)
-  func max(alongAxes axes: [Int32]) -> Tensor {
+  func max(alongAxes axes: [Int]) -> Tensor {
+    let axes = axes.map(Int32.init)
     return Raw.max(self, reductionIndices: Tensor(axes), keepDims: true)
   }
 
@@ -1193,7 +1202,7 @@ public extension Tensor where Scalar : Numeric & Comparable {
   /// - Parameter axes: The dimensions to reduce.
   /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
   @inlinable @inline(__always)
-  func max(alongAxes axes: Int32...) -> Tensor {
+  func max(alongAxes axes: Int...) -> Tensor {
     return max(alongAxes: axes)
   }
 
@@ -1234,7 +1243,9 @@ public extension Tensor where Scalar : Numeric {
   /// - Precondition: Each value in `axes` must be in the range `-rank...rank`.
   @inlinable @inline(__always)
   @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint)
-  func sum(squeezingAxes axes: [Int32]) -> Tensor {
+  func sum(squeezingAxes axes: [Int]) -> Tensor {
+    // TODO(TF-433): Remove workaround for differentiating `map`.
+    let axes = {axes.map(Int32.init)}()
     return sum(squeezingAxes: Tensor(axes))
   }
 
@@ -1244,7 +1255,7 @@ public extension Tensor where Scalar : Numeric {
   /// - Precondition: Each value in `axes` must be in the range `-rank...rank`.
   @inlinable @inline(__always)
   @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint)
-  func sum(squeezingAxes axes: Int32...) -> Tensor {
+  func sum(squeezingAxes axes: Int...) -> Tensor {
     return sum(squeezingAxes: axes)
   }
 
@@ -1273,7 +1284,9 @@ public extension Tensor where Scalar : Numeric {
   /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
   @inlinable @inline(__always)
   @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint)
-  func sum(alongAxes axes: [Int32]) -> Tensor {
+  func sum(alongAxes axes: [Int]) -> Tensor {
+    // TODO(TF-433): Remove workaround for differentiating `map`.
+    let axes = {axes.map(Int32.init)}()
     return sum(alongAxes: Tensor(axes))
   }
 
@@ -1283,7 +1296,7 @@ public extension Tensor where Scalar : Numeric {
   /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
   @inlinable @inline(__always)
   @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint)
-  func sum(alongAxes axes: Int32...) -> Tensor {
+  func sum(alongAxes axes: Int...) -> Tensor {
     return sum(alongAxes: axes)
   }
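The recurring `let axes = {axes.map(Int32.init)}()` pattern above hides the `map` call inside an immediately applied closure because, per the TF-433 note, the differentiation pass could not yet handle an active call to `map` in a `@differentiable` body. The idiom in isolation — a sketch using a hypothetical free function `sumAlong`, not code from this patch:

```swift
import TensorFlow

func sumAlong(_ x: Tensor<Float>, _ axes: [Int]) -> Tensor<Float> {
  // Immediately applied closure keeps the non-differentiable `map`
  // out of the differentiable region; remove once TF-433 is fixed.
  let converted = { axes.map(Int32.init) }()
  return x.sum(alongAxes: Tensor<Int32>(converted))
}
```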
@@ -1306,7 +1319,9 @@ public extension Tensor where Scalar : Numeric {
   /// - Parameter axes: The dimensions to reduce.
   /// - Precondition: Each value in `axes` must be in the range `-rank...rank`.
   @inlinable @inline(__always)
-  func product(squeezingAxes axes: [Int32]) -> Tensor {
+  func product(squeezingAxes axes: [Int]) -> Tensor {
+    // TODO(TF-433): Remove workaround for differentiating `map`.
+    let axes = {axes.map(Int32.init)}()
     return product(squeezingAxes: Tensor(axes))
   }
 
@@ -1316,7 +1331,7 @@ public extension Tensor where Scalar : Numeric {
   /// - Parameter axes: The dimensions to reduce.
   /// - Precondition: Each value in `axes` must be in the range `-rank...rank`.
   @inlinable @inline(__always)
-  func product(squeezingAxes axes: Int32...) -> Tensor {
+  func product(squeezingAxes axes: Int...) -> Tensor {
     return product(squeezingAxes: axes)
   }
 
@@ -1339,7 +1354,9 @@ public extension Tensor where Scalar : Numeric {
   /// - Parameter axes: The dimensions to reduce.
   /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
   @inlinable @inline(__always)
-  func product(alongAxes axes: [Int32]) -> Tensor {
+  func product(alongAxes axes: [Int]) -> Tensor {
+    // TODO(TF-433): Remove workaround for differentiating `map`.
+    let axes = {axes.map(Int32.init)}()
     return product(alongAxes: Tensor(axes))
   }
 
@@ -1348,7 +1365,7 @@ public extension Tensor where Scalar : Numeric {
   /// - Parameter axes: The dimensions to reduce.
   /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
   @inlinable @inline(__always)
-  func product(alongAxes axes: Int32...) -> Tensor {
+  func product(alongAxes axes: Int...) -> Tensor {
     return product(alongAxes: axes)
   }
 
@@ -1373,7 +1390,9 @@ public extension Tensor where Scalar : Numeric {
   /// - Precondition: Each value in `axes` must be in the range `-rank...rank`.
   @inlinable @inline(__always)
   @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint)
-  func mean(squeezingAxes axes: [Int32]) -> Tensor {
+  func mean(squeezingAxes axes: [Int]) -> Tensor {
+    // TODO(TF-433): Remove workaround for differentiating `map`.
+    let axes = {axes.map(Int32.init)}()
     return mean(squeezingAxes: Tensor(axes))
   }
 
@@ -1383,7 +1402,7 @@ public extension Tensor where Scalar : Numeric {
   /// - Precondition: Each value in `axes` must be in the range `-rank...rank`.
   @inlinable @inline(__always)
   @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint)
-  func mean(squeezingAxes axes: Int32...) -> Tensor {
+  func mean(squeezingAxes axes: Int...) -> Tensor {
     return mean(squeezingAxes: axes)
   }
 
@@ -1412,7 +1431,9 @@ public extension Tensor where Scalar : Numeric {
   /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
   @inlinable @inline(__always)
   @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint)
-  func mean(alongAxes axes: [Int32]) -> Tensor {
+  func mean(alongAxes axes: [Int]) -> Tensor {
+    // TODO(TF-433): Remove workaround for differentiating `map`.
+    let axes = {axes.map(Int32.init)}()
     return mean(alongAxes: Tensor(axes))
   }
 
@@ -1422,7 +1443,7 @@ public extension Tensor where Scalar : Numeric {
   /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
   @inlinable @inline(__always)
   @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint)
-  func mean(alongAxes axes: Int32...) -> Tensor {
+  func mean(alongAxes axes: Int...) -> Tensor {
     return mean(alongAxes: axes)
   }
 
@@ -1445,7 +1466,9 @@ public extension Tensor where Scalar : Numeric {
   /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
   @inlinable @inline(__always)
-  func variance(squeezingAxes axes: [Int32]) -> Tensor {
+  func variance(squeezingAxes axes: [Int]) -> Tensor {
+    // TODO(TF-433): Remove workaround for differentiating `map`.
+    let axes = {axes.map(Int32.init)}()
     return variance(squeezingAxes: Tensor(axes))
   }
 
@@ -1455,7 +1478,7 @@ public extension Tensor where Scalar : Numeric {
   /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
   @inlinable @inline(__always)
-  func variance(squeezingAxes axes: Int32...) -> Tensor {
+  func variance(squeezingAxes axes: Int...) -> Tensor {
     return variance(squeezingAxes: axes)
   }
 
@@ -1484,7 +1507,9 @@ public extension Tensor where Scalar : Numeric {
   /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
   @inlinable @inline(__always)
-  func variance(alongAxes axes: [Int32]) -> Tensor {
+  func variance(alongAxes axes: [Int]) -> Tensor {
+    // TODO(TF-433): Remove workaround for differentiating `map`.
+    let axes = {axes.map(Int32.init)}()
     return variance(alongAxes: Tensor(axes))
   }
 
@@ -1494,7 +1519,7 @@ public extension Tensor where Scalar : Numeric {
   /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
   @inlinable @inline(__always)
-  func variance(alongAxes axes: Int32...) -> Tensor {
+  func variance(alongAxes axes: Int...) -> Tensor {
     return variance(alongAxes: axes)
   }
 }
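Since the `alongAxes` variants keep the reduced dimensions with size 1, their results broadcast cleanly against the original tensor — which is the typical reason to prefer them over the squeezing variants. A small standardization sketch with hypothetical values:

```swift
import TensorFlow

let x = Tensor<Float>(shape: [2, 2], scalars: [1, 2, 3, 4])
let mu = x.mean(alongAxes: 1)          // shape [2, 1], broadcastable
let sigma2 = x.variance(alongAxes: 1)  // shape [2, 1]
let standardized = (x - mu) / sqrt(sigma2)
```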
@@ -1522,7 +1547,7 @@ public extension Tensor where Scalar : TensorFlowFloatingPoint {
   /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
   @inlinable @inline(__always)
-  func standardDeviation(squeezingAxes axes: [Int32]) -> Tensor {
+  func standardDeviation(squeezingAxes axes: [Int]) -> Tensor {
     return sqrt(variance(squeezingAxes: axes))
   }
 
@@ -1533,7 +1558,7 @@ public extension Tensor where Scalar : TensorFlowFloatingPoint {
   /// - Parameter axes: The dimensions to reduce.
   /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
   @inlinable @inline(__always)
-  func standardDeviation(squeezingAxes axes: Int32...) -> Tensor {
+  func standardDeviation(squeezingAxes axes: Int...) -> Tensor {
     return standardDeviation(squeezingAxes: axes)
   }
 
@@ -1567,7 +1592,9 @@ public extension Tensor where Scalar : TensorFlowFloatingPoint {
   /// - Parameter axes: The dimensions to reduce.
   /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
   @inlinable @inline(__always)
-  func standardDeviation(alongAxes axes: [Int32]) -> Tensor {
+  func standardDeviation(alongAxes axes: [Int]) -> Tensor {
+    // TODO(TF-433): Remove workaround for differentiating `map`.
+    let axes = {axes.map(Int32.init)}()
     return standardDeviation(alongAxes: Tensor(axes))
   }
 
@@ -1579,7 +1606,7 @@ public extension Tensor where Scalar : TensorFlowFloatingPoint {
   /// - Precondition: Each value in `axes` must be in the range `-rank..<rank`.
   @inlinable @inline(__always)
-  func standardDeviation(alongAxes axes: Int32...) -> Tensor {
+  func standardDeviation(alongAxes axes: Int...) -> Tensor {
     return sqrt(variance(alongAxes: axes))
   }
 }
 
@@ -1629,7 +1656,7 @@ public extension Tensor {
   @inlinable @inline(__always)
   func broadcast(to shape: TensorShape) -> Tensor {
-    return broadcast(toShape: Tensor(shape.dimensions))
+    return broadcast(toShape: Tensor(shape.dimensions.map(Int32.init)))
   }
 
   /// Broadcast to the same shape as the specified `Tensor`.
@@ -1660,7 +1687,7 @@ public extension Tensor where Scalar : Numeric {
   @inlinable @inline(__always)
   func unbroadcast(to shape: TensorShape) -> Tensor {
-    return unbroadcast(toShape: Tensor(shape.dimensions))
+    return unbroadcast(toShape: Tensor(shape.dimensions.map(Int32.init)))
   }
 
   @inlinable @inline(__always)
@@ -1677,12 +1704,12 @@ public extension Tensor where Scalar : Numeric {
   /// Returns a padded tensor according to the specified padding sizes.
   @inlinable
   func padded(
-    forSizes sizes: [(before: Int32, after: Int32)],
+    forSizes sizes: [(before: Int, after: Int)],
     with value: Scalar = 0
   ) -> Tensor {
     let paddings = Tensor(
-      shape: [Int32(sizes.count), 2],
-      scalars: sizes.flatMap { [$0.before, $0.after] }
+      shape: [sizes.count, 2],
+      scalars: sizes.flatMap { [Int32($0.before), Int32($0.after)] }
     )
     return Raw.padV2(self, paddings: paddings, constantValues: Tensor(value))
   }
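`padded(forSizes:with:)` now takes one `(before: Int, after: Int)` pair per dimension and performs the `Int32` conversion internally when building the paddings tensor. Usage sketch (hypothetical values):

```swift
import TensorFlow

let t = Tensor<Float>(ones: [2, 2])
// One (before, after) pair per dimension: one extra leading row and
// one extra trailing column, filled with the default value 0.
let p = t.padded(forSizes: [(before: 1, after: 0), (before: 0, after: 1)])
// p.shape == [3, 3]
```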
@@ -1696,8 +1723,9 @@ public extension Tensor {
   /// Access the element tensor specified by an index in the leading dimension.
   /// - Parameter index: Index of the element tensor.
   @inlinable
-  subscript(index: Int32) -> Tensor {
+  subscript(index: Int) -> Tensor {
     get {
+      let index = Int32(index)
       let slice = Raw.stridedSlice(
         self, begin: Tensor([index]), end: Tensor([index + 1]),
         strides: Tensor([1]))
 
@@ -1714,10 +1742,12 @@ public extension Tensor {
   /// Access the subtensor specified by a contiguous range of indices.
   /// - Parameter bounds: Contiguous range of indices.
   @inlinable
-  subscript(bounds: Range<Int32>) -> Tensor {
+  subscript(bounds: Range<Int>) -> Tensor {
     return Raw.stridedSlice(
-      self, begin: Tensor([bounds.lowerBound]),
-      end: Tensor([bounds.upperBound]), strides: Tensor([1]))
+      self,
+      begin: Tensor([Int32(bounds.lowerBound)]),
+      end: Tensor([Int32(bounds.upperBound)]),
+      strides: Tensor([1]))
   }
 
   // TODO(danielzheng): Add strided slices? (increment by something different
@@ -1731,13 +1761,14 @@ public extension Tensor {
   /// - Parameter lowerBounds: The lower bounds at each dimension.
   /// - Parameter upperBounds: The upper bounds at each dimension.
   @inlinable @inline(__always)
-  func slice(lowerBounds: [Int32], upperBounds: [Int32]) -> Tensor {
+  func slice(lowerBounds: [Int], upperBounds: [Int]) -> Tensor {
     /// TODO: Precondition `lowerBounds.count == upperBounds.count`,
     /// preferably in graph.
-    let lowerBoundsTensor = Tensor(lowerBounds)
+    let lowerBoundsTensor = Tensor(lowerBounds.map(Int32.init))
+    let upperBoundsTensor = Tensor(upperBounds.map(Int32.init))
     return Raw.slice(
       self,
       begin: lowerBoundsTensor,
-      size: Tensor(upperBounds) - lowerBoundsTensor)
+      size: upperBoundsTensor - lowerBoundsTensor)
   }
 }
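The subscripts and `slice(lowerBounds:upperBounds:)` now take native `Int` indices and ranges, converting to `Int32` tensors only at the `Raw.stridedSlice`/`Raw.slice` boundary. A sketch with hypothetical values:

```swift
import TensorFlow

let t = Tensor<Float>(shape: [3, 2], scalars: [1, 2, 3, 4, 5, 6])
let first = t[0]        // element tensor at leading index 0
let middle = t[1..<3]   // shape [2, 2]: the last two rows
let corner = t.slice(lowerBounds: [0, 0], upperBounds: [2, 1])  // shape [2, 1]
```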
@_silgen_name("__tf_tensor_from_scalars_1d") func _TFTensorFromScalars1D(_ scalars: [Scalar]) -> TensorHandle { - return _TFTensorFromScalars(scalars, shape: [Int32(scalars.count)]) + return _TFTensorFromScalars(scalars, shape: [scalars.count]) } @inlinable @inline(__always) @@ -254,7 +254,7 @@ public extension Tensor { init(_ vector: C) where C.Element == Scalar { let handle = _TFHoistable { TensorHandle( - shape: [Int32(vector.count)], + shape: [vector.count], scalarsInitializer: { addr in var currentAddr = addr for scalar in vector { @@ -300,7 +300,7 @@ public extension Tensor { shape: shape.dimensions, scalarsInitializer: { addr in addr.initialize(from: scalars.baseAddress!, - count: Int(shape.contiguousSize)) + count: shape.contiguousSize) } ) } @@ -337,7 +337,8 @@ public extension Tensor { } public extension Tensor { - /// Creates a tensor with the specified shape and a single, repeated scalar value. + /// Creates a tensor with the specified shape and a single, repeated scalar + /// value. /// /// - Parameters: /// - shape: The dimensions of the tensor. @@ -357,7 +358,7 @@ public extension Tensor { @differentiable(vjp: _vjpInit(repeating:shape:) where Scalar : TensorFlowFloatingPoint) init(repeating repeatedValue: Scalar, shape: TensorShape) { - self = Raw.fill(dims: Tensor(shape.dimensions), + self = Raw.fill(dims: Tensor(shape.dimensions.map(Int64.init)), value: Tensor(repeatedValue)) } } @@ -378,7 +379,7 @@ public extension Tensor { /// all dimensions being 1. @inlinable @inline(__always) // @differentiable(where Scalar : TensorFlowFloatingPoint) - init(broadcasting scalar: Scalar, rank: Int32) { + init(broadcasting scalar: Scalar, rank: Int) { self = Tensor(scalar).reshaped(to: TensorShape(repeating: 1, count: rank)) } @@ -526,11 +527,11 @@ extension Tensor : ExpressibleByArrayLiteral { public extension Tensor { /// The number of dimensions of the `Tensor`. @inlinable - var rank: Int32 { + var rank: Int { @inline(__always) @_semantics("autodiff.nonvarying") get { - return _TFGetScalarOrDie(rankTensor.handle) + return Int(_TFGetScalarOrDie(rankTensor.handle)) } } @@ -540,16 +541,16 @@ public extension Tensor { @inline(__always) @_semantics("autodiff.nonvarying") get { - return TensorShape(shapeTensor.scalars) + return TensorShape(shapeTensor.scalars.map(Int.init)) } } /// The number of scalars in the `Tensor`. @inlinable - var scalarCount: Int32 { + var scalarCount: Int { @inline(__always) get { - return _TFGetScalarOrDie(scalarCountTensor.handle) + return Int(_TFGetScalarOrDie(scalarCountTensor.handle)) } } } @@ -623,11 +624,11 @@ public extension Tensor where Scalar : Numeric { /// - axis: The axis to fill. The default is `-1`, a new inner-most axis. /// @inlinable @inline(__always) - init(oneHotAtIndices indices: Tensor, depth: Int32, + init(oneHotAtIndices indices: Tensor, depth: Int, onValue: Scalar = 1, offValue: Scalar = 0, axis: Int = -1) { self = Raw.oneHot( indices: indices, - depth: Tensor(depth), + depth: Tensor(Int32(depth)), onValue: Tensor(onValue), offValue: Tensor(offValue), axis: Int64(axis) @@ -643,10 +644,8 @@ public extension TensorFlowScalar { /// Convert to a tensor with the specified rank, with all dimensions equal to /// 1. 
@@ -623,11 +624,11 @@ public extension Tensor where Scalar : Numeric {
   ///   - axis: The axis to fill. The default is `-1`, a new inner-most axis.
   ///
   @inlinable @inline(__always)
-  init(oneHotAtIndices indices: Tensor<Int32>, depth: Int32,
+  init(oneHotAtIndices indices: Tensor<Int32>, depth: Int,
        onValue: Scalar = 1, offValue: Scalar = 0, axis: Int = -1) {
     self = Raw.oneHot(
       indices: indices,
-      depth: Tensor(depth),
+      depth: Tensor(Int32(depth)),
       onValue: Tensor(onValue),
       offValue: Tensor(offValue),
       axis: Int64(axis)
     )
@@ -643,10 +644,8 @@ public extension TensorFlowScalar {
   /// Convert to a tensor with the specified rank, with all dimensions equal to
   /// 1.
   @inlinable @inline(__always)
-  func makeTensor(rank: Int32) -> Tensor<Self> {
-    return Raw.fill(
-      dims: Tensor(ones: TensorShape(rank)),
-      value: Tensor(self))
+  func makeTensor(rank: Int) -> Tensor<Self> {
+    return Tensor(repeating: self, shape: TensorShape(rank))
   }
 }
 
@@ -664,7 +663,8 @@ public extension Tensor {
   @inlinable @inline(__always)
   @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint)
   func reshaped(to newShape: TensorShape) -> Tensor {
-    return reshaped(toShape: Tensor(newShape.dimensions))
+    // TODO(TF-433): Remove workaround for differentiating `map`.
+    return reshaped(toShape: Tensor({newShape.dimensions.map(Int32.init)}()))
   }
 
   /// Reshape to the specified `Tensor` representing a shape.
@@ -700,8 +700,8 @@ public extension Tensor {
     wrt: self, vjp: _vjpExpandingShape(at:)
     where Scalar : TensorFlowFloatingPoint
   )
-  func expandingShape(at shapeIndex: Int32) -> Tensor {
-    return Raw.expandDims(self, dim: Tensor(shapeIndex))
+  func expandingShape(at shapeIndex: Int) -> Tensor {
+    return Raw.expandDims(self, dim: Tensor(Int32(shapeIndex)))
   }
 
   /// Remove the specified dimensions of size 1 from the shape of a tensor. If
   /// no dimensions are specified, then all dimensions of size 1 will be
   /// removed.
@@ -709,7 +709,7 @@ public extension Tensor {
   @inlinable @inline(__always)
   @differentiable(wrt: self where Scalar : TensorFlowFloatingPoint)
-  func squeezingShape(at axes: Int32...) -> Tensor {
+  func squeezingShape(at axes: Int...) -> Tensor {
     return squeezingShape(at: axes)
   }
 
@@ -721,8 +721,8 @@ public extension Tensor {
     wrt: self, vjp: _vjpSqueezingShape(at:)
     where Scalar : TensorFlowFloatingPoint
   )
-  func squeezingShape(at axes: [Int32]) -> Tensor {
-    return Raw.squeeze(self, squeezeDims: axes)
+  func squeezingShape(at axes: [Int]) -> Tensor {
+    return Raw.squeeze(self, squeezeDims: axes.map(Int32.init))
   }
 
   /// Reshape to scalar.
diff --git a/stdlib/public/TensorFlow/TensorHandle.swift b/stdlib/public/TensorFlow/TensorHandle.swift
index 4599f6c7ac202..c3014e7488c34 100644
--- a/stdlib/public/TensorFlow/TensorHandle.swift
+++ b/stdlib/public/TensorFlow/TensorHandle.swift
@@ -73,7 +73,7 @@ public final class TensorHandle : _AnyTensorHandle
   /// capacity. `bufferInitializer` must initialize the entire buffer.
   @usableFromInline
   convenience init(
-    shape: [Int32],
+    shape: [Int],
     byteCount: Int,
     bufferInitializer: (UnsafeMutableRawPointer) -> Void
   ) {
@@ -106,10 +106,10 @@ extension TensorHandle where Scalar : TensorFlowScalar {
   /// order.
   @usableFromInline
   convenience init(
-    shape: [Int32],
+    shape: [Int],
     scalarsInitializer: (UnsafeMutablePointer<Scalar>) -> Void
   ) {
-    let contiguousSize = shape.lazy.map(Int.init).reduce(1, *)
+    let contiguousSize = shape.reduce(1, *)
     let byteCount = contiguousSize * MemoryLayout<Scalar>.stride
     self.init(shape: shape, byteCount: byteCount) { buffer in
       scalarsInitializer(buffer.bindMemory(to: Scalar.self,
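The shape-manipulation entry points above now all take `Int` indices, and `makeTensor(rank:)` was simplified to reuse `init(repeating:shape:)`. A round-trip sketch with hypothetical values:

```swift
import TensorFlow

let v = Tensor<Float>([1, 2, 3])       // shape [3]
let col = v.expandingShape(at: 1)      // shape [3, 1]
let back = col.squeezingShape(at: 1)   // shape [3] again
let grid = v.reshaped(to: [3, 1])      // same shape as `col`
```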
diff --git a/stdlib/public/TensorFlow/TensorShape.swift b/stdlib/public/TensorFlow/TensorShape.swift
index 7209e4d01a275..b8a604185bada 100644
--- a/stdlib/public/TensorFlow/TensorShape.swift
+++ b/stdlib/public/TensorFlow/TensorShape.swift
@@ -11,7 +11,7 @@
 //===----------------------------------------------------------------------===//
 
 // NOTE: it may be possible to edit `TensorShape` to support "labeled tensors".
-// Dimensions may be either an Int32 or an enum representing a label.
+// Dimensions may be either an Int or an enum representing a label.
 
 /// A struct representing the shape of a tensor.
 ///
@@ -20,21 +20,29 @@
 @_fixed_layout
 public struct TensorShape : ExpressibleByArrayLiteral {
   /// The dimensions of the shape.
-  public var dimensions: [Int32]
+  public var dimensions: [Int]
 
   /// Initialize with an array of dimensions. The rank of the tensor is the
   /// length of the array.
   /// - Parameter dimensions: The shape dimensions.
   @inlinable @inline(__always)
-  public init(_ dimensions: [Int32]) {
+  public init(_ dimensions: [Int]) {
     self.dimensions = dimensions
   }
 
+  /// Initialize with a collection of dimensions. The rank of the tensor is the
+  /// length of the collection.
+  /// - Parameter dimensions: The shape dimensions.
+  @inlinable @inline(__always)
+  public init<C : Collection>(_ dimensions: C) where C.Element == Int {
+    self.dimensions = Array(dimensions)
+  }
+
   /// Initialize with an array literal representing the shape dimensions. The rank
   /// of the tensor is the number of dimensions.
   /// - Parameter dimensions: The shape dimensions.
   @inlinable @inline(__always)
-  public init(arrayLiteral elements: Int32...) {
+  public init(arrayLiteral elements: Int...) {
     self.init(elements)
   }
 
@@ -42,27 +50,27 @@ public struct TensorShape : ExpressibleByArrayLiteral {
   /// of the tensor is the number of elements.
   /// - Parameter dimensions: The shape dimensions.
   @inlinable @inline(__always)
-  public init(_ elements: Int32...) {
+  public init(_ elements: Int...) {
     self.init(elements)
   }
 
   @inlinable @inline(__always)
-  public init(repeating repeatedValue: Int32, count: Int32) {
-    self.init(Array(repeating: repeatedValue, count: Int(count)))
+  public init(repeating repeatedValue: Int, count: Int) {
+    self.init(Array(repeating: repeatedValue, count: count))
   }
 
   /// The rank of the shape (i.e. the number of dimensions).
   @inlinable
-  public var rank: Int32 {
+  public var rank: Int {
     @inline(__always)
     get {
-      return Int32(dimensions.count)
+      return dimensions.count
     }
   }
 
   /// The size of the shape as a contiguously stored array.
   @inlinable
-  public var contiguousSize: Int32 {
+  public var contiguousSize: Int {
     @inline(__always)
     get {
       return dimensions.reduce(1, *)
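With `dimensions: [Int]` and the new `Collection` initializer, `TensorShape` now interoperates directly with ordinary Swift integer sequences. Sketch with hypothetical values:

```swift
import TensorFlow

let shape = TensorShape([2, 3, 4])   // from [Int]
let fromRange = TensorShape(1...3)   // new Collection init: [1, 2, 3]
let literal: TensorShape = [2, 3, 4] // array literal of Int
let r = shape.rank                   // 3
let n = shape.contiguousSize         // 24, the product of the dimensions
```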
@@ -73,65 +81,58 @@ public extension TensorShape {
   /// The rank of the shape (i.e. the number of dimensions).
   @inlinable
-  var count: Int32 {
+  var count: Int {
     @inline(__always)
     get {
-      return Int32(dimensions.count)
+      return dimensions.count
     }
   }
 
   @inlinable
-  var indices: Range<Int32> {
+  var indices: Range<Int> {
     @inline(__always)
     get {
-      return Int32(dimensions.indices.lowerBound)
-        ..< Int32(dimensions.indices.upperBound)
+      return dimensions.indices.lowerBound
+        ..< dimensions.indices.upperBound
     }
   }
 
   @inlinable
-  var startIndex: Int32 {
+  var startIndex: Int {
     @inline(__always)
     get {
-      return Int32(dimensions.startIndex)
+      return dimensions.startIndex
     }
   }
 
   @inlinable
-  var endIndex: Int32 {
+  var endIndex: Int {
     @inline(__always)
     get {
-      return Int32(dimensions.endIndex)
+      return dimensions.endIndex
     }
   }
 
   /// Access the size of the i-th dimension.
   /// - Parameter index: The index of a dimension.
   @inlinable
-  subscript(index: Int32) -> Int32 {
+  subscript(index: Int) -> Int {
     @inline(__always)
-    _read {
-      yield dimensions[Int(index)]
-    }
+    _read { yield dimensions[index] }
     @inline(__always)
-    _modify {
-      yield &dimensions[Int(index)]
-    }
+    _modify { yield &dimensions[index] }
   }
 
   /// Access the size of the i-th dimension.
   /// - Parameter index: The index of a dimension.
   @inlinable
-  subscript(bounds: Range<Int32>) -> TensorShape {
+  subscript(bounds: Range<Int>) -> TensorShape {
     @inline(__always)
     get {
-      return TensorShape(
-        Array(dimensions[Int(bounds.lowerBound)..<Int(bounds.upperBound)]))
+      return TensorShape(dimensions[bounds])
     }
   }
 }
 
   let d = Tensor<Float>(shape: [2,2], scalars: [1,2,3,4])
   _ = a+b+c+d
 }
 
+// NOTE(TF-439): Test disabled after changing `Int32` to `Int` in TF APIs.
 /* CHECK-LABEL: ---- INPUT FUNCTION {{.*}}test75407624
  * CHECK: graph_op "Const"() {dtype$dtype: i32 1, value$tensor: [$Float: (f32 0x3F800000 /* 1 */)], shape$shape: [$Int32: i32 1]
- * CHECK: [[B1X:%.*]] = graph_op "Const"() {dtype$dtype: i32 3, value$tensor: [$Int32: (i32 1)], shape$shape: [$Int32: i32 1],
- * CHECK: [[BX2:%.*]] = graph_op "Const"() {dtype$dtype: i32 1, value$tensor: f32 0x3F800000 /* 1 */
- * CHECK: graph_op "Fill"([[B1X]] : $TensorHandle<Int32>, [[BX2]] : $TensorHandle<Float>)
- * CHECK: [[C1X:%.*]] = graph_op "Const"() {dtype$dtype: i32 3, value$tensor: [$Int32: (i32 1)], shape$shape: [$Int32: i32 1],
- * CHECK: [[CX2:%.*]] = graph_op "Const"() {dtype$dtype: i32 1, value$tensor: f32 0x3F800000 /* 1 */
- * CHECK: graph_op "Fill"([[C1X]] : $TensorHandle<Int32>, [[CX2]] : $TensorHandle<Float>)
- * CHECK: graph_op "Const"() {dtype$dtype: i32 1, value$tensor: [$Float: (f32 0x3F800000 /* 1 */), (f32 0x40000000 /* 2 */), (f32 0x40400000 /* 3 */), (f32 0x40800000 /* 4 */)], shape$shape: [$Int32: (i32 2), (i32 2)],
+ * HECK: [[B1X:%.*]] = graph_op "Const"() {dtype$dtype: i32 3, value$tensor: [$Int32: (i32 1)]
+ * HECK: [[BX2:%.*]] = graph_op "Const"() {dtype$dtype: i32 1, value$tensor: f32 0x3F800000 /* 1 */
+ * HECK: graph_op "Fill"([[B1X]] : $TensorHandle<Int32>, [[BX2]] : $TensorHandle<Float>)
+ * HECK: [[C1X:%.*]] = graph_op "Const"() {dtype$dtype: i32 3, value$tensor: [$Int32: (i32 1)], shape$shape: [$Int32: i32 1],
+ * HECK: [[CX2:%.*]] = graph_op "Const"() {dtype$dtype: i32 1, value$tensor: f32 0x3F800000 /* 1 */
+ * HECK: graph_op "Fill"([[C1X]] : $TensorHandle<Int32>, [[CX2]] : $TensorHandle<Float>)
+ * HECK: graph_op "Const"() {dtype$dtype: i32 1, value$tensor: [$Float: (f32 0x3F800000 /* 1 */), (f32 0x40000000 /* 2 */), (f32 0x40400000 /* 3 */), (f32 0x40800000 /* 4 */)], shape$shape: [$Int32: (i32 2), (i32 2)],
  * CHECK-LABEL: ---- END OF
  */
 
@@ -133,12 +134,12 @@ public func testConvolution(x: Tensor<Float>, filter: Tensor<Float>) -> Tensor<Float> {
- * CHECK: graph_op "Conv2D"({{.*}} : $TensorHandle<Float>, {{.*}} : $TensorHandle<Float>) {T$dtype: i32 1, strides: [$Int32: (i32 1), (i32 2), (i32 3), (i32 4)], use_cudnn_on_gpu: i1 -1, padding: "SAME", explicit_paddings: [$Int32: ], data_format: "NHWC", dilations: [$Int32: (i32 1), (i32 1), (i32 1), (i32 1)],
+ * HECK: graph_op "Conv2D"({{.*}} : $TensorHandle<Float>, {{.*}} : $TensorHandle<Float>) {T$dtype: i32 1, strides: [$Int32: (i32 1), (i32 2), (i32 3), (i32 4)], use_cudnn_on_gpu: i1 -1, padding: "SAME", explicit_paddings: [$Int32: ], data_format: "NHWC", dilations: [$Int32: (i32 1), (i32 1), (i32 1), (i32 1)],
  * CHECK-LABEL: ---- END OF
  */
 
-
 // SR-8463: SimpleDataset itself is not a const, but the `elementShape` field
 // is, so it can be const-evaluated.
 struct SimpleDataset {
@@ -193,7 +194,7 @@ public func testShapeList2() {
 }
 
 // CHECK-LABEL: ---- INPUT FUNCTION {{.*}}testShapeList
-// CHECK: graph_op "AnonymousIterator"() {output_types$dtype: [$TensorDataType: (((i32 3)))], output_shapes: [$TensorShape: ([$Int32: ])]
+// CHECK: graph_op "AnonymousIterator"() {output_types$dtype: [$TensorDataType: (((i32 3)))], output_shapes: [$TensorShape: ([$Int: ])]
 
 @TensorFlowGraph
 func isZero(_ x: Tensor) -> Tensor {
diff --git a/test/TensorFlow/diagnostics.swift b/test/TensorFlow/diagnostics.swift
index ecae8d44513f7..1db31fa8bef95 100644
--- a/test/TensorFlow/diagnostics.swift
+++ b/test/TensorFlow/diagnostics.swift
@@ -13,9 +13,14 @@ func testInferredElementResult() -> TensorHandle<Float> {
   _ = #tfop("bar") as TensorHandle<Float>
 }
 
+// expected-note @+1 2 {{value used here}}
 class ClassTest {
-  var w = Tensor<Float>(zeros: [1, 2]) // expected-warning {{value implicitly copied to the host}}
-  let b = Tensor<Float>(zeros: [1, 2]) // expected-warning {{value implicitly copied to the host}}
+  // expected-warning @+2 {{value implicitly copied to the host}}
+  // expected-warning @+1 {{'Tensor<Float>' implicitly copied to the accelerator}}
+  var w = Tensor<Float>(zeros: [1, 2])
+  // expected-warning @+2 {{value implicitly copied to the host}}
+  // expected-warning @+1 {{'Tensor<Float>' implicitly copied to the accelerator}}
+  let b = Tensor<Float>(zeros: [1, 2])
 
   var c : Tensor<Float> {
     return w // expected-warning {{properties in classes always cause a copy to the accelerator}}
   }
@@ -26,7 +31,9 @@ class ClassTest {
 
 public func f() {
   let x = ClassTest()
+  // expected-warning @+1 {{'Tensor<Float>' implicitly copied to the accelerator}}
   let y = x.infer(input: Tensor<Float>(ones: [2, 1]))
+  _ = y+y
 
   // expected-note @+1 {{value used here}}
   _ = x.c+x.b+x.w // expected-warning 2 {{properties in classes always cause a copy to the accelerator}}
diff --git a/test/TensorFlow/integration.swift b/test/TensorFlow/integration.swift
index b8714b8825547..749c067fe45fe 100644
--- a/test/TensorFlow/integration.swift
+++ b/test/TensorFlow/integration.swift
@@ -517,8 +517,8 @@ public func graphFuncReturningOpaqueHandles() -> (ResourceHandle, ResourceHandle) {
 }
 // CHECK-LABEL --- TFPartition Accelerator Result: {{.*}}graphFuncReturningOpaqueHandles{{.*}}
 // CHECK: bb0:
-// CHECK: [[A:%.*]] = graph_op "Iterator"() {shared_name: "foo", container: "bar", output_shapes: [$TensorShape: ([$Int32: ])], output_types$dtype: [$TensorDataType: (((i32 1)))], __device: "/job:localhost/replica:0/task:0/device:CPU:0"} : $ResourceHandle
-// CHECK: [[B:%.*]] = graph_op "Iterator"() {shared_name: "foo", container: "bar", output_shapes: [$TensorShape: ([$Int32: ])], output_types$dtype: [$TensorDataType: (((i32 1)))], __device: "/job:localhost/replica:0/task:0/device:CPU:0"} : $ResourceHandle
+// CHECK: [[A:%.*]] = graph_op "Iterator"() {shared_name: "foo", container: "bar", output_shapes: [$TensorShape: ([$Int: ])], output_types$dtype: [$TensorDataType: (((i32 1)))], __device: "/job:localhost/replica:0/task:0/device:CPU:0"} : $ResourceHandle
+// CHECK: [[B:%.*]] = graph_op "Iterator"() {shared_name: "foo", container: "bar", output_shapes: [$TensorShape: ([$Int: ])], output_types$dtype: [$TensorDataType: (((i32 1)))], __device: "/job:localhost/replica:0/task:0/device:CPU:0"} : $ResourceHandle
 // CHECK: [[C:%.*]] = tuple ([[A]] : $ResourceHandle, [[B]] : $ResourceHandle)
 // CHECK: return [[C]] : $(ResourceHandle, ResourceHandle)
diff --git a/test/TensorFlow/no_copy.swift b/test/TensorFlow/no_copy.swift
index 665f9f99bac62..7add0cbdf43ef 100644
--- a/test/TensorFlow/no_copy.swift
+++ b/test/TensorFlow/no_copy.swift
@@ -39,22 +39,23 @@ public func testEmptyScalarsArray() {
 CHECK-LABEL: --- TFPartition Accelerator Result: {{.*}}testEmptyScalarsArray
 CHECK: sil private @{{.*}}testEmptyScalarsArray{{.*}} : $@callee_owned () -> () {
 CHECK: bb0:
-CHECK: graph_op "Const"() {dtype$dtype: i32 3, value$tensor: [$Int32: ], shape$shape: [$Int32: (i32 0), (i32 20), (i32 30)],
+CHECK: graph_op "Const"() {dtype$dtype: i32 3, value$tensor: [$Int32: ], shape$shape: [$Int: (i64 0), (i64 20), (i64 30)],
 CHECK: graph_op "Add"({{.*}} : $TensorHandle<Int32>, {{.*}} : $TensorHandle<Int32>
 */
 
 // This tests the attributes necessary to get arrays of integers and strings going.
 public func testConvolution(x: Tensor<Float>, filter: Tensor<Float>) -> Tensor<Float> {
   return x.toAccelerator().convolved2D(withFilter: filter.toAccelerator(),
-                                      strides: (1, 2, 3, 4), padding: .same)
+                                       strides: (1, 2, 3, 4), padding: .same)
 }
 
-// CHECK-LABEL: --- TFPartition Accelerator Result: {{.*}}testConvolution
-// CHECK: sil private @{{.*}}testConvolution{{.*}} : $@callee_owned (TensorHandle<Float>, TensorHandle<Float>) -> TensorHandle<Float> {
-// CHECK: bb0(%0 : @unowned $TensorHandle<Float>, %1 : @unowned $TensorHandle<Float>):
-// CHECK: [[A:%.*]] = graph_op "Conv2D"(%0 : $TensorHandle<Float>, %1 : $TensorHandle<Float>) {T$dtype: i32 1, strides: [$Int32: (i32 1), (i32 2), (i32 3), (i32 4)], use_cudnn_on_gpu: i1 -1, padding: "SAME", explicit_paddings: [$Int32: ], data_format: "NHWC", dilations: [$Int32: (i32 1), (i32 1), (i32 1), (i32 1)], __device: "/job:localhost/replica:0/task:0/device:CPU:0"} : $TensorHandle<Float>
-// CHECK-NEXT: return [[A]] : $TensorHandle<Float>
-// CHECK-NEXT:}
+// NOTE(TF-439): Test disabled after changing `Int32` to `Int` in TF APIs.
+// HECK-LABEL: --- TFPartition Accelerator Result: {{.*}}testConvolution
+// HECK: sil private @{{.*}}testConvolution{{.*}} : $@callee_owned (TensorHandle<Float>, TensorHandle<Float>) -> TensorHandle<Float> {
+// HECK: bb0(%0 : @unowned $TensorHandle<Float>, %1 : @unowned $TensorHandle<Float>):
+// HECK: [[A:%.*]] = graph_op "Conv2D"(%0 : $TensorHandle<Float>, %1 : $TensorHandle<Float>) {T$dtype: i32 1, strides: [$Int32: (i32 1), (i32 2), (i32 3), (i32 4)], use_cudnn_on_gpu: i1 -1, padding: "SAME", explicit_paddings: [$Int32: ], data_format: "NHWC", dilations: [$Int32: (i32 1), (i32 1), (i32 1), (i32 1)], __device: "/job:localhost/replica:0/task:0/device:CPU:0"} : $TensorHandle<Float>
+// HECK-NEXT: return [[A]] : $TensorHandle<Float>
+// HECK-NEXT:}
 
 // Testcase for an op that uses the $shape modifier.
 public func tensorShapeModifier() {
diff --git a/test/TensorFlowRuntime/dataset_api.swift b/test/TensorFlowRuntime/dataset_api.swift
index be8ea98a98e96..ca845da2d97a1 100644
--- a/test/TensorFlowRuntime/dataset_api.swift
+++ b/test/TensorFlowRuntime/dataset_api.swift
@@ -26,7 +26,7 @@ DatasetAPITests.testAllBackends("SingleValueManualIterator") {
     .reshaped(to: [5, 1])
   let dataset = Dataset(elements: scalars)
   var iterator = dataset.makeIterator()
-  var i: Int32 = 0
+  var i: Int = 0
   while let item = iterator.next() {
     expectEqual(scalars[i].array, item.array)
     i += 1
@@ -38,7 +38,7 @@ DatasetAPITests.testAllBackends("DatasetIteration") {
   let scalars = Tensor<Float>(rangeFrom: 0, to: 5, stride: 1)
     .reshaped(to: [5, 1])
   let dataset = Dataset(elements: scalars)
-  var i: Int32 = 0
+  var i: Int = 0
   for item in dataset {
     expectEqual(scalars[i].array, item.array)
     i += 1
@@ -94,7 +94,7 @@ DatasetAPITests.testAllBackends("DoubleValueDatasetIteration") {
   let scalars2 = Tensor<Float>(rangeFrom: 5, to: 10, stride: 1)
   let datasetLeft = Dataset(elements: scalars1)
   let datasetRight = Dataset(elements: scalars2)
-  var i: Int32 = 0
+  var i: Int = 0
   for pair in zip(datasetLeft, datasetRight) {
     expectEqual(scalars1[i].array, pair.first.array)
     expectEqual(scalars2[i].array, pair.second.array)
diff --git a/test/TensorFlowRuntime/dynamic_attributes.swift b/test/TensorFlowRuntime/dynamic_attributes.swift
index 7be9f10269587..48b911ee50945 100644
--- a/test/TensorFlowRuntime/dynamic_attributes.swift
+++ b/test/TensorFlowRuntime/dynamic_attributes.swift
@@ -32,9 +32,9 @@ func loadDtypeDouble() -> TensorDataType {
   return dtypeDouble
 }
 
-var stridesInt32 = (Int32(1), Int32(1), Int32(1), Int32(1))
+var stridesInt32 = [Int32(1), Int32(1), Int32(1), Int32(1)]
 @inline(never)
-func loadStridesInt32() -> (Int32, Int32, Int32, Int32) {
+func loadStridesInt32() -> [Int32] {
   return stridesInt32
 }
 
@@ -259,9 +259,10 @@ DynamicAttributeTests.testAllBackends("NormalAttribute Array") {
 }
 
 DynamicAttributeTests.testAllBackends("NormalAttribute Array") {
-  let result = convImage.convolved2D(withFilter: convFilter,
-                                     strides: loadStridesInt32(),
-                                     padding: .valid)
+  let result: Tensor<Float> = #tfop("Conv2D", convImage, convFilter,
+    T$dtype: Float.tensorFlowDataType,
+    strides: loadStridesInt32(),
+    padding: "VALID")
   expectPointwiseNearlyEqual(convExpectedResult, result.array)
 }
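The `dataset_api.swift` updates above follow directly from the API change: the loop counter indexes `scalars[i]`, and `Tensor`'s leading-dimension subscript now takes `Int`, so the counters become plain `Int`. A self-contained sketch of the same pattern outside the test harness (hypothetical values, using `assert` in place of `expectEqual`):

```swift
import TensorFlow

let scalars = Tensor<Float>(rangeFrom: 0, to: 5, stride: 1)
  .reshaped(to: [5, 1])
let dataset = Dataset(elements: scalars)
var i = 0  // inferred as Int, matching the Int-based subscript
for item in dataset {
  assert(scalars[i].array == item.array)
  i += 1
}
```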
diff --git a/test/TensorFlowRuntime/model_autodiff_runtime.swift b/test/TensorFlowRuntime/model_autodiff_runtime.swift
index 9799b5519160f..248d769559ba5 100644
--- a/test/TensorFlowRuntime/model_autodiff_runtime.swift
+++ b/test/TensorFlowRuntime/model_autodiff_runtime.swift
@@ -47,10 +47,8 @@ public struct Dense<Scalar : TensorFlowFloatingPoint>: Layer {
 
 public extension Dense where Scalar.RawSignificand: FixedWidthInteger {
   init(inputSize: Int, outputSize: Int, activation: @escaping Activation) {
-    self.init(weight: Tensor(
-                glorotUniform: [Int32(inputSize), Int32(outputSize)]
-              ),
-              bias: Tensor(zeros: [Int32(outputSize)]),
+    self.init(weight: Tensor(glorotUniform: [inputSize, outputSize]),
+              bias: Tensor(zeros: [outputSize]),
               activation: activation)
   }
 }
@@ -61,7 +59,7 @@ public struct Conv2D<Scalar : TensorFlowFloatingPoint>: Layer {
   public var bias: Tensor<Scalar>
   public typealias Activation = @differentiable (Tensor<Scalar>) -> Tensor<Scalar>
   @noDerivative public let activation: Activation
-  @noDerivative public let strides: (Int32, Int32)
+  @noDerivative public let strides: (Int, Int)
   @noDerivative public let padding: Padding
 
   @differentiable
@@ -75,7 +73,7 @@ public struct Conv2D<Scalar : TensorFlowFloatingPoint>: Layer {
     self.filter = filter
     self.bias = bias
     self.activation = activation
-    self.strides = (Int32(strides.0), Int32(strides.1))
+    self.strides = strides
     self.padding = padding
   }
diff --git a/test/TensorFlowRuntime/raw_ops.swift b/test/TensorFlowRuntime/raw_ops.swift
index b84c51fcdf072..94b97fe1e8291 100644
--- a/test/TensorFlowRuntime/raw_ops.swift
+++ b/test/TensorFlowRuntime/raw_ops.swift
@@ -22,13 +22,12 @@ public func testPointwiseBinaryOp<T : Equatable>(
   swiftOp: (Float, Float) -> T) {
   let lhsScalars: [Float] = [3, 1, 4, 1, 5, 9, 2, 7]
   let rhsScalars: [Float] = [2, 7, 1, 8, 2, 8, 1, 7]
-  let shape = [2, 4]
-  let tensorShape: TensorShape = TensorShape(shape.map { Int32($0) })
-  let lhs = Tensor(shape: tensorShape, scalars: lhsScalars)
-  let rhs = Tensor(shape: tensorShape, scalars: rhsScalars)
+  let shape: TensorShape = [2, 4]
+  let lhs = Tensor(shape: shape, scalars: lhsScalars)
+  let rhs = Tensor(shape: shape, scalars: rhsScalars)
 
   let tfResult = tfOp(lhs, rhs)
-  expectEqual(ShapedArray(shape: shape,
+  expectEqual(ShapedArray(shape: shape.dimensions,
                           scalars: zip(lhsScalars, rhsScalars).map(swiftOp)),
               tfResult.array)
 }
diff --git a/test/TensorFlowRuntime/tensor_autodiff_indirect.swift b/test/TensorFlowRuntime/tensor_autodiff_indirect.swift
index 4fb0399be1911..aad48f5a3c642 100644
--- a/test/TensorFlowRuntime/tensor_autodiff_indirect.swift
+++ b/test/TensorFlowRuntime/tensor_autodiff_indirect.swift
@@ -92,10 +92,8 @@ public struct Dense<Scalar : TensorFlowFloatingPoint>: Layer {
 
 public extension Dense where Scalar.RawSignificand: FixedWidthInteger {
   init(inputSize: Int, outputSize: Int, activation: @escaping Activation) {
-    self.init(weight: Tensor(
-                glorotUniform: [Int32(inputSize), Int32(outputSize)]
-              ),
-              bias: Tensor(zeros: [Int32(outputSize)]),
+    self.init(weight: Tensor(glorotUniform: [inputSize, outputSize]),
+              bias: Tensor(zeros: [outputSize]),
               activation: activation)
   }
 }
diff --git a/utils/update_checkout/update-checkout-config.json b/utils/update_checkout/update-checkout-config.json
index c2a180fe15c68..f3f838203d174 100644
--- a/utils/update_checkout/update-checkout-config.json
+++ b/utils/update_checkout/update-checkout-config.json
@@ -242,7 +242,7 @@
             "icu": "release-61-1",
             "tensorflow": "d1db9860a24af2ce64626fe4c3bee69f83700afa",
             "tensorflow-swift-bindings": "a7ccb727514414d31df9e403f34fa923bdf6a519",
-            "tensorflow-swift-apis": "16d87eb6aa36bec77570819d8ab00ef71ec3eece"
+            "tensorflow-swift-apis": "23c16ae33a3826399b01caeb1b0b736531d00bde"
         }
     }
 }