193 changes: 0 additions & 193 deletions stdlib/public/TensorFlow/Gradients.swift
@@ -596,199 +596,6 @@ extension Tensor where Scalar : TensorFlowFloatingPoint {
}
}

//===----------------------------------------------------------------------===//
// Normalization
//===----------------------------------------------------------------------===//

extension Tensor where Scalar : TensorFlowFloatingPoint {
// TODO: Verify that these calculations are correct.
@inlinable
func _vjpBatchNormalized(
alongAxis axis: Int32,
offset: Tensor,
scale: Tensor,
epsilon: Scalar
) -> (Tensor, (Tensor) -> (Tensor, Tensor, Tensor)) {
let value = batchNormalized(alongAxis: axis, offset: offset, scale: scale,
epsilon: epsilon)
return (value, { v in
let mean = self.mean(alongAxes: axis)
let squaredDiff: Tensor = Raw.squaredDifference(self, mean)
let variance = squaredDiff.mean(alongAxes: axis)

let diff = self - mean
let inv = rsqrt(variance + epsilon)
let norm = diff * inv

let dNorm = v * scale
let dVariance = -(dNorm * diff).sum(alongAxes: axis) / 2 * pow(inv, -3)
let dMean = (-dNorm * inv).sum(alongAxes: axis) +
dVariance * (-diff * 2).mean(alongAxes: axis)
let dOffset = v.sum(alongAxes: axis)
let dScale = (norm * v).sum(alongAxes: axis)
let dim = Tensor(Tensor<Int32>(self.shapeTensor[axis]))
let tmp = (dNorm * inv) + (dVariance * 2 * dMean / dim)
let dSelf = tmp + (dMean / dim)
return (dSelf, dOffset, dScale)
})
}
}
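
// A minimal plain-Swift sketch of the textbook batch-norm pullback that
// `_vjpBatchNormalized` above computes; `[Float]` stands in for `Tensor` so it
// runs with the standard library alone, and the names are illustrative rather
// than library API. Note that the standard derivation uses inv^3, i.e.
// (variance + epsilon)^(-3/2), in the dVariance term.
func batchNormPullback(
  x: [Float], dy: [Float], scale: Float, epsilon: Float = 0.001
) -> (dx: [Float], dOffset: Float, dScale: Float) {
  let n = Float(x.count)
  let mean = x.reduce(0, +) / n
  let diff = x.map { $0 - mean }
  let variance = diff.map { $0 * $0 }.reduce(0, +) / n
  let inv = 1 / (variance + epsilon).squareRoot()   // (variance + epsilon)^(-1/2)

  var dOffset: Float = 0, dScale: Float = 0
  var dVariance: Float = 0, dMean: Float = 0
  for i in x.indices {
    let dNorm = dy[i] * scale
    dOffset += dy[i]                                // gradient w.r.t. offset
    dScale += diff[i] * inv * dy[i]                 // gradient w.r.t. scale
    dVariance += -dNorm * diff[i] / 2 * (inv * inv * inv)
    dMean += -dNorm * inv
  }
  for i in x.indices { dMean += dVariance * (-2 * diff[i]) / n }
  let dx = x.indices.map { i in
    dy[i] * scale * inv + dVariance * 2 * diff[i] / n + dMean / n
  }
  return (dx, dOffset, dScale)
}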

//===----------------------------------------------------------------------===//
// Convolution and pooling
//===----------------------------------------------------------------------===//

extension Tensor where Scalar : TensorFlowFloatingPoint {
/// TensorFlow builtin conv2d gradient helper for the input.
@inlinable
@differentiable(
wrt: (filter, backpropOutput),
vjp: _vjpTFConv2DBackpropInput(_:_:_:_:_:)
)
func _TFConv2DBackpropInput(
shape: Tensor<Int32>,
filter: Tensor,
backpropOutput: Tensor,
strides: (Int32, Int32, Int32, Int32),
padding: Padding
) -> Tensor {
return Raw.conv2DBackpropInput(
inputSizes: shape,
filter: filter,
outBackprop: backpropOutput,
strides: [strides.0, strides.1, strides.2, strides.3],
padding: padding.raw)
}

/// TensorFlow builtin conv2d gradient helper for the filter.
@inlinable
@differentiable(
wrt: (input, backpropOutput),
vjp: _vjpTFConv2DBackpropFilter(_:_:_:_:_:)
)
func _TFConv2DBackpropFilter(
input: Tensor,
filterSizes: Tensor<Int32>,
backpropOutput: Tensor,
strides: (Int32, Int32, Int32, Int32),
padding: Padding
) -> Tensor {
return Raw.conv2DBackpropFilter(
input,
filterSizes: filterSizes,
outBackprop: backpropOutput,
strides: [strides.0, strides.1, strides.2, strides.3],
padding: padding.raw)
}

@inlinable
func _vjpTFConv2DBackpropInput(
_ shape: Tensor<Int32>,
_ filter: Tensor,
_ backpropOutput: Tensor,
_ strides: (Int32, Int32, Int32, Int32),
_ padding: Padding
) -> (Tensor, (Tensor) -> (Tensor, Tensor)) {
let value = _TFConv2DBackpropInput(shape: shape, filter: filter,
backpropOutput: backpropOutput,
strides: strides, padding: padding)
return (value, { v in
return (
self._TFConv2DBackpropFilter(input: v, filterSizes: shape,
backpropOutput: backpropOutput,
strides: strides, padding: padding),
v.convolved2D(withFilter: filter, strides: strides, padding: padding)
)
})
}

@inlinable
func _vjpTFConv2DBackpropFilter(
_ input: Tensor,
_ filterSizes: Tensor<Int32>,
_ backpropOutput: Tensor,
_ strides: (Int32, Int32, Int32, Int32),
_ padding: Padding
) -> (Tensor, (Tensor) -> (Tensor, Tensor)) {
let value = _TFConv2DBackpropFilter(input: input, filterSizes: filterSizes,
backpropOutput: backpropOutput,
strides: strides, padding: padding)
return (value, { v in
return (
self._TFConv2DBackpropInput(shape: filterSizes, filter: v,
backpropOutput: backpropOutput,
strides: strides, padding: padding),
input.convolved2D(withFilter: v, strides: strides, padding: padding)
)
})
}

@inlinable
func _vjpConvolved2D(
filter: Tensor,
strides: (Int32, Int32, Int32, Int32),
padding: Padding
) -> (Tensor, (Tensor) -> (Tensor, Tensor)) {
let value = convolved2D(withFilter: filter, strides: strides,
padding: padding)
return (value, { v in
return (
self._TFConv2DBackpropInput(
shape: self.shapeTensor, filter: filter, backpropOutput: v,
strides: strides, padding: padding
),
self._TFConv2DBackpropFilter(
input: self, filterSizes: filter.shapeTensor, backpropOutput: v,
strides: strides, padding: padding
)
)
})
}

@inlinable
func _vjpMaxPooled(
kernelSize: (Int32, Int32, Int32, Int32),
strides: (Int32, Int32, Int32, Int32),
padding: Padding
) -> (Tensor, (Tensor) -> Tensor) {
// TODO: Currently this is not higher order differentiable. Redefine in
// closed form.
let value = maxPooled(kernelSize: kernelSize, strides: strides,
padding: padding)
return (value, { v in
return Raw.maxPoolGradV2(
origInput: self,
origOutput: value,
grad: v,
ksize: Tensor<Int32>(kernelSize),
strides: Tensor<Int32>(strides),
padding: padding.raw
)
})
}

@inlinable
func _vjpAveragePooled(
kernelSize: (Int32, Int32, Int32, Int32),
strides: (Int32, Int32, Int32, Int32),
padding: Padding
) -> (Tensor, (Tensor) -> Tensor) {
// TODO: Currently this is not higher order differentiable. Redefine in
// closed form.
let value = averagePooled(kernelSize: kernelSize, strides: strides,
padding: padding)
return (value, { v in
return Raw.avgPoolGrad(
origInputShape: self.shapeTensor,
grad: v,
ksize: [kernelSize.0, kernelSize.1, kernelSize.2, kernelSize.3],
strides: [strides.0, strides.1, strides.2, strides.3],
padding: padding.raw
)
})
}
}
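
// A minimal plain-Swift sketch of the gradient structure the conv2D VJPs above
// rely on: the pullback w.r.t. the input is a "backprop input" pass and the
// pullback w.r.t. the filter is a "backprop filter" pass. Reduced to 1-D,
// stride 1, VALID padding; the names are illustrative, not library API.
func conv1DValid(_ x: [Float], _ w: [Float]) -> [Float] {
  var y = [Float](repeating: 0, count: x.count - w.count + 1)
  for i in y.indices {
    for k in w.indices { y[i] += x[i + k] * w[k] }
  }
  return y
}

// Pullback: given the upstream gradient v = dL/dy, return (dL/dx, dL/dw).
func conv1DValidPullback(
  x: [Float], w: [Float], v: [Float]
) -> (dx: [Float], dw: [Float]) {
  var dx = [Float](repeating: 0, count: x.count)
  var dw = [Float](repeating: 0, count: w.count)
  for i in v.indices {
    for k in w.indices {
      dx[i + k] += v[i] * w[k]   // analogue of Raw.conv2DBackpropInput
      dw[k] += v[i] * x[i + k]   // analogue of Raw.conv2DBackpropFilter
    }
  }
  return (dx, dw)
}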

//===----------------------------------------------------------------------===//
// Composite math
//===----------------------------------------------------------------------===//
138 changes: 0 additions & 138 deletions stdlib/public/TensorFlow/Ops.swift
@@ -1587,141 +1587,3 @@ public extension Tensor {
size: Tensor<Int32>(upperBounds) - lowerBoundsTensor)
}
}

//===----------------------------------------------------------------------===//
// Normalization
//===----------------------------------------------------------------------===//

public extension Tensor where Scalar : BinaryFloatingPoint {
/// Computes the batch normalized tensor along the specified axis.
///
/// Specifically, returns `(self - mu) / sqrt(var + epsilon) * gamma + beta` where
/// `mu` and `var` are respectively the mean and variance of `self` along
/// `axis`.
///
/// - Parameters:
/// - axis: The batch dimension.
/// - offset: The offset, also known as beta.
/// - scale: The scale, also known as gamma.
/// - epsilon: A small value added to the denominator for numerical
/// stability.
@inlinable
@differentiable(
wrt: (self, offset, scale), vjp: _vjpBatchNormalized
where Scalar : TensorFlowFloatingPoint
)
func batchNormalized(
alongAxis axis: Int32,
offset: Tensor = Tensor(0),
scale: Tensor = Tensor(1),
epsilon: Scalar = 0.001
) -> Tensor {
let mean = self.mean(alongAxes: axis)
let squaredDiff: Tensor = Raw.squaredDifference(self, mean)
let variance = squaredDiff.mean(alongAxes: axis)
let inv = rsqrt(variance + epsilon) * scale
return self * inv + offset - mean * inv
}
}
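
// A plain-Swift sketch of the formula above for a 1-D batch (names are
// illustrative, not library API): y = (x - mu) * gamma / sqrt(var + epsilon) + beta,
// computed in the same rearranged form as the method body,
// x * inv + offset - mean * inv with inv = scale / sqrt(variance + epsilon).
func batchNormalized1D(
  _ x: [Float], offset: Float = 0, scale: Float = 1, epsilon: Float = 0.001
) -> [Float] {
  let n = Float(x.count)
  let mean = x.reduce(0, +) / n
  let variance = x.map { ($0 - mean) * ($0 - mean) }.reduce(0, +) / n
  let inv = scale / (variance + epsilon).squareRoot()
  return x.map { $0 * inv + offset - mean * inv }
}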

//===----------------------------------------------------------------------===//
// Convolution and pooling
//===----------------------------------------------------------------------===//

/// A padding scheme. Used by padding, convolution, and pooling ops.
// @_frozen // SR-9739
public enum Padding {
/// The "valid" padding scheme.
case valid
/// The "same" padding scheme.
case same
}

internal extension Padding {
@inlinable
var raw: Raw.Padding {
switch self {
case .same: return .same
case .valid: return .valid
}
}
}

public extension Tensor where Scalar : FloatingPoint {
/// Computes a 2-D convolution using `self` as input, with the specified
/// filter, strides, and padding.
///
/// - Parameters:
/// - filter: The convolution filter.
/// - strides: The strides of the sliding filter for each dimension of the
/// input.
/// - padding: The padding for the operation.
/// - Precondition: `self` must have rank 4.
/// - Precondition: `filter` must have rank 4.
@inlinable @inline(__always)
@differentiable(
wrt: (self, filter), vjp: _vjpConvolved2D(filter:strides:padding:)
where Scalar : TensorFlowFloatingPoint
)
func convolved2D(
withFilter filter: Tensor,
strides: (Int32, Int32, Int32, Int32),
padding: Padding
) -> Tensor {
return Raw.conv2D(
self,
filter: filter,
strides: [strides.0, strides.1, strides.2, strides.3],
padding: padding.raw)
}

/// Computes a 2-D max pooling, with the specified kernel sizes, strides, and
/// padding.
///
/// - Parameters:
/// - kernelSize: The dimensions of the pooling kernel.
/// - strides: The strides of the sliding filter for each dimension of the
/// input.
/// - padding: The padding for the operation.
@inlinable @inline(__always)
@differentiable(
wrt: self, vjp: _vjpMaxPooled(kernelSize:strides:padding:)
where Scalar : TensorFlowFloatingPoint
)
func maxPooled(
kernelSize: (Int32, Int32, Int32, Int32),
strides: (Int32, Int32, Int32, Int32),
padding: Padding
) -> Tensor {
return Raw.maxPoolV2(
self,
ksize: Tensor<Int32>(kernelSize),
strides: Tensor<Int32>(strides),
padding: padding.raw)
}

/// Computes a 2-D average pooling, with the specified kernel sizes, strides,
/// and padding.
///
/// - Parameters:
/// - kernelSize: The dimensions of the pooling kernel.
/// - strides: The strides of the sliding filter for each dimension of the
/// input.
/// - padding: The padding for the operation.
@inlinable @inline(__always)
@differentiable(
wrt: self, vjp: _vjpAveragePooled(kernelSize:strides:padding:)
where Scalar : TensorFlowFloatingPoint
)
func averagePooled(
kernelSize: (Int32, Int32, Int32, Int32),
strides: (Int32, Int32, Int32, Int32),
padding: Padding
) -> Tensor {
return Raw.avgPool(
value: self,
ksize: [kernelSize.0, kernelSize.1, kernelSize.2, kernelSize.3],
strides: [strides.0, strides.1, strides.2, strides.3],
padding: padding.raw)
}
}
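
// Hypothetical usage of the APIs above, assuming a Swift for TensorFlow
// toolchain with `import TensorFlow`; shapes follow the NHWC layout expected
// by `Raw.conv2D` and the pooling ops.
import TensorFlow

let image = Tensor<Float>(repeating: 0.5, shape: [1, 4, 4, 1])  // N, H, W, C
let filter = Tensor<Float>(repeating: 1, shape: [3, 3, 1, 1])   // kH, kW, inC, outC
let conv = image.convolved2D(withFilter: filter,
                             strides: (1, 1, 1, 1), padding: .same)
let pooled = conv.maxPooled(kernelSize: (1, 2, 2, 1),
                            strides: (1, 2, 2, 1), padding: .valid)
// conv: [1, 4, 4, 1], pooled: [1, 2, 2, 1]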
24 changes: 0 additions & 24 deletions test/TensorFlowRuntime/tensor.swift
@@ -273,30 +273,6 @@ TensorTests.testAllBackends("ReductionToScalar") {
_hostOp(extra)
}

TensorTests.testAllBackends("BatchNormalization") {
let x = Tensor<Float>(shape: [2, 4],
scalars: [0, 0, 0, 0, 0.5, -0.05, 0.3, -0.02])
let normalized = x.batchNormalized(alongAxis: 0, epsilon: 0.001)
expectEqual([2, 4], normalized.shape)
expectPointwiseNearlyEqual(
[-0.99209, 0.62017, -0.97849, 0.30151,
0.99209, -0.62017, 0.97849, -0.30151],
normalized.scalars, byError: 0.0001)
}

TensorTests.testAllBackends("Convolution") {
let x = Tensor<Float>(repeating: 0.5, shape: [1, 1, 3, 3])
let filter = Tensor<Float>(shape: [1, 1, 3, 3],
scalars: [0, 1, 0, 1, 1, 1, 0, 1, 0])
let y = x.convolved2D(withFilter: filter, strides: (1, 1, 1, 1),
padding: .same)
expectEqual(ShapedArray(shape: [1, 1, 3, 3],
scalars: [0.5, 1.5, 0.5,
0.5, 1.5, 0.5,
0.5, 1.5, 0.5]),
y.array)
}

TensorTests.testAllBackends("3Adds") {
let a = Tensor<Float>([1])
let b = Tensor<Float>([2])
2 changes: 1 addition & 1 deletion utils/update_checkout/update-checkout-config.json
@@ -242,7 +242,7 @@
"icu": "release-61-1",
"tensorflow": "7818652c950b1b1922efe5f4345886058d0ffba5",
"tensorflow-swift-bindings": "c852b63b6ac3c4b53199aab96c021501978b843d",
"tensorflow-swift-apis": "905f0fbf3cc74e45df8d8c7a5cda95b2ad6ac905"
"tensorflow-swift-apis": "8264ac065fa3299cbbefe3eb4ab4d65f0b6faf4f"
}
}
}