Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -39,18 +39,17 @@ You will also need [CMake](https://cmake.org), [Ninja](https://ninja-build.org),
```shell
brew install cmake ninja
brew cask install caskroom/versions/java8 # required for Bazel
brew install bazel # required for TensorFlow support
```

Instructions for installing CMake, Ninja, and Bazel directly can be found [below](#build-dependencies).
Additionally, [Bazel](https://www.bazel.build) v0.22.0 is required to build with TensorFlow support. Instructions to download Bazel directly can be found [below](#bazel). You can also find instructions for installing CMake and Ninja directly [below](#build-dependencies).

#### Linux

For Ubuntu, you'll need the following development dependencies:

sudo apt-get install git cmake ninja-build clang python uuid-dev libicu-dev icu-devtools libbsd-dev libedit-dev libxml2-dev libsqlite3-dev swig libpython-dev libncurses5-dev pkg-config libblocksruntime-dev libcurl4-openssl-dev systemtap-sdt-dev tzdata rsync

Additionally, [Bazel](https://www.bazel.build) is required to build with TensorFlow support. Ubuntu installation instructions are [here](https://docs.bazel.build/versions/master/install-ubuntu.html).
Additionally, [Bazel](https://www.bazel.build) v0.22.0 is required to build with TensorFlow support. Ubuntu installation instructions can be found [below](#bazel).

**Note:** LLDB currently requires at least `swig-1.3.40` but will successfully build
with version 2 shipped with Ubuntu.
Expand Down Expand Up @@ -341,3 +340,4 @@ next to the other projects and it will be bootstrapped automatically:
The Bazel website has detailed installation instructions for
[macOS](https://docs.bazel.build/versions/master/install-os-x.html) and
[Ubuntu](https://docs.bazel.build/versions/master/install-ubuntu.html).
When picking the version to download in step 2, select v0.22.0, which can be found in the [release notes](https://github.com/bazelbuild/bazel/releases/tag/0.22.0).
1 change: 0 additions & 1 deletion lib/SILOptimizer/Mandatory/Differentiation.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2293,7 +2293,6 @@ static SILFunction *getOrCreateReabstractionThunk(SILOptFunctionBuilder &fb,
auto indRes = *fromIndResultsIter++;
auto *load = builder.createLoad(loc, indRes,
getBufferLOQ(indRes->getType().getASTType(), *thunk));
builder.createRetainValue(loc, load, builder.getDefaultAtomicity());
results.push_back(load);
continue;
}
Expand Down
27 changes: 11 additions & 16 deletions stdlib/public/TensorFlow/Gradients.swift
Original file line number Diff line number Diff line change
Expand Up @@ -553,12 +553,10 @@ extension Tensor where Scalar : TensorFlowFloatingPoint {
}

@inlinable
func _vjpExpandingShape(
at shapeIndex: Int
) -> (Tensor, (Tensor) -> Tensor) {
let value = expandingShape(at: shapeIndex)
func _vjpExpandingShape(at axes: [Int]) -> (Tensor, (Tensor) -> Tensor) {
let value = self.expandingShape(at: axes)
return (value, { v in
v.squeezingShape(at: shapeIndex)
v.squeezingShape(at: axes)
})
}
}
Expand All @@ -579,7 +577,11 @@ extension Tensor where Scalar : TensorFlowFloatingPoint {
squeezingAxes axes: Tensor<Int32>
) -> (Tensor, (Tensor) -> Tensor) {
let value = sum(squeezingAxes: axes)
return (value, { [shape = shapeTensor] in $0.broadcast(toShape: shape) })
return (value, { [shape = shapeTensor] in
var res = $0
for i in axes.array.scalars { res = res.expandingShape(at: Int(i)) }
return res.broadcast(toShape: shape)
})
}

@inlinable
Expand All @@ -591,23 +593,16 @@ extension Tensor where Scalar : TensorFlowFloatingPoint {
})
}

/// VJP (value and pullback) for `mean(squeezingAxes:)` with `[Int]` axes.
/// Returns the mean reduced along `axes`, plus a pullback that broadcasts the
/// incoming cotangent back to `self`'s shape and divides by the number of
/// reduced elements.
// NOTE(review): the pullback broadcasts directly from the squeezed shape
// without re-expanding the removed axes — presumably this only aligns
// correctly when broadcasting rules happen to line the axes up; verify
// against the `Tensor<Int32>`-axes overload, which re-expands each axis.
// NOTE(review): in the capture list, `shape` inside
// `axes.map { shape[$0] }` is evaluated in the enclosing scope (i.e. the
// tensor's own `shape`, not the captured `shapeTensor`) — confirm intended.
@inlinable
func _vjpMean(squeezingAxes axes: [Int]) -> (Tensor, (Tensor) -> Tensor) {
let value = mean(squeezingAxes: axes)
return (value, { [shape = shapeTensor,
count = axes.map { shape[$0] }.reduce(1, *)] in
$0.broadcast(toShape: shape) / Tensor(Scalar(count))
})
}

@inlinable
func _vjpMean(
squeezingAxes axes: Tensor<Int32>
) -> (Tensor, (Tensor) -> Tensor) {
let value = mean(squeezingAxes: axes)
let count = Raw.gather(params: shapeTensor, indices: axes).product()
return (value, { [shape = shapeTensor] in
$0.broadcast(toShape: shape) / Tensor(count)
var res = $0
for i in axes.array.scalars { res = res.expandingShape(at: Int(i)) }
return res.broadcast(toShape: shape) / Tensor(count)
})
}
}
Expand Down
16 changes: 13 additions & 3 deletions stdlib/public/TensorFlow/Tensor.swift
Original file line number Diff line number Diff line change
Expand Up @@ -706,14 +706,24 @@ public extension Tensor {
}

/// Returns a shape-expanded `Tensor`, with a dimension of 1 inserted at the
/// specified shape index.
/// specified shape indices.
@inlinable @inline(__always)
@differentiable(wrt: self where Scalar : TensorFlowFloatingPoint)
func expandingShape(at axes: Int...) -> Tensor {
// Variadic convenience: forwards to the array-taking overload, which
// performs the actual dimension insertions (and carries the custom VJP).
return expandingShape(at: axes)
}

/// Returns a shape-expanded `Tensor`, with a dimension of 1 inserted at the
/// specified shape indices.
@inlinable @inline(__always)
@differentiable(
wrt: self, vjp: _vjpExpandingShape(at:)
where Scalar : TensorFlowFloatingPoint
)
func expandingShape(at shapeIndex: Int) -> Tensor {
return Raw.expandDims(self, dim: Tensor<Int32>(Int32(shapeIndex)))
func expandingShape(at axes: [Int]) -> Tensor {
var res = self
for i in axes { res = Raw.expandDims(res, dim: Tensor<Int32>(Int32(i))) }
return res
}

/// Remove the specified dimensions of size 1 from the shape of a tensor. If
Expand Down
14 changes: 14 additions & 0 deletions test/TensorFlowRuntime/tensor.swift
Original file line number Diff line number Diff line change
Expand Up @@ -674,6 +674,20 @@ TensorTests.testAllBackends("MLPClassifierStruct") {
expectPointwiseNearlyEqual([0.816997], prediction.scalars)
}

TensorTests.testAllBackends("ExpandingShape") {
// Expanding a 2 x 3 matrix at indices 0, 2, and 4 yields 1 x 2 x 1 x 3 x 1;
// the scalar contents are unaffected.
let original = Tensor<Int32>([[0, 1, 2], [3, 4, 5]])
let expanded = original.expandingShape(at: 0, 2, 4)
expectEqual([1, 2, 1, 3, 1], expanded.shape)
expectEqual(Array(0..<6), expanded.scalars)

// Squeezing the same indices round-trips back to the 2 x 3 shape.
let squeezed = expanded.squeezingShape(at: 0, 2, 4)
expectEqual([2, 3], squeezed.shape)
expectEqual(Array(0..<6), squeezed.scalars)
}

TensorTests.testAllBackends("Reshape") {
// 2 x 3 -> 1 x 3 x 1 x 2 x 1
let matrix = Tensor<Int32>([[0, 1, 2], [3, 4, 5]])
Expand Down
13 changes: 7 additions & 6 deletions test/TensorFlowRuntime/tensor_autodiff_runtime.swift
Original file line number Diff line number Diff line change
Expand Up @@ -98,38 +98,39 @@ TensorADTests.testAllBackends("Abs") {
TensorADTests.testAllBackends("sum") {
let input = Tensor<Float>(repeating: 42, shape: [2, 2])
let sumPullbackScalar = pullback(at: input) { (a: Tensor<Float>) in a.sum() }
let sumPullbackSqueezingAxes = pullback(at: input) { (a: Tensor<Float>) in a.sum(squeezingAxes: 0, 1) }
let sumPullbackAlongAxes = pullback(at: input) { (a: Tensor<Float>) in a.sum(alongAxes: 0, 1) }

let expected = Tensor<Float>(ones: [2, 2])
expectEqual(expected, sumPullbackScalar(Tensor(1)))
// expectEqual(expected, sumPullbackSqueezingAxes(Tensor(1)))
expectEqual(expected, sumPullbackSqueezingAxes(Tensor(1)))
expectEqual(expected, sumPullbackAlongAxes(Tensor(1)))
expectEqual(expected * 3, sumPullbackScalar(Tensor(3)))
// expectEqual(expected * 3, sumPullbackSqueezingAxes(Tensor(3)))
expectEqual(expected * 3, sumPullbackSqueezingAxes(Tensor(3)))
expectEqual(expected * 3, sumPullbackAlongAxes(Tensor(3)))
}

TensorADTests.testAllBackends("mean") {
let meanGradScalar = gradient { (a: Tensor<Float>) in a.mean() }
// let meanGradSqueezingAxes = gradient { (a: Tensor<Float>) in a.mean(squeezingAxes: 0, 1) }
let meanGradSqueezingAxes = gradient { (a: Tensor<Float>) in a.mean(squeezingAxes: 0, 1) }
let meanGradAlongAxes = gradient { (a: Tensor<Float>) in a.mean(alongAxes: 0, 1) }

let input = Tensor<Float>(ones: [2, 2])
let expected = Tensor<Float>(repeating: 0.25, shape: [2, 2])
expectEqual(expected, meanGradScalar(input))
// expectEqual(expected, meanGradSqueezingAxes(input))
expectEqual(expected, meanGradSqueezingAxes(input))
expectEqual(expected, meanGradAlongAxes(input))
}

TensorADTests.testAllBackends("variance") {
let varianceGradScalar = gradient { (a: Tensor<Float>) in a.variance() }
// let varianceGradSqueezingAxes = gradient { (a: Tensor<Float>) in a.variance(squeezingAxes: 0, 1) }
let varianceGradSqueezingAxes = gradient { (a: Tensor<Float>) in a.variance(squeezingAxes: 0, 1) }
let varianceGradAlongAxes = gradient { (a: Tensor<Float>) in a.variance(alongAxes: 0, 1) }

let input: Tensor<Float> = [[1, 2], [3, 4]]
let expected: Tensor<Float> = [[-0.75, -0.25], [0.25, 0.75]]
expectEqual(expected, varianceGradScalar(input))
// expectEqual(expected, varianceGradSqueezingAxes(input))
expectEqual(expected, varianceGradSqueezingAxes(input))
expectEqual(expected, varianceGradAlongAxes(input))
}

Expand Down
4 changes: 2 additions & 2 deletions utils/update_checkout/update-checkout-config.json
Original file line number Diff line number Diff line change
Expand Up @@ -226,7 +226,7 @@
"llvm": "swift-DEVELOPMENT-SNAPSHOT-2018-11-26-a",
"clang": "swift-DEVELOPMENT-SNAPSHOT-2018-11-26-a",
"swift": "tensorflow",
"lldb": "bdb96e8b352f7cb18e2b3b66f2d3f75d92f81dcd",
"lldb": "4cf2c810dd91795935181610fd406c96e0d6d957",
"cmark": "swift-DEVELOPMENT-SNAPSHOT-2018-11-26-a",
"llbuild": "swift-DEVELOPMENT-SNAPSHOT-2018-11-26-a",
"swiftpm": "swift-DEVELOPMENT-SNAPSHOT-2018-11-26-a",
Expand All @@ -242,7 +242,7 @@
"icu": "release-61-1",
"tensorflow": "447e512d332ab86172a3b13119900b4d021d0c65",
"tensorflow-swift-bindings": "a7ccb727514414d31df9e403f34fa923bdf6a519",
"tensorflow-swift-apis": "cc4abe206dce7f47fabebfcf28effdcc2ad5992b"
"tensorflow-swift-apis": "cfefd63fa60a55d1da1e2a412ed561eb3448e691"
}
}
}
Expand Down