From 08d1f4db296906f8cd7a92aa1d9d09ff53c69004 Mon Sep 17 00:00:00 2001 From: PAWAN SASANKA AMMANAMANCHI Date: Wed, 21 Aug 2019 14:52:24 +0530 Subject: [PATCH 01/12] vjp fixes in conv3d --- Sources/TensorFlow/Operators/NN.swift | 12 ++--- Tests/TensorFlowTests/LayerTests.swift | 68 +++++++++++++++++++++----- 2 files changed, 63 insertions(+), 17 deletions(-) diff --git a/Sources/TensorFlow/Operators/NN.swift b/Sources/TensorFlow/Operators/NN.swift index 2e30f521f..1bde663ae 100644 --- a/Sources/TensorFlow/Operators/NN.swift +++ b/Sources/TensorFlow/Operators/NN.swift @@ -269,9 +269,9 @@ func _vjpConv3DBackpropInput( padding: padding) return (value, { v in return ( - conv3DBackpropFilter(x, input: v, filterSizes: shape, strides: strides, - padding: padding), - conv3D(v, filter: filter, strides: strides, padding: padding) + conv3D(v, filter: filter, strides: strides, padding: padding), + conv3DBackpropFilter(x, input: v, filterSizes: filter.shapeTensor, strides: strides, + padding: padding) ) }) } @@ -307,9 +307,9 @@ func _vjpConv3DBackpropFilter( strides: strides, padding: padding) return (value, { v in return ( - conv3DBackpropInput(x, shape: filterSizes, filter: v, strides: strides, - padding: padding), - conv3D(input, filter: v, strides: strides, padding: padding) + conv3D(input, filter: v, strides: strides, padding: padding), + conv3DBackpropInput(x, shape: x.shapeTensor, filter: v, strides: strides, + padding: padding) ) }) } diff --git a/Tests/TensorFlowTests/LayerTests.swift b/Tests/TensorFlowTests/LayerTests.swift index 514a5cf33..665f41622 100644 --- a/Tests/TensorFlowTests/LayerTests.swift +++ b/Tests/TensorFlowTests/LayerTests.swift @@ -113,28 +113,28 @@ final class LayerTests: XCTestCase { func testConv2DGradient() { let filter = Tensor(shape: [3, 3, 2, 4], scalars: (0..<72).map(Float.init)) let bias = Tensor(zeros: [4]) - let layer = Conv2D(filter: filter, - bias: bias, + let layer = Conv2D(filter: filter, + bias: bias, activation: identity, - strides: (2, 2), + strides: (2, 2), padding: .valid) let input = Tensor(shape: [2, 4, 4, 2], scalars: (0..<64).map(Float.init)) let grads = gradient( at: input, layer) { $1($0).sum() } - // The expected gradients were computed using the following Python code: + // The expected gradients were computed using the following Python code: // ``` // x = tf.reshape(tf.range(64, dtype=tf.float32), [2, 4, 4, 2]) // filter = tf.reshape(tf.range(72, dtype=tf.float32), [3, 3, 2, 4]) // bias = tf.zeros([4]) // with tf.GradientTape() as t: // t.watch([x, filter, bias]) - // y = tf.math.reduce_sum(tf.nn.conv2d(input=x, + // y = tf.math.reduce_sum(tf.nn.conv2d(input=x, // filters=filter, // strides=[1, 2, 2, 1], // data_format="NHWC", // padding="VALID") + bias) // grads = t.gradient(y, [x, filter, bias]) // ``` - XCTAssertEqual(grads.0, + XCTAssertEqual(grads.0, [[[[ 6, 22], [ 38, 54], [ 70, 86], [ 0, 0]], [[102, 118], [134, 150], [166, 182], [ 0, 0]], [[198, 214], [230, 246], [262, 278], [ 0, 0]], @@ -143,7 +143,7 @@ final class LayerTests: XCTestCase { [[102, 118], [134, 150], [166, 182], [ 0, 0]], [[198, 214], [230, 246], [262, 278], [ 0, 0]], [[ 0, 0], [ 0, 0], [ 0, 0], [ 0, 0]]]]) - XCTAssertEqual(grads.1.filter, + XCTAssertEqual(grads.1.filter, [[[[32, 32, 32, 32], [34, 34, 34, 34]], [[36, 36, 36, 36], [38, 38, 38, 38]], [[40, 40, 40, 40], [42, 42, 42, 42]]], @@ -197,6 +197,52 @@ final class LayerTests: XCTestCase { XCTAssertEqual(output, expected) } + func testConv3DGradient() { + let filter = Tensor(shape: [1, 4, 4, 1, 1], scalars: 
(0..<16).map(Float.init)) + let bias = Tensor(ones: [2]) + let layer = Conv3D(filter: filter, + bias: bias, + activation: identity, + strides: (2, 2, 2), + padding: .valid) + let input = Tensor(shape: [1, 4, 4, 4, 1], scalars: (0..<64).map(Float.init)) + let grads = gradient( at: input, layer) { $1($0).sum() } + // The expected gradients were computed using the following Python code: + // ``` + // x = tf.reshape(tf.range(64, dtype=tf.float32), [1, 4, 4, 4, 1]) + // filter = tf.reshape(tf.range(16, dtype=tf.float32), [1, 4, 4, 1, 1]) + // bias = tf.ones([2]) + // with tf.GradientTape() as t: + // t.watch([x, filter, bias]) + // y = tf.math.reduce_sum(tf.nn.conv3d(input=x, + // filters=filter, + // strides=[1, 2, 2, 2, 1], + // padding="VALID") + bias) + // grads = t.gradient(y, [x, filter, bias]) + // ``` + + XCTAssertEqual(grads.0, + [[[[ 6, 22], [ 38, 54], [ 70, 86], [ 0, 0]], + [[102, 118], [134, 150], [166, 182], [ 0, 0]], + [[198, 214], [230, 246], [262, 278], [ 0, 0]], + [[ 0, 0], [ 0, 0], [ 0, 0], [ 0, 0]]], + [[[ 6, 22], [ 38, 54], [ 70, 86], [ 0, 0]], + [[102, 118], [134, 150], [166, 182], [ 0, 0]], + [[198, 214], [230, 246], [262, 278], [ 0, 0]], + [[ 0, 0], [ 0, 0], [ 0, 0], [ 0, 0]]]]) + XCTAssertEqual(grads.1.filter, + [[[[32, 32, 32, 32], [34, 34, 34, 34]], + [[36, 36, 36, 36], [38, 38, 38, 38]], + [[40, 40, 40, 40], [42, 42, 42, 42]]], + [[[48, 48, 48, 48], [50, 50, 50, 50]], + [[52, 52, 52, 52], [54, 54, 54, 54]], + [[56, 56, 56, 56], [58, 58, 58, 58]]], + [[[64, 64, 64, 64], [66, 66, 66, 66]], + [[68, 68, 68, 68], [70, 70, 70, 70]], + [[72, 72, 72, 72], [74, 74, 74, 74]]]]) + XCTAssertEqual(grads.1.bias, [2, 2, 2, 2]) + } + func testDepthConv2D() { let filter = Tensor(shape: [2, 2, 2, 2], scalars: (0..<16).map(Float.init)) let bias = Tensor([1, 2, 3, 4]) @@ -582,7 +628,7 @@ final class LayerTests: XCTestCase { // [ 0.0, 0.0, 0.0, 0.0]]) // XCTAssertEqual(𝛁rnn.cell.bias, [ 0.2496884, 0.66947335, 0.7978788, -0.22378457]) } - + func testLSTM() { withRandomSeedForTensorFlow((0xFeed, 0xBeef)) { let x = Tensor(rangeFrom: 0.0, to: 0.4, stride: 0.1).rankLifted() @@ -714,7 +760,7 @@ final class LayerTests: XCTestCase { // lnLayer = tf.keras.layers.LayerNormalization(axis=1, epsilon=0.001) // with tf.GradientTape() as t: // t.watch(x) - // y = lnLayer(x) + // y = lnLayer(x) // z = tf.math.reduce_sum(tf.math.square(y)) // print(y, t.gradient(z, [x] + lnLayer.trainable_variables)) // ``` @@ -733,8 +779,8 @@ final class LayerTests: XCTestCase { [-0.0019815 , 0.00164783, 0.00130618, 0.00119543, -0.00216818]], accuracy: 1e-5) assertEqual( - grad.1.offset, - [-0.645803 , -5.8017054 , 0.03168535, 5.973418 , 0.44240427], + grad.1.offset, + [-0.645803 , -5.8017054 , 0.03168535, 5.973418 , 0.44240427], accuracy: 1e-5) assertEqual( grad.1.scale, From a37b4ccfe1f44565c6612fd8ed032f86c50647c2 Mon Sep 17 00:00:00 2001 From: PAWAN SASANKA AMMANAMANCHI Date: Wed, 21 Aug 2019 15:13:28 +0530 Subject: [PATCH 02/12] Removing test --- Tests/TensorFlowTests/LayerTests.swift | 46 -------------------------- 1 file changed, 46 deletions(-) diff --git a/Tests/TensorFlowTests/LayerTests.swift b/Tests/TensorFlowTests/LayerTests.swift index 665f41622..96225168b 100644 --- a/Tests/TensorFlowTests/LayerTests.swift +++ b/Tests/TensorFlowTests/LayerTests.swift @@ -197,52 +197,6 @@ final class LayerTests: XCTestCase { XCTAssertEqual(output, expected) } - func testConv3DGradient() { - let filter = Tensor(shape: [1, 4, 4, 1, 1], scalars: (0..<16).map(Float.init)) - let bias = Tensor(ones: [2]) - let layer = 
Conv3D(filter: filter, - bias: bias, - activation: identity, - strides: (2, 2, 2), - padding: .valid) - let input = Tensor(shape: [1, 4, 4, 4, 1], scalars: (0..<64).map(Float.init)) - let grads = gradient( at: input, layer) { $1($0).sum() } - // The expected gradients were computed using the following Python code: - // ``` - // x = tf.reshape(tf.range(64, dtype=tf.float32), [1, 4, 4, 4, 1]) - // filter = tf.reshape(tf.range(16, dtype=tf.float32), [1, 4, 4, 1, 1]) - // bias = tf.ones([2]) - // with tf.GradientTape() as t: - // t.watch([x, filter, bias]) - // y = tf.math.reduce_sum(tf.nn.conv3d(input=x, - // filters=filter, - // strides=[1, 2, 2, 2, 1], - // padding="VALID") + bias) - // grads = t.gradient(y, [x, filter, bias]) - // ``` - - XCTAssertEqual(grads.0, - [[[[ 6, 22], [ 38, 54], [ 70, 86], [ 0, 0]], - [[102, 118], [134, 150], [166, 182], [ 0, 0]], - [[198, 214], [230, 246], [262, 278], [ 0, 0]], - [[ 0, 0], [ 0, 0], [ 0, 0], [ 0, 0]]], - [[[ 6, 22], [ 38, 54], [ 70, 86], [ 0, 0]], - [[102, 118], [134, 150], [166, 182], [ 0, 0]], - [[198, 214], [230, 246], [262, 278], [ 0, 0]], - [[ 0, 0], [ 0, 0], [ 0, 0], [ 0, 0]]]]) - XCTAssertEqual(grads.1.filter, - [[[[32, 32, 32, 32], [34, 34, 34, 34]], - [[36, 36, 36, 36], [38, 38, 38, 38]], - [[40, 40, 40, 40], [42, 42, 42, 42]]], - [[[48, 48, 48, 48], [50, 50, 50, 50]], - [[52, 52, 52, 52], [54, 54, 54, 54]], - [[56, 56, 56, 56], [58, 58, 58, 58]]], - [[[64, 64, 64, 64], [66, 66, 66, 66]], - [[68, 68, 68, 68], [70, 70, 70, 70]], - [[72, 72, 72, 72], [74, 74, 74, 74]]]]) - XCTAssertEqual(grads.1.bias, [2, 2, 2, 2]) - } - func testDepthConv2D() { let filter = Tensor(shape: [2, 2, 2, 2], scalars: (0..<16).map(Float.init)) let bias = Tensor([1, 2, 3, 4]) From 23fcd8a77edde868229d117afac282a59823b787 Mon Sep 17 00:00:00 2001 From: PAWAN SASANKA AMMANAMANCHI Date: Wed, 21 Aug 2019 17:01:25 +0530 Subject: [PATCH 03/12] Adding test --- Sources/TensorFlow/Operators/NN.swift | 8 +++--- Tests/TensorFlowTests/LayerTests.swift | 36 ++++++++++++++++++++++++++ 2 files changed, 40 insertions(+), 4 deletions(-) diff --git a/Sources/TensorFlow/Operators/NN.swift b/Sources/TensorFlow/Operators/NN.swift index 1bde663ae..53f0a13e2 100644 --- a/Sources/TensorFlow/Operators/NN.swift +++ b/Sources/TensorFlow/Operators/NN.swift @@ -229,7 +229,7 @@ func _vjpConv3D( let value = conv3D(x, filter: filter, strides: strides, padding: padding) return (value, { v in - return ( + ( conv3DBackpropInput(v, shape: x.shapeTensor, filter: filter, strides: strides, padding: padding), conv3DBackpropFilter(v, input: x, filterSizes: filter.shapeTensor, @@ -268,7 +268,7 @@ func _vjpConv3DBackpropInput( let value = conv3DBackpropInput(x, shape: shape, filter: filter, strides: strides, padding: padding) return (value, { v in - return ( + ( conv3D(v, filter: filter, strides: strides, padding: padding), conv3DBackpropFilter(x, input: v, filterSizes: filter.shapeTensor, strides: strides, padding: padding) @@ -287,7 +287,7 @@ func conv3DBackpropFilter( padding: Padding = .valid ) -> Tensor { return Raw.conv3DBackpropFilterV2( - x, + input, filterSizes: filterSizes, outBackprop: x, strides: [Int32(strides.0), Int32(strides.1), Int32(strides.2), @@ -306,7 +306,7 @@ func _vjpConv3DBackpropFilter( let value = conv3DBackpropFilter(x, input: input, filterSizes: filterSizes, strides: strides, padding: padding) return (value, { v in - return ( + ( conv3D(input, filter: v, strides: strides, padding: padding), conv3DBackpropInput(x, shape: x.shapeTensor, filter: v, strides: strides, padding: 
padding) diff --git a/Tests/TensorFlowTests/LayerTests.swift b/Tests/TensorFlowTests/LayerTests.swift index 96225168b..1f5b68b1c 100644 --- a/Tests/TensorFlowTests/LayerTests.swift +++ b/Tests/TensorFlowTests/LayerTests.swift @@ -197,6 +197,41 @@ final class LayerTests: XCTestCase { XCTAssertEqual(output, expected) } + func testConv3DGradient() { + let filter = Tensor(shape: [1, 4, 4, 1, 1], scalars: (0..<16).map(Float.init)) + let bias = Tensor(ones: [2]) + let layer = Conv3D(filter: filter, + bias: bias, + activation: identity, + strides: (2, 2, 2), + padding: .same) + let input = Tensor(shape: [1, 4, 4, 4, 1], scalars: (0..<64).map(Float.init)) + let grads = gradient( at: input, layer) { $1($0).sum() } + XCTAssertEqual(grads.0, + [[[[[10.0], [20.0], [24.0], [12.0]], + [[20.0], [40.0], [48.0], [24.0]], + [[36.0], [72.0], [80.0], [40.0]], + [[18.0], [36.0], [40.0], [20.0]]], + [[[ 0.0], [ 0.0], [ 0.0], [ 0.0]], + [[ 0.0], [ 0.0], [ 0.0], [ 0.0]], + [[ 0.0], [ 0.0], [ 0.0], [ 0.0]], + [[ 0.0], [ 0.0], [ 0.0], [ 0.0]]], + [[[10.0], [20.0], [24.0], [12.0]], + [[20.0], [40.0], [48.0], [24.0]], + [[36.0], [72.0], [80.0], [40.0]], + [[18.0], [36.0], [40.0], [20.0]]], + [[[ 0.0], [ 0.0], [ 0.0], [ 0.0]], + [[ 0.0], [ 0.0], [ 0.0], [ 0.0]], + [[ 0.0], [ 0.0], [ 0.0], [ 0.0]], + [[ 0.0], [ 0.0], [ 0.0], [ 0.0]]]]]) + XCTAssertEqual(grads.1.filter, + [[[[[ 84.0]], [[168.0]], [[176.0]], [[ 88.0]]], + [[[168.0]], [[336.0]], [[352.0]], [[176.0]]], + [[[200.0]], [[400.0]], [[416.0]], [[208.0]]], + [[[100.0]], [[200.0]], [[208.0]], [[104.0]]]]]) + XCTAssertEqual(grads.1.bias, [8.0, 8.0]) + } + func testDepthConv2D() { let filter = Tensor(shape: [2, 2, 2, 2], scalars: (0..<16).map(Float.init)) let bias = Tensor([1, 2, 3, 4]) @@ -750,6 +785,7 @@ final class LayerTests: XCTestCase { ("testConv2DGradient", testConv2DGradient), ("testConv2DDilation", testConv2DDilation), ("testConv3D", testConv3D), + ("testConv3DGradient", testConv3DGradient), ("testDepthConv2D", testDepthConv2D), ("testSeparableConv2D", testSeparableConv2D), ("testZeroPadding1D", testZeroPadding1D), From 4d575e98763d840c1e53291a7d04e2e70d87d88a Mon Sep 17 00:00:00 2001 From: PAWAN SASANKA AMMANAMANCHI Date: Wed, 21 Aug 2019 17:05:22 +0530 Subject: [PATCH 04/12] formatting errors --- Sources/TensorFlow/Operators/NN.swift | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/Sources/TensorFlow/Operators/NN.swift b/Sources/TensorFlow/Operators/NN.swift index 53f0a13e2..a4b270f77 100644 --- a/Sources/TensorFlow/Operators/NN.swift +++ b/Sources/TensorFlow/Operators/NN.swift @@ -229,12 +229,10 @@ func _vjpConv3D( let value = conv3D(x, filter: filter, strides: strides, padding: padding) return (value, { v in - ( - conv3DBackpropInput(v, shape: x.shapeTensor, filter: filter, + (conv3DBackpropInput(v, shape: x.shapeTensor, filter: filter, strides: strides, padding: padding), - conv3DBackpropFilter(v, input: x, filterSizes: filter.shapeTensor, - strides: strides, padding: padding) - ) + conv3DBackpropFilter(v, input: x, filterSizes: filter.shapeTensor, + strides: strides, padding: padding)) }) } @@ -268,11 +266,9 @@ func _vjpConv3DBackpropInput( let value = conv3DBackpropInput(x, shape: shape, filter: filter, strides: strides, padding: padding) return (value, { v in - ( - conv3D(v, filter: filter, strides: strides, padding: padding), - conv3DBackpropFilter(x, input: v, filterSizes: filter.shapeTensor, strides: strides, - padding: padding) - ) + (conv3D(v, filter: filter, strides: strides, padding: padding), + 
conv3DBackpropFilter(x, input: v, filterSizes: filter.shapeTensor, strides: strides, + padding: padding)) }) } @@ -306,11 +302,9 @@ func _vjpConv3DBackpropFilter( let value = conv3DBackpropFilter(x, input: input, filterSizes: filterSizes, strides: strides, padding: padding) return (value, { v in - ( - conv3D(input, filter: v, strides: strides, padding: padding), - conv3DBackpropInput(x, shape: x.shapeTensor, filter: v, strides: strides, - padding: padding) - ) + (conv3D(input, filter: v, strides: strides, padding: padding), + conv3DBackpropInput(x, shape: x.shapeTensor, filter: v, strides: strides, + padding: padding)) }) } From 242d2aa7aa3ab98c35517e8eb1e1ef0bb5f0acb7 Mon Sep 17 00:00:00 2001 From: PAWAN SASANKA AMMANAMANCHI Date: Wed, 21 Aug 2019 17:06:55 +0530 Subject: [PATCH 05/12] formatting errors --- Sources/TensorFlow/Operators/NN.swift | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Sources/TensorFlow/Operators/NN.swift b/Sources/TensorFlow/Operators/NN.swift index a4b270f77..032dfbeab 100644 --- a/Sources/TensorFlow/Operators/NN.swift +++ b/Sources/TensorFlow/Operators/NN.swift @@ -230,9 +230,9 @@ func _vjpConv3D( padding: padding) return (value, { v in (conv3DBackpropInput(v, shape: x.shapeTensor, filter: filter, - strides: strides, padding: padding), + strides: strides, padding: padding), conv3DBackpropFilter(v, input: x, filterSizes: filter.shapeTensor, - strides: strides, padding: padding)) + strides: strides, padding: padding)) }) } @@ -268,7 +268,7 @@ func _vjpConv3DBackpropInput( return (value, { v in (conv3D(v, filter: filter, strides: strides, padding: padding), conv3DBackpropFilter(x, input: v, filterSizes: filter.shapeTensor, strides: strides, - padding: padding)) + padding: padding)) }) } @@ -304,7 +304,7 @@ func _vjpConv3DBackpropFilter( return (value, { v in (conv3D(input, filter: v, strides: strides, padding: padding), conv3DBackpropInput(x, shape: x.shapeTensor, filter: v, strides: strides, - padding: padding)) + padding: padding)) }) } From ef4e0fb27a9505ae880e40f436993783a0e9dae6 Mon Sep 17 00:00:00 2001 From: PAWAN SASANKA AMMANAMANCHI Date: Wed, 21 Aug 2019 17:09:48 +0530 Subject: [PATCH 06/12] add python tensorflow example --- Tests/TensorFlowTests/LayerTests.swift | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/Tests/TensorFlowTests/LayerTests.swift b/Tests/TensorFlowTests/LayerTests.swift index 1f5b68b1c..f894274f9 100644 --- a/Tests/TensorFlowTests/LayerTests.swift +++ b/Tests/TensorFlowTests/LayerTests.swift @@ -207,6 +207,19 @@ final class LayerTests: XCTestCase { padding: .same) let input = Tensor(shape: [1, 4, 4, 4, 1], scalars: (0..<64).map(Float.init)) let grads = gradient( at: input, layer) { $1($0).sum() } + // The expected gradients were computed using the following Python code: + // ``` + // x = tf.reshape(tf.range(64, dtype=tf.float32), [1, 4, 4, 4, 1]) + // filter = tf.reshape(tf.range(72, dtype=tf.float32), [1, 4, 4, 1, 1]) + // bias = tf.ones([2]) + // with tf.GradientTape() as t: + // t.watch([x, filter, bias]) + // y = tf.math.reduce_sum(tf.nn.conv3d(input=x, + // filters=filter, + // strides=[1, 2, 2, 2, 1], + // padding="SAME") + bias) + // grads = t.gradient(y, [x, filter, bias]) + // ``` XCTAssertEqual(grads.0, [[[[[10.0], [20.0], [24.0], [12.0]], [[20.0], [40.0], [48.0], [24.0]], From b632ad13bf0f48b384b043fc18a37f6be64bbece Mon Sep 17 00:00:00 2001 From: PAWAN SASANKA AMMANAMANCHI Date: Fri, 23 Aug 2019 08:37:24 +0530 Subject: [PATCH 07/12] review changes --- 
Sources/TensorFlow/Operators/NN.swift | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Sources/TensorFlow/Operators/NN.swift b/Sources/TensorFlow/Operators/NN.swift index 032dfbeab..de769016a 100644 --- a/Sources/TensorFlow/Operators/NN.swift +++ b/Sources/TensorFlow/Operators/NN.swift @@ -266,9 +266,9 @@ func _vjpConv3DBackpropInput( let value = conv3DBackpropInput(x, shape: shape, filter: filter, strides: strides, padding: padding) return (value, { v in - (conv3D(v, filter: filter, strides: strides, padding: padding), - conv3DBackpropFilter(x, input: v, filterSizes: filter.shapeTensor, strides: strides, - padding: padding)) + (conv3D(v, filter: filter, strides: strides, padding: padding), + conv3DBackpropFilter(x, input: v, filterSizes: filter.shapeTensor, strides: strides, + padding: padding)) }) } @@ -302,9 +302,9 @@ func _vjpConv3DBackpropFilter( let value = conv3DBackpropFilter(x, input: input, filterSizes: filterSizes, strides: strides, padding: padding) return (value, { v in - (conv3D(input, filter: v, strides: strides, padding: padding), - conv3DBackpropInput(x, shape: x.shapeTensor, filter: v, strides: strides, - padding: padding)) + (conv3D(input, filter: v, strides: strides, padding: padding), + conv3DBackpropInput(x, shape: x.shapeTensor, filter: v, strides: strides, + padding: padding)) }) } From 12426867d863fd00ae0f4b3e33eac70cd8c628fe Mon Sep 17 00:00:00 2001 From: PAWAN SASANKA AMMANAMANCHI Date: Thu, 29 Aug 2019 21:02:56 +0530 Subject: [PATCH 08/12] Standardization wrt #499 --- Tests/TensorFlowTests/LayerTests.swift | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/Tests/TensorFlowTests/LayerTests.swift b/Tests/TensorFlowTests/LayerTests.swift index f7cb75751..9c2bbce02 100644 --- a/Tests/TensorFlowTests/LayerTests.swift +++ b/Tests/TensorFlowTests/LayerTests.swift @@ -209,16 +209,18 @@ final class LayerTests: XCTestCase { let grads = gradient( at: input, layer) { $1($0).sum() } // The expected gradients were computed using the following Python code: // ``` - // x = tf.reshape(tf.range(64, dtype=tf.float32), [1, 4, 4, 4, 1]) - // filter = tf.reshape(tf.range(72, dtype=tf.float32), [1, 4, 4, 1, 1]) - // bias = tf.ones([2]) - // with tf.GradientTape() as t: - // t.watch([x, filter, bias]) - // y = tf.math.reduce_sum(tf.nn.conv3d(input=x, - // filters=filter, - // strides=[1, 2, 2, 2, 1], - // padding="SAME") + bias) - // grads = t.gradient(y, [x, filter, bias]) + // import tensorflow as tf + // x = tf.reshape(tf.range(64, dtype=tf.float32), [1, 4, 4, 4, 1]) + // filter = tf.reshape(tf.range(72, dtype=tf.float32), [1, 4, 4, 1, 1]) + // bias = tf.ones([2]) + // with tf.GradientTape() as tape: + // tape.watch([x, filter, bias]) + // y = tf.math.reduce_sum(tf.nn.conv3d(input=x, + // filters=filter, + // strides=[1, 2, 2, 2, 1], + // padding="SAME") + bias) + // grads = tape.gradient(y, [x, filter, bias]) + // print(grads) // ``` XCTAssertEqual(grads.0, [[[[[10.0], [20.0], [24.0], [12.0]], From 2a696a51767c769a44016301c17ec6d15fec57d2 Mon Sep 17 00:00:00 2001 From: PAWAN SASANKA AMMANAMANCHI Date: Thu, 29 Aug 2019 21:04:15 +0530 Subject: [PATCH 09/12] print gradients in example --- Tests/TensorFlowTests/LayerTests.swift | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Tests/TensorFlowTests/LayerTests.swift b/Tests/TensorFlowTests/LayerTests.swift index 9c2bbce02..2e45b6fd6 100644 --- a/Tests/TensorFlowTests/LayerTests.swift +++ b/Tests/TensorFlowTests/LayerTests.swift @@ -219,8 +219,7 @@ 
final class LayerTests: XCTestCase { // filters=filter, // strides=[1, 2, 2, 2, 1], // padding="SAME") + bias) - // grads = tape.gradient(y, [x, filter, bias]) - // print(grads) + // print(tape.gradient(y, [x, filter, bias])) // ``` XCTAssertEqual(grads.0, [[[[[10.0], [20.0], [24.0], [12.0]], From 8602440d000da56868a64bfe871abcb7ecbcdd51 Mon Sep 17 00:00:00 2001 From: PAWAN SASANKA AMMANAMANCHI Date: Thu, 29 Aug 2019 21:05:07 +0530 Subject: [PATCH 10/12] change message --- Tests/TensorFlowTests/LayerTests.swift | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Tests/TensorFlowTests/LayerTests.swift b/Tests/TensorFlowTests/LayerTests.swift index 2e45b6fd6..2f95e26dc 100644 --- a/Tests/TensorFlowTests/LayerTests.swift +++ b/Tests/TensorFlowTests/LayerTests.swift @@ -207,7 +207,7 @@ final class LayerTests: XCTestCase { padding: .same) let input = Tensor(shape: [1, 4, 4, 4, 1], scalars: (0..<64).map(Float.init)) let grads = gradient( at: input, layer) { $1($0).sum() } - // The expected gradients were computed using the following Python code: + // The expected value of the gradient was computed using the following Python code: // ``` // import tensorflow as tf // x = tf.reshape(tf.range(64, dtype=tf.float32), [1, 4, 4, 4, 1]) From 4106fc6fd4f9d34af241111ea3f7f003e427afc0 Mon Sep 17 00:00:00 2001 From: PAWAN SASANKA AMMANAMANCHI Date: Fri, 30 Aug 2019 13:13:21 +0530 Subject: [PATCH 11/12] Fix indentation --- Tests/TensorFlowTests/LayerTests.swift | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Tests/TensorFlowTests/LayerTests.swift b/Tests/TensorFlowTests/LayerTests.swift index 2f95e26dc..e518ba329 100644 --- a/Tests/TensorFlowTests/LayerTests.swift +++ b/Tests/TensorFlowTests/LayerTests.swift @@ -201,10 +201,10 @@ final class LayerTests: XCTestCase { let filter = Tensor(shape: [1, 4, 4, 1, 1], scalars: (0..<16).map(Float.init)) let bias = Tensor(ones: [2]) let layer = Conv3D(filter: filter, - bias: bias, - activation: identity, - strides: (2, 2, 2), - padding: .same) + bias: bias, + activation: identity, + strides: (2, 2, 2), + padding: .same) let input = Tensor(shape: [1, 4, 4, 4, 1], scalars: (0..<64).map(Float.init)) let grads = gradient( at: input, layer) { $1($0).sum() } // The expected value of the gradient was computed using the following Python code: From 86c28f8f09b86a6b78cc06e5a55f73691e3bbb3e Mon Sep 17 00:00:00 2001 From: PAWAN SASANKA AMMANAMANCHI Date: Sat, 31 Aug 2019 21:59:43 +0530 Subject: [PATCH 12/12] minor indentation errors --- Tests/TensorFlowTests/LayerTests.swift | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Tests/TensorFlowTests/LayerTests.swift b/Tests/TensorFlowTests/LayerTests.swift index e518ba329..766c5da93 100644 --- a/Tests/TensorFlowTests/LayerTests.swift +++ b/Tests/TensorFlowTests/LayerTests.swift @@ -198,7 +198,7 @@ final class LayerTests: XCTestCase { } func testConv3DGradient() { - let filter = Tensor(shape: [1, 4, 4, 1, 1], scalars: (0..<16).map(Float.init)) + let filter = Tensor(shape: [1, 4, 4, 1, 1], scalars: (0..<16).map(Float.init)) let bias = Tensor(ones: [2]) let layer = Conv3D(filter: filter, bias: bias, @@ -206,7 +206,7 @@ final class LayerTests: XCTestCase { strides: (2, 2, 2), padding: .same) let input = Tensor(shape: [1, 4, 4, 4, 1], scalars: (0..<64).map(Float.init)) - let grads = gradient( at: input, layer) { $1($0).sum() } + let grads = gradient(at: input, layer) { $1($0).sum() } // The expected value of the gradient was computed using the following Python code: // ``` // 
import tensorflow as tf
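
For reference, the verification snippet quoted in the `testConv3DGradient` comment (as it stands after the later patches in this series) can be run as a standalone script. The sketch below assumes TensorFlow 2.x; note that it uses `tf.range(16, dtype=tf.float32)` for the filter, since the filter declared in the Swift test has 16 scalars and 72 values cannot be reshaped to `[1, 4, 4, 1, 1]` — the `72` that appears in the in-comment snippet is assumed to be a typo. The printed gradients should reproduce the values asserted against `grads.0`, `grads.1.filter`, and `grads.1.bias`.

```
import tensorflow as tf

x = tf.reshape(tf.range(64, dtype=tf.float32), [1, 4, 4, 4, 1])
# The filter has 16 scalars; the in-comment snippet's tf.range(72, ...) is assumed to be a typo.
filter = tf.reshape(tf.range(16, dtype=tf.float32), [1, 4, 4, 1, 1])
bias = tf.ones([2])
with tf.GradientTape() as tape:
    tape.watch([x, filter, bias])
    # A bias of shape [2] broadcasts against the single output channel,
    # mirroring what the Swift Conv3D layer does in the test.
    y = tf.math.reduce_sum(tf.nn.conv3d(input=x,
                                        filters=filter,
                                        strides=[1, 2, 2, 2, 1],
                                        padding="SAME") + bias)
print(tape.gradient(y, [x, filter, bias]))
```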