From c0eb14d67c02649069b76e30b58c202eee8302b5 Mon Sep 17 00:00:00 2001 From: Na Li Date: Tue, 31 Mar 2020 10:28:29 -0700 Subject: [PATCH 1/7] [core]Modularize add. --- tfjs-core/src/gradients/Add_grad.ts | 48 ++++++++++++++ tfjs-core/src/kernel_names.ts | 13 ++-- tfjs-core/src/ops/add.ts | 65 +++++++++++++++++++ tfjs-core/src/ops/binary_ops.ts | 56 ---------------- tfjs-core/src/ops/ops.ts | 11 ++-- tfjs-core/src/public/chained_ops/add.ts | 29 +++++++++ .../chained_ops/register_all_chained_ops.ts | 2 +- .../register_all_chained_ops_test.ts | 5 ++ tfjs-core/src/register_all_gradients.ts | 3 +- tfjs-core/src/tensor.ts | 6 -- 10 files changed, 164 insertions(+), 74 deletions(-) create mode 100644 tfjs-core/src/gradients/Add_grad.ts create mode 100644 tfjs-core/src/ops/add.ts create mode 100644 tfjs-core/src/public/chained_ops/add.ts diff --git a/tfjs-core/src/gradients/Add_grad.ts b/tfjs-core/src/gradients/Add_grad.ts new file mode 100644 index 00000000000..5d2bd450578 --- /dev/null +++ b/tfjs-core/src/gradients/Add_grad.ts @@ -0,0 +1,48 @@ +import {Add} from '../kernel_names'; +import {GradConfig} from '../kernel_registry'; +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ +import * as broadcast_util from '../ops/broadcast_util'; +import {Tensor} from '../tensor'; + +export const addGradConfig: GradConfig = { + kernelName: Add, + gradFunc: (dy: Tensor, saved: Tensor[]) => { + const [a, b] = saved; + const outShape = + broadcast_util.assertAndGetBroadcastShape(a.shape, b.shape); + + const derA = () => { + let res = dy; + const reduceAxes = broadcast_util.getReductionAxes(a.shape, outShape); + if (reduceAxes.length > 0) { + res = res.sum(reduceAxes); + } + return res.reshape(a.shape); + }; + const derB = () => { + let res = dy; + const reduceAxes = broadcast_util.getReductionAxes(b.shape, outShape); + if (reduceAxes.length > 0) { + res = res.sum(reduceAxes); + } + return res.reshape(b.shape); + }; + + return {a: derA, b: derB}; + } +} diff --git a/tfjs-core/src/kernel_names.ts b/tfjs-core/src/kernel_names.ts index 78d85fa6927..3d9ce3b440c 100644 --- a/tfjs-core/src/kernel_names.ts +++ b/tfjs-core/src/kernel_names.ts @@ -21,6 +21,14 @@ import {NamedTensorInfoMap} from './kernel_registry'; import {PixelData} from './types'; +export const Add = 'Add'; +export type AddInputs = BinaryInputs; + +export type BinaryInputs = Pick<NamedTensorInfoMap, 'a'|'b'>; + +export const Div = 'Div'; +export type DivInputs = BinaryInputs; + export const FusedBatchNorm = 'FusedBatchNorm'; export type FusedBatchNormInputs = Pick<NamedTensorInfoMap, 'x'|'scale'|'offset'|'mean'|'variance'>; @@ -28,11 +36,6 @@ export interface FusedBatchNormAttrs { varianceEpsilon: number; } -export type BinaryInputs = Pick<NamedTensorInfoMap, 'a'|'b'>; - -export const Div = 'Div'; -export type DivInputs = BinaryInputs; - export const SquaredDifference = 'SquaredDifference'; export type SquaredDifferenceInputs = BinaryInputs;
diff --git a/tfjs-core/src/ops/add.ts b/tfjs-core/src/ops/add.ts new file mode 100644 index 00000000000..d7fb8c41d63 --- /dev/null +++ b/tfjs-core/src/ops/add.ts @@ -0,0 +1,65 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ +import {ENGINE, ForwardFunc} from '../engine'; +import {Add, AddInputs} from '../kernel_names'; +import {Tensor} from '../tensor'; +import {NamedTensorMap} from '../tensor_types'; +import {makeTypesMatch} from '../tensor_util'; +import {convertToTensor} from '../tensor_util_env'; +import {TensorLike} from '../types'; + +import {op} from './operation'; + +/** + * Adds two `tf.Tensor`s element-wise, A + B. Supports broadcasting. + * + * We also expose `tf.addStrict` which has the same signature as this op and + * asserts that `a` and `b` are the same shape (does not broadcast). + * + * ```js + * const a = tf.tensor1d([1, 2, 3, 4]); + * const b = tf.tensor1d([10, 20, 30, 40]); + * + * a.add(b).print(); // or tf.add(a, b) + * ``` + * + * ```js + * // Broadcast add a with b. + * const a = tf.scalar(5); + * const b = tf.tensor1d([10, 20, 30, 40]); + * + * a.add(b).print(); // or tf.add(a, b) + * ``` + * @param a The first `tf.Tensor` to add. + * @param b The second `tf.Tensor` to add. Must have the same type as `a`. + */ +/** @doc {heading: 'Operations', subheading: 'Arithmetic'} */ +function add_<T extends Tensor>(a: Tensor|TensorLike, b: Tensor|TensorLike): T { + let $a = convertToTensor(a, 'a', 'add'); + let $b = convertToTensor(b, 'b', 'add'); + [$a, $b] = makeTypesMatch($a, $b); + + const forward: ForwardFunc<Tensor> = backend => backend.add($a, $b); + + const inputs: AddInputs = {a: $a, b: $b}; + + return ENGINE.runKernelFunc( + forward, inputs as {} as NamedTensorMap, null /* gradient */, + Add) as T; +} + +export const add = op({add_}); diff --git a/tfjs-core/src/ops/binary_ops.ts b/tfjs-core/src/ops/binary_ops.ts index ac5e0525ce4..60a95401028 100644 --- a/tfjs-core/src/ops/binary_ops.ts +++ b/tfjs-core/src/ops/binary_ops.ts @@ -27,61 +27,6 @@ import {op} from './operation'; import {scalar, zerosLike} from './tensor_ops'; import {neg} from './unary_ops'; -/** - * Adds two `tf.Tensor`s element-wise, A + B. Supports broadcasting. - * - * We also expose `tf.addStrict` which has the same signature as this op and - * asserts that `a` and `b` are the same shape (does not broadcast). - * - * ```js - * const a = tf.tensor1d([1, 2, 3, 4]); - * const b = tf.tensor1d([10, 20, 30, 40]); - * - * a.add(b).print(); // or tf.add(a, b) - * ``` - * - * ```js - * // Broadcast add a with b. - * const a = tf.scalar(5); - * const b = tf.tensor1d([10, 20, 30, 40]); - * - * a.add(b).print(); // or tf.add(a, b) - * ``` - * @param a The first `tf.Tensor` to add. - * @param b The second `tf.Tensor` to add. Must have the same type as `a`.
- */ -/** @doc {heading: 'Operations', subheading: 'Arithmetic'} */ -function add_(a: Tensor|TensorLike, b: Tensor|TensorLike): T { - let $a = convertToTensor(a, 'a', 'add'); - let $b = convertToTensor(b, 'b', 'add'); - [$a, $b] = makeTypesMatch($a, $b); - - const outShape = - broadcast_util.assertAndGetBroadcastShape($a.shape, $b.shape); - - const der = (dy: Tensor) => { - const derA = () => { - let res = dy; - const reduceAxes = broadcast_util.getReductionAxes($a.shape, outShape); - if (reduceAxes.length > 0) { - res = res.sum(reduceAxes); - } - return res.reshape($a.shape); - }; - const derB = () => { - let res = dy; - const reduceAxes = broadcast_util.getReductionAxes($b.shape, outShape); - if (reduceAxes.length > 0) { - res = res.sum(reduceAxes); - } - return res.reshape($b.shape); - }; - return {a: derA, b: derB}; - }; - return ENGINE.runKernelFunc( - backend => backend.add($a, $b), {a: $a, b: $b}, der, 'Add') as T; -} - /** * Adds a list of `tf.Tensor`s element-wise, each with the same shape and dtype. * @@ -728,7 +673,6 @@ function atan2_( }, {$a, $b}, der) as T; } -export const add = op({add_}); export const addN = op({addN_}); export const addStrict = op({addStrict_}); export const atan2 = op({atan2_}); diff --git a/tfjs-core/src/ops/ops.ts b/tfjs-core/src/ops/ops.ts index 0f45e6e42bf..32dcfac224d 100644 --- a/tfjs-core/src/ops/ops.ts +++ b/tfjs-core/src/ops/ops.ts @@ -1,6 +1,6 @@ /** * @license - * Copyright 2018 Google Inc. All Rights Reserved. + * Copyright 2020 Google Inc. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -16,10 +16,11 @@ */ // Modularized ops. -export {batchNormalization, batchNorm} from './batchnorm'; -export {batchNormalization2d, batchNorm2d} from './batchnorm2d'; -export {batchNormalization3d, batchNorm3d} from './batchnorm3d'; -export {batchNormalization4d, batchNorm4d} from './batchnorm4d'; +export {add} from './add'; +export {batchNorm, batchNormalization} from './batchnorm'; +export {batchNorm2d, batchNormalization2d} from './batchnorm2d'; +export {batchNorm3d, batchNormalization3d} from './batchnorm3d'; +export {batchNorm4d, batchNormalization4d} from './batchnorm4d'; export {broadcastTo} from './broadcast_to'; export {clone} from './clone'; export {div} from './div'; diff --git a/tfjs-core/src/public/chained_ops/add.ts b/tfjs-core/src/public/chained_ops/add.ts new file mode 100644 index 00000000000..83ca43ed49b --- /dev/null +++ b/tfjs-core/src/public/chained_ops/add.ts @@ -0,0 +1,29 @@ +/** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ +import {add} from '../../ops/add'; +import {Tensor} from '../../tensor'; +import {Rank, TensorLike} from '../../types'; + +declare module '../../tensor' { + interface Tensor<R extends Rank = Rank> { + add<T extends Tensor>(b: Tensor|TensorLike): T; + } +} + +Tensor.prototype.add = function<T extends Tensor>(b: Tensor|TensorLike): T { + return add(this, b); +}; diff --git a/tfjs-core/src/public/chained_ops/register_all_chained_ops.ts b/tfjs-core/src/public/chained_ops/register_all_chained_ops.ts index ca8aa8194db..0f969b622fa 100644 --- a/tfjs-core/src/public/chained_ops/register_all_chained_ops.ts +++ b/tfjs-core/src/public/chained_ops/register_all_chained_ops.ts @@ -14,7 +14,7 @@ * limitations under the License. * ============================================================================= */ - +import './add'; import './broadcast_to'; import './div'; import './div_no_nan'; diff --git a/tfjs-core/src/public/chained_ops/register_all_chained_ops_test.ts b/tfjs-core/src/public/chained_ops/register_all_chained_ops_test.ts index 8f1630653d2..b82818be73c 100644 --- a/tfjs-core/src/public/chained_ops/register_all_chained_ops_test.ts +++ b/tfjs-core/src/public/chained_ops/register_all_chained_ops_test.ts @@ -24,8 +24,13 @@ import {ALL_ENVS, describeWithFlags} from '../../jasmine_util'; // flexibility to change in future. const CHAINED_OPS = [ +<<<<<<< HEAD 'square', 'broadcastTo', 'tile', 'oneHot', 'div', 'divNoNan', 'transpose', 'pad', 'batchNorm' +======= + 'add', 'broadcastTo', 'div', 'divNoNan', 'oneHot', 'pad', 'square', 'tile', + 'transpose' +>>>>>>> [core]Modularize add. ]; describeWithFlags('chained ops', ALL_ENVS, () => { diff --git a/tfjs-core/src/register_all_gradients.ts b/tfjs-core/src/register_all_gradients.ts index 35974b826ac..62d0c1b738a 100644 --- a/tfjs-core/src/register_all_gradients.ts +++ b/tfjs-core/src/register_all_gradients.ts @@ -14,6 +14,7 @@ * limitations under the License. * ============================================================================= */ +import {addGradConfig} from './gradients/Add_grad'; import {broadcastToGradConfig} from './gradients/BroadcastTo_grad'; import {divGradConfig} from './gradients/Div_grad'; import {fusedBatchNormGradConfig} from './gradients/FusedBatchNorm_grad'; @@ -29,7 +30,7 @@ import {registerGradient} from './kernel_registry'; // Export all kernel configs here so that the package can auto register them const gradConfigs: GradConfig[] = [ - broadcastToGradConfig, divGradConfig, fusedBatchNormGradConfig, + addGradConfig, broadcastToGradConfig, divGradConfig, fusedBatchNormGradConfig, identityGradConfig, oneHotGradConfig, padV2GradConfig, squareGradConfig, squaredDifferenceGradConfig, tileGradConfig, transposeGradConfig ]; diff --git a/tfjs-core/src/tensor.ts b/tfjs-core/src/tensor.ts index 83345f02555..d8cc002fca6 100644 --- a/tfjs-core/src/tensor.ts +++ b/tfjs-core/src/tensor.ts @@ -208,7 +208,6 @@ export interface OpHandler { max<T extends Tensor>(x: Tensor, axis: number|number[], keepDims: boolean): T; argMin<T extends Tensor>(x: Tensor, axis: number): T; argMax<T extends Tensor>(x: Tensor, axis: number): T; - add<T extends Tensor>(a: Tensor, b: Tensor|TensorLike): T; addStrict<T extends Tensor>(a: T, b: T|TensorLike): T; atan2<T extends Tensor>(a: Tensor, b: Tensor|TensorLike): T; sub<T extends Tensor>(a: Tensor, b: Tensor|TensorLike): T; @@ -877,11 +876,6 @@ export class Tensor<R extends Rank = Rank> { } // Binary ops.
- - add(x: Tensor|TensorLike): T { - this.throwIfDisposed(); - return opHandler.add(this, x); - } addStrict(this: T, x: T|TensorLike): T { this.throwIfDisposed(); return opHandler.addStrict(this, x); From 7c51c89e240ba1a5528cdf3dd9bf91379ca9ff34 Mon Sep 17 00:00:00 2001 From: Na Li Date: Tue, 31 Mar 2020 10:56:04 -0700 Subject: [PATCH 2/7] . --- tfjs-core/src/ops/binary_ops.ts | 3 +++ tfjs-core/src/ops/fused_ops.ts | 3 +-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/tfjs-core/src/ops/binary_ops.ts b/tfjs-core/src/ops/binary_ops.ts index 60a95401028..a9455c24b8d 100644 --- a/tfjs-core/src/ops/binary_ops.ts +++ b/tfjs-core/src/ops/binary_ops.ts @@ -22,11 +22,14 @@ import {makeTypesMatch} from '../tensor_util'; import {convertToTensor} from '../tensor_util_env'; import {TensorLike} from '../types'; import * as util from '../util'; + +import {add} from './add'; import * as broadcast_util from './broadcast_util'; import {op} from './operation'; import {scalar, zerosLike} from './tensor_ops'; import {neg} from './unary_ops'; + /** * Adds a list of `tf.Tensor`s element-wise, each with the same shape and dtype. * diff --git a/tfjs-core/src/ops/fused_ops.ts b/tfjs-core/src/ops/fused_ops.ts index 0977cbae2d4..9372292130f 100644 --- a/tfjs-core/src/ops/fused_ops.ts +++ b/tfjs-core/src/ops/fused_ops.ts @@ -25,12 +25,11 @@ import {convertToTensor} from '../tensor_util_env'; import {TensorLike} from '../types'; import * as util from '../util'; -import {add} from './binary_ops'; +import {add} from './add'; import * as broadcast_util from './broadcast_util'; import {conv2d as unfusedConv2d, depthwiseConv2d as unfusedDepthwiseConv2d} from './conv'; import {Activation, shouldFuse} from './fused_util'; import {matMul as unfusedMatMul} from './matmul'; - import {elu, prelu, relu, relu6} from './relu_ops'; // Returns gradient for fused activation. From ffcf45c3ea90cd03b17e2117020efd25f1c26419 Mon Sep 17 00:00:00 2001 From: Na Li Date: Tue, 31 Mar 2020 13:21:28 -0700 Subject: [PATCH 3/7] . 
--- tfjs-core/src/gradients/Add_grad.ts | 3 +- tfjs-core/src/ops/add.ts | 6 +- tfjs-core/src/ops/add_test.ts | 325 +++++++++++++++++++++++++++ tfjs-core/src/ops/arithmetic_test.ts | 306 ------------------------- tfjs-core/src/ops/binary_ops.ts | 1 - 5 files changed, 332 insertions(+), 309 deletions(-) create mode 100644 tfjs-core/src/ops/add_test.ts diff --git a/tfjs-core/src/gradients/Add_grad.ts b/tfjs-core/src/gradients/Add_grad.ts index 5d2bd450578..f1589528dc6 100644 --- a/tfjs-core/src/gradients/Add_grad.ts +++ b/tfjs-core/src/gradients/Add_grad.ts @@ -21,6 +21,7 @@ import {Tensor} from '../tensor'; export const addGradConfig: GradConfig = { kernelName: Add, + inputsToSave: ['a', 'b'], gradFunc: (dy: Tensor, saved: Tensor[]) => { const [a, b] = saved; const outShape = @@ -45,4 +46,4 @@ export const addGradConfig: GradConfig = { return {a: derA, b: derB}; } -} +}; diff --git a/tfjs-core/src/ops/add.ts b/tfjs-core/src/ops/add.ts index d7fb8c41d63..b96b7ca1abb 100644 --- a/tfjs-core/src/ops/add.ts +++ b/tfjs-core/src/ops/add.ts @@ -53,7 +53,11 @@ function add_<T extends Tensor>(a: Tensor|TensorLike, b: Tensor|TensorLike): T { let $b = convertToTensor(b, 'b', 'add'); [$a, $b] = makeTypesMatch($a, $b); - const forward: ForwardFunc<Tensor> = backend => backend.add($a, $b); + const forward: ForwardFunc<Tensor> = (backend, save) => { + const res = backend.add($a, $b); + save([$a, $b]); + return res; + }; const inputs: AddInputs = {a: $a, b: $b}; diff --git a/tfjs-core/src/ops/add_test.ts b/tfjs-core/src/ops/add_test.ts new file mode 100644 index 00000000000..66101e5784c --- /dev/null +++ b/tfjs-core/src/ops/add_test.ts @@ -0,0 +1,325 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * ============================================================================= + */ +import * as tf from '../index'; +import {ALL_ENVS, describeWithFlags} from '../jasmine_util'; +import {expectArraysClose, expectArraysEqual} from '../test_util'; + +describeWithFlags('add', ALL_ENVS, () => { + it('c + A', async () => { + const c = tf.scalar(5); + const a = tf.tensor1d([1, 2, 3]); + + const result = tf.add(c, a); + + expectArraysClose(await result.data(), [6, 7, 8]); + }); + + it('c + A propagates NaNs', async () => { + const c = tf.scalar(NaN); + const a = tf.tensor1d([1, 2, 3]); + + const res = tf.add(c, a); + + expectArraysEqual(await res.data(), [NaN, NaN, NaN]); + }); + + it('A + B broadcasting same rank Tensors different shape', async () => { + const a = tf.tensor2d([1, 2, -3, -4], [2, 2]); + const b = tf.tensor2d([2, 3], [2, 1]); + + const result = tf.add(a, b); + + expect(result.shape).toEqual([2, 2]); + const expected = [3, 4, 0, -1]; + + expectArraysClose(await result.data(), expected); + }); + + it('A + B broadcast 2D + 1D', async () => { + const a = tf.tensor2d([1, 2, -3, -4], [2, 2]); + const b = tf.tensor1d([1, 2]); + + const result = tf.add(a, b); + + expect(result.shape).toEqual([2, 2]); + const expected = [2, 4, -2, -2]; + + expectArraysClose(await result.data(), expected); + }); + + it('A + B', async () => { + const a = tf.tensor1d([2, 5, 1]); + const b = tf.tensor1d([4, 2, -1]); + + const result = tf.add(a, b); + + const expected = [6, 7, 0]; + expectArraysClose(await result.data(), expected); + }); + + it('TensorLike', async () => { + const a = [2, 5, 1]; + const b = [4, 2, -1]; + + const result = tf.add(a, b); + + const expected = [6, 7, 0]; + expectArraysClose(await result.data(), expected); + }); + + it('TensorLike chained', async () => { + const a = tf.tensor1d([2, 5, 1]); + const b = [4, 2, -1]; + + const result = a.add(b); + + const expected = [6, 7, 0]; + expectArraysClose(await result.data(), expected); + }); + + it('A + B propagates NaNs', async () => { + const a = tf.tensor1d([2, 5, NaN]); + const b = tf.tensor1d([4, 2, -1]); + + const res = tf.add(a, b); + expectArraysClose(await res.data(), [6, 7, NaN]); + }); + + it('A + B throws when passed tensors with different shape', () => { + const a = tf.tensor1d([2, 5, 1, 5]); + const b = tf.tensor1d([4, 2, -1]); + + expect(() => tf.add(a, b)).toThrowError(); + expect(() => tf.add(b, a)).toThrowError(); + }); + + it('2D+scalar broadcast', async () => { + const a = tf.tensor2d([1, 2, 3, 4, 5, 6], [2, 3]); + const b = tf.scalar(2); + const res = tf.add(a, b); + expect(res.shape).toEqual([2, 3]); + expectArraysClose(await res.data(), [3, 4, 5, 6, 7, 8]); + }); + + it('scalar+1D broadcast', async () => { + const a = tf.scalar(2); + const b = tf.tensor1d([1, 2, 3, 4, 5, 6]); + const res = tf.add(a, b); + expect(res.shape).toEqual([6]); + expectArraysClose(await res.data(), [3, 4, 5, 6, 7, 8]); + }); + + it('2D+2D broadcast each with 1 dim', async () => { + const a = tf.tensor2d([1, 2, 5], [1, 3]); + const b = tf.tensor2d([7, 3], [2, 1]); + const res = tf.add(a, b); + expect(res.shape).toEqual([2, 3]); + expectArraysClose(await res.data(), [8, 9, 12, 4, 5, 8]); + }); + + it('2D+2D broadcast inner dim of b', async () => { + const a = tf.tensor2d([1, 2, 5, 4, 5, 6], [2, 3]); + const b = tf.tensor2d([7, 3], [2, 1]); + const res = tf.add(a, b); + expect(res.shape).toEqual([2, 3]); + expectArraysClose(await res.data(), [8, 9, 12, 7, 8, 9]); + }); + + it('3D+scalar', async () => { + const a = tf.tensor3d([1, 2, 3, 4, 5, 6], 
[2, 3, 1]); + const b = tf.scalar(-1); + const res = tf.add(a, b); + expect(res.shape).toEqual([2, 3, 1]); + expectArraysClose(await res.data(), [0, 1, 2, 3, 4, 5]); + }); + + it('6D+scalar', async () => { + const a = tf.range(0, 64).reshape([2, 2, 2, 2, 2, 2]); + const b = tf.scalar(-1); + const res = tf.add(a, b); + expect(res.shape).toEqual([2, 2, 2, 2, 2, 2]); + const expectedResult = [ + -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, + 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62 + ]; + expectArraysClose(await res.data(), expectedResult); + }); + + it('6D+2D', async () => { + const a = tf.range(0, 64).reshape([2, 2, 2, 2, 2, 2]); + const b = tf.tensor2d([11, 13, 17, 19], [2, 2]); + const res = tf.add(a, b); + expect(res.shape).toEqual([2, 2, 2, 2, 2, 2]); + const expectedResult = [ + 11, 14, 19, 22, 15, 18, 23, 26, 19, 22, 27, 30, 23, 26, 31, 34, + 27, 30, 35, 38, 31, 34, 39, 42, 35, 38, 43, 46, 39, 42, 47, 50, + 43, 46, 51, 54, 47, 50, 55, 58, 51, 54, 59, 62, 55, 58, 63, 66, + 59, 62, 67, 70, 63, 66, 71, 74, 67, 70, 75, 78, 71, 74, 79, 82 + ]; + expectArraysClose(await res.data(), expectedResult); + }); + + it('add tensors with 0 in shape', async () => { + const a = tf.tensor1d([1]); + const b = tf.tensor3d([], [0, 0, 5]); + const res = tf.add(a, b); + expect(res.shape).toEqual([0, 0, 5]); + expectArraysEqual(await res.data(), []); + }); + + it('gradient: scalar + 1D broadcast', async () => { + const a = tf.scalar(2); + const b = tf.tensor1d([3, 4, 5]); + const dy = tf.tensor1d([7, 8, 9]); + + const grads = tf.grads((a, b) => tf.add(a, b)); + const [da, db] = grads([a, b], dy); + + expect(da.shape).toEqual(a.shape); + expect(da.dtype).toEqual('float32'); + expectArraysClose(await da.data(), [7 + 8 + 9]); + + expect(db.shape).toEqual(b.shape); + expect(db.dtype).toEqual('float32'); + expectArraysClose(await db.data(), [7, 8, 9]); + }); + + it('gradient with clones', async () => { + const a = tf.scalar(2); + const b = tf.tensor1d([3, 4, 5]); + const dy = tf.tensor1d([7, 8, 9]); + + const grads = tf.grads((a, b) => tf.add(a.clone(), b.clone()).clone()); + const [da, db] = grads([a, b], dy); + + expect(da.shape).toEqual(a.shape); + expect(da.dtype).toEqual('float32'); + expectArraysClose(await da.data(), [7 + 8 + 9]); + + expect(db.shape).toEqual(b.shape); + expect(db.dtype).toEqual('float32'); + expectArraysClose(await db.data(), [7, 8, 9]); + }); + + it('gradient: 2D + 2D broadcast', async () => { + const a = tf.tensor2d([2, 3], [2, 1]); + const b = tf.tensor2d([4, 5, 6, 7], [2, 2]); + const dy = tf.tensor2d([5, 4, 3, 2], [2, 2]); + + const grads = tf.grads((a, b) => tf.add(a, b)); + const [da, db] = grads([a, b], dy); + + expect(da.shape).toEqual(a.shape); + expect(da.dtype).toEqual('float32'); + expectArraysClose(await da.data(), [5 + 4, 3 + 2]); + + expect(db.shape).toEqual(b.shape); + expect(db.dtype).toEqual('float32'); + expectArraysClose(await db.data(), [5, 4, 3, 2]); + }); + + it('complex number addition', async () => { + const real1 = tf.tensor1d([1]); + const imag1 = tf.tensor1d([2]); + const complex1 = tf.complex(real1, imag1); + + const real2 = tf.tensor1d([3]); + const imag2 = tf.tensor1d([4]); + const complex2 = tf.complex(real2, imag2); + + const result = complex1.add(complex2); + + expect(result.dtype).toBe('complex64'); + expect(result.shape).toEqual([1]); + expectArraysClose(await result.data(), [4, 6]); + }); + 
+ it('complex number reshape and then addition', async () => { + const real1 = tf.tensor1d([1]); + const imag1 = tf.tensor1d([2]); + const complex1 = tf.complex(real1, imag1); + + const real2 = tf.tensor1d([3]); + const imag2 = tf.tensor1d([4]); + const complex2 = tf.complex(real2, imag2); + + const complex1Reshaped = complex1.reshape([1, 1, 1]); + const complex2Reshaped = complex2.reshape([1, 1, 1]); + + const result = complex1Reshaped.add(complex2Reshaped); + + expect(result.dtype).toBe('complex64'); + expect(result.shape).toEqual([1, 1, 1]); + expectArraysClose(await result.data(), [4, 6]); + }); + + it('complex number broadcasting addition', async () => { + const real1 = tf.tensor2d([1, 2, -3, -4], [2, 2]); + const imag1 = tf.tensor2d([10, 20, -30, -40], [2, 2]); + const complex1 = tf.complex(real1, imag1); + + const real2 = tf.tensor1d([4]); + const imag2 = tf.tensor1d([5]); + const complex2 = tf.complex(real2, imag2); + + const result = tf.add(complex1, complex2); + + expect(result.dtype).toEqual('complex64'); + expect(result.shape).toEqual([2, 2]); + expectArraysClose( + await result.data(), + [1 + 4, 10 + 5, 2 + 4, 20 + 5, -3 + 4, -30 + 5, -4 + 4, -40 + 5]); + }); + + it('throws when passed a as a non-tensor', () => { + expect(() => tf.add({} as tf.Tensor, tf.scalar(1))) + .toThrowError(/Argument 'a' passed to 'add' must be a Tensor/); + }); + it('throws when passed b as a non-tensor', () => { + expect(() => tf.add(tf.scalar(1), {} as tf.Tensor)) + .toThrowError(/Argument 'b' passed to 'add' must be a Tensor/); + }); + + it('upcasts when dtypes dont match', async () => { + let res = tf.add(tf.scalar(1, 'int32'), tf.scalar(1, 'float32')); + expect(res.dtype).toBe('float32'); + expectArraysClose(await res.data(), [2]); + + res = tf.add(tf.scalar(1, 'int32'), tf.scalar(true, 'bool')); + expect(res.dtype).toBe('int32'); + expectArraysClose(await res.data(), [2]); + + res = tf.add(tf.scalar(1, 'int32'), tf.scalar(false, 'bool')); + expect(res.dtype).toBe('int32'); + expectArraysClose(await res.data(), [1]); + + res = tf.add(tf.complex(4, 7), tf.scalar(1, 'float32')); + expect(res.dtype).toBe('complex64'); + expectArraysClose(await res.data(), [5, 7]); + + res = tf.add(tf.complex(4, 7), tf.scalar(1, 'int32')); + expect(res.dtype).toBe('complex64'); + expectArraysClose(await res.data(), [5, 7]); + }); + + it('accepts a tensor-like object', async () => { + const result = tf.add(5, [1, 2, 3]); + expectArraysClose(await result.data(), [6, 7, 8]); + }); +}); diff --git a/tfjs-core/src/ops/arithmetic_test.ts b/tfjs-core/src/ops/arithmetic_test.ts index 3ae2dcc9b43..aeda0b01dbf 100644 --- a/tfjs-core/src/ops/arithmetic_test.ts +++ b/tfjs-core/src/ops/arithmetic_test.ts @@ -1014,312 +1014,6 @@ describeWithFlags('pow', ALL_ENVS, () => { }); }); -describeWithFlags('add', ALL_ENVS, () => { - it('c + A', async () => { - const c = tf.scalar(5); - const a = tf.tensor1d([1, 2, 3]); - - const result = tf.add(c, a); - - expectArraysClose(await result.data(), [6, 7, 8]); - }); - - it('c + A propagates NaNs', async () => { - const c = tf.scalar(NaN); - const a = tf.tensor1d([1, 2, 3]); - - const res = tf.add(c, a); - - expectArraysEqual(await res.data(), [NaN, NaN, NaN]); - }); - - it('A + B broadcasting same rank Tensors different shape', async () => { - const a = tf.tensor2d([1, 2, -3, -4], [2, 2]); - const b = tf.tensor2d([2, 3], [2, 1]); - - const result = tf.add(a, b); - - expect(result.shape).toEqual([2, 2]); - const expected = [3, 4, 0, -1]; - - expectArraysClose(await result.data(), expected); - 
}); - - it('A + B broadcast 2D + 1D', async () => { - const a = tf.tensor2d([1, 2, -3, -4], [2, 2]); - const b = tf.tensor1d([1, 2]); - - const result = tf.add(a, b); - - expect(result.shape).toEqual([2, 2]); - const expected = [2, 4, -2, -2]; - - expectArraysClose(await result.data(), expected); - }); - - it('A + B', async () => { - const a = tf.tensor1d([2, 5, 1]); - const b = tf.tensor1d([4, 2, -1]); - - const result = tf.add(a, b); - - const expected = [6, 7, 0]; - expectArraysClose(await result.data(), expected); - }); - - it('TensorLike', async () => { - const a = [2, 5, 1]; - const b = [4, 2, -1]; - - const result = tf.add(a, b); - - const expected = [6, 7, 0]; - expectArraysClose(await result.data(), expected); - }); - - it('TensorLike chained', async () => { - const a = tf.tensor1d([2, 5, 1]); - const b = [4, 2, -1]; - - const result = a.add(b); - - const expected = [6, 7, 0]; - expectArraysClose(await result.data(), expected); - }); - - it('A + B propagates NaNs', async () => { - const a = tf.tensor1d([2, 5, NaN]); - const b = tf.tensor1d([4, 2, -1]); - - const res = tf.add(a, b); - expectArraysClose(await res.data(), [6, 7, NaN]); - }); - - it('A + B throws when passed tensors with different shape', () => { - const a = tf.tensor1d([2, 5, 1, 5]); - const b = tf.tensor1d([4, 2, -1]); - - expect(() => tf.add(a, b)).toThrowError(); - expect(() => tf.add(b, a)).toThrowError(); - }); - - it('2D+scalar broadcast', async () => { - const a = tf.tensor2d([1, 2, 3, 4, 5, 6], [2, 3]); - const b = tf.scalar(2); - const res = tf.add(a, b); - expect(res.shape).toEqual([2, 3]); - expectArraysClose(await res.data(), [3, 4, 5, 6, 7, 8]); - }); - - it('scalar+1D broadcast', async () => { - const a = tf.scalar(2); - const b = tf.tensor1d([1, 2, 3, 4, 5, 6]); - const res = tf.add(a, b); - expect(res.shape).toEqual([6]); - expectArraysClose(await res.data(), [3, 4, 5, 6, 7, 8]); - }); - - it('2D+2D broadcast each with 1 dim', async () => { - const a = tf.tensor2d([1, 2, 5], [1, 3]); - const b = tf.tensor2d([7, 3], [2, 1]); - const res = tf.add(a, b); - expect(res.shape).toEqual([2, 3]); - expectArraysClose(await res.data(), [8, 9, 12, 4, 5, 8]); - }); - - it('2D+2D broadcast inner dim of b', async () => { - const a = tf.tensor2d([1, 2, 5, 4, 5, 6], [2, 3]); - const b = tf.tensor2d([7, 3], [2, 1]); - const res = tf.add(a, b); - expect(res.shape).toEqual([2, 3]); - expectArraysClose(await res.data(), [8, 9, 12, 7, 8, 9]); - }); - - it('3D+scalar', async () => { - const a = tf.tensor3d([1, 2, 3, 4, 5, 6], [2, 3, 1]); - const b = tf.scalar(-1); - const res = tf.add(a, b); - expect(res.shape).toEqual([2, 3, 1]); - expectArraysClose(await res.data(), [0, 1, 2, 3, 4, 5]); - }); - - it('6D+scalar', async () => { - const a = tf.range(0, 64).reshape([2, 2, 2, 2, 2, 2]); - const b = tf.scalar(-1); - const res = tf.add(a, b); - expect(res.shape).toEqual([2, 2, 2, 2, 2, 2]); - const expectedResult = [ - -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, - 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, - 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, - 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62 - ]; - expectArraysClose(await res.data(), expectedResult); - }); - - it('6D+2D', async () => { - const a = tf.range(0, 64).reshape([2, 2, 2, 2, 2, 2]); - const b = tf.tensor2d([11, 13, 17, 19], [2, 2]); - const res = tf.add(a, b); - expect(res.shape).toEqual([2, 2, 2, 2, 2, 2]); - const expectedResult = [ - 11, 14, 19, 22, 15, 18, 23, 26, 19, 22, 27, 30, 23, 26, 31, 
34, - 27, 30, 35, 38, 31, 34, 39, 42, 35, 38, 43, 46, 39, 42, 47, 50, - 43, 46, 51, 54, 47, 50, 55, 58, 51, 54, 59, 62, 55, 58, 63, 66, - 59, 62, 67, 70, 63, 66, 71, 74, 67, 70, 75, 78, 71, 74, 79, 82 - ]; - expectArraysClose(await res.data(), expectedResult); - }); - - it('add tensors with 0 in shape', async () => { - const a = tf.tensor1d([1]); - const b = tf.tensor3d([], [0, 0, 5]); - const res = tf.add(a, b); - expect(res.shape).toEqual([0, 0, 5]); - expectArraysEqual(await res.data(), []); - }); - - it('gradient: scalar + 1D broadcast', async () => { - const a = tf.scalar(2); - const b = tf.tensor1d([3, 4, 5]); - const dy = tf.tensor1d([7, 8, 9]); - - const grads = tf.grads((a, b) => tf.add(a, b)); - const [da, db] = grads([a, b], dy); - - expect(da.shape).toEqual(a.shape); - expect(da.dtype).toEqual('float32'); - expectArraysClose(await da.data(), [7 + 8 + 9]); - - expect(db.shape).toEqual(b.shape); - expect(db.dtype).toEqual('float32'); - expectArraysClose(await db.data(), [7, 8, 9]); - }); - - it('gradient with clones', async () => { - const a = tf.scalar(2); - const b = tf.tensor1d([3, 4, 5]); - const dy = tf.tensor1d([7, 8, 9]); - - const grads = tf.grads((a, b) => tf.add(a.clone(), b.clone()).clone()); - const [da, db] = grads([a, b], dy); - - expect(da.shape).toEqual(a.shape); - expect(da.dtype).toEqual('float32'); - expectArraysClose(await da.data(), [7 + 8 + 9]); - - expect(db.shape).toEqual(b.shape); - expect(db.dtype).toEqual('float32'); - expectArraysClose(await db.data(), [7, 8, 9]); - }); - - it('gradient: 2D + 2D broadcast', async () => { - const a = tf.tensor2d([2, 3], [2, 1]); - const b = tf.tensor2d([4, 5, 6, 7], [2, 2]); - const dy = tf.tensor2d([5, 4, 3, 2], [2, 2]); - - const grads = tf.grads((a, b) => tf.add(a, b)); - const [da, db] = grads([a, b], dy); - - expect(da.shape).toEqual(a.shape); - expect(da.dtype).toEqual('float32'); - expectArraysClose(await da.data(), [5 + 4, 3 + 2]); - - expect(db.shape).toEqual(b.shape); - expect(db.dtype).toEqual('float32'); - expectArraysClose(await db.data(), [5, 4, 3, 2]); - }); - - it('complex number addition', async () => { - const real1 = tf.tensor1d([1]); - const imag1 = tf.tensor1d([2]); - const complex1 = tf.complex(real1, imag1); - - const real2 = tf.tensor1d([3]); - const imag2 = tf.tensor1d([4]); - const complex2 = tf.complex(real2, imag2); - - const result = complex1.add(complex2); - - expect(result.dtype).toBe('complex64'); - expect(result.shape).toEqual([1]); - expectArraysClose(await result.data(), [4, 6]); - }); - - it('complex number reshape and then addition', async () => { - const real1 = tf.tensor1d([1]); - const imag1 = tf.tensor1d([2]); - const complex1 = tf.complex(real1, imag1); - - const real2 = tf.tensor1d([3]); - const imag2 = tf.tensor1d([4]); - const complex2 = tf.complex(real2, imag2); - - const complex1Reshaped = complex1.reshape([1, 1, 1]); - const complex2Reshaped = complex2.reshape([1, 1, 1]); - - const result = complex1Reshaped.add(complex2Reshaped); - - expect(result.dtype).toBe('complex64'); - expect(result.shape).toEqual([1, 1, 1]); - expectArraysClose(await result.data(), [4, 6]); - }); - - it('complex number broadcasting addition', async () => { - const real1 = tf.tensor2d([1, 2, -3, -4], [2, 2]); - const imag1 = tf.tensor2d([10, 20, -30, -40], [2, 2]); - const complex1 = tf.complex(real1, imag1); - - const real2 = tf.tensor1d([4]); - const imag2 = tf.tensor1d([5]); - const complex2 = tf.complex(real2, imag2); - - const result = tf.add(complex1, complex2); - - 
expect(result.dtype).toEqual('complex64'); - expect(result.shape).toEqual([2, 2]); - expectArraysClose( - await result.data(), - [1 + 4, 10 + 5, 2 + 4, 20 + 5, -3 + 4, -30 + 5, -4 + 4, -40 + 5]); - }); - - it('throws when passed a as a non-tensor', () => { - expect(() => tf.add({} as tf.Tensor, tf.scalar(1))) - .toThrowError(/Argument 'a' passed to 'add' must be a Tensor/); - }); - it('throws when passed b as a non-tensor', () => { - expect(() => tf.add(tf.scalar(1), {} as tf.Tensor)) - .toThrowError(/Argument 'b' passed to 'add' must be a Tensor/); - }); - - it('upcasts when dtypes dont match', async () => { - let res = tf.add(tf.scalar(1, 'int32'), tf.scalar(1, 'float32')); - expect(res.dtype).toBe('float32'); - expectArraysClose(await res.data(), [2]); - - res = tf.add(tf.scalar(1, 'int32'), tf.scalar(true, 'bool')); - expect(res.dtype).toBe('int32'); - expectArraysClose(await res.data(), [2]); - - res = tf.add(tf.scalar(1, 'int32'), tf.scalar(false, 'bool')); - expect(res.dtype).toBe('int32'); - expectArraysClose(await res.data(), [1]); - - res = tf.add(tf.complex(4, 7), tf.scalar(1, 'float32')); - expect(res.dtype).toBe('complex64'); - expectArraysClose(await res.data(), [5, 7]); - - res = tf.add(tf.complex(4, 7), tf.scalar(1, 'int32')); - expect(res.dtype).toBe('complex64'); - expectArraysClose(await res.data(), [5, 7]); - }); - - it('accepts a tensor-like object', async () => { - const result = tf.add(5, [1, 2, 3]); - expectArraysClose(await result.data(), [6, 7, 8]); - }); -}); - describeWithFlags('addN', ALL_ENVS, () => { it('a single tensor', async () => { const res = tf.addN([tf.tensor1d([1, 2, 3])]); diff --git a/tfjs-core/src/ops/binary_ops.ts b/tfjs-core/src/ops/binary_ops.ts index a9455c24b8d..c44c82af29c 100644 --- a/tfjs-core/src/ops/binary_ops.ts +++ b/tfjs-core/src/ops/binary_ops.ts @@ -29,7 +29,6 @@ import {op} from './operation'; import {scalar, zerosLike} from './tensor_ops'; import {neg} from './unary_ops'; - /** * Adds a list of `tf.Tensor`s element-wise, each with the same shape and dtype. * From 09f0862658929fc65c0a3e0d27981f03e1b2d06a Mon Sep 17 00:00:00 2001 From: Na Li Date: Tue, 31 Mar 2020 14:14:39 -0700 Subject: [PATCH 4/7] change build-deps and install order. --- tfjs-core/src/tests.ts | 1 + tfjs-data/package.json | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/tfjs-core/src/tests.ts b/tfjs-core/src/tests.ts index ae9143836b5..a6515ae1622 100644 --- a/tfjs-core/src/tests.ts +++ b/tfjs-core/src/tests.ts @@ -37,6 +37,7 @@ import './io/router_registry_test'; import './io/weights_loader_test'; import './jasmine_util_test'; import './kernel_registry_test'; +import './ops/add_test'; import './ops/arithmetic_test'; import './ops/array_ops_test'; import './ops/axis_util_test'; diff --git a/tfjs-data/package.json b/tfjs-data/package.json index 9ce9bd17dce..9a317e29ade 100644 --- a/tfjs-data/package.json +++ b/tfjs-data/package.json @@ -53,7 +53,7 @@ "publish-npm": "npm publish", "test": "yarn && yarn build-deps && ts-node src/test_node.ts", "test-browsers": "karma start --browsers='Chrome,Firefox'", - "test-ci": "yarn && yarn build-deps-ci && yarn build-ci && yarn lint && ts-node src/test_node.ts", + "test-ci": "yarn build-deps-ci && yarn && yarn build-ci && yarn lint && ts-node src/test_node.ts", "test-snippets": "ts-node ./scripts/test_snippets.ts", "lint": "tslint -p . -t verbose" }, From fda7fbf79f55a85116f7b48316e44061d6520839 Mon Sep 17 00:00:00 2001 From: Na Li Date: Tue, 31 Mar 2020 14:25:29 -0700 Subject: [PATCH 5/7] . 
--- tfjs-core/yarn.lock | 27 +++++++++++++++++++++++---- tfjs-data/package.json | 2 +- 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/tfjs-core/yarn.lock b/tfjs-core/yarn.lock index 78de9633ccd..e708e93c2b3 100644 --- a/tfjs-core/yarn.lock +++ b/tfjs-core/yarn.lock @@ -18,10 +18,29 @@ esutils "^2.0.2" js-tokens "^4.0.0" -"@bazel/bazelisk@^1.3.0": - version "1.3.0" - resolved "https://registry.yarnpkg.com/@bazel/bazelisk/-/bazelisk-1.3.0.tgz#dc312dd30ad01e9af86e53b40795ab6e545fa55b" - integrity sha512-73H1nq3572tTf+dhDT86aWQN+LCyfxrh05jabqPXp6cpR8soxte3gS5oUqkN36fUe+J2HzNiV4CXZTz4Xytd3Q== +"@bazel/bazel-darwin_x64@0.24.0": + version "0.24.0" + resolved "https://registry.yarnpkg.com/@bazel/bazel-darwin_x64/-/bazel-darwin_x64-0.24.0.tgz#828ef298d8d542961df388f17b0244f4f4302a74" + integrity sha512-xly44vkcD/fauUb7Lm5Lme4qhEZdkuuyBKSVQUHPbYAGDdbj/W8dupI3bZREkJAgG/WrRU+WXUemMj4U8ZcLcw== + +"@bazel/bazel-linux_x64@0.24.0": + version "0.24.0" + resolved "https://registry.yarnpkg.com/@bazel/bazel-linux_x64/-/bazel-linux_x64-0.24.0.tgz#9ef2e7266833ad2221fe4af4ceb6763d2897e3ff" + integrity sha512-p5ylPLWnJZDGbaIFBrtD/tp3Su5rMdzeeNJKU24XyiWQTHVZ3OD3I2Fb0ILCgfBjY8AlA7EtCtOI4hYnAuIOtg== + +"@bazel/bazel-win32_x64@0.24.0": + version "0.24.0" + resolved "https://registry.yarnpkg.com/@bazel/bazel-win32_x64/-/bazel-win32_x64-0.24.0.tgz#02d83113a6c6ed99795a3e41bff5631aa141638d" + integrity sha512-/bcSEx+GoV/q7H4WM0jazfxTcurSiIIePhRv+d05mxRDcaWwhCO8KzmmZRWH1abW6npvq5tLkbSQi7G7nUBhgg== + +"@bazel/bazel@^0.24.0": + version "0.24.0" + resolved "https://registry.yarnpkg.com/@bazel/bazel/-/bazel-0.24.0.tgz#f4e68e3680ac299858c24c26be3d08d1151e78fc" + integrity sha512-/5E55tqH9ogAGF9Dd7RSCJmk7/xdlsPTAhsX3yEsEMs7GLdHlgD3jbeePsKUiHKKr8LXAufjTs2pXQfjrkZRMg== + optionalDependencies: + "@bazel/bazel-darwin_x64" "0.24.0" + "@bazel/bazel-linux_x64" "0.24.0" + "@bazel/bazel-win32_x64" "0.24.0" "@bazel/typescript@^0.27.8": version "0.27.10" diff --git a/tfjs-data/package.json b/tfjs-data/package.json index 9a317e29ade..9ce9bd17dce 100644 --- a/tfjs-data/package.json +++ b/tfjs-data/package.json @@ -53,7 +53,7 @@ "publish-npm": "npm publish", "test": "yarn && yarn build-deps && ts-node src/test_node.ts", "test-browsers": "karma start --browsers='Chrome,Firefox'", - "test-ci": "yarn build-deps-ci && yarn && yarn build-ci && yarn lint && ts-node src/test_node.ts", + "test-ci": "yarn && yarn build-deps-ci && yarn build-ci && yarn lint && ts-node src/test_node.ts", "test-snippets": "ts-node ./scripts/test_snippets.ts", "lint": "tslint -p . -t verbose" }, From 84541dab7772ea1a1b2e2472a7be7fcda9ad6c27 Mon Sep 17 00:00:00 2001 From: Na Li Date: Tue, 31 Mar 2020 16:59:13 -0700 Subject: [PATCH 6/7] Fix lint. --- tfjs-core/src/gradients/Add_grad.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tfjs-core/src/gradients/Add_grad.ts b/tfjs-core/src/gradients/Add_grad.ts index f1589528dc6..a234a940cff 100644 --- a/tfjs-core/src/gradients/Add_grad.ts +++ b/tfjs-core/src/gradients/Add_grad.ts @@ -1,5 +1,3 @@ -import {Add} from '../kernel_names'; -import {GradConfig} from '../kernel_registry'; /** * @license * Copyright 2020 Google Inc. All Rights Reserved. @@ -16,6 +14,8 @@ import {GradConfig} from '../kernel_registry'; * limitations under the License. 
* ============================================================================= */ +import {Add} from '../kernel_names'; +import {GradConfig} from '../kernel_registry'; import * as broadcast_util from '../ops/broadcast_util'; import {Tensor} from '../tensor'; From 105f658ca9b62f3e4c091f8e17e41c31518bb15c Mon Sep 17 00:00:00 2001 From: Na Li Date: Thu, 2 Apr 2020 10:23:05 -0700 Subject: [PATCH 7/7] . --- .../chained_ops/register_all_chained_ops_test.ts | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/tfjs-core/src/public/chained_ops/register_all_chained_ops_test.ts b/tfjs-core/src/public/chained_ops/register_all_chained_ops_test.ts index b82818be73c..d5901090a80 100644 --- a/tfjs-core/src/public/chained_ops/register_all_chained_ops_test.ts +++ b/tfjs-core/src/public/chained_ops/register_all_chained_ops_test.ts @@ -24,13 +24,8 @@ import {ALL_ENVS, describeWithFlags} from '../../jasmine_util'; // flexibility to change in future. const CHAINED_OPS = [ -<<<<<<< HEAD - 'square', 'broadcastTo', 'tile', 'oneHot', 'div', 'divNoNan', 'transpose', - 'pad', 'batchNorm' -======= - 'add', 'broadcastTo', 'div', 'divNoNan', 'oneHot', 'pad', 'square', 'tile', - 'transpose' ->>>>>>> [core]Modularize add. + 'add', 'batchNorm', 'broadcastTo', 'div', 'divNoNan', 'oneHot', 'pad', + 'square', 'tile', 'transpose' ]; describeWithFlags('chained ops', ALL_ENVS, () => { @@ -39,7 +34,7 @@ describeWithFlags('chained ops', ALL_ENVS, () => { for (const opName of CHAINED_OPS) { //@ts-ignore expect(typeof tensor[opName]) - .toBe('function', `${opName} chained op not found`); + .toBe('function', `${opName} chained op not found`); } }); });
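
Taken together, the series replaces the old monolithic binary_ops.ts entry for add with four small modules: a kernel name, the op itself, a gradient registered by kernel name, and a chained-op augmentation. The condensed TypeScript sketch below recaps how those pieces fit; it abbreviates the diffs above into one view (imports and registration boilerplate omitted, and the small sumToInputShape helper exists only here to shorten the gradient), so treat it as a reading aid rather than additional patch content.

// kernel_names.ts: the kernel identifier and input map shared by backends and gradients.
export const Add = 'Add';
export type AddInputs = Pick<NamedTensorInfoMap, 'a'|'b'>;

// ops/add.ts: the user-facing op runs the kernel by name and saves inputs for autograd.
function add_<T extends Tensor>(a: Tensor|TensorLike, b: Tensor|TensorLike): T {
  let $a = convertToTensor(a, 'a', 'add');
  let $b = convertToTensor(b, 'b', 'add');
  [$a, $b] = makeTypesMatch($a, $b);

  const forward: ForwardFunc<Tensor> = (backend, save) => {
    const res = backend.add($a, $b);
    save([$a, $b]);  // consumed via inputsToSave in the gradient config below
    return res;
  };

  return ENGINE.runKernelFunc(
             forward, {a: $a, b: $b} as {} as NamedTensorMap,
             null /* gradient */, Add) as T;
}
export const add = op({add_});

// gradients/Add_grad.ts: the gradient is looked up by kernel name instead of being inlined.
const sumToInputShape = (dy: Tensor, shape: number[], outShape: number[]) => {
  // Sum dy over the broadcast axes, then reshape back to the input's shape.
  const reduceAxes = broadcast_util.getReductionAxes(shape, outShape);
  const res = reduceAxes.length > 0 ? dy.sum(reduceAxes) : dy;
  return res.reshape(shape);
};

export const addGradConfig: GradConfig = {
  kernelName: Add,
  inputsToSave: ['a', 'b'],
  gradFunc: (dy: Tensor, saved: Tensor[]) => {
    const [a, b] = saved;
    const outShape =
        broadcast_util.assertAndGetBroadcastShape(a.shape, b.shape);
    return {
      a: () => sumToInputShape(dy, a.shape, outShape),
      b: () => sumToInputShape(dy, b.shape, outShape)
    };
  }
};

// public/chained_ops/add.ts: tensor.add(b) forwards to the modular op.
Tensor.prototype.add = function<T extends Tensor>(b: Tensor|TensorLike): T {
  return add(this, b);
};

// register_all_gradients.ts and register_all_chained_ops.ts then pick these up
// when the package loads, as shown in the diffs above.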