From 2b6928941de1bdd32dfd1bf9e8cf412616477543 Mon Sep 17 00:00:00 2001 From: Ann Yuan Date: Mon, 23 Mar 2020 09:31:27 -0400 Subject: [PATCH 01/24] initial --- tfjs-core/src/backends/cpu/backend_cpu.ts | 10 +- tfjs-core/src/backends/cpu/kernels/Div.ts | 23 ++++ .../backends/cpu/kernels/SquaredDifference.ts | 35 ++---- .../src/backends/cpu/register_all_kernels.ts | 5 +- .../src/backends/cpu/utils/kernel_utils.ts | 95 ++++++++++------ .../webgl/kernels/SquaredDifference.ts | 4 +- tfjs-core/src/gradients/Div_grad.ts | 49 ++++++++ tfjs-core/src/kernel_names.ts | 5 +- tfjs-core/src/ops/binary_ops.ts | 68 +----------- tfjs-core/src/ops/div.ts | 105 ++++++++++++++++++ tfjs-core/src/ops/ops.ts | 1 + tfjs-core/src/ops/squared_difference.ts | 5 +- tfjs-core/src/register_all_gradients.ts | 2 + 13 files changed, 263 insertions(+), 144 deletions(-) create mode 100644 tfjs-core/src/backends/cpu/kernels/Div.ts create mode 100644 tfjs-core/src/gradients/Div_grad.ts create mode 100644 tfjs-core/src/ops/div.ts diff --git a/tfjs-core/src/backends/cpu/backend_cpu.ts b/tfjs-core/src/backends/cpu/backend_cpu.ts index f45855309a8..37e9d445806 100644 --- a/tfjs-core/src/backends/cpu/backend_cpu.ts +++ b/tfjs-core/src/backends/cpu/backend_cpu.ts @@ -385,7 +385,7 @@ export class MathBackendCPU extends KernelBackend { const b = this.exp(a); const sumExp = this.sum(b, axes).reshape(expandedShape); - return this.realDivide(b, sumExp) as T; + return b.div(sumExp); } subtract(a: Tensor, b: Tensor): Tensor { @@ -493,14 +493,6 @@ export class MathBackendCPU extends KernelBackend { (aValue, bValue) => aValue * bValue); } - realDivide(a: Tensor, b: Tensor): Tensor { - assertNotComplex([a, b], 'realDivide'); - - const op = (a: number, b: number) => a / b; - const outputDtype = 'float32'; - return this.broadcastedBinaryOp(a, b, outputDtype, op); - } - floorDiv(a: Tensor, b: Tensor): Tensor { assertNotComplex([a, b], 'floorDiv'); diff --git a/tfjs-core/src/backends/cpu/kernels/Div.ts b/tfjs-core/src/backends/cpu/kernels/Div.ts new file mode 100644 index 00000000000..5483f92f857 --- /dev/null +++ b/tfjs-core/src/backends/cpu/kernels/Div.ts @@ -0,0 +1,23 @@ +/** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + +import {Div} from '../../../kernel_names'; +import {createBinaryKernelConfig} from '../utils/kernel_utils'; +import {createBinaryOp} from '../utils/kernel_utils'; + +export const div = createBinaryOp((a: number, b: number) => a / b); +export const divConfig = createBinaryKernelConfig(Div, div); diff --git a/tfjs-core/src/backends/cpu/kernels/SquaredDifference.ts b/tfjs-core/src/backends/cpu/kernels/SquaredDifference.ts index a57bf1a156a..d89a0e71181 100644 --- a/tfjs-core/src/backends/cpu/kernels/SquaredDifference.ts +++ b/tfjs-core/src/backends/cpu/kernels/SquaredDifference.ts @@ -15,31 +15,14 @@ * ============================================================================= */ -import {SquaredDifference, SquaredDifferenceInputs} from '../../../kernel_names'; -import {KernelConfig} from '../../../kernel_registry'; -import {TypedArray} from '../../../types'; -import {MathBackendCPU} from '../backend_cpu'; -import {assertNotComplex} from '../cpu_util'; -import {broadcastedBinaryOp} from '../utils/kernel_utils'; +import {SquaredDifference} from '../../../kernel_names'; +import {createBinaryOp} from '../utils/kernel_utils'; +import {createBinaryKernelConfig} from '../utils/kernel_utils'; -export const squaredDifferenceConfig: KernelConfig = { - kernelName: SquaredDifference, - backendName: 'cpu', - kernelFunc: ({inputs, backend}) => { - const {a, b} = inputs as SquaredDifferenceInputs; - const cpuBackend = backend as MathBackendCPU; - assertNotComplex([a, b], SquaredDifference); +const squaredDifferenceImpl = createBinaryOp((aVal, bVal) => { + const diff = aVal - bVal; + return diff * diff; +}); - const aVals = cpuBackend.data.get(a.dataId).values as TypedArray; - const bVals = cpuBackend.data.get(b.dataId).values as TypedArray; - - const [resultData, resultShape] = broadcastedBinaryOp( - a.shape, b.shape, aVals, bVals, a.dtype, (aVal, bVal) => { - const diff = aVal - bVal; - return diff * diff; - }); - - const dataId = cpuBackend.write(resultData, resultShape, a.dtype); - return {dataId, shape: resultShape, dtype: a.dtype}; - } -}; +export const squaredDifferenceConfig = + createBinaryKernelConfig(SquaredDifference, squaredDifferenceImpl); diff --git a/tfjs-core/src/backends/cpu/register_all_kernels.ts b/tfjs-core/src/backends/cpu/register_all_kernels.ts index 9516f9af311..4d7f2e983cf 100644 --- a/tfjs-core/src/backends/cpu/register_all_kernels.ts +++ b/tfjs-core/src/backends/cpu/register_all_kernels.ts @@ -19,15 +19,14 @@ // the contents of this file and import only the kernels that are needed. 
import {KernelConfig, registerKernel} from '../../kernel_registry'; +import {divConfig} from './kernels/Div'; import {nonMaxSuppressionV5Config} from './kernels/NonMaxSuppressionV5'; import {squareConfig} from './kernels/Square'; import {squaredDifferenceConfig} from './kernels/SquaredDifference'; // List all kernel configs here const kernelConfigs: KernelConfig[] = [ - nonMaxSuppressionV5Config, - squareConfig, - squaredDifferenceConfig, + nonMaxSuppressionV5Config, squareConfig, squaredDifferenceConfig, divConfig ]; for (const kernelConfig of kernelConfigs) { diff --git a/tfjs-core/src/backends/cpu/utils/kernel_utils.ts b/tfjs-core/src/backends/cpu/utils/kernel_utils.ts index ce21517dba4..f9fa95e9903 100644 --- a/tfjs-core/src/backends/cpu/utils/kernel_utils.ts +++ b/tfjs-core/src/backends/cpu/utils/kernel_utils.ts @@ -16,50 +16,77 @@ */ import * as backend_util from '../../../backends/backend_util'; +import {BinaryInputs} from '../../../kernel_names'; +import {KernelConfig} from '../../../kernel_registry'; import {DataType, NumericDataType, TypedArray} from '../../../types'; import * as util from '../../../util'; +import {MathBackendCPU} from '../backend_cpu'; +import {assertNotComplex} from '../cpu_util'; -export function broadcastedBinaryOp( - aShape: number[], bShape: number[], aVals: TypedArray, bVals: TypedArray, - dtype: DataType, - op: (a: number, b: number) => number): [TypedArray, number[]] { - const newShape = backend_util.assertAndGetBroadcastShape(aShape, bShape); +export const createBinaryKernelConfig = + (name: string, + op: ( + aShape: number[], bShape: number[], aVals: TypedArray, + bVals: TypedArray, dtype: DataType) => [TypedArray, number[]]): + KernelConfig => ({ + kernelName: name, + backendName: 'cpu', + kernelFunc: ({inputs, backend}) => { + const {a, b} = inputs as BinaryInputs; + const cpuBackend = backend as MathBackendCPU; + assertNotComplex([a, b], name); - const resultRank = newShape.length; - const resultStrides = util.computeStrides(newShape); - const resultSize = util.sizeFromShape(newShape); + const aVals = cpuBackend.data.get(a.dataId).values as TypedArray; + const bVals = cpuBackend.data.get(b.dataId).values as TypedArray; - const result = - util.getTypedArrayFromDType(dtype as NumericDataType, resultSize); + const [resultData, resultShape] = + op(a.shape, b.shape, aVals, bVals, a.dtype); - const aRank = aShape.length; - const bRank = bShape.length; + const dataId = cpuBackend.write(resultData, resultShape, a.dtype); + return {dataId, shape: resultShape, dtype: a.dtype}; + } + }); - const aStrides = util.computeStrides(aShape); - const bStrides = util.computeStrides(bShape); +export const createBinaryOp = (op: (a: number, b: number) => number) => + (aShape: number[], bShape: number[], aVals: TypedArray, bVals: TypedArray, + dtype: DataType): [TypedArray, number[]] => { + const newShape = backend_util.assertAndGetBroadcastShape(aShape, bShape); - const aBroadcastDims = backend_util.getBroadcastDims(aShape, newShape); - const bBroadcastDims = backend_util.getBroadcastDims(bShape, newShape); + const resultRank = newShape.length; + const resultStrides = util.computeStrides(newShape); + const resultSize = util.sizeFromShape(newShape); - if (aBroadcastDims.length + bBroadcastDims.length === 0) { - for (let i = 0; i < result.length; ++i) { - result[i] = op(aVals[i % aVals.length], bVals[i % bVals.length]); - } - } else { - for (let i = 0; i < result.length; ++i) { - const loc = util.indexToLoc(i, resultRank, resultStrides); + const result = + 
util.getTypedArrayFromDType(dtype as NumericDataType, resultSize); - const aLoc = loc.slice(-aRank); - aBroadcastDims.forEach(d => aLoc[d] = 0); - const aIndex = util.locToIndex(aLoc, aRank, aStrides); + const aRank = aShape.length; + const bRank = bShape.length; - const bLoc = loc.slice(-bRank); - bBroadcastDims.forEach(d => bLoc[d] = 0); - const bIndex = util.locToIndex(bLoc, bRank, bStrides); + const aStrides = util.computeStrides(aShape); + const bStrides = util.computeStrides(bShape); - result[i] = op(aVals[aIndex], bVals[bIndex]); - } - } + const aBroadcastDims = backend_util.getBroadcastDims(aShape, newShape); + const bBroadcastDims = backend_util.getBroadcastDims(bShape, newShape); - return [result, newShape]; -} + if (aBroadcastDims.length + bBroadcastDims.length === 0) { + for (let i = 0; i < result.length; ++i) { + result[i] = op(aVals[i % aVals.length], bVals[i % bVals.length]); + } + } else { + for (let i = 0; i < result.length; ++i) { + const loc = util.indexToLoc(i, resultRank, resultStrides); + + const aLoc = loc.slice(-aRank); + aBroadcastDims.forEach(d => aLoc[d] = 0); + const aIndex = util.locToIndex(aLoc, aRank, aStrides); + + const bLoc = loc.slice(-bRank); + bBroadcastDims.forEach(d => bLoc[d] = 0); + const bIndex = util.locToIndex(bLoc, bRank, bStrides); + + result[i] = op(aVals[aIndex], bVals[bIndex]); + } + } + + return [result, newShape]; + }; diff --git a/tfjs-core/src/backends/webgl/kernels/SquaredDifference.ts b/tfjs-core/src/backends/webgl/kernels/SquaredDifference.ts index 41463b2c1cb..9ff08538a74 100644 --- a/tfjs-core/src/backends/webgl/kernels/SquaredDifference.ts +++ b/tfjs-core/src/backends/webgl/kernels/SquaredDifference.ts @@ -16,7 +16,7 @@ */ import {env} from '../../../environment'; -import {SquaredDifference, SquaredDifferenceInputs} from '../../../kernel_names'; +import {BinaryInputs, SquaredDifference} from '../../../kernel_names'; import {KernelConfig} from '../../../kernel_registry'; import {MathBackendWebGL} from '../backend_webgl'; import {BinaryOpProgram} from '../binaryop_gpu'; @@ -26,7 +26,7 @@ export const squaredDifferenceConfig: KernelConfig = { kernelName: SquaredDifference, backendName: 'webgl', kernelFunc: ({inputs, backend}) => { - const {a, b} = inputs as SquaredDifferenceInputs; + const {a, b} = inputs as BinaryInputs; const SQUARED_DIFFERENCE = 'return (a - b) * (a - b);'; const webGLBackend = backend as MathBackendWebGL; diff --git a/tfjs-core/src/gradients/Div_grad.ts b/tfjs-core/src/gradients/Div_grad.ts new file mode 100644 index 00000000000..c2a3b39da7d --- /dev/null +++ b/tfjs-core/src/gradients/Div_grad.ts @@ -0,0 +1,49 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + +import {Div} from '../kernel_names'; +import {GradConfig} from '../kernel_registry'; +import * as broadcast_util from '../ops/broadcast_util'; +import {Tensor} from '../tensor'; + +export const divGradConfig: GradConfig = { + kernelName: Div, + inputsToSave: ['a', 'b'], + gradFunc: (dy: Tensor, saved: Tensor[]) => { + const [$a, $b] = saved; + const outShape = + broadcast_util.assertAndGetBroadcastShape($a.shape, $b.shape); + const derA = () => { + const res = dy.div($b.toFloat()); + const reduceAxes = broadcast_util.getReductionAxes($a.shape, outShape); + if (reduceAxes.length > 0) { + return res.sum(reduceAxes).reshape($a.shape); + } + return res; + }; + const derB = () => { + let res = dy.mul($a.toFloat()); + const reduceAxes = broadcast_util.getReductionAxes($b.shape, outShape); + if (reduceAxes.length > 0) { + res = res.sum(reduceAxes).reshape($b.shape); + } + const tmp = $b.square(); + return res.div(tmp.toFloat()).neg(); + }; + return {a: derA, b: derB}; + } +}; diff --git a/tfjs-core/src/kernel_names.ts b/tfjs-core/src/kernel_names.ts index 4b2ce190ef9..2e8ff2e2e8b 100644 --- a/tfjs-core/src/kernel_names.ts +++ b/tfjs-core/src/kernel_names.ts @@ -21,8 +21,11 @@ import {NamedTensorInfoMap} from './kernel_registry'; import {PixelData} from './types'; +export type BinaryInputs = Pick; + +export const Div = 'Div'; + export const SquaredDifference = 'SquaredDifference'; -export type SquaredDifferenceInputs = Pick; export const Square = 'Square'; export type SquareInputs = Pick; diff --git a/tfjs-core/src/ops/binary_ops.ts b/tfjs-core/src/ops/binary_ops.ts index 9aea464bea4..ff04e0dbe40 100644 --- a/tfjs-core/src/ops/binary_ops.ts +++ b/tfjs-core/src/ops/binary_ops.ts @@ -378,71 +378,6 @@ function mulStrict_(a: T|TensorLike, b: T|TensorLike): T { return $a.mul($b); } -/** - * Divides two `tf.Tensor`s element-wise, A / B. Supports broadcasting. - * - * We also expose `tf.divStrict` which has the same signature as this op and - * asserts that `a` and `b` are the same shape (does not broadcast). - * - * ```js - * const a = tf.tensor1d([1, 4, 9, 16]); - * const b = tf.tensor1d([1, 2, 3, 4]); - * - * a.div(b).print(); // or tf.div(a, b) - * ``` - * - * ```js - * // Broadcast div a with b. - * const a = tf.tensor1d([2, 4, 6, 8]); - * const b = tf.scalar(2); - * - * a.div(b).print(); // or tf.div(a, b) - * ``` - * - * @param a The first tensor as the numerator. - * @param b The second tensor as the denominator. Must have the same dtype as - * `a`. 
- */ -/** @doc {heading: 'Operations', subheading: 'Arithmetic'} */ -function div_(a: Tensor|TensorLike, b: Tensor|TensorLike): T { - let $a = convertToTensor(a, 'a', 'div'); - let $b = convertToTensor(b, 'b', 'div'); - [$a, $b] = makeTypesMatch($a, $b); - - if ($a.dtype === 'int32' && $b.dtype === 'int32') { - return floorDiv($a, $b); - } - - const outShape = - broadcast_util.assertAndGetBroadcastShape($a.shape, $b.shape); - const der = (dy: Tensor, saved: Tensor[]) => { - const [$a, $b] = saved; - const derA = () => { - const res = dy.div($b.toFloat()); - const reduceAxes = broadcast_util.getReductionAxes($a.shape, outShape); - if (reduceAxes.length > 0) { - return res.sum(reduceAxes).reshape($a.shape); - } - return res; - }; - const derB = () => { - let res = dy.mul($a.toFloat()); - const reduceAxes = broadcast_util.getReductionAxes($b.shape, outShape); - if (reduceAxes.length > 0) { - res = res.sum(reduceAxes).reshape($b.shape); - } - const tmp = $b.square(); - return res.div(tmp.toFloat()).neg(); - }; - return {a: derA, b: derB}; - }; - return ENGINE.runKernelFunc((backend, save) => { - const res = backend.realDivide($a, $b); - save([$a, $b]); - return res; - }, {a: $a, b: $b}, der, 'Div') as T; -} - /** * Divides two `tf.Tensor`s element-wise, A / B. Supports broadcasting. Return 0 * if denominator is 0. @@ -480,7 +415,7 @@ function divNoNan_( let $b = convertToTensor(b, 'b', 'div'); [$a, $b] = makeTypesMatch($a, $b); - const divResult = div($a, $b); + const divResult = $a.div($b); const zeros = zerosLike(divResult); const bEqualsZero = $b.equal(zeros); return where(bEqualsZero, zeros, divResult) as T; @@ -841,7 +776,6 @@ export const add = op({add_}); export const addN = op({addN_}); export const addStrict = op({addStrict_}); export const atan2 = op({atan2_}); -export const div = op({div_}); export const divNoNan = op({divNoNan_}); export const divStrict = op({divStrict_}); export const floorDiv = op({floorDiv_}); diff --git a/tfjs-core/src/ops/div.ts b/tfjs-core/src/ops/div.ts new file mode 100644 index 00000000000..05c19b5967a --- /dev/null +++ b/tfjs-core/src/ops/div.ts @@ -0,0 +1,105 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + +import {ENGINE, ForwardFunc} from '../engine'; +import {BinaryInputs, Div} from '../kernel_names'; +import {Tensor} from '../tensor'; +import {NamedTensorMap} from '../tensor_types'; +import {makeTypesMatch} from '../tensor_util'; +import {convertToTensor} from '../tensor_util_env'; +import {TensorLike} from '../types'; + +import {floorDiv} from './binary_ops'; +import * as broadcast_util from './broadcast_util'; +import {op} from './operation'; + +/** + * Divides two `tf.Tensor`s element-wise, A / B. Supports broadcasting. + * + * We also expose `tf.divStrict` which has the same signature as this op and + * asserts that `a` and `b` are the same shape (does not broadcast). 
+ * + * ```js + * const a = tf.tensor1d([1, 4, 9, 16]); + * const b = tf.tensor1d([1, 2, 3, 4]); + * + * a.div(b).print(); // or tf.div(a, b) + * ``` + * + * ```js + * // Broadcast div a with b. + * const a = tf.tensor1d([2, 4, 6, 8]); + * const b = tf.scalar(2); + * + * a.div(b).print(); // or tf.div(a, b) + * ``` + * + * @param a The first tensor as the numerator. + * @param b The second tensor as the denominator. Must have the same dtype as + * `a`. + */ +/** @doc {heading: 'Operations', subheading: 'Arithmetic'} */ +function div_(a: Tensor|TensorLike, b: Tensor|TensorLike): T { + let $a = convertToTensor(a, 'a', 'div'); + let $b = convertToTensor(b, 'b', 'div'); + [$a, $b] = makeTypesMatch($a, $b); + + if ($a.dtype === 'int32' && $b.dtype === 'int32') { + return floorDiv($a, $b); + } + + const outShape = + broadcast_util.assertAndGetBroadcastShape($a.shape, $b.shape); + const der = (dy: Tensor, saved: Tensor[]) => { + const [$a, $b] = saved; + const derA = () => { + const res = dy.div($b.toFloat()); + const reduceAxes = broadcast_util.getReductionAxes($a.shape, outShape); + if (reduceAxes.length > 0) { + return res.sum(reduceAxes).reshape($a.shape); + } + return res; + }; + const derB = () => { + let res = dy.mul($a.toFloat()); + const reduceAxes = broadcast_util.getReductionAxes($b.shape, outShape); + if (reduceAxes.length > 0) { + res = res.sum(reduceAxes).reshape($b.shape); + } + const tmp = $b.square(); + return res.div(tmp.toFloat()).neg(); + }; + return {a: derA, b: derB}; + }; + + const forward: ForwardFunc = (backend, save) => { + const res = backend.realDivide($a, $b); + save([$a, $b]); + return res; + }; + + const inputs: BinaryInputs = {a: $a, b: $b}; + const attrs = {}; + const inputsToSave = [$a, $b]; + const outputsToSave: boolean[] = []; + + return ENGINE.runKernelFunc( + forward, inputs as {} as NamedTensorMap, der, Div, attrs, + inputsToSave, outputsToSave) as T; +} + +export const div = op({div_}); diff --git a/tfjs-core/src/ops/ops.ts b/tfjs-core/src/ops/ops.ts index 720d02bc6a0..7355d15ba89 100644 --- a/tfjs-core/src/ops/ops.ts +++ b/tfjs-core/src/ops/ops.ts @@ -17,6 +17,7 @@ // Modularized ops. export {broadcastTo} from './broadcast_to'; +export {div} from './div'; export {square} from './square'; export {squaredDifference} from './squared_difference'; diff --git a/tfjs-core/src/ops/squared_difference.ts b/tfjs-core/src/ops/squared_difference.ts index 0653191326c..264fdb2d9f9 100644 --- a/tfjs-core/src/ops/squared_difference.ts +++ b/tfjs-core/src/ops/squared_difference.ts @@ -16,7 +16,7 @@ */ import {ENGINE, ForwardFunc} from '../engine'; -import {SquaredDifference, SquaredDifferenceInputs} from '../kernel_names'; +import {BinaryInputs, SquaredDifference} from '../kernel_names'; import {Tensor} from '../tensor'; import {NamedTensorMap} from '../tensor_types'; import {makeTypesMatch} from '../tensor_util'; @@ -27,6 +27,7 @@ import {assertAndGetBroadcastShape} from './broadcast_util'; import {op} from './operation'; import {scalar} from './tensor_ops'; + /** * Returns (a - b) * (a - b) element-wise. * Supports broadcasting. 
@@ -74,7 +75,7 @@ function squaredDifference_( return res; }; - const inputs: SquaredDifferenceInputs = {a: $a, b: $b}; + const inputs: BinaryInputs = {a: $a, b: $b}; const attrs = {}; const inputsToSave = [$a, $b]; diff --git a/tfjs-core/src/register_all_gradients.ts b/tfjs-core/src/register_all_gradients.ts index 5438e897224..8be15b33cbc 100644 --- a/tfjs-core/src/register_all_gradients.ts +++ b/tfjs-core/src/register_all_gradients.ts @@ -15,6 +15,7 @@ * ============================================================================= */ import {broadcastToGradConfig} from './gradients/BroadcastTo_grad'; +import {divGradConfig} from './gradients/Div_grad'; import {squareGradConfig} from './gradients/Square_grad'; import {squaredDifferenceGradConfig} from './gradients/SquaredDifference_grad'; import {GradConfig} from './kernel_registry'; @@ -24,6 +25,7 @@ import {registerGradient} from './kernel_registry'; const gradConfigs: GradConfig[] = [ squareGradConfig, squaredDifferenceGradConfig, + divGradConfig, broadcastToGradConfig, ]; From b159a1fc2ed5353fdda9175888a9a92614dc733f Mon Sep 17 00:00:00 2001 From: Ann Yuan Date: Mon, 23 Mar 2020 09:59:07 -0400 Subject: [PATCH 02/24] webgl --- tfjs-core/src/backends/webgl/backend_webgl.ts | 35 +++++------- tfjs-core/src/backends/webgl/kernels/Div.ts | 55 +++++++++++++++++++ .../backends/webgl/register_all_kernels.ts | 2 + 3 files changed, 71 insertions(+), 21 deletions(-) create mode 100644 tfjs-core/src/backends/webgl/kernels/Div.ts diff --git a/tfjs-core/src/backends/webgl/backend_webgl.ts b/tfjs-core/src/backends/webgl/backend_webgl.ts index f513265c7aa..831c85bcb2c 100644 --- a/tfjs-core/src/backends/webgl/backend_webgl.ts +++ b/tfjs-core/src/backends/webgl/backend_webgl.ts @@ -1372,18 +1372,6 @@ export class MathBackendWebGL extends KernelBackend { return this.reduce(a2D, 'any', a2D.dtype).reshape(outShape); } - realDivide(a: Tensor, b: Tensor): Tensor { - const op = binaryop_gpu.DIV; - const outputDtype = 'float32'; - if (env().getBool('WEBGL_PACK_BINARY_OPERATIONS')) { - const checkOutOfBounds = true; - return this.packedBinaryOp( - a, b, binaryop_packed_gpu.DIV, outputDtype, checkOutOfBounds); - } - const program = new BinaryOpProgram(op, a.shape, b.shape); - return this.compileAndRun(program, [a, b], outputDtype); - } - floorDiv(a: Tensor, b: Tensor): Tensor { const op = binaryop_gpu.INT_DIV; const outputDtype = 'int32'; @@ -1597,7 +1585,7 @@ export class MathBackendWebGL extends KernelBackend { const b = this.exp(a); const sumExp = this.sum(b, axes).reshape(expandedShape); - return this.realDivide(b, sumExp) as T; + return b.div(sumExp); } log(x: T): T { @@ -2440,8 +2428,8 @@ export class MathBackendWebGL extends KernelBackend { const program = new PackProgram(input.shape); const preventEagerUnpackingOutput = true; return this.runWebGLProgram( - program, [input], input.dtype, null /* customSetup */, - preventEagerUnpackingOutput); + program, [input], input.dtype, null /* out info */, + null /* customSetup */, preventEagerUnpackingOutput); } private packedReshape(input: TensorInfo, afterShape: number[]): TensorInfo { @@ -2461,8 +2449,8 @@ export class MathBackendWebGL extends KernelBackend { const program = new ReshapePackedProgram(afterShapeAs3D, input3DShape); const preventEagerUnpackingOfOutput = true; const output = this.runWebGLProgram( - program, [input3D], input.dtype, null /* customSetup */, - preventEagerUnpackingOfOutput); + program, [input3D], input.dtype, null /* out info */, + null /* customSetup */, 
preventEagerUnpackingOfOutput); return {dataId: output.dataId, shape: afterShape, dtype: output.dtype}; } @@ -2480,15 +2468,19 @@ export class MathBackendWebGL extends KernelBackend { const preventEagerUnpackingOfOutput = true; const out = this.runWebGLProgram( program, [{shape: shapeAs3D, dtype, dataId}], dtype, - null /* customSetup */, preventEagerUnpackingOfOutput); + null /* out info */, null /* customSetup */, + preventEagerUnpackingOfOutput); return {dtype, shape, dataId: out.dataId}; } runWebGLProgram( program: GPGPUProgram, inputs: TensorInfo[], outputDtype: DataType, + output?: TensorInfo, customSetup?: (gpgpu: GPGPUContext, webGLProgram: WebGLProgram) => void, preventEagerUnpackingOfOutput = false): TensorInfo { - const output = this.makeTensorInfo(program.outputShape, outputDtype); + if (output == null) { + output = this.makeTensorInfo(program.outputShape, outputDtype); + } const outData = this.texData.get(output.dataId); if (program.packedOutput) { outData.isPacked = true; @@ -2615,7 +2607,7 @@ export class MathBackendWebGL extends KernelBackend { preventEagerUnpackingOfOutput = false): K { outputDtype = outputDtype || inputs[0].dtype; const outInfo = this.runWebGLProgram( - program, inputs, outputDtype, customSetup, + program, inputs, outputDtype, null /* out info */, customSetup, preventEagerUnpackingOfOutput); return ENGINE.makeTensorFromDataId( outInfo.dataId, outInfo.shape, outInfo.dtype) as {} as K; @@ -2741,7 +2733,8 @@ export class MathBackendWebGL extends KernelBackend { // WEBGL_PACK. const preventEagerUnpacking = true; const encodedOutputTarget = this.runWebGLProgram( - program, [tempDenseInputHandle], dtype, null, preventEagerUnpacking); + program, [tempDenseInputHandle], dtype, null /* out info */, + null /* custom setup */, preventEagerUnpacking); // Have the original texture assume the identity of the encoded output. const outputTexData = this.texData.get(encodedOutputTarget.dataId); diff --git a/tfjs-core/src/backends/webgl/kernels/Div.ts b/tfjs-core/src/backends/webgl/kernels/Div.ts new file mode 100644 index 00000000000..8da4db85641 --- /dev/null +++ b/tfjs-core/src/backends/webgl/kernels/Div.ts @@ -0,0 +1,55 @@ +/** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + +import {backend_util} from '../../..'; +import {env} from '../../../environment'; +import {BinaryInputs, Div} from '../../../kernel_names'; +import {KernelConfig, TensorInfo} from '../../../kernel_registry'; +import {MathBackendWebGL} from '../backend_webgl'; +import * as binaryop_gpu from '../binaryop_gpu'; +import {BinaryOpProgram} from '../binaryop_gpu'; +import * as binaryop_packed_gpu from '../binaryop_packed_gpu'; +import {BinaryOpPackedProgram} from '../binaryop_packed_gpu'; + +export const divImpl = + (a: TensorInfo, b: TensorInfo, out: TensorInfo, + backend: MathBackendWebGL): TensorInfo => { + let program = new BinaryOpProgram(binaryop_gpu.DIV, a.shape, b.shape); + if (env().getBool('WEBGL_PACK_BINARY_OPERATIONS')) { + program = new BinaryOpPackedProgram( + binaryop_packed_gpu.DIV, a.shape, b.shape, true); + } + const output = backend.runWebGLProgram(program, [a, b], 'float32', out); + return output; + }; + +export const divConfig: KernelConfig = { + kernelName: Div, + backendName: 'webgl', + kernelFunc: ({inputs, backend}) => { + const {a, b} = inputs as BinaryInputs; + + const webglBackend = backend as MathBackendWebGL; + + const outShape = backend_util.assertAndGetBroadcastShape(a.shape, b.shape); + const outTensorInfo = webglBackend.makeTensorInfo(outShape, a.dtype); + + const out = divImpl(a, b, outTensorInfo, webglBackend); + + return {dataId: out.dataId, shape: out.shape, dtype: out.dtype}; + } +}; diff --git a/tfjs-core/src/backends/webgl/register_all_kernels.ts b/tfjs-core/src/backends/webgl/register_all_kernels.ts index f7913ac6b21..fe04734d2cb 100644 --- a/tfjs-core/src/backends/webgl/register_all_kernels.ts +++ b/tfjs-core/src/backends/webgl/register_all_kernels.ts @@ -16,6 +16,7 @@ */ import {KernelConfig, registerKernel} from '../../kernel_registry'; +import {divConfig} from './kernels/Div'; import {fromPixelsConfig} from './kernels/FromPixels'; import {nonMaxSuppressionV5Config} from './kernels/NonMaxSuppressionV5'; import {squareConfig} from './kernels/Square'; @@ -24,6 +25,7 @@ import {squaredDifferenceConfig} from './kernels/SquaredDifference'; // List all kernel configs here const kernelConfigs: KernelConfig[] = [ fromPixelsConfig, + divConfig, nonMaxSuppressionV5Config, squareConfig, squaredDifferenceConfig, From 86c7451ba2f2ce1a0400072576f9e6dc77fef823 Mon Sep 17 00:00:00 2001 From: Ann Yuan Date: Mon, 23 Mar 2020 10:10:49 -0400 Subject: [PATCH 03/24] add divnonan --- tfjs-core/src/backends/webgl/kernels/Div.ts | 4 +- tfjs-core/src/ops/binary_ops.ts | 45 ------------- tfjs-core/src/ops/divNoNan.ts | 71 +++++++++++++++++++++ tfjs-core/src/ops/ops.ts | 1 + tfjs-core/src/ops/squared_difference.ts | 1 - 5 files changed, 74 insertions(+), 48 deletions(-) create mode 100644 tfjs-core/src/ops/divNoNan.ts diff --git a/tfjs-core/src/backends/webgl/kernels/Div.ts b/tfjs-core/src/backends/webgl/kernels/Div.ts index 8da4db85641..d76911d6f78 100644 --- a/tfjs-core/src/backends/webgl/kernels/Div.ts +++ b/tfjs-core/src/backends/webgl/kernels/Div.ts @@ -15,7 +15,7 @@ * ============================================================================= */ -import {backend_util} from '../../..'; +import {assertAndGetBroadcastShape} from '../../../../src/ops/broadcast_util'; import {env} from '../../../environment'; import {BinaryInputs, Div} from '../../../kernel_names'; import {KernelConfig, TensorInfo} from '../../../kernel_registry'; @@ -45,7 +45,7 @@ export const divConfig: KernelConfig = { 
const webglBackend = backend as MathBackendWebGL; - const outShape = backend_util.assertAndGetBroadcastShape(a.shape, b.shape); + const outShape = assertAndGetBroadcastShape(a.shape, b.shape); const outTensorInfo = webglBackend.makeTensorInfo(outShape, a.dtype); const out = divImpl(a, b, outTensorInfo, webglBackend); diff --git a/tfjs-core/src/ops/binary_ops.ts b/tfjs-core/src/ops/binary_ops.ts index ff04e0dbe40..ac5e0525ce4 100644 --- a/tfjs-core/src/ops/binary_ops.ts +++ b/tfjs-core/src/ops/binary_ops.ts @@ -23,7 +23,6 @@ import {convertToTensor} from '../tensor_util_env'; import {TensorLike} from '../types'; import * as util from '../util'; import * as broadcast_util from './broadcast_util'; -import {where} from './logical_ops'; import {op} from './operation'; import {scalar, zerosLike} from './tensor_ops'; import {neg} from './unary_ops'; @@ -378,49 +377,6 @@ function mulStrict_(a: T|TensorLike, b: T|TensorLike): T { return $a.mul($b); } -/** - * Divides two `tf.Tensor`s element-wise, A / B. Supports broadcasting. Return 0 - * if denominator is 0. - * - * We also expose `tf.divStrict` which has the same signature as this op and - * asserts that `a` and `b` are the same shape (does not broadcast). - * - * ```js - * const a = tf.tensor1d([1, 4, 9, 16]); - * const b = tf.tensor1d([1, 2, 3, 4]); - * const c = tf.tensor1d([0, 0, 0, 0]); - * - * a.divNoNan(b).print(); // or tf.divNoNan(a, b) - * a.divNoNan(c).print(); // or tf.divNoNan(a, c) - * ``` - * - * ```js - * // Broadcast div a with b. - * const a = tf.tensor1d([2, 4, 6, 8]); - * const b = tf.scalar(2); - * const c = tf.scalar(0); - * - * a.divNoNan(b).print(); // or tf.divNoNan(a, b) - * a.divNoNan(c).print(); // or tf.divNoNan(a, c) - * ``` - * - * @param a The first tensor as the numerator. - * @param b The second tensor as the denominator. Must have the same dtype as - * `a`. - */ -/** @doc {heading: 'Operations', subheading: 'Arithmetic'} */ -function divNoNan_( - a: Tensor|TensorLike, b: Tensor|TensorLike): T { - let $a = convertToTensor(a, 'a', 'div'); - let $b = convertToTensor(b, 'b', 'div'); - [$a, $b] = makeTypesMatch($a, $b); - - const divResult = $a.div($b); - const zeros = zerosLike(divResult); - const bEqualsZero = $b.equal(zeros); - return where(bEqualsZero, zeros, divResult) as T; -} - /** * Divides two `tf.Tensor`s element-wise, A / B. Supports broadcasting. * The result is rounded with floor function. @@ -776,7 +732,6 @@ export const add = op({add_}); export const addN = op({addN_}); export const addStrict = op({addStrict_}); export const atan2 = op({atan2_}); -export const divNoNan = op({divNoNan_}); export const divStrict = op({divStrict_}); export const floorDiv = op({floorDiv_}); export const maximum = op({maximum_}); diff --git a/tfjs-core/src/ops/divNoNan.ts b/tfjs-core/src/ops/divNoNan.ts new file mode 100644 index 00000000000..d329c28ec48 --- /dev/null +++ b/tfjs-core/src/ops/divNoNan.ts @@ -0,0 +1,71 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + +import {Tensor} from '../tensor'; +import {makeTypesMatch} from '../tensor_util'; +import {convertToTensor} from '../tensor_util_env'; +import {TensorLike} from '../types'; + +import {div} from './div'; +import {where} from './logical_ops'; +import {op} from './operation'; +import {zerosLike} from './tensor_ops'; + +/** + * Divides two `tf.Tensor`s element-wise, A / B. Supports broadcasting. Return 0 + * if denominator is 0. + * + * We also expose `tf.divStrict` which has the same signature as this op and + * asserts that `a` and `b` are the same shape (does not broadcast). + * + * ```js + * const a = tf.tensor1d([1, 4, 9, 16]); + * const b = tf.tensor1d([1, 2, 3, 4]); + * const c = tf.tensor1d([0, 0, 0, 0]); + * + * a.divNoNan(b).print(); // or tf.divNoNan(a, b) + * a.divNoNan(c).print(); // or tf.divNoNan(a, c) + * ``` + * + * ```js + * // Broadcast div a with b. + * const a = tf.tensor1d([2, 4, 6, 8]); + * const b = tf.scalar(2); + * const c = tf.scalar(0); + * + * a.divNoNan(b).print(); // or tf.divNoNan(a, b) + * a.divNoNan(c).print(); // or tf.divNoNan(a, c) + * ``` + * + * @param a The first tensor as the numerator. + * @param b The second tensor as the denominator. Must have the same dtype as + * `a`. + */ +/** @doc {heading: 'Operations', subheading: 'Arithmetic'} */ +function divNoNan_( + a: Tensor|TensorLike, b: Tensor|TensorLike): T { + let $a = convertToTensor(a, 'a', 'div'); + let $b = convertToTensor(b, 'b', 'div'); + [$a, $b] = makeTypesMatch($a, $b); + + const divResult = div($a, $b); + const zeros = zerosLike(divResult); + const bEqualsZero = $b.equal(zeros); + return where(bEqualsZero, zeros, divResult) as T; +} + +export const divNoNan = op({divNoNan_}); diff --git a/tfjs-core/src/ops/ops.ts b/tfjs-core/src/ops/ops.ts index 7355d15ba89..3e2e08f32bd 100644 --- a/tfjs-core/src/ops/ops.ts +++ b/tfjs-core/src/ops/ops.ts @@ -18,6 +18,7 @@ // Modularized ops. export {broadcastTo} from './broadcast_to'; export {div} from './div'; +export {divNoNan} from './divNoNan'; export {square} from './square'; export {squaredDifference} from './squared_difference'; diff --git a/tfjs-core/src/ops/squared_difference.ts b/tfjs-core/src/ops/squared_difference.ts index 264fdb2d9f9..9a5ecef3641 100644 --- a/tfjs-core/src/ops/squared_difference.ts +++ b/tfjs-core/src/ops/squared_difference.ts @@ -27,7 +27,6 @@ import {assertAndGetBroadcastShape} from './broadcast_util'; import {op} from './operation'; import {scalar} from './tensor_ops'; - /** * Returns (a - b) * (a - b) element-wise. * Supports broadcasting. From 90cb0e647db8539f0b6c2327c294da419788dd60 Mon Sep 17 00:00:00 2001 From: Ann Yuan Date: Mon, 23 Mar 2020 10:18:49 -0400 Subject: [PATCH 04/24] add divnonan --- tfjs-core/src/public/chained_ops/div.ts | 30 ++++++++++++++++++ tfjs-core/src/public/chained_ops/divNoNan.ts | 31 +++++++++++++++++++ .../chained_ops/register_all_chained_ops.ts | 4 ++- 3 files changed, 64 insertions(+), 1 deletion(-) create mode 100644 tfjs-core/src/public/chained_ops/div.ts create mode 100644 tfjs-core/src/public/chained_ops/divNoNan.ts diff --git a/tfjs-core/src/public/chained_ops/div.ts b/tfjs-core/src/public/chained_ops/div.ts new file mode 100644 index 00000000000..5491e286965 --- /dev/null +++ b/tfjs-core/src/public/chained_ops/div.ts @@ -0,0 +1,30 @@ +/** + * @license + * Copyright 2020 Google LLC. 
All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + +import {div} from '../../ops/div'; +import {Tensor} from '../../tensor'; +import {Rank, TensorLike} from '../../types'; + +declare module '../../tensor' { + interface Tensor { + div(b: Tensor|TensorLike): T; + } +} + +Tensor.prototype.div = function(b: Tensor|TensorLike): T { + return div(this, b); +}; diff --git a/tfjs-core/src/public/chained_ops/divNoNan.ts b/tfjs-core/src/public/chained_ops/divNoNan.ts new file mode 100644 index 00000000000..b1fce46b8e0 --- /dev/null +++ b/tfjs-core/src/public/chained_ops/divNoNan.ts @@ -0,0 +1,31 @@ +/** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + +import {divNoNan} from '../../ops/divNoNan'; +import {Tensor} from '../../tensor'; +import {Rank, TensorLike} from '../../types'; + +declare module '../../tensor' { + interface Tensor { + divNoNan(b: Tensor|TensorLike): T; + } +} + +Tensor.prototype.divNoNan = function(b: Tensor| + TensorLike): T { + return divNoNan(this, b); +}; diff --git a/tfjs-core/src/public/chained_ops/register_all_chained_ops.ts b/tfjs-core/src/public/chained_ops/register_all_chained_ops.ts index 3a79a8dd6d9..bf68f168aa0 100644 --- a/tfjs-core/src/public/chained_ops/register_all_chained_ops.ts +++ b/tfjs-core/src/public/chained_ops/register_all_chained_ops.ts @@ -15,5 +15,7 @@ * ============================================================================= */ -import './squared_difference'; import './broadcast_to'; +import './div'; +import './divNoNan'; +import './squared_difference'; From 9a985a7b3f05bb4d8d28082cecc6a4ad37d1fa78 Mon Sep 17 00:00:00 2001 From: Ann Yuan Date: Mon, 23 Mar 2020 10:19:14 -0400 Subject: [PATCH 05/24] test --- .../src/public/chained_ops/register_all_chained_ops_test.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tfjs-core/src/public/chained_ops/register_all_chained_ops_test.ts b/tfjs-core/src/public/chained_ops/register_all_chained_ops_test.ts index 0ce556c9aa0..e111b20a729 100644 --- a/tfjs-core/src/public/chained_ops/register_all_chained_ops_test.ts +++ b/tfjs-core/src/public/chained_ops/register_all_chained_ops_test.ts @@ -24,8 +24,10 @@ import {ALL_ENVS, describeWithFlags} from '../../jasmine_util'; // flexibility to change in future. 
const CHAINED_OPS = [ - 'square', 'broadcastTo', + 'div', + 'divNoNan', + 'square', ]; describeWithFlags('chained ops', ALL_ENVS, () => { From ec37e5786f9ab767f21c3c8c7b118b88bcb8dc45 Mon Sep 17 00:00:00 2001 From: Ann Yuan Date: Mon, 23 Mar 2020 11:02:22 -0400 Subject: [PATCH 06/24] chagne import --- tfjs-core/src/backends/webgl/kernels/Div.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tfjs-core/src/backends/webgl/kernels/Div.ts b/tfjs-core/src/backends/webgl/kernels/Div.ts index d76911d6f78..006ab571572 100644 --- a/tfjs-core/src/backends/webgl/kernels/Div.ts +++ b/tfjs-core/src/backends/webgl/kernels/Div.ts @@ -15,10 +15,10 @@ * ============================================================================= */ -import {assertAndGetBroadcastShape} from '../../../../src/ops/broadcast_util'; import {env} from '../../../environment'; import {BinaryInputs, Div} from '../../../kernel_names'; import {KernelConfig, TensorInfo} from '../../../kernel_registry'; +import {assertAndGetBroadcastShape} from '../../backend_util'; import {MathBackendWebGL} from '../backend_webgl'; import * as binaryop_gpu from '../binaryop_gpu'; import {BinaryOpProgram} from '../binaryop_gpu'; From d2b40bbec10e55c3ef81e680f9e7003ca760fe38 Mon Sep 17 00:00:00 2001 From: Ann Yuan Date: Mon, 23 Mar 2020 12:43:22 -0400 Subject: [PATCH 07/24] binary inputs --- tfjs-core/src/backends/webgl/kernels/Div.ts | 4 ++-- tfjs-core/src/backends/webgl/kernels/SquaredDifference.ts | 4 ++-- tfjs-core/src/kernel_names.ts | 2 ++ tfjs-core/src/ops/div.ts | 5 +++-- tfjs-core/src/ops/squared_difference.ts | 5 +++-- 5 files changed, 12 insertions(+), 8 deletions(-) diff --git a/tfjs-core/src/backends/webgl/kernels/Div.ts b/tfjs-core/src/backends/webgl/kernels/Div.ts index 006ab571572..6aae529eded 100644 --- a/tfjs-core/src/backends/webgl/kernels/Div.ts +++ b/tfjs-core/src/backends/webgl/kernels/Div.ts @@ -16,7 +16,7 @@ */ import {env} from '../../../environment'; -import {BinaryInputs, Div} from '../../../kernel_names'; +import {Div, DivInputs} from '../../../kernel_names'; import {KernelConfig, TensorInfo} from '../../../kernel_registry'; import {assertAndGetBroadcastShape} from '../../backend_util'; import {MathBackendWebGL} from '../backend_webgl'; @@ -41,7 +41,7 @@ export const divConfig: KernelConfig = { kernelName: Div, backendName: 'webgl', kernelFunc: ({inputs, backend}) => { - const {a, b} = inputs as BinaryInputs; + const {a, b} = inputs as DivInputs; const webglBackend = backend as MathBackendWebGL; diff --git a/tfjs-core/src/backends/webgl/kernels/SquaredDifference.ts b/tfjs-core/src/backends/webgl/kernels/SquaredDifference.ts index 9ff08538a74..41463b2c1cb 100644 --- a/tfjs-core/src/backends/webgl/kernels/SquaredDifference.ts +++ b/tfjs-core/src/backends/webgl/kernels/SquaredDifference.ts @@ -16,7 +16,7 @@ */ import {env} from '../../../environment'; -import {BinaryInputs, SquaredDifference} from '../../../kernel_names'; +import {SquaredDifference, SquaredDifferenceInputs} from '../../../kernel_names'; import {KernelConfig} from '../../../kernel_registry'; import {MathBackendWebGL} from '../backend_webgl'; import {BinaryOpProgram} from '../binaryop_gpu'; @@ -26,7 +26,7 @@ export const squaredDifferenceConfig: KernelConfig = { kernelName: SquaredDifference, backendName: 'webgl', kernelFunc: ({inputs, backend}) => { - const {a, b} = inputs as BinaryInputs; + const {a, b} = inputs as SquaredDifferenceInputs; const SQUARED_DIFFERENCE = 'return (a - b) * (a - b);'; const webGLBackend = backend as MathBackendWebGL; 
diff --git a/tfjs-core/src/kernel_names.ts b/tfjs-core/src/kernel_names.ts index 2e8ff2e2e8b..c25e2e1f624 100644 --- a/tfjs-core/src/kernel_names.ts +++ b/tfjs-core/src/kernel_names.ts @@ -24,8 +24,10 @@ import {PixelData} from './types'; export type BinaryInputs = Pick; export const Div = 'Div'; +export type DivInputs = BinaryInputs; export const SquaredDifference = 'SquaredDifference'; +export type SquaredDifferenceInputs = BinaryInputs; export const Square = 'Square'; export type SquareInputs = Pick; diff --git a/tfjs-core/src/ops/div.ts b/tfjs-core/src/ops/div.ts index 05c19b5967a..cd48cbeaab8 100644 --- a/tfjs-core/src/ops/div.ts +++ b/tfjs-core/src/ops/div.ts @@ -16,7 +16,7 @@ */ import {ENGINE, ForwardFunc} from '../engine'; -import {BinaryInputs, Div} from '../kernel_names'; +import {Div, DivInputs} from '../kernel_names'; import {Tensor} from '../tensor'; import {NamedTensorMap} from '../tensor_types'; import {makeTypesMatch} from '../tensor_util'; @@ -27,6 +27,7 @@ import {floorDiv} from './binary_ops'; import * as broadcast_util from './broadcast_util'; import {op} from './operation'; + /** * Divides two `tf.Tensor`s element-wise, A / B. Supports broadcasting. * @@ -92,7 +93,7 @@ function div_(a: Tensor|TensorLike, b: Tensor|TensorLike): T { return res; }; - const inputs: BinaryInputs = {a: $a, b: $b}; + const inputs: DivInputs = {a: $a, b: $b}; const attrs = {}; const inputsToSave = [$a, $b]; const outputsToSave: boolean[] = []; diff --git a/tfjs-core/src/ops/squared_difference.ts b/tfjs-core/src/ops/squared_difference.ts index 9a5ecef3641..c8fc4ada6e6 100644 --- a/tfjs-core/src/ops/squared_difference.ts +++ b/tfjs-core/src/ops/squared_difference.ts @@ -16,7 +16,7 @@ */ import {ENGINE, ForwardFunc} from '../engine'; -import {BinaryInputs, SquaredDifference} from '../kernel_names'; +import {SquaredDifference, SquaredDifferenceInputs} from '../kernel_names'; import {Tensor} from '../tensor'; import {NamedTensorMap} from '../tensor_types'; import {makeTypesMatch} from '../tensor_util'; @@ -27,6 +27,7 @@ import {assertAndGetBroadcastShape} from './broadcast_util'; import {op} from './operation'; import {scalar} from './tensor_ops'; + /** * Returns (a - b) * (a - b) element-wise. * Supports broadcasting. 
@@ -74,7 +75,7 @@ function squaredDifference_( return res; }; - const inputs: BinaryInputs = {a: $a, b: $b}; + const inputs: SquaredDifferenceInputs = {a: $a, b: $b}; const attrs = {}; const inputsToSave = [$a, $b]; From d32569f636bd2f9a723dda00996ec78a7324515d Mon Sep 17 00:00:00 2001 From: Ann Yuan Date: Mon, 23 Mar 2020 12:53:18 -0400 Subject: [PATCH 08/24] avoid public api --- tfjs-core/src/backends/cpu/backend_cpu.ts | 5 +++-- tfjs-core/src/backends/webgl/backend_webgl.ts | 3 ++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/tfjs-core/src/backends/cpu/backend_cpu.ts b/tfjs-core/src/backends/cpu/backend_cpu.ts index 37e9d445806..9d9e9eca22d 100644 --- a/tfjs-core/src/backends/cpu/backend_cpu.ts +++ b/tfjs-core/src/backends/cpu/backend_cpu.ts @@ -19,7 +19,6 @@ import * as seedrandom from 'seedrandom'; import {ENGINE} from '../../engine'; import {env} from '../../environment'; - import {warn} from '../../log'; import * as array_ops_util from '../../ops/array_ops_util'; import * as axis_util from '../../ops/axis_util'; @@ -27,6 +26,7 @@ import * as broadcast_util from '../../ops/broadcast_util'; import {complex, imag, real} from '../../ops/complex_ops'; import * as concat_util from '../../ops/concat_util'; import {Conv2DInfo, Conv3DInfo} from '../../ops/conv_util'; +import {div} from '../../ops/div'; import * as erf_util from '../../ops/erf_util'; import {Activation, FusedBatchMatMulConfig, FusedConv2DConfig} from '../../ops/fused_util'; import * as gather_nd_util from '../../ops/gather_nd_util'; @@ -47,6 +47,7 @@ import {split} from '../split_shared'; import {tile} from '../tile_impl'; import {topkImpl} from '../topk_impl'; import {whereImpl} from '../where_impl'; + import {assertNotComplex} from './cpu_util'; function mapActivation( @@ -385,7 +386,7 @@ export class MathBackendCPU extends KernelBackend { const b = this.exp(a); const sumExp = this.sum(b, axes).reshape(expandedShape); - return b.div(sumExp); + return div(b, sumExp); } subtract(a: Tensor, b: Tensor): Tensor { diff --git a/tfjs-core/src/backends/webgl/backend_webgl.ts b/tfjs-core/src/backends/webgl/backend_webgl.ts index 831c85bcb2c..2dc62e46f58 100644 --- a/tfjs-core/src/backends/webgl/backend_webgl.ts +++ b/tfjs-core/src/backends/webgl/backend_webgl.ts @@ -30,6 +30,7 @@ import * as axis_util from '../../ops/axis_util'; import {complex, imag, real} from '../../ops/complex_ops'; import {computeOutShape} from '../../ops/concat_util'; import {Conv2DInfo, Conv3DInfo} from '../../ops/conv_util'; +import {div} from '../../ops/div'; import {Activation, FusedBatchMatMulConfig, FusedConv2DConfig} from '../../ops/fused_util'; import * as gather_nd_util from '../../ops/gather_nd_util'; import * as reduce_util from '../../ops/reduce_util'; @@ -1585,7 +1586,7 @@ export class MathBackendWebGL extends KernelBackend { const b = this.exp(a); const sumExp = this.sum(b, axes).reshape(expandedShape); - return b.div(sumExp); + return div(b, sumExp); } log(x: T): T { From 5a410dd19ac8d665074cc4c67f01432e3d86e824 Mon Sep 17 00:00:00 2001 From: Ann Yuan Date: Mon, 23 Mar 2020 12:54:48 -0400 Subject: [PATCH 09/24] rename --- tfjs-core/src/backends/cpu/kernels/Div.ts | 4 ++-- tfjs-core/src/backends/cpu/kernels/SquaredDifference.ts | 4 ++-- tfjs-core/src/backends/cpu/utils/kernel_utils.ts | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tfjs-core/src/backends/cpu/kernels/Div.ts b/tfjs-core/src/backends/cpu/kernels/Div.ts index 5483f92f857..76300022d25 100644 --- a/tfjs-core/src/backends/cpu/kernels/Div.ts +++ 
b/tfjs-core/src/backends/cpu/kernels/Div.ts @@ -17,7 +17,7 @@ import {Div} from '../../../kernel_names'; import {createBinaryKernelConfig} from '../utils/kernel_utils'; -import {createBinaryOp} from '../utils/kernel_utils'; +import {createBinaryKernel} from '../utils/kernel_utils'; -export const div = createBinaryOp((a: number, b: number) => a / b); +export const div = createBinaryKernel((a: number, b: number) => a / b); export const divConfig = createBinaryKernelConfig(Div, div); diff --git a/tfjs-core/src/backends/cpu/kernels/SquaredDifference.ts b/tfjs-core/src/backends/cpu/kernels/SquaredDifference.ts index d89a0e71181..5fdb7346e74 100644 --- a/tfjs-core/src/backends/cpu/kernels/SquaredDifference.ts +++ b/tfjs-core/src/backends/cpu/kernels/SquaredDifference.ts @@ -16,10 +16,10 @@ */ import {SquaredDifference} from '../../../kernel_names'; -import {createBinaryOp} from '../utils/kernel_utils'; +import {createBinaryKernel} from '../utils/kernel_utils'; import {createBinaryKernelConfig} from '../utils/kernel_utils'; -const squaredDifferenceImpl = createBinaryOp((aVal, bVal) => { +const squaredDifferenceImpl = createBinaryKernel((aVal, bVal) => { const diff = aVal - bVal; return diff * diff; }); diff --git a/tfjs-core/src/backends/cpu/utils/kernel_utils.ts b/tfjs-core/src/backends/cpu/utils/kernel_utils.ts index f9fa95e9903..dcaa7e682a3 100644 --- a/tfjs-core/src/backends/cpu/utils/kernel_utils.ts +++ b/tfjs-core/src/backends/cpu/utils/kernel_utils.ts @@ -47,7 +47,7 @@ export const createBinaryKernelConfig = } }); -export const createBinaryOp = (op: (a: number, b: number) => number) => +export const createBinaryKernel = (op: (a: number, b: number) => number) => (aShape: number[], bShape: number[], aVals: TypedArray, bVals: TypedArray, dtype: DataType): [TypedArray, number[]] => { const newShape = backend_util.assertAndGetBroadcastShape(aShape, bShape); From d0f5edd8a375fae909da18b397b5b0b066e36357 Mon Sep 17 00:00:00 2001 From: Ann Yuan Date: Mon, 23 Mar 2020 12:59:22 -0400 Subject: [PATCH 10/24] rename --- .../src/backends/cpu/utils/kernel_utils.ts | 133 +++++++++--------- 1 file changed, 68 insertions(+), 65 deletions(-) diff --git a/tfjs-core/src/backends/cpu/utils/kernel_utils.ts b/tfjs-core/src/backends/cpu/utils/kernel_utils.ts index dcaa7e682a3..93de9353c13 100644 --- a/tfjs-core/src/backends/cpu/utils/kernel_utils.ts +++ b/tfjs-core/src/backends/cpu/utils/kernel_utils.ts @@ -23,70 +23,73 @@ import * as util from '../../../util'; import {MathBackendCPU} from '../backend_cpu'; import {assertNotComplex} from '../cpu_util'; -export const createBinaryKernelConfig = - (name: string, - op: ( - aShape: number[], bShape: number[], aVals: TypedArray, - bVals: TypedArray, dtype: DataType) => [TypedArray, number[]]): - KernelConfig => ({ - kernelName: name, - backendName: 'cpu', - kernelFunc: ({inputs, backend}) => { - const {a, b} = inputs as BinaryInputs; - const cpuBackend = backend as MathBackendCPU; - assertNotComplex([a, b], name); - - const aVals = cpuBackend.data.get(a.dataId).values as TypedArray; - const bVals = cpuBackend.data.get(b.dataId).values as TypedArray; - - const [resultData, resultShape] = - op(a.shape, b.shape, aVals, bVals, a.dtype); - - const dataId = cpuBackend.write(resultData, resultShape, a.dtype); - return {dataId, shape: resultShape, dtype: a.dtype}; - } - }); - -export const createBinaryKernel = (op: (a: number, b: number) => number) => - (aShape: number[], bShape: number[], aVals: TypedArray, bVals: TypedArray, - dtype: DataType): [TypedArray, number[]] => { - 
const newShape = backend_util.assertAndGetBroadcastShape(aShape, bShape); - - const resultRank = newShape.length; - const resultStrides = util.computeStrides(newShape); - const resultSize = util.sizeFromShape(newShape); - - const result = - util.getTypedArrayFromDType(dtype as NumericDataType, resultSize); - - const aRank = aShape.length; - const bRank = bShape.length; - - const aStrides = util.computeStrides(aShape); - const bStrides = util.computeStrides(bShape); - - const aBroadcastDims = backend_util.getBroadcastDims(aShape, newShape); - const bBroadcastDims = backend_util.getBroadcastDims(bShape, newShape); - - if (aBroadcastDims.length + bBroadcastDims.length === 0) { - for (let i = 0; i < result.length; ++i) { - result[i] = op(aVals[i % aVals.length], bVals[i % bVals.length]); - } - } else { - for (let i = 0; i < result.length; ++i) { - const loc = util.indexToLoc(i, resultRank, resultStrides); - - const aLoc = loc.slice(-aRank); - aBroadcastDims.forEach(d => aLoc[d] = 0); - const aIndex = util.locToIndex(aLoc, aRank, aStrides); - - const bLoc = loc.slice(-bRank); - bBroadcastDims.forEach(d => bLoc[d] = 0); - const bIndex = util.locToIndex(bLoc, bRank, bStrides); - - result[i] = op(aVals[aIndex], bVals[bIndex]); - } +export function createBinaryKernelConfig( + name: string, + op: ( + aShape: number[], bShape: number[], aVals: TypedArray, + bVals: TypedArray, + dtype: DataType) => [TypedArray, number[]]): KernelConfig { + return { + kernelName: name, + backendName: 'cpu', + kernelFunc: ({inputs, backend}) => { + const {a, b} = inputs as BinaryInputs; + const cpuBackend = backend as MathBackendCPU; + assertNotComplex([a, b], name); + + const aVals = cpuBackend.data.get(a.dataId).values as TypedArray; + const bVals = cpuBackend.data.get(b.dataId).values as TypedArray; + + const [resultData, resultShape] = + op(a.shape, b.shape, aVals, bVals, a.dtype); + + const dataId = cpuBackend.write(resultData, resultShape, a.dtype); + return {dataId, shape: resultShape, dtype: a.dtype}; + } + }; +} + +export function createBinaryKernel(op: (a: number, b: number) => number) { + return (aShape: number[], bShape: number[], aVals: TypedArray, + bVals: TypedArray, dtype: DataType): [TypedArray, number[]] => { + const newShape = backend_util.assertAndGetBroadcastShape(aShape, bShape); + + const resultRank = newShape.length; + const resultStrides = util.computeStrides(newShape); + const resultSize = util.sizeFromShape(newShape); + + const result = + util.getTypedArrayFromDType(dtype as NumericDataType, resultSize); + + const aRank = aShape.length; + const bRank = bShape.length; + + const aStrides = util.computeStrides(aShape); + const bStrides = util.computeStrides(bShape); + + const aBroadcastDims = backend_util.getBroadcastDims(aShape, newShape); + const bBroadcastDims = backend_util.getBroadcastDims(bShape, newShape); + + if (aBroadcastDims.length + bBroadcastDims.length === 0) { + for (let i = 0; i < result.length; ++i) { + result[i] = op(aVals[i % aVals.length], bVals[i % bVals.length]); } + } else { + for (let i = 0; i < result.length; ++i) { + const loc = util.indexToLoc(i, resultRank, resultStrides); - return [result, newShape]; - }; + const aLoc = loc.slice(-aRank); + aBroadcastDims.forEach(d => aLoc[d] = 0); + const aIndex = util.locToIndex(aLoc, aRank, aStrides); + + const bLoc = loc.slice(-bRank); + bBroadcastDims.forEach(d => bLoc[d] = 0); + const bIndex = util.locToIndex(bLoc, bRank, bStrides); + + result[i] = op(aVals[aIndex], bVals[bIndex]); + } + } + + return [result, newShape]; + }; +} 
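
Note: below is a minimal sketch of how the shared CPU helpers above compose into a registerable kernel, mirroring the `Div` and `SquaredDifference` kernels in this series. The helper names match this commit (`createBinaryKernel` is renamed to `createBinaryKernelImpl` in a later patch); the `Mul` kernel name and the relative import path are hypothetical and shown only for illustration.

```ts
// Sketch only: hypothetical `Mul` CPU kernel built from the shared helpers.
// `createBinaryKernel` lifts a scalar function into a broadcasting
// implementation over TypedArrays; `createBinaryKernelConfig` wraps that
// implementation into a KernelConfig that reads the input TypedArrays from
// the CPU backend, runs the op, and writes the broadcasted result back.
import {createBinaryKernel, createBinaryKernelConfig} from '../utils/kernel_utils';

const mulImpl = createBinaryKernel((aVal: number, bVal: number) => aVal * bVal);

export const mulConfig = createBinaryKernelConfig('Mul', mulImpl);

// The resulting config would then be registered alongside the others in
// register_all_kernels.ts via `registerKernel(mulConfig)`.
```
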
From 66ac6568a35940bb8984d1d254960598fbafabfd Mon Sep 17 00:00:00 2001 From: Ann Yuan Date: Mon, 23 Mar 2020 13:15:21 -0400 Subject: [PATCH 11/24] pr comments --- tfjs-core/src/backends/cpu/backend_cpu.ts | 2 ++ tfjs-core/src/backends/cpu/kernels/Div.ts | 4 ++-- tfjs-core/src/backends/cpu/kernels/SquaredDifference.ts | 4 ++-- tfjs-core/src/backends/cpu/utils/kernel_utils.ts | 2 +- tfjs-core/src/backends/webgl/backend_webgl.ts | 2 ++ 5 files changed, 9 insertions(+), 5 deletions(-) diff --git a/tfjs-core/src/backends/cpu/backend_cpu.ts b/tfjs-core/src/backends/cpu/backend_cpu.ts index 9d9e9eca22d..6880be3269b 100644 --- a/tfjs-core/src/backends/cpu/backend_cpu.ts +++ b/tfjs-core/src/backends/cpu/backend_cpu.ts @@ -386,6 +386,8 @@ export class MathBackendCPU extends KernelBackend { const b = this.exp(a); const sumExp = this.sum(b, axes).reshape(expandedShape); + // TODO(annxingyuan): Call divImpl rather than op as part of softmax kernel + // modularization. return div(b, sumExp); } diff --git a/tfjs-core/src/backends/cpu/kernels/Div.ts b/tfjs-core/src/backends/cpu/kernels/Div.ts index 76300022d25..e5ccc990cee 100644 --- a/tfjs-core/src/backends/cpu/kernels/Div.ts +++ b/tfjs-core/src/backends/cpu/kernels/Div.ts @@ -17,7 +17,7 @@ import {Div} from '../../../kernel_names'; import {createBinaryKernelConfig} from '../utils/kernel_utils'; -import {createBinaryKernel} from '../utils/kernel_utils'; +import {createBinaryKernelImpl} from '../utils/kernel_utils'; -export const div = createBinaryKernel((a: number, b: number) => a / b); +export const div = createBinaryKernelImpl((a: number, b: number) => a / b); export const divConfig = createBinaryKernelConfig(Div, div); diff --git a/tfjs-core/src/backends/cpu/kernels/SquaredDifference.ts b/tfjs-core/src/backends/cpu/kernels/SquaredDifference.ts index 5fdb7346e74..f7ad0205821 100644 --- a/tfjs-core/src/backends/cpu/kernels/SquaredDifference.ts +++ b/tfjs-core/src/backends/cpu/kernels/SquaredDifference.ts @@ -16,10 +16,10 @@ */ import {SquaredDifference} from '../../../kernel_names'; -import {createBinaryKernel} from '../utils/kernel_utils'; +import {createBinaryKernelImpl} from '../utils/kernel_utils'; import {createBinaryKernelConfig} from '../utils/kernel_utils'; -const squaredDifferenceImpl = createBinaryKernel((aVal, bVal) => { +const squaredDifferenceImpl = createBinaryKernelImpl((aVal, bVal) => { const diff = aVal - bVal; return diff * diff; }); diff --git a/tfjs-core/src/backends/cpu/utils/kernel_utils.ts b/tfjs-core/src/backends/cpu/utils/kernel_utils.ts index 93de9353c13..68279392173 100644 --- a/tfjs-core/src/backends/cpu/utils/kernel_utils.ts +++ b/tfjs-core/src/backends/cpu/utils/kernel_utils.ts @@ -49,7 +49,7 @@ export function createBinaryKernelConfig( }; } -export function createBinaryKernel(op: (a: number, b: number) => number) { +export function createBinaryKernelImpl(op: (a: number, b: number) => number) { return (aShape: number[], bShape: number[], aVals: TypedArray, bVals: TypedArray, dtype: DataType): [TypedArray, number[]] => { const newShape = backend_util.assertAndGetBroadcastShape(aShape, bShape); diff --git a/tfjs-core/src/backends/webgl/backend_webgl.ts b/tfjs-core/src/backends/webgl/backend_webgl.ts index 2dc62e46f58..aa86014a98e 100644 --- a/tfjs-core/src/backends/webgl/backend_webgl.ts +++ b/tfjs-core/src/backends/webgl/backend_webgl.ts @@ -1586,6 +1586,8 @@ export class MathBackendWebGL extends KernelBackend { const b = this.exp(a); const sumExp = this.sum(b, axes).reshape(expandedShape); + // TODO(annxingyuan): Call 
divImpl rather than op as part of softmax kernel + // modularization. return div(b, sumExp); } From 12c457e48df72ebefd1a60ed8922c5fe67a22245 Mon Sep 17 00:00:00 2001 From: Ann Yuan Date: Mon, 23 Mar 2020 17:24:45 -0400 Subject: [PATCH 12/24] remove out tensor --- tfjs-core/src/backends/webgl/kernels/Div.ts | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/tfjs-core/src/backends/webgl/kernels/Div.ts b/tfjs-core/src/backends/webgl/kernels/Div.ts index 6aae529eded..373bb083f6d 100644 --- a/tfjs-core/src/backends/webgl/kernels/Div.ts +++ b/tfjs-core/src/backends/webgl/kernels/Div.ts @@ -18,7 +18,6 @@ import {env} from '../../../environment'; import {Div, DivInputs} from '../../../kernel_names'; import {KernelConfig, TensorInfo} from '../../../kernel_registry'; -import {assertAndGetBroadcastShape} from '../../backend_util'; import {MathBackendWebGL} from '../backend_webgl'; import * as binaryop_gpu from '../binaryop_gpu'; import {BinaryOpProgram} from '../binaryop_gpu'; @@ -26,14 +25,13 @@ import * as binaryop_packed_gpu from '../binaryop_packed_gpu'; import {BinaryOpPackedProgram} from '../binaryop_packed_gpu'; export const divImpl = - (a: TensorInfo, b: TensorInfo, out: TensorInfo, - backend: MathBackendWebGL): TensorInfo => { + (a: TensorInfo, b: TensorInfo, backend: MathBackendWebGL): TensorInfo => { let program = new BinaryOpProgram(binaryop_gpu.DIV, a.shape, b.shape); if (env().getBool('WEBGL_PACK_BINARY_OPERATIONS')) { program = new BinaryOpPackedProgram( binaryop_packed_gpu.DIV, a.shape, b.shape, true); } - const output = backend.runWebGLProgram(program, [a, b], 'float32', out); + const output = backend.runWebGLProgram(program, [a, b], 'float32'); return output; }; @@ -45,10 +43,7 @@ export const divConfig: KernelConfig = { const webglBackend = backend as MathBackendWebGL; - const outShape = assertAndGetBroadcastShape(a.shape, b.shape); - const outTensorInfo = webglBackend.makeTensorInfo(outShape, a.dtype); - - const out = divImpl(a, b, outTensorInfo, webglBackend); + const out = divImpl(a, b, webglBackend); return {dataId: out.dataId, shape: out.shape, dtype: out.dtype}; } From 0720c8220e147f0cf306e2f1ef2469756cc0ceed Mon Sep 17 00:00:00 2001 From: Ann Yuan Date: Mon, 23 Mar 2020 17:26:23 -0400 Subject: [PATCH 13/24] simplify --- tfjs-core/src/backends/webgl/backend_webgl.ts | 21 +++++++------------ 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/tfjs-core/src/backends/webgl/backend_webgl.ts b/tfjs-core/src/backends/webgl/backend_webgl.ts index aa86014a98e..64d29147b21 100644 --- a/tfjs-core/src/backends/webgl/backend_webgl.ts +++ b/tfjs-core/src/backends/webgl/backend_webgl.ts @@ -2431,8 +2431,8 @@ export class MathBackendWebGL extends KernelBackend { const program = new PackProgram(input.shape); const preventEagerUnpackingOutput = true; return this.runWebGLProgram( - program, [input], input.dtype, null /* out info */, - null /* customSetup */, preventEagerUnpackingOutput); + program, [input], input.dtype, null /* customSetup */, + preventEagerUnpackingOutput); } private packedReshape(input: TensorInfo, afterShape: number[]): TensorInfo { @@ -2452,8 +2452,8 @@ export class MathBackendWebGL extends KernelBackend { const program = new ReshapePackedProgram(afterShapeAs3D, input3DShape); const preventEagerUnpackingOfOutput = true; const output = this.runWebGLProgram( - program, [input3D], input.dtype, null /* out info */, - null /* customSetup */, preventEagerUnpackingOfOutput); + program, [input3D], input.dtype, null /* customSetup */, + 
preventEagerUnpackingOfOutput); return {dataId: output.dataId, shape: afterShape, dtype: output.dtype}; } @@ -2471,19 +2471,15 @@ export class MathBackendWebGL extends KernelBackend { const preventEagerUnpackingOfOutput = true; const out = this.runWebGLProgram( program, [{shape: shapeAs3D, dtype, dataId}], dtype, - null /* out info */, null /* customSetup */, - preventEagerUnpackingOfOutput); + null /* customSetup */, preventEagerUnpackingOfOutput); return {dtype, shape, dataId: out.dataId}; } runWebGLProgram( program: GPGPUProgram, inputs: TensorInfo[], outputDtype: DataType, - output?: TensorInfo, customSetup?: (gpgpu: GPGPUContext, webGLProgram: WebGLProgram) => void, preventEagerUnpackingOfOutput = false): TensorInfo { - if (output == null) { - output = this.makeTensorInfo(program.outputShape, outputDtype); - } + const output = this.makeTensorInfo(program.outputShape, outputDtype); const outData = this.texData.get(output.dataId); if (program.packedOutput) { outData.isPacked = true; @@ -2610,7 +2606,7 @@ export class MathBackendWebGL extends KernelBackend { preventEagerUnpackingOfOutput = false): K { outputDtype = outputDtype || inputs[0].dtype; const outInfo = this.runWebGLProgram( - program, inputs, outputDtype, null /* out info */, customSetup, + program, inputs, outputDtype, customSetup, preventEagerUnpackingOfOutput); return ENGINE.makeTensorFromDataId( outInfo.dataId, outInfo.shape, outInfo.dtype) as {} as K; @@ -2736,8 +2732,7 @@ export class MathBackendWebGL extends KernelBackend { // WEBGL_PACK. const preventEagerUnpacking = true; const encodedOutputTarget = this.runWebGLProgram( - program, [tempDenseInputHandle], dtype, null /* out info */, - null /* custom setup */, preventEagerUnpacking); + program, [tempDenseInputHandle], dtype, null, preventEagerUnpacking); // Have the original texture assume the identity of the encoded output. 
const outputTexData = this.texData.get(encodedOutputTarget.dataId); From 32c89e07f8c79bff138cffa9c5a6531bd49c4cf3 Mon Sep 17 00:00:00 2001 From: Ann Yuan Date: Mon, 23 Mar 2020 17:38:34 -0400 Subject: [PATCH 14/24] pr comments --- tfjs-core/src/backends/webgl/kernels/Div.ts | 20 ++++++++++---------- tfjs-core/src/gradients/Div_grad.ts | 21 +++++++++++---------- tfjs-core/src/tensor.ts | 10 ---------- 3 files changed, 21 insertions(+), 30 deletions(-) diff --git a/tfjs-core/src/backends/webgl/kernels/Div.ts b/tfjs-core/src/backends/webgl/kernels/Div.ts index 373bb083f6d..05ce2c2717b 100644 --- a/tfjs-core/src/backends/webgl/kernels/Div.ts +++ b/tfjs-core/src/backends/webgl/kernels/Div.ts @@ -24,16 +24,16 @@ import {BinaryOpProgram} from '../binaryop_gpu'; import * as binaryop_packed_gpu from '../binaryop_packed_gpu'; import {BinaryOpPackedProgram} from '../binaryop_packed_gpu'; -export const divImpl = - (a: TensorInfo, b: TensorInfo, backend: MathBackendWebGL): TensorInfo => { - let program = new BinaryOpProgram(binaryop_gpu.DIV, a.shape, b.shape); - if (env().getBool('WEBGL_PACK_BINARY_OPERATIONS')) { - program = new BinaryOpPackedProgram( - binaryop_packed_gpu.DIV, a.shape, b.shape, true); - } - const output = backend.runWebGLProgram(program, [a, b], 'float32'); - return output; - }; +export function divImpl( + a: TensorInfo, b: TensorInfo, backend: MathBackendWebGL): TensorInfo { + let program = new BinaryOpProgram(binaryop_gpu.DIV, a.shape, b.shape); + if (env().getBool('WEBGL_PACK_BINARY_OPERATIONS')) { + program = new BinaryOpPackedProgram( + binaryop_packed_gpu.DIV, a.shape, b.shape, true); + } + const output = backend.runWebGLProgram(program, [a, b], 'float32'); + return output; +} export const divConfig: KernelConfig = { kernelName: Div, diff --git a/tfjs-core/src/gradients/Div_grad.ts b/tfjs-core/src/gradients/Div_grad.ts index c2a3b39da7d..8b4fc15d985 100644 --- a/tfjs-core/src/gradients/Div_grad.ts +++ b/tfjs-core/src/gradients/Div_grad.ts @@ -18,31 +18,32 @@ import {Div} from '../kernel_names'; import {GradConfig} from '../kernel_registry'; import * as broadcast_util from '../ops/broadcast_util'; +import {div} from '../ops/div'; import {Tensor} from '../tensor'; export const divGradConfig: GradConfig = { kernelName: Div, inputsToSave: ['a', 'b'], gradFunc: (dy: Tensor, saved: Tensor[]) => { - const [$a, $b] = saved; + const [a, b] = saved; const outShape = - broadcast_util.assertAndGetBroadcastShape($a.shape, $b.shape); + broadcast_util.assertAndGetBroadcastShape(a.shape, b.shape); const derA = () => { - const res = dy.div($b.toFloat()); - const reduceAxes = broadcast_util.getReductionAxes($a.shape, outShape); + const res = div(dy, b.toFloat()); + const reduceAxes = broadcast_util.getReductionAxes(a.shape, outShape); if (reduceAxes.length > 0) { - return res.sum(reduceAxes).reshape($a.shape); + return res.sum(reduceAxes).reshape(a.shape); } return res; }; const derB = () => { - let res = dy.mul($a.toFloat()); - const reduceAxes = broadcast_util.getReductionAxes($b.shape, outShape); + let res = dy.mul(a.toFloat()); + const reduceAxes = broadcast_util.getReductionAxes(b.shape, outShape); if (reduceAxes.length > 0) { - res = res.sum(reduceAxes).reshape($b.shape); + res = res.sum(reduceAxes).reshape(b.shape); } - const tmp = $b.square(); - return res.div(tmp.toFloat()).neg(); + const tmp = b.square(); + return div(res, tmp.toFloat()).neg(); }; return {a: derA, b: derB}; } diff --git a/tfjs-core/src/tensor.ts b/tfjs-core/src/tensor.ts index cee26c4cde3..b4606e44bcc 100644 --- 
a/tfjs-core/src/tensor.ts +++ b/tfjs-core/src/tensor.ts @@ -229,8 +229,6 @@ export interface OpHandler { powStrict(base: T, exp: Tensor|TensorLike): T; mul(a: Tensor, b: Tensor|TensorLike): T; mulStrict(a: T, b: T|TensorLike): T; - div(a: Tensor, b: Tensor|TensorLike): T; - divNoNan(a: Tensor, b: Tensor|TensorLike): T; floorDiv(a: Tensor, b: Tensor|TensorLike): T; divStrict(a: T, b: T|TensorLike): T; mod(a: Tensor, b: Tensor|TensorLike): T; @@ -955,14 +953,6 @@ export class Tensor { this.throwIfDisposed(); return opHandler.mulStrict(this, x); } - div(x: Tensor|TensorLike): T { - this.throwIfDisposed(); - return opHandler.div(this, x); - } - divNoNan(x: Tensor|TensorLike): T { - this.throwIfDisposed(); - return opHandler.divNoNan(this, x); - } floorDiv(x: Tensor|TensorLike): T { this.throwIfDisposed(); return opHandler.floorDiv(this, x); From 83d9c7f078448cafc4350b75146e994ffc64c48c Mon Sep 17 00:00:00 2001 From: Ann Yuan Date: Mon, 23 Mar 2020 17:39:48 -0400 Subject: [PATCH 15/24] clean --- tfjs-core/src/ops/div.ts | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tfjs-core/src/ops/div.ts b/tfjs-core/src/ops/div.ts index cd48cbeaab8..e08fb9a587f 100644 --- a/tfjs-core/src/ops/div.ts +++ b/tfjs-core/src/ops/div.ts @@ -95,12 +95,9 @@ function div_(a: Tensor|TensorLike, b: Tensor|TensorLike): T { const inputs: DivInputs = {a: $a, b: $b}; const attrs = {}; - const inputsToSave = [$a, $b]; - const outputsToSave: boolean[] = []; return ENGINE.runKernelFunc( - forward, inputs as {} as NamedTensorMap, der, Div, attrs, - inputsToSave, outputsToSave) as T; + forward, inputs as {} as NamedTensorMap, der, Div, attrs) as T; } export const div = op({div_}); From 15e6c7252a621f5a9744af74ef26de14d43feaec Mon Sep 17 00:00:00 2001 From: Ann Yuan Date: Mon, 23 Mar 2020 17:55:04 -0400 Subject: [PATCH 16/24] lint --- tfjs-core/src/ops/div.ts | 1 - tfjs-core/src/ops/squared_difference.ts | 1 - 2 files changed, 2 deletions(-) diff --git a/tfjs-core/src/ops/div.ts b/tfjs-core/src/ops/div.ts index e08fb9a587f..194202f343a 100644 --- a/tfjs-core/src/ops/div.ts +++ b/tfjs-core/src/ops/div.ts @@ -27,7 +27,6 @@ import {floorDiv} from './binary_ops'; import * as broadcast_util from './broadcast_util'; import {op} from './operation'; - /** * Divides two `tf.Tensor`s element-wise, A / B. Supports broadcasting. * diff --git a/tfjs-core/src/ops/squared_difference.ts b/tfjs-core/src/ops/squared_difference.ts index c8fc4ada6e6..0653191326c 100644 --- a/tfjs-core/src/ops/squared_difference.ts +++ b/tfjs-core/src/ops/squared_difference.ts @@ -27,7 +27,6 @@ import {assertAndGetBroadcastShape} from './broadcast_util'; import {op} from './operation'; import {scalar} from './tensor_ops'; - /** * Returns (a - b) * (a - b) element-wise. * Supports broadcasting. 
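The Div gradient registered in Div_grad.ts above is the quotient rule: for y = a / b, dy/da = 1 / b and dy/db = -a / b^2, with each result summed over the broadcast axes and reshaped back to its input shape. A quick sketch of exercising it through the public API:

import * as tf from '@tensorflow/tfjs-core';

const a = tf.tensor1d([4, 9, 16]);
const b = tf.tensor1d([2, 3, 4]);

// tf.grads(f) returns a function mapping inputs to the gradients of f.
const [da, db] = tf.grads((a: tf.Tensor, b: tf.Tensor) => tf.div(a, b))([a, b]);

da.print();  // 1 / b        -> [0.5, 0.3333, 0.25]
db.print();  // -a / (b * b) -> [-1, -1, -1]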
From 6385e9ec8eaa2fde2f6d5f180fab45ebe9f42018 Mon Sep 17 00:00:00 2001 From: Ann Yuan Date: Tue, 24 Mar 2020 07:00:44 -0400 Subject: [PATCH 17/24] save From 4af9bbc9bf330278bd8497279a61df733923d96d Mon Sep 17 00:00:00 2001 From: Ann Yuan Date: Wed, 25 Mar 2020 10:31:49 -0400 Subject: [PATCH 18/24] separate out --- tfjs-core/src/backends/cpu/kernels/Div.ts | 5 ++- .../src/backends/cpu/kernels/Div_impl.ts | 20 +++++++++++ tfjs-core/src/backends/webgl/kernels/Div.ts | 19 ++-------- .../src/backends/webgl/kernels/Div_impl.ts | 35 +++++++++++++++++++ 4 files changed, 59 insertions(+), 20 deletions(-) create mode 100644 tfjs-core/src/backends/cpu/kernels/Div_impl.ts create mode 100644 tfjs-core/src/backends/webgl/kernels/Div_impl.ts diff --git a/tfjs-core/src/backends/cpu/kernels/Div.ts b/tfjs-core/src/backends/cpu/kernels/Div.ts index e5ccc990cee..d248a487131 100644 --- a/tfjs-core/src/backends/cpu/kernels/Div.ts +++ b/tfjs-core/src/backends/cpu/kernels/Div.ts @@ -17,7 +17,6 @@ import {Div} from '../../../kernel_names'; import {createBinaryKernelConfig} from '../utils/kernel_utils'; -import {createBinaryKernelImpl} from '../utils/kernel_utils'; +import {divImpl} from './Div_impl'; -export const div = createBinaryKernelImpl((a: number, b: number) => a / b); -export const divConfig = createBinaryKernelConfig(Div, div); +export const divConfig = createBinaryKernelConfig(Div, divImpl); diff --git a/tfjs-core/src/backends/cpu/kernels/Div_impl.ts b/tfjs-core/src/backends/cpu/kernels/Div_impl.ts new file mode 100644 index 00000000000..2b4c060d8ff --- /dev/null +++ b/tfjs-core/src/backends/cpu/kernels/Div_impl.ts @@ -0,0 +1,20 @@ +/** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + +import {createBinaryKernelImpl} from '../utils/kernel_utils'; + +export const divImpl = createBinaryKernelImpl((a: number, b: number) => a / b); diff --git a/tfjs-core/src/backends/webgl/kernels/Div.ts b/tfjs-core/src/backends/webgl/kernels/Div.ts index 05ce2c2717b..40f1193bff6 100644 --- a/tfjs-core/src/backends/webgl/kernels/Div.ts +++ b/tfjs-core/src/backends/webgl/kernels/Div.ts @@ -15,25 +15,10 @@ * ============================================================================= */ -import {env} from '../../../environment'; import {Div, DivInputs} from '../../../kernel_names'; -import {KernelConfig, TensorInfo} from '../../../kernel_registry'; +import {KernelConfig} from '../../../kernel_registry'; import {MathBackendWebGL} from '../backend_webgl'; -import * as binaryop_gpu from '../binaryop_gpu'; -import {BinaryOpProgram} from '../binaryop_gpu'; -import * as binaryop_packed_gpu from '../binaryop_packed_gpu'; -import {BinaryOpPackedProgram} from '../binaryop_packed_gpu'; - -export function divImpl( - a: TensorInfo, b: TensorInfo, backend: MathBackendWebGL): TensorInfo { - let program = new BinaryOpProgram(binaryop_gpu.DIV, a.shape, b.shape); - if (env().getBool('WEBGL_PACK_BINARY_OPERATIONS')) { - program = new BinaryOpPackedProgram( - binaryop_packed_gpu.DIV, a.shape, b.shape, true); - } - const output = backend.runWebGLProgram(program, [a, b], 'float32'); - return output; -} +import {divImpl} from './Div_impl'; export const divConfig: KernelConfig = { kernelName: Div, diff --git a/tfjs-core/src/backends/webgl/kernels/Div_impl.ts b/tfjs-core/src/backends/webgl/kernels/Div_impl.ts new file mode 100644 index 00000000000..01a2c145164 --- /dev/null +++ b/tfjs-core/src/backends/webgl/kernels/Div_impl.ts @@ -0,0 +1,35 @@ +/** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + +import {env} from '../../../environment'; +import {TensorInfo} from '../../../kernel_registry'; +import {MathBackendWebGL} from '../backend_webgl'; +import * as binaryop_gpu from '../binaryop_gpu'; +import {BinaryOpProgram} from '../binaryop_gpu'; +import * as binaryop_packed_gpu from '../binaryop_packed_gpu'; +import {BinaryOpPackedProgram} from '../binaryop_packed_gpu'; + +export function divImpl( + a: TensorInfo, b: TensorInfo, backend: MathBackendWebGL): TensorInfo { + let program = new BinaryOpProgram(binaryop_gpu.DIV, a.shape, b.shape); + if (env().getBool('WEBGL_PACK_BINARY_OPERATIONS')) { + program = new BinaryOpPackedProgram( + binaryop_packed_gpu.DIV, a.shape, b.shape, true); + } + const output = backend.runWebGLProgram(program, [a, b], 'float32'); + return output; +} From 3076d8a76d39bb1e83528dcc44c4b1d6a192fdf2 Mon Sep 17 00:00:00 2001 From: Ann Yuan Date: Wed, 25 Mar 2020 10:32:32 -0400 Subject: [PATCH 19/24] modify --- tfjs-core/src/backends/webgl/kernels/Div.ts | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tfjs-core/src/backends/webgl/kernels/Div.ts b/tfjs-core/src/backends/webgl/kernels/Div.ts index 40f1193bff6..0c88c4b9738 100644 --- a/tfjs-core/src/backends/webgl/kernels/Div.ts +++ b/tfjs-core/src/backends/webgl/kernels/Div.ts @@ -28,8 +28,6 @@ export const divConfig: KernelConfig = { const webglBackend = backend as MathBackendWebGL; - const out = divImpl(a, b, webglBackend); - - return {dataId: out.dataId, shape: out.shape, dtype: out.dtype}; + return divImpl(a, b, webglBackend); } }; From 3cb66118ab435d61e9e376b3c2e84b11db5474de Mon Sep 17 00:00:00 2001 From: Ann Yuan Date: Wed, 25 Mar 2020 10:35:02 -0400 Subject: [PATCH 20/24] unchain --- tfjs-core/src/gradients/Div_grad.ts | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/tfjs-core/src/gradients/Div_grad.ts b/tfjs-core/src/gradients/Div_grad.ts index 8b4fc15d985..578f48dbdd0 100644 --- a/tfjs-core/src/gradients/Div_grad.ts +++ b/tfjs-core/src/gradients/Div_grad.ts @@ -19,6 +19,9 @@ import {Div} from '../kernel_names'; import {GradConfig} from '../kernel_registry'; import * as broadcast_util from '../ops/broadcast_util'; import {div} from '../ops/div'; +import {sum} from '../ops/reduction_ops'; +import {square} from '../ops/square'; +import {neg} from '../ops/unary_ops'; import {Tensor} from '../tensor'; export const divGradConfig: GradConfig = { @@ -32,7 +35,7 @@ export const divGradConfig: GradConfig = { const res = div(dy, b.toFloat()); const reduceAxes = broadcast_util.getReductionAxes(a.shape, outShape); if (reduceAxes.length > 0) { - return res.sum(reduceAxes).reshape(a.shape); + return sum(res, reduceAxes).reshape(a.shape); } return res; }; @@ -40,10 +43,10 @@ export const divGradConfig: GradConfig = { let res = dy.mul(a.toFloat()); const reduceAxes = broadcast_util.getReductionAxes(b.shape, outShape); if (reduceAxes.length > 0) { - res = res.sum(reduceAxes).reshape(b.shape); + res = sum(res, reduceAxes).reshape(b.shape); } - const tmp = b.square(); - return div(res, tmp.toFloat()).neg(); + const tmp = square(b); + return neg(div(res, tmp.toFloat())); }; return {a: derA, b: derB}; } From 78ed40f22a03d9f3079fc7b5a12c83dbbe669a42 Mon Sep 17 00:00:00 2001 From: Ann Yuan Date: Wed, 25 Mar 2020 10:36:41 -0400 Subject: [PATCH 21/24] remove der --- tfjs-core/src/ops/div.ts | 28 ++-------------------------- 1 file changed, 2 insertions(+), 26 deletions(-) diff --git 
a/tfjs-core/src/ops/div.ts b/tfjs-core/src/ops/div.ts index 194202f343a..54364f18f05 100644 --- a/tfjs-core/src/ops/div.ts +++ b/tfjs-core/src/ops/div.ts @@ -24,7 +24,6 @@ import {convertToTensor} from '../tensor_util_env'; import {TensorLike} from '../types'; import {floorDiv} from './binary_ops'; -import * as broadcast_util from './broadcast_util'; import {op} from './operation'; /** @@ -62,30 +61,6 @@ function div_(a: Tensor|TensorLike, b: Tensor|TensorLike): T { return floorDiv($a, $b); } - const outShape = - broadcast_util.assertAndGetBroadcastShape($a.shape, $b.shape); - const der = (dy: Tensor, saved: Tensor[]) => { - const [$a, $b] = saved; - const derA = () => { - const res = dy.div($b.toFloat()); - const reduceAxes = broadcast_util.getReductionAxes($a.shape, outShape); - if (reduceAxes.length > 0) { - return res.sum(reduceAxes).reshape($a.shape); - } - return res; - }; - const derB = () => { - let res = dy.mul($a.toFloat()); - const reduceAxes = broadcast_util.getReductionAxes($b.shape, outShape); - if (reduceAxes.length > 0) { - res = res.sum(reduceAxes).reshape($b.shape); - } - const tmp = $b.square(); - return res.div(tmp.toFloat()).neg(); - }; - return {a: derA, b: derB}; - }; - const forward: ForwardFunc = (backend, save) => { const res = backend.realDivide($a, $b); save([$a, $b]); @@ -96,7 +71,8 @@ function div_(a: Tensor|TensorLike, b: Tensor|TensorLike): T { const attrs = {}; return ENGINE.runKernelFunc( - forward, inputs as {} as NamedTensorMap, der, Div, attrs) as T; + forward, inputs as {} as NamedTensorMap, null /* gradient */, Div, + attrs) as T; } export const div = op({div_}); From cd3773243cfefcc2c43d953c357de049832efeba Mon Sep 17 00:00:00 2001 From: Ann Yuan Date: Wed, 25 Mar 2020 10:44:02 -0400 Subject: [PATCH 22/24] divnonan --- tfjs-core/src/ops/{divNoNan.ts => div_no_nan.ts} | 1 + tfjs-core/src/ops/ops.ts | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) rename tfjs-core/src/ops/{divNoNan.ts => div_no_nan.ts} (98%) diff --git a/tfjs-core/src/ops/divNoNan.ts b/tfjs-core/src/ops/div_no_nan.ts similarity index 98% rename from tfjs-core/src/ops/divNoNan.ts rename to tfjs-core/src/ops/div_no_nan.ts index d329c28ec48..b9737316cad 100644 --- a/tfjs-core/src/ops/divNoNan.ts +++ b/tfjs-core/src/ops/div_no_nan.ts @@ -58,6 +58,7 @@ import {zerosLike} from './tensor_ops'; /** @doc {heading: 'Operations', subheading: 'Arithmetic'} */ function divNoNan_( a: Tensor|TensorLike, b: Tensor|TensorLike): T { + // TODO: Make this into its own kernel. 
let $a = convertToTensor(a, 'a', 'div'); let $b = convertToTensor(b, 'b', 'div'); [$a, $b] = makeTypesMatch($a, $b); diff --git a/tfjs-core/src/ops/ops.ts b/tfjs-core/src/ops/ops.ts index 6a99144c066..7f4268fdee2 100644 --- a/tfjs-core/src/ops/ops.ts +++ b/tfjs-core/src/ops/ops.ts @@ -19,7 +19,7 @@ export {broadcastTo} from './broadcast_to'; export {clone} from './clone'; export {div} from './div'; -export {divNoNan} from './divNoNan'; +export {divNoNan} from './div_no_nan'; export {eye} from './eye'; export {multinomial} from './multinomial'; export {oneHot} from './one_hot'; From 24e6ecf0e74cd2bdac0c2eea04831acef43c4cce Mon Sep 17 00:00:00 2001 From: Ann Yuan Date: Wed, 25 Mar 2020 10:47:26 -0400 Subject: [PATCH 23/24] save From 15791ba3d0e147645d05635ad42bea6193b274ac Mon Sep 17 00:00:00 2001 From: Ann Yuan Date: Wed, 25 Mar 2020 14:30:36 -0400 Subject: [PATCH 24/24] rename --- tfjs-core/src/public/chained_ops/{divNoNan.ts => div_no_nan.ts} | 2 +- tfjs-core/src/public/chained_ops/register_all_chained_ops.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) rename tfjs-core/src/public/chained_ops/{divNoNan.ts => div_no_nan.ts} (95%) diff --git a/tfjs-core/src/public/chained_ops/divNoNan.ts b/tfjs-core/src/public/chained_ops/div_no_nan.ts similarity index 95% rename from tfjs-core/src/public/chained_ops/divNoNan.ts rename to tfjs-core/src/public/chained_ops/div_no_nan.ts index b1fce46b8e0..ad09451be0f 100644 --- a/tfjs-core/src/public/chained_ops/divNoNan.ts +++ b/tfjs-core/src/public/chained_ops/div_no_nan.ts @@ -15,7 +15,7 @@ * ============================================================================= */ -import {divNoNan} from '../../ops/divNoNan'; +import {divNoNan} from '../../ops/div_no_nan'; import {Tensor} from '../../tensor'; import {Rank, TensorLike} from '../../types'; diff --git a/tfjs-core/src/public/chained_ops/register_all_chained_ops.ts b/tfjs-core/src/public/chained_ops/register_all_chained_ops.ts index 9ae096baa66..2c4114a7d1b 100644 --- a/tfjs-core/src/public/chained_ops/register_all_chained_ops.ts +++ b/tfjs-core/src/public/chained_ops/register_all_chained_ops.ts @@ -17,7 +17,7 @@ import './broadcast_to'; import './div'; -import './divNoNan'; +import './div_no_nan'; import './squared_difference'; import './tile'; import './one_hot';
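With the chained-op module renamed to div_no_nan, divNoNan remains available both as a standalone op and, once the register_all_chained_ops module shown above has been imported, as a method on Tensor. A small usage sketch:

import * as tf from '@tensorflow/tfjs-core';

const a = tf.tensor1d([1, 2, 3]);
const b = tf.tensor1d([0, 2, 3]);

// divNoNan returns 0 wherever the divisor is 0, instead of Infinity or NaN.
tf.divNoNan(a, b).print();  // [0, 1, 1]
a.divNoNan(b).print();      // same result via the chained method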