diff --git a/tfjs-backend-cpu/src/kernels/Max.ts b/tfjs-backend-cpu/src/kernels/Max.ts index ec6eab28b19..870894a89d7 100644 --- a/tfjs-backend-cpu/src/kernels/Max.ts +++ b/tfjs-backend-cpu/src/kernels/Max.ts @@ -30,7 +30,7 @@ export const maxConfig: KernelConfig = { backendName: 'cpu', kernelFunc: ({inputs, attrs, backend}) => { const {x} = inputs as MaxInputs; - const {reductionIndices} = attrs as {} as MaxAttrs; + const {reductionIndices, keepDims} = attrs as {} as MaxAttrs; const cpuBackend = backend as MathBackendCPU; let xShape = x.shape; const xRank = xShape.length; @@ -60,6 +60,14 @@ export const maxConfig: KernelConfig = { const result = maxImpl(xVals, reduceSize, maxOutShape, x.dtype); const dataId = cpuBackend.write(result, maxOutShape, x.dtype); - return {dataId, shape: maxOutShape, dtype: x.dtype}; + + let outShape = maxOutShape; + if (keepDims) { + // reshape + const newShape = backend_util.expandShapeToKeepDim(maxOutShape, origAxes); + outShape = newShape; + } + + return {dataId, shape: outShape, dtype: x.dtype}; } }; diff --git a/tfjs-backend-wasm/src/kernels/ArgMax.ts b/tfjs-backend-wasm/src/kernels/ArgMax.ts index 5f360e34b7b..b2bf13991ff 100644 --- a/tfjs-backend-wasm/src/kernels/ArgMax.ts +++ b/tfjs-backend-wasm/src/kernels/ArgMax.ts @@ -15,20 +15,13 @@ * ============================================================================= */ -import {KernelFunc, registerKernel, TensorInfo, util} from '@tensorflow/tfjs-core'; +import {ArgMax, ArgMaxAttrs, ArgMaxInputs, KernelFunc, registerKernel, util} from '@tensorflow/tfjs-core'; import {BackendWasm} from '../backend_wasm'; +import {permuteAxesAndTranspose} from './kernel_utils'; import {CppDType} from './types'; -interface ArgMaxInputs { - x: TensorInfo; -} - -interface ArgMaxAttrs { - axis: number; -} - let wasmFunc: ( xId: number, dtype: number, outerSize: number, innerSize: number, outId: number) => void; @@ -45,19 +38,43 @@ function setup(backend: BackendWasm) { function argmax( args: {inputs: 
ArgMaxInputs, backend: BackendWasm, attrs: ArgMaxAttrs}) { - const {inputs: {x}, backend, attrs: {axis}} = args; - const outShape = x.shape.slice(0, -1); - const out = backend.makeOutput(outShape, 'int32'); + const {backend, inputs, attrs} = args; + const {axis} = attrs as {} as ArgMaxAttrs; + const {x} = inputs as {} as ArgMaxInputs; const xId = backend.dataIdMap.get(x.dataId).id; + let inputId = xId; + let input = x; + + const {transposed, axes, inputWasTransposed} = + permuteAxesAndTranspose(x, axis, backend); + + if (inputWasTransposed) { + const transposedId = backend.dataIdMap.get(transposed.dataId).id; + if (transposedId !== xId) { + // transpose was not a no-op. We will need to dispose of this + // once we are done. + input = transposed; + inputId = transposedId; + } + } + + const outShape = input.shape.slice(0, -1); + const out = backend.makeOutput(outShape, 'int32'); const outId = backend.dataIdMap.get(out.dataId).id; const outerSize = util.sizeFromShape(out.shape); - const innerSize = x.shape[axis]; - wasmFunc(xId, CppDType[x.dtype], outerSize, innerSize, outId); + const innerSize = input.shape[axes[0]]; + wasmFunc(inputId, CppDType[input.dtype], outerSize, innerSize, outId); + + if (inputWasTransposed) { + // dispose of the transposed tensor. 
+ backend.disposeData(transposed.dataId); + } + return out; } registerKernel({ - kernelName: 'ArgMax', + kernelName: ArgMax, backendName: 'wasm', kernelFunc: argmax as {} as KernelFunc, setupFunc: setup diff --git a/tfjs-backend-wasm/src/kernels/Max.ts b/tfjs-backend-wasm/src/kernels/Max.ts index 7a32ca0b213..4c7ea86148b 100644 --- a/tfjs-backend-wasm/src/kernels/Max.ts +++ b/tfjs-backend-wasm/src/kernels/Max.ts @@ -15,12 +15,12 @@ * ============================================================================= */ -import {backend_util, registerKernel, TensorInfo, util} from '@tensorflow/tfjs-core'; +import {backend_util, KernelFunc, registerKernel, TensorInfo, util} from '@tensorflow/tfjs-core'; import {Max, MaxAttrs, MaxInputs} from '@tensorflow/tfjs-core'; import {BackendWasm} from '../backend_wasm'; -import {transpose} from './Transpose'; +import {permuteAxesAndTranspose} from './kernel_utils'; let wasmMax: (xId: number, reduceSize: number, outId: number) => void; @@ -29,56 +29,57 @@ function setup(backend: BackendWasm): void { backend.wasm.cwrap('Max', null /*void*/, ['number, number, number']); } -function max(args: {backend: BackendWasm, inputs: {}, attrs: {}}): TensorInfo { +function max(args: {backend: BackendWasm, inputs: MaxInputs, attrs: MaxAttrs}): + TensorInfo { const {backend, inputs, attrs} = args; - const {reductionIndices} = attrs as MaxAttrs; - const {x} = inputs as MaxInputs; + const {reductionIndices: axis, keepDims} = attrs; + const {x} = inputs; const xId = backend.dataIdMap.get(x.dataId).id; - - let xShape = x.shape; - const xRank = x.shape.length; - const xVals = backend.typedArrayFromHeap(x); - - const origAxes = util.parseAxisParam(reductionIndices, xShape); - let axes = origAxes; - const permutedAxes = backend_util.getAxesPermutation(axes, xRank); - const maxInputIsTransposed = permutedAxes != null; - if (maxInputIsTransposed) { - const newShape: number[] = new Array(xRank); - for (let i = 0; i < newShape.length; i++) { - newShape[i] = 
xShape[permutedAxes[i]]; - } - - axes = backend_util.getInnerMostAxes(axes.length, xRank); - - const xTransposed = - transpose({inputs: {x}, attrs: {perm: permutedAxes}, backend}); - - if (backend.dataIdMap.get(xTransposed.dataId).id !== xId) { - // If perm is not no-op. - const xTransposedVals = backend.typedArrayFromHeap(xTransposed); - xVals.set(xTransposedVals, 0); - backend.disposeData(xTransposed.dataId); + let inputId = xId; + let input = x; + + const {transposed, axes, originalAxes, inputWasTransposed} = + permuteAxesAndTranspose(x, axis, backend); + + if (inputWasTransposed) { + const transposedId = backend.dataIdMap.get(transposed.dataId).id; + if (transposedId !== xId) { + // transpose was not a no-op. We will need to dispose of this + // once we are done. + input = transposed; + inputId = transposedId; } - xShape = newShape; } - backend_util.assertAxesAreInnerMostDims('max', axes, xRank); + const inputRank = input.shape.length; + backend_util.assertAxesAreInnerMostDims('max', axes, inputRank); const [outShape, reduceShape] = - backend_util.computeOutAndReduceShapes(xShape, axes); + backend_util.computeOutAndReduceShapes(input.shape, axes); const reduceSize = util.sizeFromShape(reduceShape); const out = backend.makeOutput(outShape, x.dtype); - if (util.sizeFromShape(xShape) === 0) { - return out; + if (util.sizeFromShape(input.shape) !== 0) { + const outId = backend.dataIdMap.get(out.dataId).id; + wasmMax(inputId, reduceSize, outId); } - const outId = backend.dataIdMap.get(out.dataId).id; + if (inputWasTransposed) { + // dispose of the transposed tensor. 
+ backend.disposeData(transposed.dataId); + } - wasmMax(xId, reduceSize, outId); + if (keepDims) { + // reshape + const newShape = backend_util.expandShapeToKeepDim(out.shape, originalAxes); + out.shape = newShape; + } return out; } -registerKernel( - {kernelName: Max, backendName: 'wasm', setupFunc: setup, kernelFunc: max}); +registerKernel({ + kernelName: Max, + backendName: 'wasm', + setupFunc: setup, + kernelFunc: max as {} as KernelFunc +}); diff --git a/tfjs-backend-wasm/src/kernels/Min.ts b/tfjs-backend-wasm/src/kernels/Min.ts index a696d21d773..291b9d54806 100644 --- a/tfjs-backend-wasm/src/kernels/Min.ts +++ b/tfjs-backend-wasm/src/kernels/Min.ts @@ -15,17 +15,11 @@ * ============================================================================= */ -import {backend_util, NamedAttrMap, NamedTensorInfoMap, registerKernel, TensorInfo, util} from '@tensorflow/tfjs-core'; +import {backend_util, KernelFunc, Min, MinAttrs, MinInputs, registerKernel, TensorInfo, util} from '@tensorflow/tfjs-core'; import {BackendWasm} from '../backend_wasm'; -interface MinInputs extends NamedTensorInfoMap { - x: TensorInfo; -} - -interface MinAttrs extends NamedAttrMap { - axes: number[]; -} +import {permuteAxesAndTranspose} from './kernel_utils'; let wasmMin: (xId: number, reduceSize: number, outId: number) => void; @@ -37,29 +31,55 @@ function setup(backend: BackendWasm): void { function min(args: {backend: BackendWasm, inputs: MinInputs, attrs: MinAttrs}): TensorInfo { const {backend, inputs, attrs} = args; - const {axes} = attrs; + const {axis, keepDims} = attrs; const {x} = inputs; const xId = backend.dataIdMap.get(x.dataId).id; + let inputId = xId; + let input = x; + + const {transposed, axes, originalAxes, inputWasTransposed} = + permuteAxesAndTranspose(x, axis, backend); + + if (inputWasTransposed) { + const transposedId = backend.dataIdMap.get(transposed.dataId).id; + if (transposedId !== xId) { + // transpose was not a no-op. 
We will need to dispose of this + // once we are done. + input = transposed; + inputId = transposedId; + } + } + + const inputRank = input.shape.length; - backend_util.assertAxesAreInnerMostDims('min', axes, x.shape.length); + backend_util.assertAxesAreInnerMostDims('min', axes, inputRank); const [outShape, reduceShape] = - backend_util.computeOutAndReduceShapes(x.shape, axes); + backend_util.computeOutAndReduceShapes(input.shape, axes); const reduceSize = util.sizeFromShape(reduceShape); - const out = backend.makeOutput(outShape, x.dtype); - if (util.sizeFromShape(x.shape) === 0) { - return out; + const out = backend.makeOutput(outShape, input.dtype); + if (util.sizeFromShape(input.shape) !== 0) { + const outId = backend.dataIdMap.get(out.dataId).id; + wasmMin(inputId, reduceSize, outId); } - const outId = backend.dataIdMap.get(out.dataId).id; + if (inputWasTransposed) { + // dispose of the transposed tensor. + backend.disposeData(transposed.dataId); + } + + if (keepDims) { + // reshape + const newShape = backend_util.expandShapeToKeepDim(out.shape, originalAxes); + out.shape = newShape; + } - wasmMin(xId, reduceSize, outId); return out; } registerKernel({ - kernelName: 'Min', + kernelName: Min, backendName: 'wasm', setupFunc: setup, - kernelFunc: min + kernelFunc: min as {} as KernelFunc }); diff --git a/tfjs-backend-wasm/src/kernels/Sum.ts b/tfjs-backend-wasm/src/kernels/Sum.ts index 4a043c82504..17183be32fc 100644 --- a/tfjs-backend-wasm/src/kernels/Sum.ts +++ b/tfjs-backend-wasm/src/kernels/Sum.ts @@ -15,17 +15,11 @@ * ============================================================================= */ -import {backend_util, NamedAttrMap, NamedTensorInfoMap, registerKernel, TensorInfo, util} from '@tensorflow/tfjs-core'; +import {backend_util, KernelFunc, registerKernel, Sum, SumAttrs, SumInputs, TensorInfo, util} from '@tensorflow/tfjs-core'; import {BackendWasm} from '../backend_wasm'; -interface SumInputs extends NamedTensorInfoMap { - x: TensorInfo; -} - 
-interface SumAttrs extends NamedAttrMap { - axes: number[]; -} +import {permuteAxesAndTranspose} from './kernel_utils'; let wasmSum: (xId: number, reduceSize: number, outId: number) => void; @@ -37,29 +31,57 @@ function setup(backend: BackendWasm): void { function sum(args: {backend: BackendWasm, inputs: SumInputs, attrs: SumAttrs}): TensorInfo { const {backend, inputs, attrs} = args; - const {axes} = attrs; + const {axis, keepDims} = attrs; const {x} = inputs; const xId = backend.dataIdMap.get(x.dataId).id; + let inputId = xId; + let input = x; - backend_util.assertAxesAreInnerMostDims('sum', axes, x.shape.length); + const {transposed, axes, originalAxes, inputWasTransposed} = + permuteAxesAndTranspose(x, axis, backend); + + let reductionAxes = axes; + if (inputWasTransposed) { + const transposedId = backend.dataIdMap.get(transposed.dataId).id; + if (transposedId !== xId) { + // transpose was not a no-op. We will need to dispose of this + // once we are done. + input = transposed; + inputId = transposedId; + reductionAxes = backend_util.getInnerMostAxes( + reductionAxes.length, input.shape.length); + } + } + + backend_util.assertAxesAreInnerMostDims( + 'sum', reductionAxes, input.shape.length); const [outShape, reduceShape] = - backend_util.computeOutAndReduceShapes(x.shape, axes); + backend_util.computeOutAndReduceShapes(input.shape, reductionAxes); const reduceSize = util.sizeFromShape(reduceShape); - const out = backend.makeOutput(outShape, x.dtype); - if (util.sizeFromShape(x.shape) === 0) { - return out; + const out = backend.makeOutput(outShape, input.dtype); + if (util.sizeFromShape(input.shape) !== 0) { + const outId = backend.dataIdMap.get(out.dataId).id; + wasmSum(inputId, reduceSize, outId); + } + + if (inputWasTransposed) { + // dispose of the transposed tensor. 
+ backend.disposeData(transposed.dataId); } - const outId = backend.dataIdMap.get(out.dataId).id; + if (keepDims) { + // reshape + const newShape = backend_util.expandShapeToKeepDim(out.shape, originalAxes); + out.shape = newShape; + } - wasmSum(xId, reduceSize, outId); return out; } registerKernel({ - kernelName: 'Sum', + kernelName: Sum, backendName: 'wasm', setupFunc: setup, - kernelFunc: sum + kernelFunc: sum as {} as KernelFunc }); diff --git a/tfjs-backend-wasm/src/kernels/kernel_utils.ts b/tfjs-backend-wasm/src/kernels/kernel_utils.ts new file mode 100644 index 00000000000..87b54d21eff --- /dev/null +++ b/tfjs-backend-wasm/src/kernels/kernel_utils.ts @@ -0,0 +1,63 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + +import {backend_util, TensorInfo, util} from '@tensorflow/tfjs-core'; +import {BackendWasm} from '../backend_wasm'; +import {transpose} from './Transpose'; + +/** + * Compute permutation axes and do a transpose if necessary. + * + * Used by reduction ops. 
+ * @param x input TensorInfo + * @param axis reduction axes + * @param backend wasm backend instance + */ +export function permuteAxesAndTranspose( + x: TensorInfo, axis: number|number[], backend: BackendWasm): { + transposed: TensorInfo|null, + axes: number[], + originalAxes: number[], + inputWasTransposed: boolean +} { + const xShape = x.shape; + const xRank = x.shape.length; + + const originalAxes = util.parseAxisParam(axis, xShape); + let axes = originalAxes; + const permutedAxes = backend_util.getAxesPermutation(axes, xRank); + let xTransposed = null; + let inputWasTransposed = false; + if (permutedAxes != null) { + const newShape: number[] = new Array(xRank); + for (let i = 0; i < newShape.length; i++) { + newShape[i] = xShape[permutedAxes[i]]; + } + + axes = backend_util.getInnerMostAxes(axes.length, xRank); + xTransposed = + transpose({inputs: {x}, attrs: {perm: permutedAxes}, backend}); + + const xId = backend.dataIdMap.get(x.dataId).id; + const transposedId = backend.dataIdMap.get(xTransposed.dataId).id; + if (transposedId !== xId) { + inputWasTransposed = true; + } + } + + return {transposed: xTransposed, originalAxes, axes, inputWasTransposed}; +} diff --git a/tfjs-backend-webgl/src/kernels/Max.ts b/tfjs-backend-webgl/src/kernels/Max.ts index 54b193c2ebc..cee66cfbe18 100644 --- a/tfjs-backend-webgl/src/kernels/Max.ts +++ b/tfjs-backend-webgl/src/kernels/Max.ts @@ -29,7 +29,7 @@ export const maxConfig: KernelConfig = { backendName: 'webgl', kernelFunc: ({inputs, attrs, backend}) => { const {x} = inputs as MaxInputs; - const {reductionIndices} = attrs as {} as MaxAttrs; + const {reductionIndices, keepDims} = attrs as {} as MaxAttrs; const webglBackend = backend as MathBackendWebGL; const xRank = x.shape.length; @@ -67,19 +67,25 @@ export const maxConfig: KernelConfig = { const [maxOutShape, reduceShape] = backend_util.computeOutAndReduceShapes(maxInput.shape, axes); + let outShape = maxOutShape; + if (keepDims) { + // rather than reshape at the end, set 
the target shape here. + outShape = backend_util.expandShapeToKeepDim(maxOutShape, origAxes); + } + let out; if (shouldExecuteOnCPU) { const xTexData = webglBackend.texData.get(maxInput.dataId); const values = xTexData.values as TypedArray; const outValues = maxImplCPU( - values, util.sizeFromShape(reduceShape), maxOutShape, x.dtype); + values, util.sizeFromShape(reduceShape), outShape, x.dtype); - out = webglBackend.makeTensorInfo(maxOutShape, x.dtype); + out = webglBackend.makeTensorInfo(outShape, x.dtype); const outData = webglBackend.texData.get(out.dataId); outData.values = outValues; } else { - out = maxImpl(maxInput, reduceShape, maxOutShape, webglBackend); + out = maxImpl(maxInput, reduceShape, outShape, webglBackend); } if (maxInputIsTransposed) { diff --git a/tfjs-core/src/gradients/ArgMax_grad.ts b/tfjs-core/src/gradients/ArgMax_grad.ts new file mode 100644 index 00000000000..12ed1094f8f --- /dev/null +++ b/tfjs-core/src/gradients/ArgMax_grad.ts @@ -0,0 +1,30 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + +import {ArgMax} from '../kernel_names'; +import {GradConfig} from '../kernel_registry'; +import {zerosLike} from '../ops/tensor_ops'; +import {Tensor} from '../tensor'; + +export const argMaxGradConfig: GradConfig = { + kernelName: ArgMax, + inputsToSave: ['x'], + gradFunc: (dy: Tensor, saved: Tensor[]) => { + const [x] = saved; + return {x: () => zerosLike(x)}; + } +}; diff --git a/tfjs-core/src/gradients/ArgMin_grad.ts b/tfjs-core/src/gradients/ArgMin_grad.ts new file mode 100644 index 00000000000..7b9611b99d3 --- /dev/null +++ b/tfjs-core/src/gradients/ArgMin_grad.ts @@ -0,0 +1,30 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + +import {ArgMin} from '../kernel_names'; +import {GradConfig} from '../kernel_registry'; +import {zerosLike} from '../ops/tensor_ops'; +import {Tensor} from '../tensor'; + +export const argMinGradConfig: GradConfig = { + kernelName: ArgMin, + inputsToSave: ['x'], + gradFunc: (dy: Tensor, saved: Tensor[]) => { + const [x] = saved; + return {x: () => zerosLike(x)}; + } +}; diff --git a/tfjs-core/src/gradients/Atan2_grad.ts b/tfjs-core/src/gradients/Atan2_grad.ts index e304abc52fb..fae7f8b6d85 100644 --- a/tfjs-core/src/gradients/Atan2_grad.ts +++ b/tfjs-core/src/gradients/Atan2_grad.ts @@ -21,9 +21,9 @@ import {add} from '../ops/add'; import {assertAndGetBroadcastShape, getReductionAxes} from '../ops/broadcast_util'; import {div} from '../ops/div'; import {mul} from '../ops/mul'; -import {sum} from '../ops/reduction_ops'; import {reshape} from '../ops/reshape'; import {square} from '../ops/square'; +import {sum} from '../ops/sum'; import {neg} from '../ops/unary_ops'; import {Tensor} from '../tensor'; diff --git a/tfjs-core/src/gradients/BroadcastTo_grad.ts b/tfjs-core/src/gradients/BroadcastTo_grad.ts index 3a990f44710..91bdf63d7d5 100644 --- a/tfjs-core/src/gradients/BroadcastTo_grad.ts +++ b/tfjs-core/src/gradients/BroadcastTo_grad.ts @@ -17,7 +17,7 @@ import {BroadcastTo, BroadCastToAttrs} from '../kernel_names'; import {GradConfig, NamedAttrMap} from '../kernel_registry'; -import {sum} from '../ops/reduction_ops'; +import {sum} from '../ops/sum'; import {Tensor} from '../tensor'; export const broadcastToGradConfig: GradConfig = { diff --git a/tfjs-core/src/gradients/Div_grad.ts b/tfjs-core/src/gradients/Div_grad.ts index daa31d854bf..6fe57626df8 100644 --- a/tfjs-core/src/gradients/Div_grad.ts +++ b/tfjs-core/src/gradients/Div_grad.ts @@ -20,9 +20,9 @@ import {GradConfig} from '../kernel_registry'; import * as broadcast_util from '../ops/broadcast_util'; import {div} from 
'../ops/div'; import {mul} from '../ops/mul'; -import {sum} from '../ops/reduction_ops'; import {reshape} from '../ops/reshape'; import {square} from '../ops/square'; +import {sum} from '../ops/sum'; import {neg} from '../ops/unary_ops'; import {Tensor} from '../tensor'; diff --git a/tfjs-core/src/gradients/FusedBatchNorm_grad.ts b/tfjs-core/src/gradients/FusedBatchNorm_grad.ts index 7a32aef95de..9ab2726bfe1 100644 --- a/tfjs-core/src/gradients/FusedBatchNorm_grad.ts +++ b/tfjs-core/src/gradients/FusedBatchNorm_grad.ts @@ -19,9 +19,9 @@ import {GradConfig, NamedAttrMap} from '../kernel_registry'; import {add} from '../ops/add'; import {getReductionAxes} from '../ops/broadcast_util'; import {mul} from '../ops/mul'; -import {sum} from '../ops/reduction_ops'; import {reshape} from '../ops/reshape'; import {sub} from '../ops/sub'; +import {sum} from '../ops/sum'; import {scalar} from '../ops/tensor_ops'; import {tile} from '../ops/tile'; import {rsqrt} from '../ops/unary_ops'; diff --git a/tfjs-core/src/gradients/Max_grad.ts b/tfjs-core/src/gradients/Max_grad.ts index 81a688cbf22..66072bfd59b 100644 --- a/tfjs-core/src/gradients/Max_grad.ts +++ b/tfjs-core/src/gradients/Max_grad.ts @@ -18,11 +18,12 @@ import {Max, MaxAttrs} from '../kernel_names'; import {GradConfig, NamedAttrMap} from '../kernel_registry'; import * as axis_util from '../ops/axis_util'; -import {gradForMinAndMax} from '../ops/reduction_ops_util'; import {transpose} from '../ops/transpose'; import {Tensor} from '../tensor'; import * as util from '../util'; +import {gradForMinAndMax} from './min_max_grad_util'; + export const maxGradConfig: GradConfig = { kernelName: Max, inputsToSave: ['x'], diff --git a/tfjs-core/src/gradients/Min_grad.ts b/tfjs-core/src/gradients/Min_grad.ts new file mode 100644 index 00000000000..a312bf3ac25 --- /dev/null +++ b/tfjs-core/src/gradients/Min_grad.ts @@ -0,0 +1,48 @@ +/** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + +import {Min, MinAttrs} from '../kernel_names'; +import {GradConfig, NamedAttrMap} from '../kernel_registry'; +import * as axis_util from '../ops/axis_util'; +import {transpose} from '../ops/transpose'; +import {Tensor} from '../tensor'; +import * as util from '../util'; + +import {gradForMinAndMax} from './min_max_grad_util'; + +export const minGradConfig: GradConfig = { + kernelName: Min, + inputsToSave: ['x'], + outputsToSave: [true], + gradFunc: (dy: Tensor, saved: Tensor[], attrs: NamedAttrMap) => { + const minAttrs: MinAttrs = attrs as {} as MinAttrs; + const {axis} = minAttrs; + const [x, y] = saved; + const origAxes = util.parseAxisParam(axis, x.shape); + const permutedAxes = axis_util.getAxesPermutation(origAxes, x.rank); + const minGrad = gradForMinAndMax(dy, y, x, origAxes, permutedAxes); + return { + x: () => { + let out = minGrad['x'](); + if (permutedAxes != null) { + out = transpose(out); + } + return out; + } + }; + } +}; diff --git a/tfjs-core/src/gradients/Mod_grad.ts b/tfjs-core/src/gradients/Mod_grad.ts index 150979117fb..b6acd988df8 100644 --- a/tfjs-core/src/gradients/Mod_grad.ts +++ b/tfjs-core/src/gradients/Mod_grad.ts @@ -20,8 +20,8 @@ import {GradConfig} from '../kernel_registry'; import {assertAndGetBroadcastShape, getReductionAxes} from '../ops/broadcast_util'; import {div} from '../ops/div'; import 
{mul} from '../ops/mul'; -import {sum} from '../ops/reduction_ops'; import {reshape} from '../ops/reshape'; +import {sum} from '../ops/sum'; import {floor, neg} from '../ops/unary_ops'; import {Tensor} from '../tensor'; diff --git a/tfjs-core/src/gradients/Multiply_grad.ts b/tfjs-core/src/gradients/Multiply_grad.ts index 2ecdec6bce0..8d2813315d2 100644 --- a/tfjs-core/src/gradients/Multiply_grad.ts +++ b/tfjs-core/src/gradients/Multiply_grad.ts @@ -20,8 +20,8 @@ import {GradConfig} from '../kernel_registry'; import {cast} from '../ops/array_ops'; import {assertAndGetBroadcastShape, getReductionAxes} from '../ops/broadcast_util'; import {mul} from '../ops/mul'; -import {sum} from '../ops/reduction_ops'; import {reshape} from '../ops/reshape'; +import {sum} from '../ops/sum'; import {Tensor} from '../tensor'; export const multiplyGradConfig: GradConfig = { diff --git a/tfjs-core/src/gradients/Pow_grad.ts b/tfjs-core/src/gradients/Pow_grad.ts index 878b10e949a..e0422e65f33 100644 --- a/tfjs-core/src/gradients/Pow_grad.ts +++ b/tfjs-core/src/gradients/Pow_grad.ts @@ -21,9 +21,9 @@ import * as broadcast_util from '../ops/broadcast_util'; import {greater} from '../ops/greater'; import {mul} from '../ops/mul'; import {pow} from '../ops/pow'; -import {sum} from '../ops/reduction_ops'; import {reshape} from '../ops/reshape'; import {sub} from '../ops/sub'; +import {sum} from '../ops/sum'; import {scalar, zerosLike} from '../ops/tensor_ops'; import {log} from '../ops/unary_ops'; import {where} from '../ops/where'; diff --git a/tfjs-core/src/gradients/Prelu_grad.ts b/tfjs-core/src/gradients/Prelu_grad.ts index 71918ef8da1..d9fe69a197a 100644 --- a/tfjs-core/src/gradients/Prelu_grad.ts +++ b/tfjs-core/src/gradients/Prelu_grad.ts @@ -19,8 +19,8 @@ import {GradConfig} from '../kernel_registry'; import {getReductionAxes} from '../ops/broadcast_util'; import {greater} from '../ops/greater'; import {mul} from '../ops/mul'; -import {sum} from '../ops/reduction_ops'; import {reshape} 
from '../ops/reshape'; +import {sum} from '../ops/sum'; import {zerosLike} from '../ops/tensor_ops'; import {where} from '../ops/where'; import {Tensor} from '../tensor'; diff --git a/tfjs-core/src/gradients/Sub_grad.ts b/tfjs-core/src/gradients/Sub_grad.ts index 1c2d4c02d81..6685aa3aab4 100644 --- a/tfjs-core/src/gradients/Sub_grad.ts +++ b/tfjs-core/src/gradients/Sub_grad.ts @@ -17,8 +17,8 @@ import {Sub} from '../kernel_names'; import {GradConfig} from '../kernel_registry'; import * as broadcast_util from '../ops/broadcast_util'; -import {sum} from '../ops/reduction_ops'; import {reshape} from '../ops/reshape'; +import {sum} from '../ops/sum'; import {neg} from '../ops/unary_ops'; import {Tensor} from '../tensor'; diff --git a/tfjs-core/src/gradients/Sum_grad.ts b/tfjs-core/src/gradients/Sum_grad.ts new file mode 100644 index 00000000000..de3a0a41b14 --- /dev/null +++ b/tfjs-core/src/gradients/Sum_grad.ts @@ -0,0 +1,43 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + +import {Sum, SumAttrs} from '../kernel_names'; +import {GradConfig, NamedAttrMap} from '../kernel_registry'; +import {mul} from '../ops/mul'; +import {reshape} from '../ops/reshape'; +import {ones} from '../ops/tensor_ops'; +import {Tensor} from '../tensor'; +import {parseAxisParam} from '../util'; + +export const sumGradConfig: GradConfig = { + kernelName: Sum, + inputsToSave: ['x'], + gradFunc: (dy: Tensor, saved: Tensor[], attrs: NamedAttrMap) => { + const [x] = saved; + const expandedDyShape = x.shape.slice(); + const {axis} = attrs as {} as SumAttrs; + + const axes = parseAxisParam(axis, x.shape); + axes.forEach(axis => { + expandedDyShape[axis] = 1; + }); + const expandedDy = reshape(dy, expandedDyShape); + const derX = mul(expandedDy, ones(x.shape, 'float32')); + + return {x: () => derX}; + } +}; diff --git a/tfjs-core/src/ops/reduction_ops_util.ts b/tfjs-core/src/gradients/min_max_grad_util.ts similarity index 66% rename from tfjs-core/src/ops/reduction_ops_util.ts rename to tfjs-core/src/gradients/min_max_grad_util.ts index cd5e58a0129..b8d01b9e217 100644 --- a/tfjs-core/src/ops/reduction_ops_util.ts +++ b/tfjs-core/src/gradients/min_max_grad_util.ts @@ -15,8 +15,13 @@ * ============================================================================= */ +import {cast} from '../ops/array_ops'; +import * as axis_util from '../ops/axis_util'; +import {equal} from '../ops/equal'; +import {mul} from '../ops/mul'; +import {reshape} from '../ops/reshape'; +import {transpose} from '../ops/transpose'; import {Tensor} from '../tensor'; -import * as axis_util from './axis_util'; /** * Gradient helper function for the min and max operations. 
@@ -24,15 +29,15 @@ import * as axis_util from './axis_util'; export function gradForMinAndMax( dy: T, y: T, xOrig: Tensor, origAxes: number[], permutedAxes: number[]) { if (y.rank < xOrig.rank) { - y = y.reshape(axis_util.expandShapeToKeepDim(y.shape, origAxes)); + y = reshape(y, axis_util.expandShapeToKeepDim(y.shape, origAxes)) as T; } if (dy.rank < xOrig.rank) { - dy = dy.reshape(axis_util.expandShapeToKeepDim(dy.shape, origAxes)); + dy = reshape(dy, axis_util.expandShapeToKeepDim(dy.shape, origAxes)) as T; } return { x: () => { - const dx = dy.mul(xOrig.equal(y).cast(dy.dtype)); - return permutedAxes == null ? dx : dx.transpose(permutedAxes); + const dx = mul(dy, cast(equal(xOrig, y), dy.dtype)); + return permutedAxes == null ? dx : transpose(dx, permutedAxes); } }; } diff --git a/tfjs-core/src/kernel_names.ts b/tfjs-core/src/kernel_names.ts index b705fdb228f..ac5d8e59300 100644 --- a/tfjs-core/src/kernel_names.ts +++ b/tfjs-core/src/kernel_names.ts @@ -43,6 +43,18 @@ export interface AnyAttrs { keepDims: boolean; } +export const ArgMax = 'ArgMax'; +export type ArgMaxInputs = Pick; +export interface ArgMaxAttrs { + axis: number; +} + +export const ArgMin = 'ArgMin'; +export type ArgMinInputs = Pick; +export interface ArgMinAttrs { + axis: number; +} + export const Atan2 = 'Atan2'; export type Atan2Inputs = BinaryInputs; @@ -369,6 +381,20 @@ export interface MaxPoolWithArgmaxAttrs { includeBatchInIndex: boolean; } +export const Mean = 'Mean'; +export type MeanInputs = Pick; +export interface MeanAttrs { + axis: number|number[]; + keepDims: boolean; +} + +export const Min = 'Min'; +export type MinInputs = Pick; +export interface MinAttrs { + axis: number|number[]; + keepDims: boolean; +} + export const Minimum = 'Minimum'; export type MinimumInputs = BinaryInputs; @@ -479,6 +505,13 @@ export type SelectV2Inputs = Pick; export const Selu = 'Selu'; export type SeluInputs = Pick; +export const Sum = 'Sum'; +export type SumInputs = Pick; +export interface SumAttrs { 
+ axis: number|number[]; + keepDims: boolean; +} + export const SpaceToBatchND = 'SpaceToBatchND'; export type SpaceToBatchNDInputs = Pick; export interface SpaceToBatchNDAttrs { diff --git a/tfjs-core/src/ops/arg_max.ts b/tfjs-core/src/ops/arg_max.ts new file mode 100644 index 00000000000..eb0f8219485 --- /dev/null +++ b/tfjs-core/src/ops/arg_max.ts @@ -0,0 +1,76 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + +import {ENGINE, ForwardFunc} from '../engine'; +import {ArgMax, ArgMaxAttrs, ArgMaxInputs} from '../kernel_names'; +import {NamedAttrMap} from '../kernel_registry'; +import {Tensor} from '../tensor'; +import {NamedTensorMap} from '../tensor_types'; +import {convertToTensor} from '../tensor_util_env'; +import {TensorLike} from '../types'; +import * as util from '../util'; + +import * as axis_util from './axis_util'; +import {op} from './operation'; + +/** + * Returns the indices of the maximum values along an `axis`. + * + * The result has the same shape as `input` with the dimension along `axis` + * removed. + * + * ```js + * const x = tf.tensor1d([1, 2, 3]); + * + * x.argMax().print(); // or tf.argMax(x) + * ``` + * + * ```js + * const x = tf.tensor2d([1, 2, 4, 3], [2, 2]); + * + * const axis = 1; + * x.argMax(axis).print(); // or tf.argMax(x, axis) + * ``` + * + * @param x The input tensor. 
+ * @param axis The dimension to reduce. Defaults to 0 (outer-most dimension). + */ +/** @doc {heading: 'Operations', subheading: 'Reduction'} */ +function argMax_(x: Tensor|TensorLike, axis = 0): T { + let $x = convertToTensor(x, 'x', 'argMax'); + + const forward: ForwardFunc = (backend, save) => { + save([$x]); + + let axes = util.parseAxisParam(axis, $x.shape); + const permutedAxes = axis_util.getAxesPermutation(axes, $x.rank); + if (permutedAxes != null) { + $x = $x.transpose(permutedAxes); + axes = axis_util.getInnerMostAxes(axes.length, $x.rank); + } + return backend.argMax($x, axes[0]); + }; + + const inputs: ArgMaxInputs = {x: $x}; + const attrs: ArgMaxAttrs = {axis}; + + return ENGINE.runKernelFunc( + forward, inputs as {} as NamedTensorMap, null /* grad */, ArgMax, + attrs as {} as NamedAttrMap) as T; +} + +export const argMax = op({argMax_}); diff --git a/tfjs-core/src/ops/arg_max_test.ts b/tfjs-core/src/ops/arg_max_test.ts new file mode 100644 index 00000000000..d7c4cd9ec48 --- /dev/null +++ b/tfjs-core/src/ops/arg_max_test.ts @@ -0,0 +1,178 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + +import * as tf from '../index'; +import {ALL_ENVS, describeWithFlags} from '../jasmine_util'; +import {expectArraysClose, expectArraysEqual} from '../test_util'; + +import * as reduce_util from './reduce_util'; + +describeWithFlags('argmax', ALL_ENVS, () => { + it('Tensor1D', async () => { + const a = tf.tensor1d([1, 0, 3, 2]); + const result = tf.argMax(a); + expect(result.dtype).toBe('int32'); + expectArraysEqual(await result.data(), 2); + }); + + it('one value', async () => { + const a = tf.tensor1d([10]); + const result = tf.argMax(a); + expect(result.dtype).toBe('int32'); + expectArraysEqual(await result.data(), 0); + }); + + it('N > than parallelization threshold', async () => { + const n = reduce_util.PARALLELIZE_THRESHOLD * 2; + const values = new Float32Array(n); + for (let i = 0; i < n; i++) { + values[i] = i; + } + const a = tf.tensor1d(values); + const result = tf.argMax(a); + expect(result.dtype).toBe('int32'); + expectArraysEqual(await result.data(), n - 1); + }); + + it('3D, N > than parallelization threshold', async () => { + const n = reduce_util.PARALLELIZE_THRESHOLD * 2; + const values = new Float32Array(n); + for (let i = 0; i < n; i++) { + values[i] = i; + } + const a = tf.tensor3d(values, [1, 1, n]); + const result = tf.argMax(a, -1); + expect(result.dtype).toBe('int32'); + expectArraysEqual(await result.data(), n - 1); + }); + + it('max index corresponds to start of a non-initial window', async () => { + const n = reduce_util.PARALLELIZE_THRESHOLD * 2; + const windowSize = reduce_util.computeOptimalWindowSize(n); + const values = new Float32Array(n); + const index = windowSize * 2; + values[index] = 1; + const a = tf.tensor1d(values); + const result = tf.argMax(a); + expect(result.dtype).toBe('int32'); + expectArraysEqual(await result.data(), index); + }); + + it('5D, max index corresponds to start of a non-initial window', async () => { + const n = 
reduce_util.PARALLELIZE_THRESHOLD * 2; + const windowSize = reduce_util.computeOptimalWindowSize(n); + const values = new Float32Array(n); + const index = windowSize * 2; + values[index] = 1; + const a = tf.tensor5d(values, [1, 1, 1, 1, n]); + const result = tf.argMax(a, -1); + expect(result.dtype).toBe('int32'); + expectArraysEqual(await result.data(), index); + }); + + it('ignores NaNs', async () => { + const a = tf.tensor1d([0, 3, 5, NaN, 3]); + const res = tf.argMax(a); + expect(res.dtype).toBe('int32'); + expectArraysEqual(await res.data(), 2); + }); + + it('2D, no axis specified', async () => { + const a = tf.tensor2d([3, -1, 0, 100, -7, 2], [2, 3]); + expectArraysEqual(await tf.argMax(a).data(), [1, 0, 1]); + }); + + it('4D, no axis specified', async () => { + const a = tf.tensor4d([3, -1, 0, 100, -7, 2], [2, 1, 1, 3]); + expectArraysEqual(await tf.argMax(a).data(), [1, 0, 1]); + }); + + it('2D, axis=0', async () => { + const a = tf.tensor2d([3, -1, 0, 100, -7, 2], [2, 3]); + const r = tf.argMax(a, 0); + + expect(r.shape).toEqual([3]); + expect(r.dtype).toBe('int32'); + expectArraysEqual(await r.data(), [1, 0, 1]); + }); + + it('6D, axis=0', async () => { + const a = tf.tensor6d([3, -1, 0, 100, -7, 2], [2, 1, 1, 1, 1, 3]); + const r = tf.argMax(a, 0); + + expect(r.shape).toEqual([1, 1, 1, 1, 3]); + expect(r.dtype).toBe('int32'); + expectArraysEqual(await r.data(), [1, 0, 1]); + }); + + it('2D, axis=1', async () => { + const a = tf.tensor2d([3, 2, 5, 100, -7, 2], [2, 3]); + const r = tf.argMax(a, 1); + expect(r.dtype).toBe('int32'); + expectArraysEqual(await r.data(), [2, 0]); + }); + + it('2D, axis = -1', async () => { + const a = tf.tensor2d([3, 2, 5, 100, -7, 2], [2, 3]); + const r = tf.argMax(a, -1); + expect(r.dtype).toBe('int32'); + expectArraysEqual(await r.data(), [2, 0]); + }); + + it('throws when passed a non-tensor', () => { + expect(() => tf.argMax({} as tf.Tensor)) + .toThrowError(/Argument 'x' passed to 'argMax' must be a Tensor/); + }); + + 
it('accepts a tensor-like object', async () => { + const result = tf.argMax([1, 0, 3, 2]); + expect(result.dtype).toBe('int32'); + expectArraysEqual(await result.data(), 2); + }); + + it('accepts tensor with bool values', async () => { + const t = tf.tensor1d([0, 1], 'bool'); + const result = tf.argMax(t); + expect(result.dtype).toBe('int32'); + expectArraysEqual(await result.data(), 1); + }); + + it('has gradient', async () => { + const a = tf.tensor2d([3, 2, 5, 100, -7, 2], [2, 3]); + const dy = tf.ones([3], 'float32'); + const da = tf.grad((x: tf.Tensor2D) => tf.argMax(x))(a, dy); + + expect(da.dtype).toBe('float32'); + expect(da.shape).toEqual([2, 3]); + expectArraysClose(await da.data(), [0, 0, 0, 0, 0, 0]); + }); + + it('gradient with clones', async () => { + const a = tf.tensor2d([3, 2, 5, 100, -7, 2], [2, 3]); + const dy = tf.ones([3], 'float32'); + const da = tf.grad((x: tf.Tensor2D) => tf.argMax(x.clone()).clone())(a, dy); + + expect(da.dtype).toBe('float32'); + expect(da.shape).toEqual([2, 3]); + expectArraysClose(await da.data(), [0, 0, 0, 0, 0, 0]); + }); + + it('throws error for string tensor', () => { + expect(() => tf.argMax(['a'])) + .toThrowError(/Argument 'x' passed to 'argMax' must be numeric tensor/); + }); +}); diff --git a/tfjs-core/src/ops/arg_min.ts b/tfjs-core/src/ops/arg_min.ts new file mode 100644 index 00000000000..3714e75a1a8 --- /dev/null +++ b/tfjs-core/src/ops/arg_min.ts @@ -0,0 +1,80 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + +import {ENGINE, ForwardFunc} from '../engine'; +import {ArgMin, ArgMinAttrs, ArgMinInputs} from '../kernel_names'; +import {NamedAttrMap} from '../kernel_registry'; +import {Tensor} from '../tensor'; +import {NamedTensorMap} from '../tensor_types'; +import {convertToTensor} from '../tensor_util_env'; +import {TensorLike} from '../types'; +import * as util from '../util'; + +import * as axis_util from './axis_util'; +import {op} from './operation'; + +/** + * Returns the indices of the minimum values along an `axis`. + * + * The result has the same shape as `input` with the dimension along `axis` + * removed. + * + * ```js + * const x = tf.tensor1d([1, 2, 3]); + * + * x.argMin().print(); // or tf.argMin(x) + * ``` + * + * ```js + * const x = tf.tensor2d([1, 2, 4, 3], [2, 2]); + * + * const axis = 1; + * x.argMin(axis).print(); // or tf.argMin(x, axis) + * ``` + * + * @param x The input tensor. + * @param axis The dimension to reduce. Defaults to 0 (outer-most dimension). 
+ * + */ +/** @doc {heading: 'Operations', subheading: 'Reduction'} */ +function argMin_(x: Tensor|TensorLike, axis = 0): T { + let $x = convertToTensor(x, 'x', 'argMin'); + + const forward: ForwardFunc = (backend, save) => { + save([$x]); + + if (axis == null) { + axis = 0; + } + let axes = util.parseAxisParam(axis, $x.shape); + const permutedAxes = axis_util.getAxesPermutation(axes, $x.rank); + if (permutedAxes != null) { + $x = $x.transpose(permutedAxes); + axes = axis_util.getInnerMostAxes(axes.length, $x.rank); + } + return backend.argMin($x, axes[0]); + }; + + const inputs: ArgMinInputs = {x: $x}; + const attrs: ArgMinAttrs = {axis}; + + return ENGINE.runKernelFunc( + forward, inputs as {} as NamedTensorMap, null /* grad */, ArgMin, + attrs as {} as NamedAttrMap) as T; +} + +export const argMin = op({argMin_}); diff --git a/tfjs-core/src/ops/arg_min_test.ts b/tfjs-core/src/ops/arg_min_test.ts new file mode 100644 index 00000000000..21c1b5c3fa1 --- /dev/null +++ b/tfjs-core/src/ops/arg_min_test.ts @@ -0,0 +1,152 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + +import * as tf from '../index'; +import {ALL_ENVS, describeWithFlags} from '../jasmine_util'; +import {expectArraysClose, expectArraysEqual} from '../test_util'; + +import * as reduce_util from './reduce_util'; + +describeWithFlags('argmin', ALL_ENVS, () => { + it('Tensor1D', async () => { + const a = tf.tensor1d([1, 0, 3, 2]); + const result = tf.argMin(a); + expectArraysEqual(await result.data(), 1); + }); + + it('one value', async () => { + const a = tf.tensor1d([10]); + const result = tf.argMin(a); + expectArraysEqual(await result.data(), 0); + }); + + it('N > than parallelization threshold', async () => { + const n = reduce_util.PARALLELIZE_THRESHOLD * 2; + const values = new Float32Array(n); + for (let i = 0; i < n; i++) { + values[i] = n - i; + } + const a = tf.tensor1d(values); + const result = tf.argMin(a); + expect(result.dtype).toBe('int32'); + expectArraysEqual(await result.data(), n - 1); + }); + + it('4D, N > than parallelization threshold', async () => { + const n = reduce_util.PARALLELIZE_THRESHOLD * 2; + const values = new Float32Array(n); + for (let i = 0; i < n; i++) { + values[i] = n - i; + } + const a = tf.tensor4d(values, [1, 1, 1, n]); + const result = tf.argMin(a, -1); + expect(result.dtype).toBe('int32'); + expectArraysEqual(await result.data(), n - 1); + }); + + it('min index corresponds to start of a non-initial window', async () => { + const n = reduce_util.PARALLELIZE_THRESHOLD * 2; + const windowSize = reduce_util.computeOptimalWindowSize(n); + const values = new Float32Array(n); + const index = windowSize * 2; + values[index] = -1; + const a = tf.tensor1d(values); + const result = tf.argMin(a); + expect(result.dtype).toBe('int32'); + expectArraysEqual(await result.data(), index); + }); + + it('ignores NaNs', async () => { + const a = tf.tensor1d([5, 0, NaN, -1, 3]); + const res = tf.argMin(a); + expectArraysEqual(await res.data(), 3); + }); + + 
it('3D, ignores NaNs', async () => { + const a = tf.tensor3d([5, 0, NaN, -1, 3], [1, 1, 5]); + const res = tf.argMin(a, -1); + expectArraysEqual(await res.data(), 3); + }); + + it('2D, no axis specified', async () => { + const a = tf.tensor2d([3, -1, 0, 100, -7, 2], [2, 3]); + expectArraysEqual(await tf.argMin(a).data(), [0, 1, 0]); + }); + + it('2D, axis=0', async () => { + const a = tf.tensor2d([3, -1, 0, 100, -7, 2], [2, 3]); + const r = tf.argMin(a, 0); + + expect(r.shape).toEqual([3]); + expect(r.dtype).toBe('int32'); + expectArraysEqual(await r.data(), [0, 1, 0]); + }); + + it('2D, axis=1', async () => { + const a = tf.tensor2d([3, 2, 5, 100, -7, -8], [2, 3]); + const r = tf.argMin(a, 1); + expectArraysEqual(await r.data(), [1, 2]); + }); + + it('2D, axis = -1', async () => { + const a = tf.tensor2d([3, 2, 5, 100, -7, -8], [2, 3]); + const r = tf.argMin(a, -1); + expectArraysEqual(await r.data(), [1, 2]); + }); + + it('throws when passed a non-tensor', () => { + expect(() => tf.argMin({} as tf.Tensor)) + .toThrowError(/Argument 'x' passed to 'argMin' must be a Tensor/); + }); + + it('accepts a tensor-like object', async () => { + const result = tf.argMin([1, 0, 3, 2]); + expectArraysEqual(await result.data(), 1); + }); + + it('accepts tensor with bool values', async () => { + const t = tf.tensor1d([0, 1], 'bool'); + const result = tf.argMin(t); + expect(result.dtype).toBe('int32'); + expectArraysEqual(await result.data(), 0); + }); + + it('has gradient', async () => { + const a = tf.tensor2d([3, 2, 5, 100, -7, 2], [2, 3]); + const dy = tf.ones([3], 'float32'); + const da = tf.grad((x: tf.Tensor2D) => tf.argMin(x))(a, dy); + + expect(da.dtype).toBe('float32'); + expect(da.shape).toEqual([2, 3]); + expectArraysClose(await da.data(), [0, 0, 0, 0, 0, 0]); + }); + + it('gradient with clones', async () => { + const a = tf.tensor2d([3, 2, 5, 100, -7, 2], [2, 3]); + const dy = tf.ones([3], 'float32'); + const da = tf.grad((x: tf.Tensor2D) => 
tf.argMin(x.clone()).clone())(a, dy); + + expect(da.dtype).toBe('float32'); + expect(da.shape).toEqual([2, 3]); + expectArraysClose(await da.data(), [0, 0, 0, 0, 0, 0]); + }); + + it('throws error for string tensor', () => { + expect(() => tf.argMin(['a'])) + .toThrowError(/Argument 'x' passed to 'argMin' must be numeric tensor/); + }); +}); diff --git a/tfjs-core/src/ops/compute_weighted_loss.ts b/tfjs-core/src/ops/compute_weighted_loss.ts index 9f6b618c5fe..e7fc099b61a 100644 --- a/tfjs-core/src/ops/compute_weighted_loss.ts +++ b/tfjs-core/src/ops/compute_weighted_loss.ts @@ -21,10 +21,11 @@ import {TensorLike} from '../types'; import {cast} from './array_ops'; import {div} from './div'; import {Reduction} from './loss_ops_utils'; +import {mean} from './mean'; import {mul} from './mul'; import {notEqual} from './not_equal'; import {op} from './operation'; -import {mean, sum} from './reduction_ops'; +import {sum} from './sum'; import {ones, scalar} from './tensor_ops'; /** diff --git a/tfjs-core/src/ops/cosine_distance.ts b/tfjs-core/src/ops/cosine_distance.ts index 06a0b8717eb..88ca1beda65 100644 --- a/tfjs-core/src/ops/cosine_distance.ts +++ b/tfjs-core/src/ops/cosine_distance.ts @@ -18,12 +18,13 @@ import {Tensor} from '../tensor'; import {convertToTensor} from '../tensor_util_env'; import {TensorLike} from '../types'; import {assertShapesMatch} from '../util'; + import {computeWeightedLoss} from './compute_weighted_loss'; import {Reduction} from './loss_ops_utils'; import {mul} from './mul'; import {op} from './operation'; -import {sum} from './reduction_ops'; import {sub} from './sub'; +import {sum} from './sum'; import {scalar} from './tensor_ops'; /** diff --git a/tfjs-core/src/ops/gram_schmidt.ts b/tfjs-core/src/ops/gram_schmidt.ts index da3902e4629..fa0352cc173 100644 --- a/tfjs-core/src/ops/gram_schmidt.ts +++ b/tfjs-core/src/ops/gram_schmidt.ts @@ -23,11 +23,11 @@ import {div} from './div'; import {mul} from './mul'; import {norm} from './norm'; import 
{op} from './operation'; -import {sum} from './reduction_ops'; import {split} from './split'; import {squeeze} from './squeeze'; import {stack} from './stack'; import {sub} from './sub'; +import {sum} from './sum'; /** * Gram-Schmidt orthogonalization. diff --git a/tfjs-core/src/ops/log_sum_exp.ts b/tfjs-core/src/ops/log_sum_exp.ts index a73cb346577..4a61ae75819 100644 --- a/tfjs-core/src/ops/log_sum_exp.ts +++ b/tfjs-core/src/ops/log_sum_exp.ts @@ -24,9 +24,9 @@ import {add} from './add'; import {expandShapeToKeepDim} from './axis_util'; import {max} from './max'; import {op} from './operation'; -import {sum} from './reduction_ops'; import {reshape} from './reshape'; import {sub} from './sub'; +import {sum} from './sum'; import {exp, log} from './unary_ops'; /** diff --git a/tfjs-core/src/ops/max.ts b/tfjs-core/src/ops/max.ts index 33dd6f0acc7..9512e7478bd 100644 --- a/tfjs-core/src/ops/max.ts +++ b/tfjs-core/src/ops/max.ts @@ -73,27 +73,28 @@ function max_( } const y = backend.max(maxInput, axes); - save([$x, y]); - if (permutedAxes != null) { - backend.disposeData(maxInput.dataId); + maxInput.dispose(); + } + + let res = y; + if (keepDims) { + const expandedShape = axis_util.expandShapeToKeepDim( + res.shape, util.parseAxisParam(axis, $x.shape)); + res = reshape(res, expandedShape) as T; + y.dispose(); } - return y; + save([$x, res]); + return res; }; + const inputs: MaxInputs = {x: $x}; const attrs: MaxAttrs = {reductionIndices: axis, keepDims}; - const res = ENGINE.runKernelFunc( - forward, inputs as {} as NamedTensorMap, null /* gradient */, - Max, attrs as {} as NamedAttrMap) as T; - if (keepDims) { - return reshape( - res, - axis_util.expandShapeToKeepDim( - res.shape, util.parseAxisParam(axis, $x.shape))) as T; - } - return res; + return ENGINE.runKernelFunc( + forward, inputs as {} as NamedTensorMap, null /* gradient */, Max, + attrs as {} as NamedAttrMap) as T; } export const max = op({max_}); diff --git a/tfjs-core/src/ops/max_test.ts 
b/tfjs-core/src/ops/max_test.ts new file mode 100644 index 00000000000..147bd131c78 --- /dev/null +++ b/tfjs-core/src/ops/max_test.ts @@ -0,0 +1,251 @@ +/** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + +import * as tf from '../index'; +import {ALL_ENVS, describeWithFlags} from '../jasmine_util'; +import {expectArraysClose, expectArraysEqual} from '../test_util'; + +describeWithFlags('max', ALL_ENVS, () => { + it('with one element dominating', async () => { + const a = tf.tensor1d([3, -1, 0, 100, -7, 2]); + const r = tf.max(a); + expectArraysClose(await r.data(), 100); + }); + + it('with all elements being the same', async () => { + const a = tf.tensor1d([3, 3, 3]); + const r = tf.max(a); + expectArraysClose(await r.data(), 3); + }); + + it('ignores NaNs', async () => { + expectArraysClose(await tf.max([3, NaN, 2]).data(), 3); + }); + + it('2D', async () => { + const a = tf.tensor2d([3, -1, 0, 100, -7, 2], [2, 3]); + expectArraysClose(await tf.max(a).data(), 100); + }); + + it('2D axis=[0,1]', async () => { + const a = tf.tensor2d([3, -1, 0, 100, -7, 2], [2, 3]); + expectArraysClose(await tf.max(a, [0, 1]).data(), 100); + }); + + it('2D, axis=0', async () => { + const a = tf.tensor2d([3, -1, 0, 100, -7, 2], [2, 3]); + const r = tf.max(a, [0]); + + expect(r.shape).toEqual([3]); + expectArraysClose(await 
r.data(), [100, -1, 2]); + }); + + it('2D, axis=0, keepDims', async () => { + const a = tf.tensor2d([3, -1, 0, 100, -7, 2], [2, 3]); + const r = tf.max(a, [0], true /* keepDims */); + + expect(r.shape).toEqual([1, 3]); + expectArraysClose(await r.data(), [100, -1, 2]); + }); + + it('2D, axis=1 provided as a number', async () => { + const a = tf.tensor2d([3, 2, 5, 100, -7, 2], [2, 3]); + const r = tf.max(a, 1); + expectArraysClose(await r.data(), [5, 100]); + }); + + it('2D, axis = -1 provided as a number', async () => { + const a = tf.tensor2d([3, 2, 5, 100, -7, 2], [2, 3]); + const r = tf.max(a, -1); + expectArraysClose(await r.data(), [5, 100]); + }); + + it('2D, axis=[1]', async () => { + const a = tf.tensor2d([3, 2, 5, 100, -7, 2], [2, 3]); + const r = tf.max(a, [1]); + expectArraysClose(await r.data(), [5, 100]); + }); + + it('6D, axis=[5]', async () => { + const a = tf.range(0, 64).reshape([2, 2, 2, 2, 2, 2]); + const r = tf.max(a, [5]); + const expectedResult = [ + 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, + 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 63 + ]; + expectArraysClose(await r.data(), expectedResult); + }); + + it('axis permutation does not change input', async () => { + const input = tf.tensor2d([3, -1, 0, 100, -7, 2], [2, 3]); + const inputDataBefore = await input.data(); + + tf.max(input, [1, 0]); + + const inputDataAfter = await input.data(); + expectArraysEqual(inputDataBefore, inputDataAfter); + }); + + it('throws when passed a non-tensor', () => { + expect(() => tf.max({} as tf.Tensor)) + .toThrowError(/Argument 'x' passed to 'max' must be a Tensor/); + }); + + it('accepts a tensor-like object', async () => { + const r = tf.max([3, -1, 0, 100, -7, 2]); + expectArraysClose(await r.data(), 100); + }); + + it('max gradient: Scalar', async () => { + const x = tf.scalar(42); + const dy = tf.scalar(-1); + const gradients = tf.grad(v => tf.max(v))(x, dy); + expectArraysClose(await gradients.data(), [-1]); + }); + + 
it('gradient with clones', async () => { + const x = tf.scalar(42); + const dy = tf.scalar(-1); + const gradients = tf.grad(v => tf.max(v.clone()).clone())(x, dy); + expectArraysClose(await gradients.data(), [-1]); + }); + + it('max gradient: 1D, ties', async () => { + const x = tf.tensor1d([1, 3, 7, 7]); + const dy = tf.scalar(-1); + const gradients = tf.grad(v => tf.max(v))(x, dy); + expectArraysClose(await gradients.data(), [0, 0, -1, -1]); + }); + + it('max gradient: 2D, axes=-1, keepDims=false', async () => { + const x = tf.tensor2d([[0, 20, 10], [-10, -30, -20]]); + const dy = tf.tensor1d([-1, -1]); + const axis = -1; + const gradients = tf.grad(v => tf.max(v, axis))(x, dy); + expectArraysClose(await gradients.data(), [0, -1, 0, -1, 0, 0]); + expect(gradients.shape).toEqual([2, 3]); + }); + + it('max gradient: ties, 2D, axes=-1, keepDims=false', async () => { + const x = tf.tensor2d([[0, 20, 20], [-10, -30, -10]]); + const dy = tf.tensor1d([-1, -1]); + const axis = -1; + const gradients = tf.grad(v => tf.max(v, axis))(x, dy); + expectArraysClose(await gradients.data(), [0, -1, -1, -1, 0, -1]); + expect(gradients.shape).toEqual([2, 3]); + }); + + it('max gradient: 2D, axes=0, keepDims=false', async () => { + const x = tf.tensor2d([[0, 20, 10], [-10, -30, 20]]); + const dy = tf.tensor1d([-1, -1, -1]); + const axis = 0; + const gradients = tf.grad(v => tf.max(v, axis))(x, dy); + expectArraysClose(await gradients.data(), [-1, -1, 0, 0, 0, -1]); + expect(gradients.shape).toEqual([2, 3]); + }); + + it('max gradient: 2D, axes=-1, keepDims=true', async () => { + const x = tf.tensor2d([[0, 20, 10], [-10, -30, -20]]); + const dy = tf.tensor2d([[-1], [-1]]); + const axis = -1; + const keepDims = true; + const gradients = tf.grad(v => tf.max(v, axis, keepDims))(x, dy); + expectArraysClose(await gradients.data(), [0, -1, 0, -1, 0, 0]); + expect(gradients.shape).toEqual([2, 3]); + }); + + it('max gradient: 2D, axes=0, keepDims=true', async () => { + const x = 
tf.tensor2d([[0, 20, 10], [-10, -30, 20]]); + const dy = tf.tensor2d([[-1, -1, -1]]); + const axis = 0; + const keepDims = true; + const gradients = tf.grad(v => tf.max(v, axis, keepDims))(x, dy); + expectArraysClose(await gradients.data(), [-1, -1, 0, 0, 0, -1]); + expect(gradients.shape).toEqual([2, 3]); + }); + + it('max gradient: 3D, axes=[1, 2], keepDims=false', async () => { + const x = tf.tensor3d([[[0, 20], [10, 15]], [[-10, -30], [-20, -15]]]); + const dy = tf.tensor1d([-1, -1]); + const axis = [1, 2]; + const gradients = tf.grad(v => tf.max(v, axis))(x, dy); + expectArraysClose(await gradients.data(), [0, -1, 0, 0, -1, 0, 0, 0]); + expect(gradients.shape).toEqual([2, 2, 2]); + }); + + it('max gradient: ties, 3D, axes=[1, 2], keepDims=false', async () => { + const x = tf.tensor3d([[[0, 20], [20, 20]], [[-10, -30], [-10, -15]]]); + const dy = tf.tensor1d([-1, -1]); + const axis = [1, 2]; + const gradients = tf.grad(v => tf.max(v, axis))(x, dy); + expectArraysClose(await gradients.data(), [0, -1, -1, -1, -1, 0, -1, 0]); + expect(gradients.shape).toEqual([2, 2, 2]); + }); + + it('max gradient: 3D, axes=2, keepDims=false', async () => { + const x = tf.tensor3d([[[0, 20], [10, 15]], [[-10, -30], [-20, -15]]]); + const dy = tf.tensor2d([[-1, -1], [-1, -1]]); + const axis = 2; + const gradients = tf.grad(v => tf.max(v, axis))(x, dy); + expectArraysClose(await gradients.data(), [0, -1, 0, -1, -1, 0, 0, -1]); + expect(gradients.shape).toEqual([2, 2, 2]); + }); + + it('max gradient: 3D, axes=2, keepDims=true', async () => { + const x = tf.tensor3d([[[0, 20], [10, 15]], [[-10, -30], [-20, -15]]]); + const dy = tf.tensor3d([[[-1], [-1]], [[-1], [-1]]]); + const axis = 2; + const keepDims = true; + const gradients = tf.grad(v => tf.max(v, axis, keepDims))(x, dy); + expectArraysClose(await gradients.data(), [0, -1, 0, -1, -1, 0, 0, -1]); + expect(gradients.shape).toEqual([2, 2, 2]); + }); + + it('max gradient: ties, 4D, axes=[1, 2, 3], keepDims=false', async () => { + 
const x = tf.tensor4d([ + [[[0, 20], [20, 20]], [[-10, -30], [-10, -30]]], + [[[0, -20], [-20, -20]], [[10, 30], [10, 30]]] + ]); + const dy = tf.tensor1d([-1, -1]); + const axis = [1, 2, 3]; + const gradients = tf.grad(v => tf.max(v, axis))(x, dy); + expectArraysClose( + await gradients.data(), + [0, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, -1]); + expect(gradients.shape).toEqual([2, 2, 2, 2]); + }); + + it('max gradient: ties, 4D, axes=[2, 3], keepDims=true', async () => { + const x = tf.tensor4d([ + [[[0, 20], [20, 20]], [[-10, -30], [-10, -30]]], + [[[0, -20], [-20, -20]], [[10, 30], [10, 30]]] + ]); + const dy = tf.tensor4d([[[[-1]], [[-2]]], [[[-3]], [[-4]]]]); + const axis = [2, 3]; + const keepDims = true; + const gradients = tf.grad(v => tf.max(v, axis, keepDims))(x, dy); + expectArraysClose( + await gradients.data(), + [0, -1, -1, -1, -2, 0, -2, 0, -3, 0, 0, 0, 0, -4, 0, -4]); + expect(gradients.shape).toEqual([2, 2, 2, 2]); + }); + + it('throws error for string tensor', () => { + expect(() => tf.max(['a'])) + .toThrowError(/Argument 'x' passed to 'max' must be numeric tensor/); + }); +}); diff --git a/tfjs-core/src/ops/mean.ts b/tfjs-core/src/ops/mean.ts new file mode 100644 index 00000000000..9ef7737b56a --- /dev/null +++ b/tfjs-core/src/ops/mean.ts @@ -0,0 +1,95 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ +import {customGrad} from '../gradients'; +import {Tensor} from '../tensor'; +import {convertToTensor} from '../tensor_util_env'; +import {TensorLike} from '../types'; +import {parseAxisParam, sizeFromShape} from '../util'; + +import {cast} from './array_ops'; +import {computeOutAndReduceShapes} from './axis_util'; +import {div} from './div'; +import {mul} from './mul'; +import {op} from './operation'; +import {reshape} from './reshape'; +import {sum} from './sum'; +import {ones, scalar} from './tensor_ops'; + +/** + * Computes the mean of elements across dimensions of a `tf.Tensor`. + * + * Reduces `x` along the dimensions given in `axis`. Unless `keepDims` is + * true, the rank of the `tf.Tensor` is reduced by 1 for each entry in `axis`. + * If `keepDims` is true, the reduced dimensions are retained with length 1. + * If `axis` has no entries, all dimensions are reduced, and a `tf.Tensor` with + * a single element is returned. + * + * ```js + * const x = tf.tensor1d([1, 2, 3]); + * + * x.mean().print(); // or tf.mean(a) + * ``` + * + * ```js + * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]); + * + * const axis = 1; + * x.mean(axis).print(); // or tf.mean(x, axis) + * ``` + * + * @param x The input tensor. + * @param axis The dimension(s) to reduce. By default it reduces + * all dimensions. + * @param keepDims If true, retains reduced dimensions with size 1. + */ +/** @doc {heading: 'Operations', subheading: 'Reduction'} */ +function mean_<T extends Tensor>( + x: Tensor|TensorLike, axis: number|number[] = null, keepDims = false): T { + const $x = convertToTensor(x, 'x', 'mean'); + + const axes = parseAxisParam(axis, $x.shape); + const shapes = computeOutAndReduceShapes($x.shape, axes); + const reduceShape = shapes[1]; + const reduceSize = sizeFromShape(reduceShape); + + // Use a custom gradient to bypass 2 gradient backprops since mean is used + // extremely often.
+ const customOp = customGrad((x: Tensor) => { + const reduceSizeScalar = scalar(reduceSize); + // Cast if needed. + const xReduce = reduceSizeScalar.dtype === x.dtype ? + x : + cast(x, reduceSizeScalar.dtype); + const res = div(xReduce, reduceSizeScalar); + const value = sum(res, axis, keepDims); + + const gradFunc = (dy: Tensor) => { + const expandedDyShape = x.shape.slice(); + axes.forEach(axis => { + expandedDyShape[axis] = 1; + }); + const expandedDy = reshape(dy, expandedDyShape); + const derX = div(mul(expandedDy, ones(x.shape, 'float32')), reduceSize); + return derX; + }; + return {value, gradFunc}; + }); + + return customOp($x) as T; +} + +export const mean = op({mean_}); diff --git a/tfjs-core/src/ops/mean_test.ts b/tfjs-core/src/ops/mean_test.ts new file mode 100644 index 00000000000..9fc96087a60 --- /dev/null +++ b/tfjs-core/src/ops/mean_test.ts @@ -0,0 +1,167 @@ +/** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + +import * as tf from '../index'; +import {ALL_ENVS, describeWithFlags} from '../jasmine_util'; +import {expectArraysClose, expectArraysEqual} from '../test_util'; + +describeWithFlags('mean', ALL_ENVS, () => { + it('basic', async () => { + const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); + const r = tf.mean(a); + + expect(r.dtype).toBe('float32'); + expectArraysClose(await r.data(), 7 / 6); + }); + + it('propagates NaNs', async () => { + const a = tf.tensor2d([1, 2, 3, NaN, 0, 1], [3, 2]); + const r = tf.mean(a); + + expect(r.dtype).toBe('float32'); + expectArraysEqual(await r.data(), NaN); + }); + + it('mean(int32) => float32', async () => { + const a = tf.tensor1d([1, 5, 7, 3], 'int32'); + const r = tf.mean(a); + + expect(r.dtype).toBe('float32'); + expectArraysClose(await r.data(), 4); + }); + + it('mean(bool) => float32', async () => { + const a = tf.tensor1d([true, false, false, true, true], 'bool'); + const r = tf.mean(a); + + expect(r.dtype).toBe('float32'); + expectArraysClose(await r.data(), 3 / 5); + }); + + it('2D array with keep dim', async () => { + const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); + const res = tf.mean(a, null, true /* keepDims */); + + expect(res.shape).toEqual([1, 1]); + expect(res.dtype).toBe('float32'); + expectArraysClose(await res.data(), [7 / 6]); + }); + + it('axis=0 in 2D array', async () => { + const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); + const res = tf.mean(a, [0]); + + expect(res.shape).toEqual([2]); + expect(res.dtype).toBe('float32'); + expectArraysClose(await res.data(), [4 / 3, 1]); + }); + + it('axis=0 in 2D array, keepDims', async () => { + const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); + const res = tf.mean(a, [0], true /* keepDims */); + + expect(res.shape).toEqual([1, 2]); + expect(res.dtype).toBe('float32'); + expectArraysClose(await res.data(), [4 / 3, 1]); + }); + + it('axis=1 in 2D array', async () => { + 
const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); + const res = tf.mean(a, [1]); + + expect(res.dtype).toBe('float32'); + expect(res.shape).toEqual([3]); + expectArraysClose(await res.data(), [1.5, 1.5, 0.5]); + }); + + it('axis = -1 in 2D array', async () => { + const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); + const res = tf.mean(a, [-1]); + + expect(res.dtype).toBe('float32'); + expect(res.shape).toEqual([3]); + expectArraysClose(await res.data(), [1.5, 1.5, 0.5]); + }); + + it('2D, axis=1 provided as number', async () => { + const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [2, 3]); + const res = tf.mean(a, 1); + + expect(res.shape).toEqual([2]); + expect(res.dtype).toBe('float32'); + expectArraysClose(await res.data(), [2, 1 / 3]); + }); + + it('axis=0,1 in 2D array', async () => { + const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); + const res = tf.mean(a, [0, 1]); + + expect(res.shape).toEqual([]); + expect(res.dtype).toBe('float32'); + expectArraysClose(await res.data(), [7 / 6]); + }); + + it('gradients', async () => { + const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); + const dy = tf.scalar(1.5); + + const da = tf.grad(a => a.mean())(a, dy); + const dyVal = await dy.array(); + expect(da.shape).toEqual(a.shape); + expectArraysClose(await da.data(), [ + dyVal / a.size, dyVal / a.size, dyVal / a.size, dyVal / a.size, + dyVal / a.size, dyVal / a.size + ]); + }); + + it('gradient with clones', async () => { + const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); + const dy = tf.scalar(1.5); + + const da = tf.grad(a => a.clone().mean().clone())(a, dy); + const dyVal = await dy.array(); + expect(da.shape).toEqual(a.shape); + expectArraysClose(await da.data(), [ + dyVal / a.size, dyVal / a.size, dyVal / a.size, dyVal / a.size, + dyVal / a.size, dyVal / a.size + ]); + }); + + it('gradients throws for defined axis', () => { + const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); + const dy = tf.scalar(1.5); + + expect(() => tf.grad(a => a.mean(1))(a, dy)).toThrowError(); 
+ }); + + it('throws when passed a non-tensor', () => { + expect(() => tf.mean({} as tf.Tensor)) + .toThrowError(/Argument 'x' passed to 'mean' must be a Tensor/); + }); + + it('accepts a tensor-like object', async () => { + const r = tf.mean([[1, 2, 3], [0, 0, 1]]); + + expect(r.dtype).toBe('float32'); + expectArraysClose(await r.data(), 7 / 6); + }); + + it('throws error for string tensor', () => { + expect(() => tf.mean(['a'])) + .toThrowError(/Argument 'x' passed to 'mean' must be numeric tensor/); + }); +}); diff --git a/tfjs-core/src/ops/min.ts b/tfjs-core/src/ops/min.ts new file mode 100644 index 00000000000..aa133405ce6 --- /dev/null +++ b/tfjs-core/src/ops/min.ts @@ -0,0 +1,100 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ +import {KernelBackend} from '../backends/backend'; +import {ENGINE, ForwardFunc} from '../engine'; +import {Min, MinAttrs, MinInputs} from '../kernel_names'; +import {NamedAttrMap} from '../kernel_registry'; +import {Tensor} from '../tensor'; +import {GradSaveFunc, NamedTensorMap} from '../tensor_types'; +import {convertToTensor} from '../tensor_util_env'; +import {TensorLike} from '../types'; +import {parseAxisParam} from '../util'; + +import * as axis_util from './axis_util'; +import {op} from './operation'; +import {reshape} from './reshape'; +import {transpose} from './transpose'; + +/** + * Computes the minimum value from the input. + * + * Reduces the input along the dimensions given in `axes`. Unless `keepDims` + * is true, the rank of the array is reduced by 1 for each entry in `axes`. + * If `keepDims` is true, the reduced dimensions are retained with length 1. + * If `axes` has no entries, all dimensions are reduced, and an array with a + * single element is returned. + * + * ```js + * const x = tf.tensor1d([1, 2, 3]); + * + * x.min().print(); // or tf.min(x) + * ``` + * + * ```js + * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]); + * + * const axis = 1; + * x.min(axis).print(); // or tf.min(x, axis) + * ``` + * + * @param x The input Tensor. + * @param axis The dimension(s) to reduce. By default it reduces + * all dimensions. + * @param keepDims If true, retains reduced dimensions with size 1. 
+ */ +/** @doc {heading: 'Operations', subheading: 'Reduction'} */ +function min_<T extends Tensor>( + x: Tensor|TensorLike, axis: number|number[] = null, keepDims = false): T { + const $x = convertToTensor(x, 'x', 'min'); + + const forward: ForwardFunc<Tensor> = + (backend: KernelBackend, save: GradSaveFunc) => { + const origAxes = parseAxisParam(axis, $x.shape); + let axes = origAxes; + const permutedAxes = axis_util.getAxesPermutation(axes, $x.rank); + let minInput = $x; + if (permutedAxes != null) { + minInput = transpose($x, permutedAxes); + axes = axis_util.getInnerMostAxes(axes.length, $x.rank); + } + + const y = backend.min(minInput, axes); + if (permutedAxes != null) { + minInput.dispose(); + } + + let res = y; + if (keepDims) { + const expandedShape = + axis_util.expandShapeToKeepDim(res.shape, origAxes); + res = reshape(y, expandedShape) as T; + y.dispose(); + } + + save([$x, res]); + return res; + }; + + const inputs: MinInputs = {x: $x}; + const attrs: MinAttrs = {axis, keepDims}; + + return ENGINE.runKernelFunc( + forward, inputs as {} as NamedTensorMap, null /* gradient */, Min, + attrs as {} as NamedAttrMap) as T; +} + +export const min = op({min_}); diff --git a/tfjs-core/src/ops/min_test.ts b/tfjs-core/src/ops/min_test.ts new file mode 100644 index 00000000000..aeb3b62c285 --- /dev/null +++ b/tfjs-core/src/ops/min_test.ts @@ -0,0 +1,234 @@ +/** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * ============================================================================= + */ + +import * as tf from '../index'; +import {ALL_ENVS, describeWithFlags} from '../jasmine_util'; +import {expectArraysClose, expectArraysEqual} from '../test_util'; + +describeWithFlags('min', ALL_ENVS, () => { + it('Tensor1D', async () => { + const a = tf.tensor1d([3, -1, 0, 100, -7, 2]); + expectArraysClose(await tf.min(a).data(), -7); + }); + + it('ignores NaNs', async () => { + const a = tf.tensor1d([3, NaN, 2]); + expectArraysEqual(await tf.min(a).data(), 2); + }); + + it('2D', async () => { + const a = tf.tensor2d([3, -1, 0, 100, -7, 2], [2, 3]); + expectArraysClose(await tf.min(a).data(), -7); + }); + + it('2D axis=[0,1]', async () => { + const a = tf.tensor2d([3, -1, 0, 100, -7, 2], [2, 3]); + expectArraysClose(await tf.min(a, [0, 1]).data(), -7); + }); + + it('2D, axis=0', async () => { + const a = tf.tensor2d([3, -1, 0, 100, -7, 2], [2, 3]); + const r = tf.min(a, 0); + + expect(r.shape).toEqual([3]); + expectArraysClose(await r.data(), [3, -7, 0]); + }); + + it('2D, axis=0, keepDims', async () => { + const a = tf.tensor2d([3, -1, 0, 100, -7, 2], [2, 3]); + const r = tf.min(a, 0, true /* keepDims */); + + expect(r.shape).toEqual([1, 3]); + expectArraysClose(await r.data(), [3, -7, 0]); + }); + + it('2D, axis=1 provided as a number', async () => { + const a = tf.tensor2d([3, 2, 5, 100, -7, 2], [2, 3]); + const r = tf.min(a, 1); + expectArraysClose(await r.data(), [2, -7]); + }); + + it('2D, axis = -1 provided as a number', async () => { + const a = tf.tensor2d([3, 2, 5, 100, -7, 2], [2, 3]); + const r = tf.min(a, -1); + expectArraysClose(await r.data(), [2, -7]); + }); + + it('2D, axis=[1]', async () => { + const a = tf.tensor2d([3, 2, 5, 100, -7, 2], [2, 3]); + const r = tf.min(a, [1]); + expectArraysClose(await r.data(), [2, -7]); + }); + + it('axis permutation does not change input', async () => { + const input = tf.tensor2d([3, -1, 0, 100, -7, 2], [2, 3]); + const 
inputDataBefore = await input.data(); + + tf.min(input, [1, 0]); + + const inputDataAfter = await input.data(); + expectArraysEqual(inputDataBefore, inputDataAfter); + }); + + it('throws when passed a non-tensor', () => { + expect(() => tf.min({} as tf.Tensor)) + .toThrowError(/Argument 'x' passed to 'min' must be a Tensor/); + }); + + it('accepts a tensor-like object', async () => { + expectArraysClose(await tf.min([3, -1, 0, 100, -7, 2]).data(), -7); + }); + + it('min gradient: Scalar', async () => { + const x = tf.scalar(42); + const dy = tf.scalar(-1); + const gradients = tf.grad(v => tf.min(v))(x, dy); + expectArraysClose(await gradients.data(), -1); + }); + + it('gradient with clones', async () => { + const x = tf.scalar(42); + const dy = tf.scalar(-1); + const gradients = tf.grad(v => tf.min(v.clone()).clone())(x, dy); + expectArraysClose(await gradients.data(), -1); + }); + + it('min gradient: 1D, ties', async () => { + const x = tf.tensor1d([-1, -3, -7, -7]); + const dy = tf.scalar(-1); + const gradients = tf.grad(v => tf.min(v))(x, dy); + expectArraysClose(await gradients.data(), [0, 0, -1, -1]); + }); + + it('min gradient: 2D, axes=-1, keepDims=false', async () => { + const x = tf.tensor2d([[-0, -20, -10], [10, 30, 20]]); + const dy = tf.tensor1d([-1, -1]); + const axis = -1; + const gradients = tf.grad(v => tf.min(v, axis))(x, dy); + expectArraysClose(await gradients.data(), [0, -1, 0, -1, 0, 0]); + expect(gradients.shape).toEqual([2, 3]); + }); + + it('min gradient: ties, 2D, axes=-1, keepDims=false', async () => { + const x = tf.tensor2d([[0, -20, -20], [10, 30, 10]]); + const dy = tf.tensor1d([-1, -1]); + const axis = -1; + const gradients = tf.grad(v => tf.min(v, axis))(x, dy); + expectArraysClose(await gradients.data(), [0, -1, -1, -1, 0, -1]); + expect(gradients.shape).toEqual([2, 3]); + }); + + it('min gradient: 2D, axes=0, keepDims=false', async () => { + const x = tf.tensor2d([[0, 20, 10], [-10, -30, 20]]); + const dy = tf.tensor1d([-1, -1, 
-1]); + const axis = 0; + const gradients = tf.grad(v => tf.min(v, axis))(x, dy); + expectArraysClose(await gradients.data(), [0, 0, -1, -1, -1, 0]); + expect(gradients.shape).toEqual([2, 3]); + }); + + it('min gradient: 2D, axes=-1, keepDims=true', async () => { + const x = tf.tensor2d([[0, -20, -10], [10, 30, 20]]); + const dy = tf.tensor2d([[-1], [-1]]); + const axis = -1; + const keepDims = true; + const gradients = tf.grad(v => tf.min(v, axis, keepDims))(x, dy); + expectArraysClose(await gradients.data(), [0, -1, 0, -1, 0, 0]); + expect(gradients.shape).toEqual([2, 3]); + }); + + it('min gradient: 2D, axes=0, keepDims=true', async () => { + const x = tf.tensor2d([[0, -20, -10], [10, 30, -20]]); + const dy = tf.tensor2d([[-1, -1, -1]]); + const axis = 0; + const keepDims = true; + const gradients = tf.grad(v => tf.min(v, axis, keepDims))(x, dy); + expectArraysClose(await gradients.data(), [-1, -1, 0, 0, 0, -1]); + expect(gradients.shape).toEqual([2, 3]); + }); + + it('min gradient: 3D, axes=[1, 2], keepDims=false', async () => { + const x = tf.tensor3d([[[0, -20], [-10, -15]], [[10, 30], [20, 15]]]); + const dy = tf.tensor1d([-1, -1]); + const axis = [1, 2]; + const gradients = tf.grad(v => tf.min(v, axis))(x, dy); + expectArraysClose(await gradients.data(), [0, -1, 0, 0, -1, 0, 0, 0]); + expect(gradients.shape).toEqual([2, 2, 2]); + }); + + it('min gradient: ties, 3D, axes=[1, 2], keepDims=false', async () => { + const x = tf.tensor3d([[[0, -20], [-20, -20]], [[10, 30], [10, 15]]]); + const dy = tf.tensor1d([-1, -1]); + const axis = [1, 2]; + const gradients = tf.grad(v => tf.min(v, axis))(x, dy); + expectArraysClose(await gradients.data(), [0, -1, -1, -1, -1, 0, -1, 0]); + expect(gradients.shape).toEqual([2, 2, 2]); + }); + + it('min gradient: 3D, axes=2, keepDims=false', async () => { + const x = tf.tensor3d([[[0, -20], [-10, -15]], [[10, 30], [20, 15]]]); + const dy = tf.tensor2d([[-1, -1], [-1, -1]]); + const axis = 2; + const gradients = tf.grad(v =>
tf.min(v, axis))(x, dy); + expectArraysClose(await gradients.data(), [0, -1, 0, -1, -1, 0, 0, -1]); + expect(gradients.shape).toEqual([2, 2, 2]); + }); + + it('min gradient: 3D, axes=2, keepDims=true', async () => { + const x = tf.tensor3d([[[0, -20], [-10, -15]], [[10, 30], [20, 15]]]); + const dy = tf.tensor3d([[[-1], [-1]], [[-1], [-1]]]); + const axis = 2; + const keepDims = true; + const gradients = tf.grad(v => tf.min(v, axis, keepDims))(x, dy); + expectArraysClose(await gradients.data(), [0, -1, 0, -1, -1, 0, 0, -1]); + expect(gradients.shape).toEqual([2, 2, 2]); + }); + + it('min gradient: ties, 4D, axes=[1, 2, 3], keepDims=false', async () => { + const x = tf.tensor4d([ + [[[0, -20], [-20, -20]], [[10, 30], [10, 30]]], + [[[0, 20], [20, 20]], [[-10, -30], [-10, -30]]] + ]); + const dy = tf.tensor1d([-1, -1]); + const axis = [1, 2, 3]; + const gradients = tf.grad(v => tf.min(v, axis))(x, dy); + expectArraysClose( + await gradients.data(), + [0, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, -1]); + expect(gradients.shape).toEqual([2, 2, 2, 2]); + }); + + it('min gradient: ties, 4D, axes=[2, 3], keepDims=true', async () => { + const x = tf.tensor4d([ + [[[0, -20], [-20, -20]], [[10, 30], [10, 30]]], + [[[0, 20], [20, 20]], [[-10, -30], [-10, -30]]] + ]); + const dy = tf.tensor4d([[[[-1]], [[-2]]], [[[-3]], [[-4]]]]); + const axis = [2, 3]; + const keepDims = true; + const gradients = tf.grad(v => tf.min(v, axis, keepDims))(x, dy); + expectArraysClose( + await gradients.data(), + [0, -1, -1, -1, -2, 0, -2, 0, -3, 0, 0, 0, 0, -4, 0, -4]); + expect(gradients.shape).toEqual([2, 2, 2, 2]); + }); + + it('throws error for string tensor', () => { + expect(() => tf.min(['a'])) + .toThrowError(/Argument 'x' passed to 'min' must be numeric tensor/); + }); +}); diff --git a/tfjs-core/src/ops/moments.ts b/tfjs-core/src/ops/moments.ts index dfaa463ba37..4e77c5caee2 100644 --- a/tfjs-core/src/ops/moments.ts +++ b/tfjs-core/src/ops/moments.ts @@ -22,8 +22,8 @@ import 
{parseAxisParam} from '../util'; import {cast} from './array_ops'; import {expandShapeToKeepDim} from './axis_util'; +import {mean} from './mean'; import {op} from './operation'; -import {mean} from './reduction_ops'; import {reshape} from './reshape'; import {square} from './square'; import {sub} from './sub'; diff --git a/tfjs-core/src/ops/norm.ts b/tfjs-core/src/ops/norm.ts index 42d27e783fb..c9f87121374 100644 --- a/tfjs-core/src/ops/norm.ts +++ b/tfjs-core/src/ops/norm.ts @@ -22,11 +22,12 @@ import {parseAxisParam} from '../util'; import * as axis_util from './axis_util'; import {max} from './max'; +import {min} from './min'; import {op} from './operation'; import {pow} from './pow'; -import {min, sum} from './reduction_ops'; import {reshape} from './reshape'; import {square} from './square'; +import {sum} from './sum'; import {scalar} from './tensor_ops'; import {abs, sqrt} from './unary_ops'; diff --git a/tfjs-core/src/ops/norm_test.ts b/tfjs-core/src/ops/norm_test.ts new file mode 100644 index 00000000000..d4ffaa7e6ef --- /dev/null +++ b/tfjs-core/src/ops/norm_test.ts @@ -0,0 +1,291 @@ +/** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + +import * as tf from '../index'; +import {ALL_ENVS, describeWithFlags} from '../jasmine_util'; +import {expectArraysClose, expectArraysEqual} from '../test_util'; + +describeWithFlags('norm', ALL_ENVS, () => { + it('scalar norm', async () => { + const a = tf.scalar(-22.0); + const norm = tf.norm(a); + + expect(norm.dtype).toBe('float32'); + expectArraysClose(await norm.data(), 22); + }); + + it('vector inf norm', async () => { + const a = tf.tensor1d([1, -2, 3, -4]); + const norm = tf.norm(a, Infinity); + + expect(norm.dtype).toBe('float32'); + expectArraysClose(await norm.data(), 4); + }); + + it('vector -inf norm', async () => { + const a = tf.tensor1d([1, -2, 3, -4]); + const norm = tf.norm(a, -Infinity); + + expect(norm.dtype).toBe('float32'); + expectArraysClose(await norm.data(), 1); + }); + + it('vector 1 norm', async () => { + const a = tf.tensor1d([1, -2, 3, -4]); + const norm = tf.norm(a, 1); + + expect(norm.dtype).toBe('float32'); + expectArraysClose(await norm.data(), 10); + }); + + it('vector euclidean norm', async () => { + const a = tf.tensor1d([1, -2, 3, -4]); + const norm = tf.norm(a, 'euclidean'); + + expect(norm.dtype).toBe('float32'); + expectArraysClose(await norm.data(), 5.4772); + }); + + it('vector 2-norm', async () => { + const a = tf.tensor1d([1, -2, 3, -4]); + const norm = tf.norm(a, 2); + + expect(norm.dtype).toBe('float32'); + expectArraysClose(await norm.data(), 5.4772); + }); + + it('vector >2-norm to throw error', () => { + const a = tf.tensor1d([1, -2, 3, -4]); + expect(() => tf.norm(a, 3)).toThrowError(); + }); + + it('matrix inf norm', async () => { + const a = tf.tensor2d([1, 2, -3, 1, 0, 1], [3, 2]); + const norm = tf.norm(a, Infinity, [0, 1]); + + expect(norm.dtype).toBe('float32'); + expectArraysClose(await norm.data(), 4); + }); + + it('matrix -inf norm', async () => { + const a = tf.tensor2d([1, 2, -3, 1, 0, 1], [3, 2]); + const norm = 
tf.norm(a, -Infinity, [0, 1]); + + expect(norm.dtype).toBe('float32'); + expectArraysClose(await norm.data(), 1); + }); + + it('matrix 1 norm', async () => { + const a = tf.tensor2d([1, 2, -3, 1, 1, 1], [3, 2]); + const norm = tf.norm(a, 1, [0, 1]); + + expect(norm.dtype).toBe('float32'); + expectArraysClose(await norm.data(), 5); + }); + + it('matrix euclidean norm', async () => { + const a = tf.tensor2d([1, 2, -3, 1, 1, 1], [3, 2]); + const norm = tf.norm(a, 'euclidean', [0, 1]); + + expect(norm.dtype).toBe('float32'); + expectArraysClose(await norm.data(), 4.123); + }); + + it('matrix fro norm', async () => { + const a = tf.tensor2d([1, 2, -3, 1, 1, 1], [3, 2]); + const norm = tf.norm(a, 'fro', [0, 1]); + + expect(norm.dtype).toBe('float32'); + expectArraysClose(await norm.data(), 4.123); + }); + + it('matrix other norm to throw error', () => { + const a = tf.tensor2d([1, 2, -3, 1, 1, 1], [3, 2]); + expect(() => tf.norm(a, 2, [0, 1])).toThrowError(); + }); + + it('propagates NaNs for norm', async () => { + const a = tf.tensor2d([1, 2, 3, NaN, 0, 1], [3, 2]); + const norm = tf.norm(a); + + expect(norm.dtype).toBe('float32'); + expectArraysEqual(await norm.data(), NaN); + }); + + it('axis=null in 2D array norm', async () => { + const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); + const norm = tf.norm(a, Infinity); + + expect(norm.shape).toEqual([]); + expect(norm.dtype).toBe('float32'); + expectArraysClose(await norm.data(), [3]); + }); + + it('2D array norm with keep dim', async () => { + const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); + const norm = tf.norm(a, Infinity, null, true /* keepDims */); + + expect(norm.shape).toEqual([1, 1]); + expect(norm.dtype).toBe('float32'); + expectArraysClose(await norm.data(), [3]); + }); + + it('axis=0 in 2D array norm', async () => { + const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); + const norm = tf.norm(a, Infinity, [0]); + + expect(norm.shape).toEqual([2]); + expect(norm.dtype).toBe('float32'); + 
expectArraysClose(await norm.data(), [3, 2]); + }); + + it('axis=1 in 2D array norm', async () => { + const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); + const norm = tf.norm(a, Infinity, [1]); + + expect(norm.dtype).toBe('float32'); + expect(norm.shape).toEqual([3]); + expectArraysClose(await norm.data(), [2, 3, 1]); + }); + + it('axis=1 keepDims in 2D array norm', async () => { + const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); + const norm = tf.norm(a, Infinity, [1], true); + + expect(norm.dtype).toBe('float32'); + expect(norm.shape).toEqual([3, 1]); + expectArraysClose(await norm.data(), [2, 3, 1]); + }); + + it('2D norm with axis=1 provided as number', async () => { + const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [2, 3]); + const norm = tf.norm(a, Infinity, 1); + + expect(norm.shape).toEqual([2]); + expect(norm.dtype).toBe('float32'); + expectArraysClose(await norm.data(), [3, 1]); + }); + + it('axis=0,1 in 2D array norm', async () => { + const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); + const norm = tf.norm(a, Infinity, [0, 1]); + + expect(norm.shape).toEqual([]); + expect(norm.dtype).toBe('float32'); + expectArraysClose(await norm.data(), [3]); + }); + + it('axis=0,1 keepDims in 2D array norm', async () => { + const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); + const norm = tf.norm(a, Infinity, [0, 1], true); + + expect(norm.shape).toEqual([1, 1]); + expect(norm.dtype).toBe('float32'); + expectArraysClose(await norm.data(), [3]); + }); + + it('3D norm axis=0,1, matrix inf norm', async () => { + const a = tf.tensor3d([1, 2, -3, 1, 0, 1], [3, 2, 1]); + const norm = tf.norm(a, Infinity, [0, 1]); + + expect(norm.shape).toEqual([1]); + expect(norm.dtype).toBe('float32'); + expectArraysClose(await norm.data(), [4]); + }); + + it('axis=0,1 keepDims in 3D array norm', async () => { + const a = tf.tensor3d([1, 2, 3, 0, 0, 1], [3, 2, 1]); + const norm = tf.norm(a, Infinity, [0, 1], true); + + expect(norm.shape).toEqual([1, 1, 1]); + 
expect(norm.dtype).toBe('float32'); + expectArraysClose(await norm.data(), [3]); + }); + + it('axis=0,1 keepDims in 3D array norm', async () => { + const a = tf.tensor3d([1, 2, 3, 0, 0, 1, 1, 2, 3, 0, 0, 1], [3, 2, 2]); + const norm = tf.norm(a, Infinity, [0, 1], true); + + expect(norm.shape).toEqual([1, 1, 2]); + expect(norm.dtype).toBe('float32'); + expectArraysClose(await norm.data(), [4, 3]); + }); + + it('axis=null in 3D array norm', async () => { + const a = tf.tensor3d([1, 2, 3, 0, 0, 1], [3, 2, 1]); + const norm = tf.norm(a, Infinity); + + expect(norm.shape).toEqual([]); + expect(norm.dtype).toBe('float32'); + expectArraysClose(await norm.data(), [3]); + }); + + it('axis=null in 4D array norm', async () => { + const a = tf.tensor4d([1, 2, 3, 0, 0, 1], [3, 2, 1, 1]); + const norm = tf.norm(a, Infinity); + + expect(norm.shape).toEqual([]); + expect(norm.dtype).toBe('float32'); + expectArraysClose(await norm.data(), [3]); + }); + + it('axis=0,1 in 4D array norm', async () => { + const a = tf.tensor4d( + [ + 1, 2, 3, 0, 0, 1, 1, 2, 3, 0, 0, 1, + 1, 2, 3, 0, 0, 1, 1, 2, 3, 0, 0, 1 + ], + [3, 2, 2, 2]); + const norm = tf.norm(a, Infinity, [0, 1]); + + expect(norm.shape).toEqual([2, 2]); + expect(norm.dtype).toBe('float32'); + expectArraysClose(await norm.data(), [4, 3, 4, 3]); + }); + + it('axis=0,1 in 4D array norm', async () => { + const a = tf.tensor4d( + [ + 1, 2, 3, 0, 0, 1, 1, 2, 3, 0, 0, 1, + 1, 2, 3, 0, 0, 1, 1, 2, 3, 0, 0, 1 + ], + [3, 2, 2, 2]); + const norm = tf.norm(a, Infinity, [0, 1], true); + + expect(norm.shape).toEqual([1, 1, 2, 2]); + expect(norm.dtype).toBe('float32'); + expectArraysClose(await norm.data(), [4, 3, 4, 3]); + }); + + it('throws when passed a non-tensor', () => { + expect(() => tf.norm({} as tf.Tensor)) + .toThrowError(/Argument 'x' passed to 'norm' must be a Tensor/); + }); + + it('accepts a tensor-like object', async () => { + const norm = tf.norm([1, -2, 3, -4], 1); + + expect(norm.dtype).toBe('float32'); + 
expectArraysClose(await norm.data(), 10); + }); + + it('throws error for string tensors', () => { + expect(() => tf.norm([ + 'a', 'b' + ])).toThrowError(/Argument 'x' passed to 'norm' must be numeric tensor/); + }); +}); diff --git a/tfjs-core/src/ops/ops.ts b/tfjs-core/src/ops/ops.ts index 9ebebddbad4..5cce7903ad0 100644 --- a/tfjs-core/src/ops/ops.ts +++ b/tfjs-core/src/ops/ops.ts @@ -20,6 +20,8 @@ export {add} from './add'; export {addN} from './add_n'; export {all} from './all'; export {any} from './any'; +export {argMax} from './arg_max'; +export {argMin} from './arg_min'; export {atan2} from './atan2'; export {avgPool} from './avg_pool'; export {avgPool3d} from './avg_pool_3d'; @@ -74,6 +76,8 @@ export {maxPool} from './max_pool'; export {maxPool3d} from './max_pool_3d'; export {maxPoolWithArgmax} from './max_pool_with_argmax'; export {maximum} from './maximum'; +export {mean} from './mean'; +export {min} from './min'; export {minimum} from './minimum'; export {mod} from './mod'; export {moments} from './moments'; @@ -114,6 +118,7 @@ export {squaredDifference} from './squared_difference'; export {squeeze} from './squeeze'; export {stack} from './stack'; export {sub} from './sub'; +export {sum} from './sum'; export {tile} from './tile'; export {truncatedNormal} from './truncated_normal'; export {unstack} from './unstack'; @@ -123,7 +128,6 @@ export {whereAsync} from './where_async'; export * from './boolean_mask'; export * from './slice'; export * from './unary_ops'; -export * from './reduction_ops'; export * from './compare'; export * from './binary_ops'; export * from './array_ops'; diff --git a/tfjs-core/src/ops/reduction_ops.ts b/tfjs-core/src/ops/reduction_ops.ts deleted file mode 100644 index 8c1aea8f9f8..00000000000 --- a/tfjs-core/src/ops/reduction_ops.ts +++ /dev/null @@ -1,328 +0,0 @@ -/** - * @license - * Copyright 2018 Google LLC. All Rights Reserved. 
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * ============================================================================= - */ - -import {ENGINE} from '../engine'; -import {customGrad} from '../gradients'; -import {Tensor} from '../tensor'; -import {convertToTensor} from '../tensor_util_env'; -import {TensorLike} from '../types'; -import * as util from '../util'; - -import * as axis_util from './axis_util'; -import {op} from './operation'; -import {gradForMinAndMax} from './reduction_ops_util'; -import {ones, scalar, zerosLike} from './tensor_ops'; - -/** - * Computes the sum of elements across dimensions of a `tf.Tensor`. - * - * Reduces the input along the dimensions given in `axes`. Unless `keepDims` - * is true, the rank of the `tf.Tensor` is reduced by 1 for each entry in - * `axes`. If `keepDims` is true, the reduced dimensions are retained with - * length 1. If axes has no entries, all dimensions are reduced, and a - * `tf.Tensor` with a single element is returned. - * - * ```js - * const x = tf.tensor1d([1, 2, 3]); - * - * x.sum().print(); // or tf.sum(x) - * ``` - * - * ```js - * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]); - * - * const axis = 1; - * x.sum(axis).print(); // or tf.sum(x, axis) - * ``` - * - * @param x The input tensor to compute the sum over. If the dtype is `bool` - * it will be converted to `int32` and the output dtype will be `int32`. - * @param axis The dimension(s) to reduce. By default it reduces - * all dimensions. 
- * @param keepDims If true, retains reduced dimensions with size 1. - */ -/** @doc {heading: 'Operations', subheading: 'Reduction'} */ -function sum_( - x: Tensor|TensorLike, axis: number|number[] = null, keepDims = false): T { - let $x = convertToTensor(x, 'x', 'sum'); - - if ($x.dtype === 'bool') { - $x = $x.toInt(); - } - const axes = util.parseAxisParam(axis, $x.shape); - - // Use a custom gradient to bypass 2 gradient backprops since sum is used - // extremely often. - const customOp = customGrad((x: Tensor) => { - const permutation = axis_util.getAxesPermutation(axes, x.rank); - let reductionAxes = axes; - let permutedX = x; - if (permutation != null) { - permutedX = x.transpose(permutation); - reductionAxes = axis_util.getInnerMostAxes(reductionAxes.length, x.rank); - } - - const gradFunc = (dy: Tensor) => { - const expandedDyShape = x.shape.slice(); - axes.forEach(axis => { - expandedDyShape[axis] = 1; - }); - const expandedDy = dy.reshape(expandedDyShape); - const derX = expandedDy.mul(ones(x.shape, 'float32')); - return derX; - }; - - const gradInputs = (dy: Tensor) => { - return {x: () => gradFunc(dy)}; - }; - - const attrs = {axes: reductionAxes}; - let value = ENGINE.runKernelFunc( - backend => backend.sum(permutedX, reductionAxes), {x: permutedX}, - gradInputs, 'Sum', attrs); - - if (keepDims) { - const newShape = axis_util.expandShapeToKeepDim(value.shape, axes); - value = value.reshape(newShape); - } - - return {value, gradFunc}; - }); - - return customOp($x) as T; -} - -/** - * Computes the mean of elements across dimensions of a `tf.Tensor`. - * - * Reduces `x` along the dimensions given in `axis`. Unless `keepDims` is - * true, the rank of the `tf.Tensor` is reduced by 1 for each entry in `axis`. - * If `keepDims` is true, the reduced dimensions are retained with length 1. - * If `axis` has no entries, all dimensions are reduced, and a `tf.Tensor` with - * a single element is returned. 
- * - * ```js - * const x = tf.tensor1d([1, 2, 3]); - * - * x.mean().print(); // or tf.mean(a) - * ``` - * - * ```js - * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]); - * - * const axis = 1; - * x.mean(axis).print(); // or tf.mean(x, axis) - * ``` - * - * @param x The input tensor. - * @param axis The dimension(s) to reduce. By default it reduces - * all dimensions. - * @param keepDims If true, retains reduced dimensions with size 1. - */ -/** @doc {heading: 'Operations', subheading: 'Reduction'} */ -function mean_( - x: Tensor|TensorLike, axis: number|number[] = null, keepDims = false): T { - const $x = convertToTensor(x, 'x', 'mean'); - - const axes = util.parseAxisParam(axis, $x.shape); - const shapes = axis_util.computeOutAndReduceShapes($x.shape, axes); - const reduceShape = shapes[1]; - const reduceSize = util.sizeFromShape(reduceShape); - - // Use a custom gradient to bypass 2 gradient backprops since mean is used - // extremely often. - const customOp = customGrad((x: Tensor) => { - const reduceSizeScalar = scalar(reduceSize); - // Cast if needed. - const xReduce = - reduceSizeScalar.dtype === x.dtype ? x : x.cast(reduceSizeScalar.dtype); - const res = xReduce.div(reduceSizeScalar); - const value = res.sum(axis, keepDims); - - const gradFunc = (dy: Tensor) => { - const expandedDyShape = x.shape.slice(); - axes.forEach(axis => { - expandedDyShape[axis] = 1; - }); - const expandedDy = dy.reshape(expandedDyShape); - const derX = expandedDy.mul(ones(x.shape, 'float32')).div(reduceSize); - return derX; - }; - return {value, gradFunc}; - }); - - return customOp($x) as T; -} - -/** - * Computes the minimum value from the input. - * - * Reduces the input along the dimensions given in `axes`. Unless `keepDims` - * is true, the rank of the array is reduced by 1 for each entry in `axes`. - * If `keepDims` is true, the reduced dimensions are retained with length 1. 
- * If `axes` has no entries, all dimensions are reduced, and an array with a - * single element is returned. - * - * ```js - * const x = tf.tensor1d([1, 2, 3]); - * - * x.min().print(); // or tf.min(x) - * ``` - * - * ```js - * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]); - * - * const axis = 1; - * x.min(axis).print(); // or tf.min(x, axis) - * ``` - * - * @param x The input Tensor. - * @param axis The dimension(s) to reduce. By default it reduces - * all dimensions. - * @param keepDims If true, retains reduced dimensions with size 1. - */ -/** @doc {heading: 'Operations', subheading: 'Reduction'} */ -function min_( - x: Tensor|TensorLike, axis: number|number[] = null, keepDims = false): T { - let $x = convertToTensor(x, 'x', 'min'); - const xOrig = $x; - - const origAxes = util.parseAxisParam(axis, $x.shape); - let axes = origAxes; - const permutedAxes = axis_util.getAxesPermutation(axes, $x.rank); - if (permutedAxes != null) { - $x = $x.transpose(permutedAxes); - axes = axis_util.getInnerMostAxes(axes.length, $x.rank); - } - - const grad = (dy: T, saved: Tensor[]) => - gradForMinAndMax(dy, saved[1], saved[0], origAxes, permutedAxes); - - const inputsToSave = [$x]; - const outputsToSave: boolean[] = [true]; - let res = ENGINE.runKernelFunc((backend, save) => { - const y = backend.min($x, axes); - save([xOrig, y]); - return y as T; - }, {x: $x}, grad, 'Min', {axes}, inputsToSave, outputsToSave); - if (keepDims) { - const newShape = axis_util.expandShapeToKeepDim(res.shape, origAxes); - res = res.reshape(newShape); - } - return res; -} - -/** - * Returns the indices of the minimum values along an `axis`. - * - * The result has the same shape as `input` with the dimension along `axis` - * removed. 
- * - * ```js - * const x = tf.tensor1d([1, 2, 3]); - * - * x.argMin().print(); // or tf.argMin(x) - * ``` - * - * ```js - * const x = tf.tensor2d([1, 2, 4, 3], [2, 2]); - * - * const axis = 1; - * x.argMin(axis).print(); // or tf.argMin(x, axis) - * ``` - * - * @param x The input tensor. - * @param axis The dimension to reduce. Defaults to 0 (outer-most dimension). - * - */ -/** @doc {heading: 'Operations', subheading: 'Reduction'} */ -function argMin_(x: Tensor|TensorLike, axis = 0): T { - let $x = convertToTensor(x, 'x', 'argMin'); - - if (axis == null) { - axis = 0; - } - let axes = util.parseAxisParam(axis, $x.shape); - const permutedAxes = axis_util.getAxesPermutation(axes, $x.rank); - if (permutedAxes != null) { - $x = $x.transpose(permutedAxes); - axes = axis_util.getInnerMostAxes(axes.length, $x.rank); - } - const grad = (dy: T, saved: Tensor[]) => { - const [$x] = saved; - return {$x: () => zerosLike($x)}; - }; - return ENGINE.runKernelFunc((backend, save) => { - const res = backend.argMin($x, axes[0]); - save([$x]); - return res; - }, {$x}, grad) as T; -} - -/** - * Returns the indices of the maximum values along an `axis`. - * - * The result has the same shape as `input` with the dimension along `axis` - * removed. - * - * ```js - * const x = tf.tensor1d([1, 2, 3]); - * - * x.argMax().print(); // or tf.argMax(x) - * ``` - * - * ```js - * const x = tf.tensor2d([1, 2, 4, 3], [2, 2]); - * - * const axis = 1; - * x.argMax(axis).print(); // or tf.argMax(x, axis) - * ``` - * - * @param x The input tensor. - * @param axis The dimension to reduce. Defaults to 0 (outer-most dimension). 
- */ -/** @doc {heading: 'Operations', subheading: 'Reduction'} */ -function argMax_(x: Tensor|TensorLike, axis = 0): T { - let $x = convertToTensor(x, 'x', 'argMax'); - - if (axis == null) { - axis = 0; - } - let axes = util.parseAxisParam(axis, $x.shape); - const permutedAxes = axis_util.getAxesPermutation(axes, $x.rank); - if (permutedAxes != null) { - $x = $x.transpose(permutedAxes); - axes = axis_util.getInnerMostAxes(axes.length, $x.rank); - } - const grad = (dy: T, saved: Tensor[]) => { - const [$x] = saved; - return {x: () => zerosLike($x)}; - }; - const attrs = {axis: axes[0]}; - const inputsToSave = [$x]; - return ENGINE.runKernelFunc((backend, save) => { - const res = backend.argMax($x, axes[0]); - save([$x]); - return res; - }, {x: $x}, grad, 'ArgMax', attrs, inputsToSave) as T; -} - -export const argMax = op({argMax_}); -export const argMin = op({argMin_}); -export const mean = op({mean_}); -export const min = op({min_}); -export const sum = op({sum_}); diff --git a/tfjs-core/src/ops/reduction_ops_test.ts b/tfjs-core/src/ops/reduction_ops_test.ts deleted file mode 100644 index bdd5c347b8f..00000000000 --- a/tfjs-core/src/ops/reduction_ops_test.ts +++ /dev/null @@ -1,1301 +0,0 @@ -/** - * @license - * Copyright 2017 Google LLC. All Rights Reserved. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * ============================================================================= - */ - -import * as tf from '../index'; -import {ALL_ENVS, describeWithFlags} from '../jasmine_util'; -import {expectArraysClose, expectArraysEqual} from '../test_util'; - -import * as reduce_util from './reduce_util'; - -describeWithFlags('min', ALL_ENVS, () => { - it('Tensor1D', async () => { - const a = tf.tensor1d([3, -1, 0, 100, -7, 2]); - expectArraysClose(await tf.min(a).data(), -7); - }); - - it('ignores NaNs', async () => { - const a = tf.tensor1d([3, NaN, 2]); - expectArraysEqual(await tf.min(a).data(), 2); - }); - - it('2D', async () => { - const a = tf.tensor2d([3, -1, 0, 100, -7, 2], [2, 3]); - expectArraysClose(await tf.min(a).data(), -7); - }); - - it('2D axis=[0,1]', async () => { - const a = tf.tensor2d([3, -1, 0, 100, -7, 2], [2, 3]); - expectArraysClose(await tf.min(a, [0, 1]).data(), -7); - }); - - it('2D, axis=0', async () => { - const a = tf.tensor2d([3, -1, 0, 100, -7, 2], [2, 3]); - const r = tf.min(a, 0); - - expect(r.shape).toEqual([3]); - expectArraysClose(await r.data(), [3, -7, 0]); - }); - - it('2D, axis=0, keepDims', async () => { - const a = tf.tensor2d([3, -1, 0, 100, -7, 2], [2, 3]); - const r = tf.min(a, 0, true /* keepDims */); - - expect(r.shape).toEqual([1, 3]); - expectArraysClose(await r.data(), [3, -7, 0]); - }); - - it('2D, axis=1 provided as a number', async () => { - const a = tf.tensor2d([3, 2, 5, 100, -7, 2], [2, 3]); - const r = tf.min(a, 1); - expectArraysClose(await r.data(), [2, -7]); - }); - - it('2D, axis = -1 provided as a number', async () => { - const a = tf.tensor2d([3, 2, 5, 100, -7, 2], [2, 3]); - const r = tf.min(a, -1); - expectArraysClose(await r.data(), [2, -7]); - }); - - it('2D, axis=[1]', async () => { - const a = tf.tensor2d([3, 2, 5, 100, -7, 2], [2, 3]); - const r = tf.min(a, [1]); - expectArraysClose(await r.data(), [2, -7]); - }); - - it('throws when passed a non-tensor', () => { - expect(() => tf.min({} as 
tf.Tensor)) - .toThrowError(/Argument 'x' passed to 'min' must be a Tensor/); - }); - - it('accepts a tensor-like object', async () => { - expectArraysClose(await tf.min([3, -1, 0, 100, -7, 2]).data(), -7); - }); - - it('min gradient: Scalar', async () => { - const x = tf.scalar(42); - const dy = tf.scalar(-1); - const gradients = tf.grad(v => tf.min(v))(x, dy); - expectArraysClose(await gradients.data(), -1); - }); - - it('gradient with clones', async () => { - const x = tf.scalar(42); - const dy = tf.scalar(-1); - const gradients = tf.grad(v => tf.min(v.clone()).clone())(x, dy); - expectArraysClose(await gradients.data(), -1); - }); - - it('min gradient: 1D, ties', async () => { - const x = tf.tensor1d([-1, -3, -7, -7]); - const dy = tf.scalar(-1); - const gradients = tf.grad(v => tf.min(v))(x, dy); - expectArraysClose(await gradients.data(), [0, 0, -1, -1]); - }); - - it('min gradient: 2D, axes=-1, keepDims=false', async () => { - const x = tf.tensor2d([[-0, -20, -10], [10, 30, 20]]); - const dy = tf.tensor1d([-1, -1]); - const axis = -1; - const gradients = tf.grad(v => tf.min(v, axis))(x, dy); - expectArraysClose(await gradients.data(), [0, -1, 0, -1, 0, 0]); - expect(gradients.shape).toEqual([2, 3]); - }); - - it('min gradient: ties, 2D, axes=-1, keepDims=false', async () => { - const x = tf.tensor2d([[0, -20, -20], [10, 30, 10]]); - const dy = tf.tensor1d([-1, -1]); - const axis = -1; - const gradients = tf.grad(v => tf.min(v, axis))(x, dy); - expectArraysClose(await gradients.data(), [0, -1, -1, -1, 0, -1]); - expect(gradients.shape).toEqual([2, 3]); - }); - - it('min gradient: 2D, axes=0, keepDims=false', async () => { - const x = tf.tensor2d([[0, 20, 10], [-10, -30, 20]]); - const dy = tf.tensor1d([-1, -1, -1]); - const axis = 0; - const gradients = tf.grad(v => tf.max(v, axis))(x, dy); - expectArraysClose(await gradients.data(), [-1, -1, 0, 0, 0, -1]); - expect(gradients.shape).toEqual([2, 3]); - }); - - it('min gradient: 2D, axes=-1, keepDims=true', 
async () => { - const x = tf.tensor2d([[0, -20, -10], [10, 30, 20]]); - const dy = tf.tensor2d([[-1], [-1]]); - const axis = -1; - const keepDims = true; - const gradients = tf.grad(v => tf.min(v, axis, keepDims))(x, dy); - expectArraysClose(await gradients.data(), [0, -1, 0, -1, 0, 0]); - expect(gradients.shape).toEqual([2, 3]); - }); - - it('min gradient: 2D, axes=0, keepDims=true', async () => { - const x = tf.tensor2d([[0, -20, -10], [10, 30, -20]]); - const dy = tf.tensor2d([[-1, -1, -1]]); - const axis = 0; - const keepDims = true; - const gradients = tf.grad(v => tf.min(v, axis, keepDims))(x, dy); - expectArraysClose(await gradients.data(), [-1, -1, 0, 0, 0, -1]); - expect(gradients.shape).toEqual([2, 3]); - }); - - it('min gradient: 3D, axes=[1, 2], keepDims=false', async () => { - const x = tf.tensor3d([[[0, -20], [-10, -15]], [[10, 30], [20, 15]]]); - const dy = tf.tensor1d([-1, -1]); - const axis = [1, 2]; - const gradients = tf.grad(v => tf.min(v, axis))(x, dy); - expectArraysClose(await gradients.data(), [0, -1, 0, 0, -1, 0, 0, 0]); - expect(gradients.shape).toEqual([2, 2, 2]); - }); - - it('min gradient: ties, 3D, axes=[1, 2], keepDims=false', async () => { - const x = tf.tensor3d([[[0, -20], [-20, -20]], [[10, 30], [10, 15]]]); - const dy = tf.tensor1d([-1, -1]); - const axis = [1, 2]; - const gradients = tf.grad(v => tf.min(v, axis))(x, dy); - expectArraysClose(await gradients.data(), [0, -1, -1, -1, -1, 0, -1, 0]); - expect(gradients.shape).toEqual([2, 2, 2]); - }); - - it('min gradient: 3D, axes=2, keepDims=false', async () => { - const x = tf.tensor3d([[[0, -20], [-10, -15]], [[10, 30], [20, 15]]]); - const dy = tf.tensor2d([[-1, -1], [-1, -1]]); - const axis = 2; - const gradients = tf.grad(v => tf.min(v, axis))(x, dy); - expectArraysClose(await gradients.data(), [0, -1, 0, -1, -1, 0, 0, -1]); - expect(gradients.shape).toEqual([2, 2, 2]); - }); - - it('min gradient: 3D, axes=2, keepDims=true', async () => { - const x = tf.tensor3d([[[0, -20], 
[-10, -15]], [[10, 30], [20, 15]]]); - const dy = tf.tensor3d([[[-1], [-1]], [[-1], [-1]]]); - const axis = 2; - const keepDims = true; - const gradients = tf.grad(v => tf.min(v, axis, keepDims))(x, dy); - expectArraysClose(await gradients.data(), [0, -1, 0, -1, -1, 0, 0, -1]); - expect(gradients.shape).toEqual([2, 2, 2]); - }); - - it('min gradient: ties, 4D, axes=[1, 2, 3], keepDims=false', async () => { - const x = tf.tensor4d([ - [[[0, -20], [-20, -20]], [[10, 30], [10, 30]]], - [[[0, 20], [20, 20]], [[-10, -30], [-10, -30]]] - ]); - const dy = tf.tensor1d([-1, -1]); - const axis = [1, 2, 3]; - const gradients = tf.grad(v => tf.min(v, axis))(x, dy); - expectArraysClose( - await gradients.data(), - [0, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, -1]); - expect(gradients.shape).toEqual([2, 2, 2, 2]); - }); - - it('min gradient: ties, 4D, axes=[2, 3], keepDims=true', async () => { - const x = tf.tensor4d([ - [[[0, -20], [-20, -20]], [[10, 30], [10, 30]]], - [[[0, 20], [20, 20]], [[-10, -30], [-10, -30]]] - ]); - const dy = tf.tensor4d([[[[-1]], [[-2]]], [[[-3]], [[-4]]]]); - const axis = [2, 3]; - const keepDims = true; - const gradients = tf.grad(v => tf.min(v, axis, keepDims))(x, dy); - expectArraysClose( - await gradients.data(), - [0, -1, -1, -1, -2, 0, -2, 0, -3, 0, 0, 0, 0, -4, 0, -4]); - expect(gradients.shape).toEqual([2, 2, 2, 2]); - }); - - it('throws error for string tensor', () => { - expect(() => tf.min(['a'])) - .toThrowError(/Argument 'x' passed to 'min' must be numeric tensor/); - }); -}); - -describeWithFlags('max', ALL_ENVS, () => { - it('with one element dominating', async () => { - const a = tf.tensor1d([3, -1, 0, 100, -7, 2]); - const r = tf.max(a); - expectArraysClose(await r.data(), 100); - }); - - it('with all elements being the same', async () => { - const a = tf.tensor1d([3, 3, 3]); - const r = tf.max(a); - expectArraysClose(await r.data(), 3); - }); - - it('ignores NaNs', async () => { - expectArraysClose(await tf.max([3, NaN, 
2]).data(), 3); - }); - - it('2D', async () => { - const a = tf.tensor2d([3, -1, 0, 100, -7, 2], [2, 3]); - expectArraysClose(await tf.max(a).data(), 100); - }); - - it('2D axis=[0,1]', async () => { - const a = tf.tensor2d([3, -1, 0, 100, -7, 2], [2, 3]); - expectArraysClose(await tf.max(a, [0, 1]).data(), 100); - }); - - it('2D, axis=0', async () => { - const a = tf.tensor2d([3, -1, 0, 100, -7, 2], [2, 3]); - const r = tf.max(a, [0]); - - expect(r.shape).toEqual([3]); - expectArraysClose(await r.data(), [100, -1, 2]); - }); - - it('2D, axis=0, keepDims', async () => { - const a = tf.tensor2d([3, -1, 0, 100, -7, 2], [2, 3]); - const r = tf.max(a, [0], true /* keepDims */); - - expect(r.shape).toEqual([1, 3]); - expectArraysClose(await r.data(), [100, -1, 2]); - }); - - it('2D, axis=1 provided as a number', async () => { - const a = tf.tensor2d([3, 2, 5, 100, -7, 2], [2, 3]); - const r = tf.max(a, 1); - expectArraysClose(await r.data(), [5, 100]); - }); - - it('2D, axis = -1 provided as a number', async () => { - const a = tf.tensor2d([3, 2, 5, 100, -7, 2], [2, 3]); - const r = tf.max(a, -1); - expectArraysClose(await r.data(), [5, 100]); - }); - - it('2D, axis=[1]', async () => { - const a = tf.tensor2d([3, 2, 5, 100, -7, 2], [2, 3]); - const r = tf.max(a, [1]); - expectArraysClose(await r.data(), [5, 100]); - }); - - it('6D, axis=[5]', async () => { - const a = tf.range(0, 64).reshape([2, 2, 2, 2, 2, 2]); - const r = tf.max(a, [5]); - const expectedResult = [ - 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, - 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 63 - ]; - expectArraysClose(await r.data(), expectedResult); - }); - - it('throws when passed a non-tensor', () => { - expect(() => tf.max({} as tf.Tensor)) - .toThrowError(/Argument 'x' passed to 'max' must be a Tensor/); - }); - - it('accepts a tensor-like object', async () => { - const r = tf.max([3, -1, 0, 100, -7, 2]); - expectArraysClose(await r.data(), 100); - }); - - it('max 
gradient: Scalar', async () => { - const x = tf.scalar(42); - const dy = tf.scalar(-1); - const gradients = tf.grad(v => tf.max(v))(x, dy); - expectArraysClose(await gradients.data(), [-1]); - }); - - it('gradient with clones', async () => { - const x = tf.scalar(42); - const dy = tf.scalar(-1); - const gradients = tf.grad(v => tf.max(v.clone()).clone())(x, dy); - expectArraysClose(await gradients.data(), [-1]); - }); - - it('max gradient: 1D, ties', async () => { - const x = tf.tensor1d([1, 3, 7, 7]); - const dy = tf.scalar(-1); - const gradients = tf.grad(v => tf.max(v))(x, dy); - expectArraysClose(await gradients.data(), [0, 0, -1, -1]); - }); - - it('max gradient: 2D, axes=-1, keepDims=false', async () => { - const x = tf.tensor2d([[0, 20, 10], [-10, -30, -20]]); - const dy = tf.tensor1d([-1, -1]); - const axis = -1; - const gradients = tf.grad(v => tf.max(v, axis))(x, dy); - expectArraysClose(await gradients.data(), [0, -1, 0, -1, 0, 0]); - expect(gradients.shape).toEqual([2, 3]); - }); - - it('max gradient: ties, 2D, axes=-1, keepDims=false', async () => { - const x = tf.tensor2d([[0, 20, 20], [-10, -30, -10]]); - const dy = tf.tensor1d([-1, -1]); - const axis = -1; - const gradients = tf.grad(v => tf.max(v, axis))(x, dy); - expectArraysClose(await gradients.data(), [0, -1, -1, -1, 0, -1]); - expect(gradients.shape).toEqual([2, 3]); - }); - - it('max gradient: 2D, axes=0, keepDims=false', async () => { - const x = tf.tensor2d([[0, 20, 10], [-10, -30, 20]]); - const dy = tf.tensor1d([-1, -1, -1]); - const axis = 0; - const gradients = tf.grad(v => tf.max(v, axis))(x, dy); - expectArraysClose(await gradients.data(), [-1, -1, 0, 0, 0, -1]); - expect(gradients.shape).toEqual([2, 3]); - }); - - it('max gradient: 2D, axes=-1, keepDims=true', async () => { - const x = tf.tensor2d([[0, 20, 10], [-10, -30, -20]]); - const dy = tf.tensor2d([[-1], [-1]]); - const axis = -1; - const keepDims = true; - const gradients = tf.grad(v => tf.max(v, axis, keepDims))(x, dy); - 
expectArraysClose(await gradients.data(), [0, -1, 0, -1, 0, 0]); - expect(gradients.shape).toEqual([2, 3]); - }); - - it('max gradient: 2D, axes=0, keepDims=true', async () => { - const x = tf.tensor2d([[0, 20, 10], [-10, -30, 20]]); - const dy = tf.tensor2d([[-1, -1, -1]]); - const axis = 0; - const keepDims = true; - const gradients = tf.grad(v => tf.max(v, axis, keepDims))(x, dy); - expectArraysClose(await gradients.data(), [-1, -1, 0, 0, 0, -1]); - expect(gradients.shape).toEqual([2, 3]); - }); - - it('max gradient: 3D, axes=[1, 2], keepDims=false', async () => { - const x = tf.tensor3d([[[0, 20], [10, 15]], [[-10, -30], [-20, -15]]]); - const dy = tf.tensor1d([-1, -1]); - const axis = [1, 2]; - const gradients = tf.grad(v => tf.max(v, axis))(x, dy); - expectArraysClose(await gradients.data(), [0, -1, 0, 0, -1, 0, 0, 0]); - expect(gradients.shape).toEqual([2, 2, 2]); - }); - - it('max gradient: ties, 3D, axes=[1, 2], keepDims=false', async () => { - const x = tf.tensor3d([[[0, 20], [20, 20]], [[-10, -30], [-10, -15]]]); - const dy = tf.tensor1d([-1, -1]); - const axis = [1, 2]; - const gradients = tf.grad(v => tf.max(v, axis))(x, dy); - expectArraysClose(await gradients.data(), [0, -1, -1, -1, -1, 0, -1, 0]); - expect(gradients.shape).toEqual([2, 2, 2]); - }); - - it('max gradient: 3D, axes=2, keepDims=false', async () => { - const x = tf.tensor3d([[[0, 20], [10, 15]], [[-10, -30], [-20, -15]]]); - const dy = tf.tensor2d([[-1, -1], [-1, -1]]); - const axis = 2; - const gradients = tf.grad(v => tf.max(v, axis))(x, dy); - expectArraysClose(await gradients.data(), [0, -1, 0, -1, -1, 0, 0, -1]); - expect(gradients.shape).toEqual([2, 2, 2]); - }); - - it('max gradient: 3D, axes=2, keepDims=true', async () => { - const x = tf.tensor3d([[[0, 20], [10, 15]], [[-10, -30], [-20, -15]]]); - const dy = tf.tensor3d([[[-1], [-1]], [[-1], [-1]]]); - const axis = 2; - const keepDims = true; - const gradients = tf.grad(v => tf.max(v, axis, keepDims))(x, dy); - 
expectArraysClose(await gradients.data(), [0, -1, 0, -1, -1, 0, 0, -1]); - expect(gradients.shape).toEqual([2, 2, 2]); - }); - - it('max gradient: ties, 4D, axes=[1, 2, 3], keepDims=false', async () => { - const x = tf.tensor4d([ - [[[0, 20], [20, 20]], [[-10, -30], [-10, -30]]], - [[[0, -20], [-20, -20]], [[10, 30], [10, 30]]] - ]); - const dy = tf.tensor1d([-1, -1]); - const axis = [1, 2, 3]; - const gradients = tf.grad(v => tf.max(v, axis))(x, dy); - expectArraysClose( - await gradients.data(), - [0, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, -1]); - expect(gradients.shape).toEqual([2, 2, 2, 2]); - }); - - it('max gradient: ties, 4D, axes=[2, 3], keepDims=true', async () => { - const x = tf.tensor4d([ - [[[0, 20], [20, 20]], [[-10, -30], [-10, -30]]], - [[[0, -20], [-20, -20]], [[10, 30], [10, 30]]] - ]); - const dy = tf.tensor4d([[[[-1]], [[-2]]], [[[-3]], [[-4]]]]); - const axis = [2, 3]; - const keepDims = true; - const gradients = tf.grad(v => tf.max(v, axis, keepDims))(x, dy); - expectArraysClose( - await gradients.data(), - [0, -1, -1, -1, -2, 0, -2, 0, -3, 0, 0, 0, 0, -4, 0, -4]); - expect(gradients.shape).toEqual([2, 2, 2, 2]); - }); - - it('throws error for string tensor', () => { - expect(() => tf.max(['a'])) - .toThrowError(/Argument 'x' passed to 'max' must be numeric tensor/); - }); -}); - -describeWithFlags('argmax', ALL_ENVS, () => { - it('Tensor1D', async () => { - const a = tf.tensor1d([1, 0, 3, 2]); - const result = tf.argMax(a); - expect(result.dtype).toBe('int32'); - expectArraysEqual(await result.data(), 2); - }); - - it('one value', async () => { - const a = tf.tensor1d([10]); - const result = tf.argMax(a); - expect(result.dtype).toBe('int32'); - expectArraysEqual(await result.data(), 0); - }); - - it('N > than parallelization threshold', async () => { - const n = reduce_util.PARALLELIZE_THRESHOLD * 2; - const values = new Float32Array(n); - for (let i = 0; i < n; i++) { - values[i] = i; - } - const a = tf.tensor1d(values); - const 
result = tf.argMax(a); - expect(result.dtype).toBe('int32'); - expectArraysEqual(await result.data(), n - 1); - }); - - it('3D, N > than parallelization threshold', async () => { - const n = reduce_util.PARALLELIZE_THRESHOLD * 2; - const values = new Float32Array(n); - for (let i = 0; i < n; i++) { - values[i] = i; - } - const a = tf.tensor3d(values, [1, 1, n]); - const result = tf.argMax(a, -1); - expect(result.dtype).toBe('int32'); - expectArraysEqual(await result.data(), n - 1); - }); - - it('max index corresponds to start of a non-initial window', async () => { - const n = reduce_util.PARALLELIZE_THRESHOLD * 2; - const windowSize = reduce_util.computeOptimalWindowSize(n); - const values = new Float32Array(n); - const index = windowSize * 2; - values[index] = 1; - const a = tf.tensor1d(values); - const result = tf.argMax(a); - expect(result.dtype).toBe('int32'); - expectArraysEqual(await result.data(), index); - }); - - it('5D, max index corresponds to start of a non-initial window', async () => { - const n = reduce_util.PARALLELIZE_THRESHOLD * 2; - const windowSize = reduce_util.computeOptimalWindowSize(n); - const values = new Float32Array(n); - const index = windowSize * 2; - values[index] = 1; - const a = tf.tensor5d(values, [1, 1, 1, 1, n]); - const result = tf.argMax(a, -1); - expect(result.dtype).toBe('int32'); - expectArraysEqual(await result.data(), index); - }); - - it('ignores NaNs', async () => { - const a = tf.tensor1d([0, 3, 5, NaN, 3]); - const res = tf.argMax(a); - expect(res.dtype).toBe('int32'); - expectArraysEqual(await res.data(), 2); - }); - - it('2D, no axis specified', async () => { - const a = tf.tensor2d([3, -1, 0, 100, -7, 2], [2, 3]); - expectArraysEqual(await tf.argMax(a).data(), [1, 0, 1]); - }); - - it('4D, no axis specified', async () => { - const a = tf.tensor4d([3, -1, 0, 100, -7, 2], [2, 1, 1, 3]); - expectArraysEqual(await tf.argMax(a).data(), [1, 0, 1]); - }); - - it('2D, axis=0', async () => { - const a = tf.tensor2d([3, -1, 
0, 100, -7, 2], [2, 3]); - const r = tf.argMax(a, 0); - - expect(r.shape).toEqual([3]); - expect(r.dtype).toBe('int32'); - expectArraysEqual(await r.data(), [1, 0, 1]); - }); - - it('6D, axis=0', async () => { - const a = tf.tensor6d([3, -1, 0, 100, -7, 2], [2, 1, 1, 1, 1, 3]); - const r = tf.argMax(a, 0); - - expect(r.shape).toEqual([1, 1, 1, 1, 3]); - expect(r.dtype).toBe('int32'); - expectArraysEqual(await r.data(), [1, 0, 1]); - }); - - it('2D, axis=1', async () => { - const a = tf.tensor2d([3, 2, 5, 100, -7, 2], [2, 3]); - const r = tf.argMax(a, 1); - expect(r.dtype).toBe('int32'); - expectArraysEqual(await r.data(), [2, 0]); - }); - - it('2D, axis = -1', async () => { - const a = tf.tensor2d([3, 2, 5, 100, -7, 2], [2, 3]); - const r = tf.argMax(a, -1); - expect(r.dtype).toBe('int32'); - expectArraysEqual(await r.data(), [2, 0]); - }); - - it('throws when passed a non-tensor', () => { - expect(() => tf.argMax({} as tf.Tensor)) - .toThrowError(/Argument 'x' passed to 'argMax' must be a Tensor/); - }); - - it('accepts a tensor-like object', async () => { - const result = tf.argMax([1, 0, 3, 2]); - expect(result.dtype).toBe('int32'); - expectArraysEqual(await result.data(), 2); - }); - - it('accepts tensor with bool values', async () => { - const t = tf.tensor1d([0, 1], 'bool'); - const result = tf.argMax(t); - expect(result.dtype).toBe('int32'); - expectArraysEqual(await result.data(), 1); - }); - - it('has gradient', async () => { - const a = tf.tensor2d([3, 2, 5, 100, -7, 2], [2, 3]); - const dy = tf.ones([3], 'float32'); - const da = tf.grad((x: tf.Tensor2D) => tf.argMax(x))(a, dy); - - expect(da.dtype).toBe('float32'); - expect(da.shape).toEqual([2, 3]); - expectArraysClose(await da.data(), [0, 0, 0, 0, 0, 0]); - }); - - it('gradient with clones', async () => { - const a = tf.tensor2d([3, 2, 5, 100, -7, 2], [2, 3]); - const dy = tf.ones([3], 'float32'); - const da = tf.grad((x: tf.Tensor2D) => tf.argMax(x.clone()).clone())(a, dy); - - 
expect(da.dtype).toBe('float32'); - expect(da.shape).toEqual([2, 3]); - expectArraysClose(await da.data(), [0, 0, 0, 0, 0, 0]); - }); - - it('throws error for string tensor', () => { - expect(() => tf.argMax(['a'])) - .toThrowError(/Argument 'x' passed to 'argMax' must be numeric tensor/); - }); -}); - -describeWithFlags('argmin', ALL_ENVS, () => { - it('Tensor1D', async () => { - const a = tf.tensor1d([1, 0, 3, 2]); - const result = tf.argMin(a); - expectArraysEqual(await result.data(), 1); - }); - - it('one value', async () => { - const a = tf.tensor1d([10]); - const result = tf.argMin(a); - expectArraysEqual(await result.data(), 0); - }); - - it('N > than parallelization threshold', async () => { - const n = reduce_util.PARALLELIZE_THRESHOLD * 2; - const values = new Float32Array(n); - for (let i = 0; i < n; i++) { - values[i] = n - i; - } - const a = tf.tensor1d(values); - const result = tf.argMin(a); - expect(result.dtype).toBe('int32'); - expectArraysEqual(await result.data(), n - 1); - }); - - it('4D, N > than parallelization threshold', async () => { - const n = reduce_util.PARALLELIZE_THRESHOLD * 2; - const values = new Float32Array(n); - for (let i = 0; i < n; i++) { - values[i] = n - i; - } - const a = tf.tensor4d(values, [1, 1, 1, n]); - const result = tf.argMin(a, -1); - expect(result.dtype).toBe('int32'); - expectArraysEqual(await result.data(), n - 1); - }); - - it('min index corresponds to start of a non-initial window', async () => { - const n = reduce_util.PARALLELIZE_THRESHOLD * 2; - const windowSize = reduce_util.computeOptimalWindowSize(n); - const values = new Float32Array(n); - const index = windowSize * 2; - values[index] = -1; - const a = tf.tensor1d(values); - const result = tf.argMin(a); - expect(result.dtype).toBe('int32'); - expectArraysEqual(await result.data(), index); - }); - - it('ignores NaNs', async () => { - const a = tf.tensor1d([5, 0, NaN, -1, 3]); - const res = tf.argMin(a); - expectArraysEqual(await res.data(), 3); - }); - - 
it('3D, ignores NaNs', async () => { - const a = tf.tensor3d([5, 0, NaN, -1, 3], [1, 1, 5]); - const res = tf.argMin(a, -1); - expectArraysEqual(await res.data(), 3); - }); - - it('2D, no axis specified', async () => { - const a = tf.tensor2d([3, -1, 0, 100, -7, 2], [2, 3]); - expectArraysEqual(await tf.argMin(a).data(), [0, 1, 0]); - }); - - it('2D, axis=0', async () => { - const a = tf.tensor2d([3, -1, 0, 100, -7, 2], [2, 3]); - const r = tf.argMin(a, 0); - - expect(r.shape).toEqual([3]); - expect(r.dtype).toBe('int32'); - expectArraysEqual(await r.data(), [0, 1, 0]); - }); - - it('2D, axis=1', async () => { - const a = tf.tensor2d([3, 2, 5, 100, -7, -8], [2, 3]); - const r = tf.argMin(a, 1); - expectArraysEqual(await r.data(), [1, 2]); - }); - - it('2D, axis = -1', async () => { - const a = tf.tensor2d([3, 2, 5, 100, -7, -8], [2, 3]); - const r = tf.argMin(a, -1); - expectArraysEqual(await r.data(), [1, 2]); - }); - - it('throws when passed a non-tensor', () => { - expect(() => tf.argMin({} as tf.Tensor)) - .toThrowError(/Argument 'x' passed to 'argMin' must be a Tensor/); - }); - - it('accepts a tensor-like object', async () => { - const result = tf.argMin([1, 0, 3, 2]); - expectArraysEqual(await result.data(), 1); - }); - - it('accepts tensor with bool values', async () => { - const t = tf.tensor1d([0, 1], 'bool'); - const result = tf.argMin(t); - expect(result.dtype).toBe('int32'); - expectArraysEqual(await result.data(), 0); - }); - - it('has gradient', async () => { - const a = tf.tensor2d([3, 2, 5, 100, -7, 2], [2, 3]); - const dy = tf.ones([3], 'float32'); - const da = tf.grad((x: tf.Tensor2D) => tf.argMin(x))(a, dy); - - expect(da.dtype).toBe('float32'); - expect(da.shape).toEqual([2, 3]); - expectArraysClose(await da.data(), [0, 0, 0, 0, 0, 0]); - }); - - it('gradient with clones', async () => { - const a = tf.tensor2d([3, 2, 5, 100, -7, 2], [2, 3]); - const dy = tf.ones([3], 'float32'); - const da = tf.grad((x: tf.Tensor2D) => 
tf.argMin(x.clone()).clone())(a, dy); - - expect(da.dtype).toBe('float32'); - expect(da.shape).toEqual([2, 3]); - expectArraysClose(await da.data(), [0, 0, 0, 0, 0, 0]); - }); - - it('throws error for string tensor', () => { - expect(() => tf.argMin(['a'])) - .toThrowError(/Argument 'x' passed to 'argMin' must be numeric tensor/); - }); -}); - -describeWithFlags('sum', ALL_ENVS, () => { - it('basic', async () => { - const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); - const result = tf.sum(a); - expectArraysClose(await result.data(), 7); - }); - - it('propagates NaNs', async () => { - const a = tf.tensor2d([1, 2, 3, NaN, 0, 1], [3, 2]); - expectArraysEqual(await tf.sum(a).data(), NaN); - }); - - it('sum over dtype int32', async () => { - const a = tf.tensor1d([1, 5, 7, 3], 'int32'); - const sum = tf.sum(a); - expectArraysEqual(await sum.data(), 16); - }); - - it('sum over dtype bool', async () => { - const a = tf.tensor1d([true, false, false, true, true], 'bool'); - const sum = tf.sum(a); - expectArraysEqual(await sum.data(), 3); - }); - - it('sums all values in 2D array with keep dim', async () => { - const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); - const res = tf.sum(a, null, true /* keepDims */); - - expect(res.shape).toEqual([1, 1]); - expectArraysClose(await res.data(), [7]); - }); - - it('sums across axis=0 in 2D array', async () => { - const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); - const res = tf.sum(a, [0]); - - expect(res.shape).toEqual([2]); - expectArraysClose(await res.data(), [4, 3]); - }); - - it('sums across axis=0 in 2D array, keepDims', async () => { - const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); - const res = tf.sum(a, [0], true /* keepDims */); - - expect(res.shape).toEqual([1, 2]); - expectArraysClose(await res.data(), [4, 3]); - }); - - it('sums across axis=1 in 2D array', async () => { - const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); - const res = tf.sum(a, [1]); - - expect(res.shape).toEqual([3]); - expectArraysClose(await 
res.data(), [3, 3, 1]); - }); - - it('2D, axis=1 provided as number', async () => { - const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [2, 3]); - const res = tf.sum(a, 1); - - expect(res.shape).toEqual([2]); - expectArraysClose(await res.data(), [6, 1]); - }); - - it('2D, axis = -1 provided as number', async () => { - const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [2, 3]); - const res = tf.sum(a, -1); - - expect(res.shape).toEqual([2]); - expectArraysClose(await res.data(), [6, 1]); - }); - - it('sums across axis=0,1 in 2D array', async () => { - const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); - const res = tf.sum(a, [0, 1]); - - expect(res.shape).toEqual([]); - expectArraysClose(await res.data(), [7]); - }); - - it('2D, axis=[-1,-2] in 2D array', async () => { - const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); - const res = tf.sum(a, [-1, -2]); - - expect(res.shape).toEqual([]); - expectArraysClose(await res.data(), [7]); - }); - - it('gradients: sum(2d)', async () => { - const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); - const dy = tf.scalar(10); - - const gradients = tf.grad(a => a.sum())(a, dy); - - expect(gradients.shape).toEqual(a.shape); - expect(gradients.dtype).toEqual('float32'); - expectArraysClose(await gradients.data(), [10, 10, 10, 10, 10, 10]); - }); - - it('gradient with clones', async () => { - const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); - const dy = tf.scalar(10); - - const gradients = tf.grad(a => a.clone().sum().clone())(a, dy); - - expect(gradients.shape).toEqual(a.shape); - expect(gradients.dtype).toEqual('float32'); - expectArraysClose(await gradients.data(), [10, 10, 10, 10, 10, 10]); - }); - - it('gradients: sum(2d, axis=0)', async () => { - const a = tf.tensor2d([[1, 2], [3, 0], [0, 1]], [3, 2]); - const dy = tf.tensor1d([10, 20]); - const axis = 0; - - const gradients = tf.grad(a => a.sum(axis))(a, dy); - - expect(gradients.shape).toEqual(a.shape); - expect(gradients.dtype).toEqual('float32'); - expectArraysClose(await gradients.data(), 
[10, 20, 10, 20, 10, 20]); - }); - - it('gradients: sum(2d, axis=1)', async () => { - const a = tf.tensor2d([[1, 2], [3, 0], [0, 1]], [3, 2]); - const dy = tf.tensor1d([10, 20, 30]); - const axis = 1; - - const gradients = tf.grad(a => a.sum(axis))(a, dy); - - expect(gradients.shape).toEqual(a.shape); - expect(gradients.dtype).toEqual('float32'); - expectArraysClose(await gradients.data(), [10, 10, 20, 20, 30, 30]); - }); - - it('throws when passed a non-tensor', () => { - expect(() => tf.sum({} as tf.Tensor)) - .toThrowError(/Argument 'x' passed to 'sum' must be a Tensor/); - }); - - it('accepts a tensor-like object', async () => { - const result = tf.sum([[1, 2], [3, 0], [0, 1]]); - expectArraysClose(await result.data(), 7); - }); - - it('throws error for string tensor', () => { - expect(() => tf.sum(['a'])) - .toThrowError(/Argument 'x' passed to 'sum' must be numeric tensor/); - }); -}); - -describeWithFlags('mean', ALL_ENVS, () => { - it('basic', async () => { - const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); - const r = tf.mean(a); - - expect(r.dtype).toBe('float32'); - expectArraysClose(await r.data(), 7 / 6); - }); - - it('propagates NaNs', async () => { - const a = tf.tensor2d([1, 2, 3, NaN, 0, 1], [3, 2]); - const r = tf.mean(a); - - expect(r.dtype).toBe('float32'); - expectArraysEqual(await r.data(), NaN); - }); - - it('mean(int32) => float32', async () => { - const a = tf.tensor1d([1, 5, 7, 3], 'int32'); - const r = tf.mean(a); - - expect(r.dtype).toBe('float32'); - expectArraysClose(await r.data(), 4); - }); - - it('mean(bool) => float32', async () => { - const a = tf.tensor1d([true, false, false, true, true], 'bool'); - const r = tf.mean(a); - - expect(r.dtype).toBe('float32'); - expectArraysClose(await r.data(), 3 / 5); - }); - - it('2D array with keep dim', async () => { - const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); - const res = tf.mean(a, null, true /* keepDims */); - - expect(res.shape).toEqual([1, 1]); - 
expect(res.dtype).toBe('float32'); - expectArraysClose(await res.data(), [7 / 6]); - }); - - it('axis=0 in 2D array', async () => { - const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); - const res = tf.mean(a, [0]); - - expect(res.shape).toEqual([2]); - expect(res.dtype).toBe('float32'); - expectArraysClose(await res.data(), [4 / 3, 1]); - }); - - it('axis=0 in 2D array, keepDims', async () => { - const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); - const res = tf.mean(a, [0], true /* keepDims */); - - expect(res.shape).toEqual([1, 2]); - expect(res.dtype).toBe('float32'); - expectArraysClose(await res.data(), [4 / 3, 1]); - }); - - it('axis=1 in 2D array', async () => { - const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); - const res = tf.mean(a, [1]); - - expect(res.dtype).toBe('float32'); - expect(res.shape).toEqual([3]); - expectArraysClose(await res.data(), [1.5, 1.5, 0.5]); - }); - - it('axis = -1 in 2D array', async () => { - const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); - const res = tf.mean(a, [-1]); - - expect(res.dtype).toBe('float32'); - expect(res.shape).toEqual([3]); - expectArraysClose(await res.data(), [1.5, 1.5, 0.5]); - }); - - it('2D, axis=1 provided as number', async () => { - const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [2, 3]); - const res = tf.mean(a, 1); - - expect(res.shape).toEqual([2]); - expect(res.dtype).toBe('float32'); - expectArraysClose(await res.data(), [2, 1 / 3]); - }); - - it('axis=0,1 in 2D array', async () => { - const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); - const res = tf.mean(a, [0, 1]); - - expect(res.shape).toEqual([]); - expect(res.dtype).toBe('float32'); - expectArraysClose(await res.data(), [7 / 6]); - }); - - it('gradients', async () => { - const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); - const dy = tf.scalar(1.5); - - const da = tf.grad(a => a.mean())(a, dy); - const dyVal = await dy.array(); - expect(da.shape).toEqual(a.shape); - expectArraysClose(await da.data(), [ - dyVal / a.size, dyVal / a.size, 
dyVal / a.size, dyVal / a.size, - dyVal / a.size, dyVal / a.size - ]); - }); - - it('gradient with clones', async () => { - const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); - const dy = tf.scalar(1.5); - - const da = tf.grad(a => a.clone().mean().clone())(a, dy); - const dyVal = await dy.array(); - expect(da.shape).toEqual(a.shape); - expectArraysClose(await da.data(), [ - dyVal / a.size, dyVal / a.size, dyVal / a.size, dyVal / a.size, - dyVal / a.size, dyVal / a.size - ]); - }); - - it('gradients throws for defined axis', () => { - const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); - const dy = tf.scalar(1.5); - - expect(() => tf.grad(a => a.mean(1))(a, dy)).toThrowError(); - }); - - it('throws when passed a non-tensor', () => { - expect(() => tf.mean({} as tf.Tensor)) - .toThrowError(/Argument 'x' passed to 'mean' must be a Tensor/); - }); - - it('accepts a tensor-like object', async () => { - const r = tf.mean([[1, 2, 3], [0, 0, 1]]); - - expect(r.dtype).toBe('float32'); - expectArraysClose(await r.data(), 7 / 6); - }); - - it('throws error for string tensor', () => { - expect(() => tf.mean(['a'])) - .toThrowError(/Argument 'x' passed to 'mean' must be numeric tensor/); - }); -}); - -describeWithFlags('norm', ALL_ENVS, () => { - it('scalar norm', async () => { - const a = tf.scalar(-22.0); - const norm = tf.norm(a); - - expect(norm.dtype).toBe('float32'); - expectArraysClose(await norm.data(), 22); - }); - - it('vector inf norm', async () => { - const a = tf.tensor1d([1, -2, 3, -4]); - const norm = tf.norm(a, Infinity); - - expect(norm.dtype).toBe('float32'); - expectArraysClose(await norm.data(), 4); - }); - - it('vector -inf norm', async () => { - const a = tf.tensor1d([1, -2, 3, -4]); - const norm = tf.norm(a, -Infinity); - - expect(norm.dtype).toBe('float32'); - expectArraysClose(await norm.data(), 1); - }); - - it('vector 1 norm', async () => { - const a = tf.tensor1d([1, -2, 3, -4]); - const norm = tf.norm(a, 1); - - expect(norm.dtype).toBe('float32'); - 
expectArraysClose(await norm.data(), 10); - }); - - it('vector euclidean norm', async () => { - const a = tf.tensor1d([1, -2, 3, -4]); - const norm = tf.norm(a, 'euclidean'); - - expect(norm.dtype).toBe('float32'); - expectArraysClose(await norm.data(), 5.4772); - }); - - it('vector 2-norm', async () => { - const a = tf.tensor1d([1, -2, 3, -4]); - const norm = tf.norm(a, 2); - - expect(norm.dtype).toBe('float32'); - expectArraysClose(await norm.data(), 5.4772); - }); - - it('vector >2-norm to throw error', () => { - const a = tf.tensor1d([1, -2, 3, -4]); - expect(() => tf.norm(a, 3)).toThrowError(); - }); - - it('matrix inf norm', async () => { - const a = tf.tensor2d([1, 2, -3, 1, 0, 1], [3, 2]); - const norm = tf.norm(a, Infinity, [0, 1]); - - expect(norm.dtype).toBe('float32'); - expectArraysClose(await norm.data(), 4); - }); - - it('matrix -inf norm', async () => { - const a = tf.tensor2d([1, 2, -3, 1, 0, 1], [3, 2]); - const norm = tf.norm(a, -Infinity, [0, 1]); - - expect(norm.dtype).toBe('float32'); - expectArraysClose(await norm.data(), 1); - }); - - it('matrix 1 norm', async () => { - const a = tf.tensor2d([1, 2, -3, 1, 1, 1], [3, 2]); - const norm = tf.norm(a, 1, [0, 1]); - - expect(norm.dtype).toBe('float32'); - expectArraysClose(await norm.data(), 5); - }); - - it('matrix euclidean norm', async () => { - const a = tf.tensor2d([1, 2, -3, 1, 1, 1], [3, 2]); - const norm = tf.norm(a, 'euclidean', [0, 1]); - - expect(norm.dtype).toBe('float32'); - expectArraysClose(await norm.data(), 4.123); - }); - - it('matrix fro norm', async () => { - const a = tf.tensor2d([1, 2, -3, 1, 1, 1], [3, 2]); - const norm = tf.norm(a, 'fro', [0, 1]); - - expect(norm.dtype).toBe('float32'); - expectArraysClose(await norm.data(), 4.123); - }); - - it('matrix other norm to throw error', () => { - const a = tf.tensor2d([1, 2, -3, 1, 1, 1], [3, 2]); - expect(() => tf.norm(a, 2, [0, 1])).toThrowError(); - }); - - it('propagates NaNs for norm', async () => { - const a = 
tf.tensor2d([1, 2, 3, NaN, 0, 1], [3, 2]); - const norm = tf.norm(a); - - expect(norm.dtype).toBe('float32'); - expectArraysEqual(await norm.data(), NaN); - }); - - it('axis=null in 2D array norm', async () => { - const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); - const norm = tf.norm(a, Infinity); - - expect(norm.shape).toEqual([]); - expect(norm.dtype).toBe('float32'); - expectArraysClose(await norm.data(), [3]); - }); - - it('2D array norm with keep dim', async () => { - const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); - const norm = tf.norm(a, Infinity, null, true /* keepDims */); - - expect(norm.shape).toEqual([1, 1]); - expect(norm.dtype).toBe('float32'); - expectArraysClose(await norm.data(), [3]); - }); - - it('axis=0 in 2D array norm', async () => { - const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); - const norm = tf.norm(a, Infinity, [0]); - - expect(norm.shape).toEqual([2]); - expect(norm.dtype).toBe('float32'); - expectArraysClose(await norm.data(), [3, 2]); - }); - - it('axis=1 in 2D array norm', async () => { - const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); - const norm = tf.norm(a, Infinity, [1]); - - expect(norm.dtype).toBe('float32'); - expect(norm.shape).toEqual([3]); - expectArraysClose(await norm.data(), [2, 3, 1]); - }); - - it('axis=1 keepDims in 2D array norm', async () => { - const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); - const norm = tf.norm(a, Infinity, [1], true); - - expect(norm.dtype).toBe('float32'); - expect(norm.shape).toEqual([3, 1]); - expectArraysClose(await norm.data(), [2, 3, 1]); - }); - - it('2D norm with axis=1 provided as number', async () => { - const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [2, 3]); - const norm = tf.norm(a, Infinity, 1); - - expect(norm.shape).toEqual([2]); - expect(norm.dtype).toBe('float32'); - expectArraysClose(await norm.data(), [3, 1]); - }); - - it('axis=0,1 in 2D array norm', async () => { - const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); - const norm = tf.norm(a, Infinity, [0, 1]); - 
- expect(norm.shape).toEqual([]); - expect(norm.dtype).toBe('float32'); - expectArraysClose(await norm.data(), [3]); - }); - - it('axis=0,1 keepDims in 2D array norm', async () => { - const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); - const norm = tf.norm(a, Infinity, [0, 1], true); - - expect(norm.shape).toEqual([1, 1]); - expect(norm.dtype).toBe('float32'); - expectArraysClose(await norm.data(), [3]); - }); - - it('3D norm axis=0,1, matrix inf norm', async () => { - const a = tf.tensor3d([1, 2, -3, 1, 0, 1], [3, 2, 1]); - const norm = tf.norm(a, Infinity, [0, 1]); - - expect(norm.shape).toEqual([1]); - expect(norm.dtype).toBe('float32'); - expectArraysClose(await norm.data(), [4]); - }); - - it('axis=0,1 keepDims in 3D array norm', async () => { - const a = tf.tensor3d([1, 2, 3, 0, 0, 1], [3, 2, 1]); - const norm = tf.norm(a, Infinity, [0, 1], true); - - expect(norm.shape).toEqual([1, 1, 1]); - expect(norm.dtype).toBe('float32'); - expectArraysClose(await norm.data(), [3]); - }); - - it('axis=0,1 keepDims in 3D array norm', async () => { - const a = tf.tensor3d([1, 2, 3, 0, 0, 1, 1, 2, 3, 0, 0, 1], [3, 2, 2]); - const norm = tf.norm(a, Infinity, [0, 1], true); - - expect(norm.shape).toEqual([1, 1, 2]); - expect(norm.dtype).toBe('float32'); - expectArraysClose(await norm.data(), [4, 3]); - }); - - it('axis=null in 3D array norm', async () => { - const a = tf.tensor3d([1, 2, 3, 0, 0, 1], [3, 2, 1]); - const norm = tf.norm(a, Infinity); - - expect(norm.shape).toEqual([]); - expect(norm.dtype).toBe('float32'); - expectArraysClose(await norm.data(), [3]); - }); - - it('axis=null in 4D array norm', async () => { - const a = tf.tensor4d([1, 2, 3, 0, 0, 1], [3, 2, 1, 1]); - const norm = tf.norm(a, Infinity); - - expect(norm.shape).toEqual([]); - expect(norm.dtype).toBe('float32'); - expectArraysClose(await norm.data(), [3]); - }); - - it('axis=0,1 in 4D array norm', async () => { - const a = tf.tensor4d( - [ - 1, 2, 3, 0, 0, 1, 1, 2, 3, 0, 0, 1, - 1, 2, 3, 0, 0, 1, 1, 
2, 3, 0, 0, 1 - ], - [3, 2, 2, 2]); - const norm = tf.norm(a, Infinity, [0, 1]); - - expect(norm.shape).toEqual([2, 2]); - expect(norm.dtype).toBe('float32'); - expectArraysClose(await norm.data(), [4, 3, 4, 3]); - }); - - it('axis=0,1 in 4D array norm', async () => { - const a = tf.tensor4d( - [ - 1, 2, 3, 0, 0, 1, 1, 2, 3, 0, 0, 1, - 1, 2, 3, 0, 0, 1, 1, 2, 3, 0, 0, 1 - ], - [3, 2, 2, 2]); - const norm = tf.norm(a, Infinity, [0, 1], true); - - expect(norm.shape).toEqual([1, 1, 2, 2]); - expect(norm.dtype).toBe('float32'); - expectArraysClose(await norm.data(), [4, 3, 4, 3]); - }); - - it('throws when passed a non-tensor', () => { - expect(() => tf.norm({} as tf.Tensor)) - .toThrowError(/Argument 'x' passed to 'norm' must be a Tensor/); - }); - - it('accepts a tensor-like object', async () => { - const norm = tf.norm([1, -2, 3, -4], 1); - - expect(norm.dtype).toBe('float32'); - expectArraysClose(await norm.data(), 10); - }); - - it('throws error for string tensors', () => { - expect(() => tf.norm([ - 'a', 'b' - ])).toThrowError(/Argument 'x' passed to 'norm' must be numeric tensor/); - }); -}); diff --git a/tfjs-core/src/ops/softmax_cross_entropy.ts b/tfjs-core/src/ops/softmax_cross_entropy.ts index dd352926e01..25158f7cc37 100644 --- a/tfjs-core/src/ops/softmax_cross_entropy.ts +++ b/tfjs-core/src/ops/softmax_cross_entropy.ts @@ -30,9 +30,9 @@ import {logSumExp} from './log_sum_exp'; import {Reduction} from './loss_ops_utils'; import {mul} from './mul'; import {op} from './operation'; -import {sum} from './reduction_ops'; import {reshape} from './reshape'; import {sub} from './sub'; +import {sum} from './sum'; import {scalar} from './tensor_ops'; import {exp, neg} from './unary_ops'; diff --git a/tfjs-core/src/ops/sum.ts b/tfjs-core/src/ops/sum.ts new file mode 100644 index 00000000000..e22bcbfd19e --- /dev/null +++ b/tfjs-core/src/ops/sum.ts @@ -0,0 +1,92 @@ +/** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ +import {ENGINE, ForwardFunc} from '../engine'; +import {Sum, SumAttrs, SumInputs} from '../kernel_names'; +import {NamedAttrMap} from '../kernel_registry'; +import {Tensor} from '../tensor'; +import {NamedTensorMap} from '../tensor_types'; +import {convertToTensor} from '../tensor_util_env'; +import {TensorLike} from '../types'; +import {parseAxisParam} from '../util'; + +import {expandShapeToKeepDim, getAxesPermutation, getInnerMostAxes} from './axis_util'; +import {op} from './operation'; + +/** + * Computes the sum of elements across dimensions of a `tf.Tensor`. + * + * Reduces the input along the dimensions given in `axes`. Unless `keepDims` + * is true, the rank of the `tf.Tensor` is reduced by 1 for each entry in + * `axes`. If `keepDims` is true, the reduced dimensions are retained with + * length 1. If axes has no entries, all dimensions are reduced, and a + * `tf.Tensor` with a single element is returned. + * + * ```js + * const x = tf.tensor1d([1, 2, 3]); + * + * x.sum().print(); // or tf.sum(x) + * ``` + * + * ```js + * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]); + * + * const axis = 1; + * x.sum(axis).print(); // or tf.sum(x, axis) + * ``` + * + * @param x The input tensor to compute the sum over. If the dtype is `bool` + * it will be converted to `int32` and the output dtype will be `int32`. 
+ * @param axis The dimension(s) to reduce. By default it reduces + * all dimensions. + * @param keepDims If true, retains reduced dimensions with size 1. + */ +/** @doc {heading: 'Operations', subheading: 'Reduction'} */ +function sum_( + x: Tensor|TensorLike, axis: number|number[] = null, keepDims = false): T { + let $x = convertToTensor(x, 'x', 'sum'); + if ($x.dtype === 'bool') { + $x = $x.toInt(); + } + + const forward: ForwardFunc = (backend, save) => { + save([$x]); + const axes = parseAxisParam(axis, $x.shape); + + const permutation = getAxesPermutation(axes, $x.rank); + let reductionAxes = axes; + let permutedX = $x; + if (permutation != null) { + permutedX = $x.transpose(permutation); + reductionAxes = getInnerMostAxes(reductionAxes.length, $x.rank); + } + let value = backend.sum(permutedX, reductionAxes); + if (keepDims) { + const newShape = expandShapeToKeepDim(value.shape, axes); + value = value.reshape(newShape); + } + return value; + }; + + const inputs: SumInputs = {x: $x}; + const attrs: SumAttrs = {axis, keepDims}; + + return ENGINE.runKernelFunc( + forward, inputs as {} as NamedTensorMap, null /* grad */, Sum, + attrs as {} as NamedAttrMap) as T; +} + +export const sum = op({sum_}); diff --git a/tfjs-core/src/ops/sum_test.ts b/tfjs-core/src/ops/sum_test.ts new file mode 100644 index 00000000000..567ae5bdcd1 --- /dev/null +++ b/tfjs-core/src/ops/sum_test.ts @@ -0,0 +1,170 @@ +/** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + +import * as tf from '../index'; +import {ALL_ENVS, describeWithFlags} from '../jasmine_util'; +import {expectArraysClose, expectArraysEqual} from '../test_util'; + +describeWithFlags('sum', ALL_ENVS, () => { + it('basic', async () => { + const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); + const result = tf.sum(a); + expectArraysClose(await result.data(), 7); + }); + + it('propagates NaNs', async () => { + const a = tf.tensor2d([1, 2, 3, NaN, 0, 1], [3, 2]); + expectArraysEqual(await tf.sum(a).data(), NaN); + }); + + it('sum over dtype int32', async () => { + const a = tf.tensor1d([1, 5, 7, 3], 'int32'); + const sum = tf.sum(a); + expectArraysEqual(await sum.data(), 16); + }); + + it('sum over dtype bool', async () => { + const a = tf.tensor1d([true, false, false, true, true], 'bool'); + const sum = tf.sum(a); + expectArraysEqual(await sum.data(), 3); + }); + + it('sums all values in 2D array with keep dim', async () => { + const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); + const res = tf.sum(a, null, true /* keepDims */); + + expect(res.shape).toEqual([1, 1]); + expectArraysClose(await res.data(), [7]); + }); + + it('sums across axis=0 in 2D array', async () => { + const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); + const res = tf.sum(a, [0]); + + expect(res.shape).toEqual([2]); + expectArraysClose(await res.data(), [4, 3]); + }); + + it('sums across axis=0 in 2D array, keepDims', async () => { + const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); + const res = tf.sum(a, [0], true /* keepDims */); + + expect(res.shape).toEqual([1, 2]); + expectArraysClose(await res.data(), [4, 3]); + }); + + it('sums across axis=1 in 2D array', async () => { + const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); + const res = tf.sum(a, [1]); + + expect(res.shape).toEqual([3]); + 
expectArraysClose(await res.data(), [3, 3, 1]); + }); + + it('2D, axis=1 provided as number', async () => { + const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [2, 3]); + const res = tf.sum(a, 1); + + expect(res.shape).toEqual([2]); + expectArraysClose(await res.data(), [6, 1]); + }); + + it('2D, axis = -1 provided as number', async () => { + const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [2, 3]); + const res = tf.sum(a, -1); + + expect(res.shape).toEqual([2]); + expectArraysClose(await res.data(), [6, 1]); + }); + + it('sums across axis=0,1 in 2D array', async () => { + const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); + const res = tf.sum(a, [0, 1]); + + expect(res.shape).toEqual([]); + expectArraysClose(await res.data(), [7]); + }); + + it('2D, axis=[-1,-2] in 2D array', async () => { + const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); + const res = tf.sum(a, [-1, -2]); + + expect(res.shape).toEqual([]); + expectArraysClose(await res.data(), [7]); + }); + + it('gradients: sum(2d)', async () => { + const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); + const dy = tf.scalar(10); + + const gradients = tf.grad(a => a.sum())(a, dy); + + expect(gradients.shape).toEqual(a.shape); + expect(gradients.dtype).toEqual('float32'); + expectArraysClose(await gradients.data(), [10, 10, 10, 10, 10, 10]); + }); + + it('gradient with clones', async () => { + const a = tf.tensor2d([1, 2, 3, 0, 0, 1], [3, 2]); + const dy = tf.scalar(10); + + const gradients = tf.grad(a => a.clone().sum().clone())(a, dy); + + expect(gradients.shape).toEqual(a.shape); + expect(gradients.dtype).toEqual('float32'); + expectArraysClose(await gradients.data(), [10, 10, 10, 10, 10, 10]); + }); + + it('gradients: sum(2d, axis=0)', async () => { + const a = tf.tensor2d([[1, 2], [3, 0], [0, 1]], [3, 2]); + const dy = tf.tensor1d([10, 20]); + const axis = 0; + + const gradients = tf.grad(a => a.sum(axis))(a, dy); + + expect(gradients.shape).toEqual(a.shape); + expect(gradients.dtype).toEqual('float32'); + 
expectArraysClose(await gradients.data(), [10, 20, 10, 20, 10, 20]); + }); + + it('gradients: sum(2d, axis=1)', async () => { + const a = tf.tensor2d([[1, 2], [3, 0], [0, 1]], [3, 2]); + const dy = tf.tensor1d([10, 20, 30]); + const axis = 1; + + const gradients = tf.grad(a => a.sum(axis))(a, dy); + + expect(gradients.shape).toEqual(a.shape); + expect(gradients.dtype).toEqual('float32'); + expectArraysClose(await gradients.data(), [10, 10, 20, 20, 30, 30]); + }); + + it('throws when passed a non-tensor', () => { + expect(() => tf.sum({} as tf.Tensor)) + .toThrowError(/Argument 'x' passed to 'sum' must be a Tensor/); + }); + + it('accepts a tensor-like object', async () => { + const result = tf.sum([[1, 2], [3, 0], [0, 1]]); + expectArraysClose(await result.data(), 7); + }); + + it('throws error for string tensor', () => { + expect(() => tf.sum(['a'])) + .toThrowError(/Argument 'x' passed to 'sum' must be numeric tensor/); + }); +}); diff --git a/tfjs-core/src/public/chained_ops/all.ts b/tfjs-core/src/public/chained_ops/all.ts index 800120cb4a5..2332747cec1 100644 --- a/tfjs-core/src/public/chained_ops/all.ts +++ b/tfjs-core/src/public/chained_ops/all.ts @@ -20,13 +20,13 @@ import {Rank} from '../../types'; declare module '../../tensor' { interface Tensor { - all(this: T, axis: number|number[], keepDims?: boolean): + all(this: T, axis?: number|number[], keepDims?: boolean): T; } } Tensor.prototype.all = function( - this: T, axis: number|number[] = null, keepDims?: boolean): T { + this: T, axis?: number|number[], keepDims?: boolean): T { this.throwIfDisposed(); return all(this, axis, keepDims); }; diff --git a/tfjs-core/src/public/chained_ops/any.ts b/tfjs-core/src/public/chained_ops/any.ts index 5cf0a84de86..023ee44c730 100644 --- a/tfjs-core/src/public/chained_ops/any.ts +++ b/tfjs-core/src/public/chained_ops/any.ts @@ -20,13 +20,13 @@ import {Rank} from '../../types'; declare module '../../tensor' { interface Tensor { - any(this: T, axis: number|number[], 
keepDims?: boolean): + any(this: T, axis?: number|number[], keepDims?: boolean): T; } } Tensor.prototype.any = function( - this: T, axis: number|number[] = null, keepDims?: boolean): T { + this: T, axis?: number|number[], keepDims?: boolean): T { this.throwIfDisposed(); return any(this, axis, keepDims); }; diff --git a/tfjs-core/src/public/chained_ops/arg_max.ts b/tfjs-core/src/public/chained_ops/arg_max.ts new file mode 100644 index 00000000000..1440c1ad028 --- /dev/null +++ b/tfjs-core/src/public/chained_ops/arg_max.ts @@ -0,0 +1,30 @@ +/** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ +import {argMax} from '../../ops/arg_max'; +import {Tensor} from '../../tensor'; +import {Rank} from '../../types'; + +declare module '../../tensor' { + interface Tensor { + argMax(axis?: number): T; + } +} + +Tensor.prototype.argMax = function(axis?: number): T { + this.throwIfDisposed(); + return argMax(this, axis); +}; diff --git a/tfjs-core/src/public/chained_ops/arg_min.ts b/tfjs-core/src/public/chained_ops/arg_min.ts new file mode 100644 index 00000000000..9ce535d7d78 --- /dev/null +++ b/tfjs-core/src/public/chained_ops/arg_min.ts @@ -0,0 +1,31 @@ +/** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ +import {argMin} from '../../ops/arg_min'; +import {Tensor} from '../../tensor'; +import {Rank} from '../../types'; + +declare module '../../tensor' { + interface Tensor { + argMin(axis?: number): T; + } +} + +Tensor.prototype.argMin = function(axis?: number): T { + this.throwIfDisposed(); + // tslint:disable-next-line: no-unnecessary-type-assertion + return argMin(this, axis) as T; +}; diff --git a/tfjs-core/src/public/chained_ops/log_sum_exp.ts b/tfjs-core/src/public/chained_ops/log_sum_exp.ts index 5712434946c..6dcfce28e6e 100644 --- a/tfjs-core/src/public/chained_ops/log_sum_exp.ts +++ b/tfjs-core/src/public/chained_ops/log_sum_exp.ts @@ -21,12 +21,12 @@ import {Rank} from '../../types'; declare module '../../tensor' { interface Tensor { logSumExp( - this: T, axis: number|number[], keepDims?: boolean): T; + this: T, axis?: number|number[], keepDims?: boolean): T; } } Tensor.prototype.logSumExp = function( - this: T, axis: number|number[] = null, keepDims?: boolean): T { + this: T, axis?: number|number[], keepDims?: boolean): T { this.throwIfDisposed(); return logSumExp(this, axis, keepDims); }; diff --git a/tfjs-core/src/public/chained_ops/mean.ts b/tfjs-core/src/public/chained_ops/mean.ts new file mode 100644 index 00000000000..0a8810e281b --- /dev/null +++ b/tfjs-core/src/public/chained_ops/mean.ts @@ -0,0 +1,32 @@ +/** + 
* @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ +import {mean} from '../../ops/mean'; +import {Tensor} from '../../tensor'; +import {Rank} from '../../types'; + +declare module '../../tensor' { + interface Tensor<R extends Rank = Rank> { + mean<T extends Tensor>(axis?: number|number[], keepDims?: boolean): T; + } +} + +Tensor.prototype.mean = function<T extends Tensor>( + axis?: number|number[], keepDims?: boolean): T { + this.throwIfDisposed(); + // tslint:disable-next-line: no-unnecessary-type-assertion + return mean(this, axis, keepDims) as T; +}; diff --git a/tfjs-core/src/public/chained_ops/min.ts b/tfjs-core/src/public/chained_ops/min.ts new file mode 100644 index 00000000000..33cab2381af --- /dev/null +++ b/tfjs-core/src/public/chained_ops/min.ts @@ -0,0 +1,32 @@ +/** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + +import {min} from '../../ops/min'; +import {Tensor} from '../../tensor'; +import {Rank} from '../../types'; + +declare module '../../tensor' { + interface Tensor<R extends Rank = Rank> { + min<T extends Tensor>(axis?: number|number[], keepDims?: boolean): T; + } +} + +Tensor.prototype.min = function<T extends Tensor>( + axis?: number|number[], keepDims?: boolean): T { + this.throwIfDisposed(); + return min(this, axis, keepDims); +}; diff --git a/tfjs-core/src/public/chained_ops/prod.ts b/tfjs-core/src/public/chained_ops/prod.ts index 3626d871019..2b7e57053d5 100644 --- a/tfjs-core/src/public/chained_ops/prod.ts +++ b/tfjs-core/src/public/chained_ops/prod.ts @@ -20,13 +20,13 @@ import {Rank} from '../../types'; declare module '../../tensor' { interface Tensor<R extends Rank = Rank> { - prod<T extends Tensor>(this: T, axis: number|number[], keepDims?: boolean): + prod<T extends Tensor>(this: T, axis?: number|number[], keepDims?: boolean): T; } } Tensor.prototype.prod = function<T extends Tensor>( - this: T, axis: number|number[] = null, keepDims?: boolean): T { + this: T, axis?: number|number[], keepDims?: boolean): T { this.throwIfDisposed(); return prod(this, axis, keepDims); }; diff --git a/tfjs-core/src/public/chained_ops/register_all_chained_ops.ts b/tfjs-core/src/public/chained_ops/register_all_chained_ops.ts index d9b725d28b9..2c5ed3bffc7 100644 --- a/tfjs-core/src/public/chained_ops/register_all_chained_ops.ts +++ b/tfjs-core/src/public/chained_ops/register_all_chained_ops.ts @@ -17,6 +17,8 @@ import './add'; import './all'; import './any'; +import './arg_max'; +import './arg_min'; import './atan2'; import './avg_pool'; import './batchnorm'; @@ -53,6 +55,8 @@ import './mat_mul'; import './max'; import './max_pool'; import './maximum'; +import './mean'; +import './min'; import './minimum'; import './mod'; import './mul'; @@ -77,6 +81,7 @@ import './squeeze'; import './space_to_batch_nd'; import 
'./stack'; import './sub'; +import './sum'; import './tile'; import './transpose'; import './unstack'; diff --git a/tfjs-core/src/public/chained_ops/register_all_chained_ops_test.ts b/tfjs-core/src/public/chained_ops/register_all_chained_ops_test.ts index 055aecabc36..0017eb4fdb8 100644 --- a/tfjs-core/src/public/chained_ops/register_all_chained_ops_test.ts +++ b/tfjs-core/src/public/chained_ops/register_all_chained_ops_test.ts @@ -27,6 +27,8 @@ const CHAINED_OPS = [ 'add', 'all', 'any', + 'argMax', + 'argMin', 'atan2', 'avgPool', 'batchNorm', @@ -63,6 +65,8 @@ const CHAINED_OPS = [ 'max', 'maximum', 'maxPool', + 'mean', + 'min', 'minimum', 'mod', 'mul', @@ -87,6 +91,7 @@ const CHAINED_OPS = [ 'squeeze', 'stack', 'sub', + 'sum', 'tile', 'transpose', 'unstack', diff --git a/tfjs-core/src/public/chained_ops/sum.ts b/tfjs-core/src/public/chained_ops/sum.ts new file mode 100644 index 00000000000..51110967ddb --- /dev/null +++ b/tfjs-core/src/public/chained_ops/sum.ts @@ -0,0 +1,32 @@ +/** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ +import {sum} from '../../ops/sum'; +import {Tensor} from '../../tensor'; +import {Rank} from '../../types'; + +declare module '../../tensor' { + interface Tensor<R extends Rank = Rank> { + sum<T extends Tensor>(axis?: number|number[], keepDims?: boolean): T; + } +} + +Tensor.prototype.sum = function<T extends Tensor>( + axis?: number|number[], keepDims?: boolean): T { + this.throwIfDisposed(); + // tslint:disable-next-line: no-unnecessary-type-assertion + return sum(this, axis, keepDims) as T; +}; diff --git a/tfjs-core/src/register_all_gradients.ts b/tfjs-core/src/register_all_gradients.ts index 58a3d9fb251..f3968bdb284 100644 --- a/tfjs-core/src/register_all_gradients.ts +++ b/tfjs-core/src/register_all_gradients.ts @@ -16,6 +16,8 @@ */ import {addGradConfig} from './gradients/Add_grad'; import {addNGradConfig} from './gradients/AddN_grad'; +import {argMaxGradConfig} from './gradients/ArgMax_grad'; +import {argMinGradConfig} from './gradients/ArgMin_grad'; import {atan2GradConfig} from './gradients/Atan2_grad'; import {avgPool3DGradConfig} from './gradients/AvgPool3D_grad'; import {avgPoolGradConfig} from './gradients/AvgPool_grad'; @@ -40,6 +42,7 @@ import {maxGradConfig} from './gradients/Max_grad'; import {maximumGradConfig} from './gradients/Maximum_grad'; import {maxPool3DGradConfig} from './gradients/MaxPool3D_grad'; import {maxPoolGradConfig} from './gradients/MaxPool_grad'; +import {minGradConfig} from './gradients/Min_grad'; import {minimumGradConfig} from './gradients/Minimum_grad'; import {modGradConfig} from './gradients/Mod_grad'; import {multiplyGradConfig} from './gradients/Multiply_grad'; @@ -60,6 +63,7 @@ import {splitVGradConfig} from './gradients/SplitV_grad'; import {squareGradConfig} from './gradients/Square_grad'; import {squaredDifferenceGradConfig} from './gradients/SquaredDifference_grad'; import {subGradConfig} from './gradients/Sub_grad'; +import {sumGradConfig} from './gradients/Sum_grad'; import 
{tileGradConfig} from './gradients/Tile_grad'; import {transposeGradConfig} from './gradients/Transpose_grad'; import {unpackGradConfig} from './gradients/Unpack_grad'; @@ -70,6 +74,8 @@ import {registerGradient} from './kernel_registry'; const gradConfigs: GradConfig[] = [ addGradConfig, addNGradConfig, + argMaxGradConfig, + argMinGradConfig, atan2GradConfig, avgPoolGradConfig, avgPool3DGradConfig, @@ -96,6 +102,7 @@ const gradConfigs: GradConfig[] = [ maxGradConfig, spaceToBatchNDGradConfig, maxGradConfig, + minGradConfig, maximumGradConfig, maxPoolGradConfig, maxPool3DGradConfig, @@ -119,6 +126,7 @@ const gradConfigs: GradConfig[] = [ squareGradConfig, squaredDifferenceGradConfig, subGradConfig, + sumGradConfig, tileGradConfig, transposeGradConfig, unpackGradConfig diff --git a/tfjs-core/src/tensor.ts b/tfjs-core/src/tensor.ts index 72fcb998675..d25da99b80f 100644 --- a/tfjs-core/src/tensor.ts +++ b/tfjs-core/src/tensor.ts @@ -181,12 +181,6 @@ export interface OpHandler { keepDims: boolean): Tensor; slice<R extends Rank, T extends Tensor<R>>( x: T, begin: number|number[], size?: number|number[]): T; - sum<T extends Tensor>(x: Tensor, axis: number|number[], keepDims: boolean): T; - mean<T extends Tensor>(x: Tensor, axis: number|number[], keepDims: boolean): - T; - min<T extends Tensor>(x: Tensor, axis: number|number[], keepDims: boolean): T; - argMin<T extends Tensor>(x: Tensor, axis: number): T; - argMax<T extends Tensor>(x: Tensor, axis: number): T; addStrict<T extends Tensor>(a: T, b: T|TensorLike): T; subStrict<T extends Tensor>(a: T, b: T|TensorLike): T; powStrict<T extends Tensor>(base: T, exp: Tensor|TensorLike): T; @@ -628,27 +622,6 @@ export class Tensor { this.throwIfDisposed(); return opHandler.slice(this, begin, size); } - // Reduction ops. 
- sum<T extends Tensor>(axis: number|number[] = null, keepDims = false): T { - this.throwIfDisposed(); - return opHandler.sum(this, axis, keepDims); - } - mean<T extends Tensor>(axis: number|number[] = null, keepDims = false): T { - this.throwIfDisposed(); - return opHandler.mean(this, axis, keepDims); - } - min<T extends Tensor>(axis: number|number[] = null, keepDims = false): T { - this.throwIfDisposed(); - return opHandler.min(this, axis, keepDims); - } - argMin<T extends Tensor>(axis: number = null): T { - this.throwIfDisposed(); - return opHandler.argMin(this, axis); - } - argMax<T extends Tensor>(axis: number = null): T { - this.throwIfDisposed(); - return opHandler.argMax(this, axis); - } // Transformations cast<T extends Tensor>(dtype: DataType): T { diff --git a/tfjs-core/src/tests.ts b/tfjs-core/src/tests.ts index d17d7ebc09c..f330796dac6 100644 --- a/tfjs-core/src/tests.ts +++ b/tfjs-core/src/tests.ts @@ -43,6 +43,8 @@ import './ops/add_n_test'; import './ops/add_test'; import './ops/all_test'; import './ops/any_test'; +import './ops/arg_max_test'; +import './ops/arg_min_test'; import './ops/arithmetic_test'; import './ops/array_ops_test'; import './ops/avg_pool_3d_test'; @@ -105,13 +107,17 @@ import './ops/mat_mul_test'; import './ops/max_pool_3d_test'; import './ops/max_pool_test'; import './ops/max_pool_with_argmax_test'; +import './ops/max_test'; import './ops/mean_squared_error_test'; +import './ops/mean_test'; +import './ops/min_test'; import './ops/moments_test'; import './ops/moving_average_test'; import './ops/multi_rnn_cell_test'; import './ops/multinomial_test'; import './ops/non_max_suppression_async_test'; import './ops/non_max_suppression_test'; +import './ops/norm_test'; import './ops/not_equal_test'; import './ops/one_hot_test'; import './ops/operation_test'; @@ -123,7 +129,6 @@ import './ops/rand_test'; import './ops/random_gamma_test'; import './ops/random_normal_test'; import './ops/random_uniform_test'; -import './ops/reduction_ops_test'; import './ops/relu6_test'; import './ops/relu_test'; import './ops/resize_bilinear_test'; @@ -148,6 
+153,7 @@ import './ops/spectral_ops_test'; import './ops/stack_test'; import './ops/strided_slice_test'; import './ops/sub_test'; +import './ops/sum_test'; import './ops/tile_test'; import './ops/topk_test'; import './ops/transpose_test';