diff --git a/tfjs-core/src/index.ts b/tfjs-core/src/index.ts index 27d710984bc..d3d1fde7c0a 100644 --- a/tfjs-core/src/index.ts +++ b/tfjs-core/src/index.ts @@ -64,7 +64,7 @@ export {GradSaveFunc, NamedTensorMap, TensorContainer, TensorContainerArray, Ten export {BackendValues, DataType, DataTypeMap, DataValues, NumericDataType, PixelData, Rank, RecursiveArray, ShapeMap, sumOutType, TensorLike, TypedArray, upcastType} from './types'; export * from './ops/ops'; -export {Reduction} from './ops/loss_ops'; +export {Reduction} from './ops/loss_ops_utils'; export * from './train'; export * from './globals'; diff --git a/tfjs-core/src/ops/absolute_difference.ts b/tfjs-core/src/ops/absolute_difference.ts new file mode 100644 index 00000000000..30349db9c17 --- /dev/null +++ b/tfjs-core/src/ops/absolute_difference.ts @@ -0,0 +1,60 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + +import {Tensor} from '../tensor'; +import {convertToTensor} from '../tensor_util_env'; +import {TensorLike} from '../types'; +import {assertShapesMatch} from '../util'; +import {computeWeightedLoss} from './compute_weighted_loss'; +import {Reduction} from './loss_ops_utils'; +import {op} from './operation'; +import {sub} from './sub'; +import {abs} from './unary_ops'; + +/** + * Computes the absolute difference loss between two tensors. 
+ * + * @param labels The ground truth output tensor, same dimensions as + * 'predictions'. + * @param predictions The predicted outputs. + * @param weights Tensor whose rank is either 0, or the same rank as + * `labels`, and must be broadcastable to `labels` (i.e., all dimensions + * must be either `1`, or the same as the corresponding `losses` + * dimension). + * @param reduction Type of reduction to apply to loss. Should be of type + * `Reduction` + */ +/** @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'} */ +function absoluteDifference_<T extends Tensor, O extends Tensor>( + labels: T|TensorLike, predictions: T|TensorLike, + weights?: Tensor|TensorLike, + reduction = Reduction.SUM_BY_NONZERO_WEIGHTS): O { + const $labels = convertToTensor(labels, 'labels', 'absoluteDifference'); + const $predictions = + convertToTensor(predictions, 'predictions', 'absoluteDifference'); + let $weights: Tensor = null; + if (weights != null) { + $weights = convertToTensor(weights, 'weights', 'absoluteDifference'); + } + assertShapesMatch( + $labels.shape, $predictions.shape, 'Error in absoluteDifference: '); + + const losses = abs(sub($labels, $predictions)); + return computeWeightedLoss(losses, $weights, reduction); +} + +export const absoluteDifference = op({absoluteDifference_}); diff --git a/tfjs-core/src/ops/absolute_difference_test.ts b/tfjs-core/src/ops/absolute_difference_test.ts new file mode 100644 index 00000000000..e1c8d11297a --- /dev/null +++ b/tfjs-core/src/ops/absolute_difference_test.ts @@ -0,0 +1,222 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + +import * as tf from '../index'; +import {ALL_ENVS, describeWithFlags} from '../jasmine_util'; +import {expectArraysClose} from '../test_util'; + +describeWithFlags('absoluteDifference', ALL_ENVS, () => { + it('1D', async () => { + const predictions = tf.tensor1d([1, 2, 3]); + const label = tf.tensor1d([0.3, -0.6, -0.1]); + + const y = tf.losses.absoluteDifference(label, predictions); + + expect(y.shape).toEqual([]); + expectArraysClose( + await y.data(), + (Math.abs(1 - 0.3) + Math.abs(2 - (-0.6)) + Math.abs(3 - (-0.1))) / 3); + }); + + it('1D - weighted - Reduction.SUM_BY_NONZERO_WEIGHTS', async () => { + const predictions = tf.tensor1d([1, 2, 3]); + const label = tf.tensor1d([0.3, -0.6, -0.1]); + const weights = tf.tensor1d([0.1, 0.2, 0.3]); + + const y = tf.losses.absoluteDifference(label, predictions, weights); + + expect(y.shape).toEqual([]); + expectArraysClose( + await y.data(), + (Math.abs(1 - 0.3) * 0.1 + Math.abs(2 - (-0.6)) * 0.2 + + Math.abs(3 - (-0.1)) * 0.3) / + 3); + }); + + it('1D - weighted - Reduction.NONE', async () => { + const predictions = tf.tensor1d([1, 2, 3]); + const label = tf.tensor1d([0.3, -0.6, -0.1]); + const weights = tf.tensor1d([0.1, 0.2, 0.3]); + + const y = tf.losses.absoluteDifference( + label, predictions, weights, tf.Reduction.NONE); + + expect(y.shape).toEqual([3]); + expectArraysClose(await y.data(), [ + Math.abs(1 - 0.3) * 0.1, Math.abs(2 - (-0.6)) * 0.2, + Math.abs(3 - (-0.1)) * 0.3 + ]); + }); + + it('1D - 
Reduction.MEAN', async () => { + const predictions = tf.tensor1d([1, 2, 3]); + const label = tf.tensor1d([0.3, -0.6, -0.1]); + + const y = tf.losses.absoluteDifference( + label, predictions, undefined, tf.Reduction.MEAN); + + expect(y.shape).toEqual([]); + expectArraysClose( + await y.data(), + (Math.abs(1 - 0.3) + Math.abs(2 - (-0.6)) + Math.abs(3 - (-0.1))) / 3); + }); + + it('1D - weighted - Reduction.MEAN', async () => { + const predictions = tf.tensor1d([1, 2, 3]); + const label = tf.tensor1d([0.3, -0.6, -0.1]); + const weights = tf.tensor1d([0.1, 0.2, 0.3]); + + const y = tf.losses.absoluteDifference( + label, predictions, weights, tf.Reduction.MEAN); + + expect(y.shape).toEqual([]); + expectArraysClose( + await y.data(), + ((Math.abs(1 - 0.3) * 0.1) + (Math.abs(2 - (-0.6)) * 0.2) + + (Math.abs(3 - (-0.1)) * 0.3)) / + 0.6); + }); + + it('2D', async () => { + const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); + const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); + + const y = tf.losses.absoluteDifference(label, predictions); + + expect(y.shape).toEqual([]); + expectArraysClose( + await y.data(), + (Math.abs(4 - 1) + Math.abs(8 - 9) + Math.abs(12 - 2) + + Math.abs(8 - (-5)) + Math.abs(1 - (-2)) + Math.abs(3 - 6)) / + 6); + }); + + it('2D - weighted - Reduction.SUM_BY_NONZERO_WEIGHTS', async () => { + const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); + const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); + const weights = tf.tensor2d([3, 0, 5, 0, 4, 2], [2, 3]); + + const y = tf.losses.absoluteDifference(label, predictions, weights); + + expect(y.shape).toEqual([]); + expectArraysClose( + await y.data(), + (Math.abs(4 - 1) * 3 + Math.abs(8 - 9) * 0 + Math.abs(12 - 2) * 5 + + Math.abs(8 - (-5)) * 0 + Math.abs(1 - (-2)) * 4 + + Math.abs(3 - 6) * 2) / + 4); + }); + + it('2D - weighted - Reduction.NONE', async () => { + const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); + const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 
3]); + const weights = tf.tensor2d([3, 6, 5, 0, 4, 2], [2, 3]); + + const y = tf.losses.absoluteDifference( + label, predictions, weights, tf.Reduction.NONE); + + expect(y.shape).toEqual([2, 3]); + expectArraysClose(await y.data(), [ + Math.abs(4 - 1) * 3, Math.abs(8 - 9) * 6, Math.abs(12 - 2) * 5, + Math.abs(8 - (-5)) * 0, Math.abs(1 - (-2)) * 4, Math.abs(3 - 6) * 2 + ]); + }); + + it('2D - Reduction.MEAN', async () => { + const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); + const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); + + const y = tf.losses.absoluteDifference( + label, predictions, undefined, tf.Reduction.MEAN); + + expect(y.shape).toEqual([]); + expectArraysClose( + await y.data(), + (Math.abs(4 - 1) + Math.abs(8 - 9) + Math.abs(12 - 2) + + Math.abs(8 - (-5)) + Math.abs(1 - (-2)) + Math.abs(3 - 6)) / + 6); + }); + + it('2D - weighted - Reduction.MEAN', async () => { + const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); + const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); + const weights = tf.tensor2d([3, 6, 5, 0, 4, 2], [2, 3]); + + const y = tf.losses.absoluteDifference( + label, predictions, weights, tf.Reduction.MEAN); + + expect(y.shape).toEqual([]); + expectArraysClose( + await y.data(), + (Math.abs(4 - 1) * 3 + Math.abs(8 - 9) * 6 + Math.abs(12 - 2) * 5 + + Math.abs(8 - (-5)) * 0 + Math.abs(1 - (-2)) * 4 + + Math.abs(3 - 6) * 2) / + 20); + }); + + it('throws when passed label as a non-tensor', () => { + const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); + const weights = tf.tensor2d([3, 6, 5, 0, 4, 2], [2, 3]); + + const e = + /Argument 'labels' passed to 'absoluteDifference' must be a Tensor/; + expect( + () => tf.losses.absoluteDifference( + {} as tf.Tensor, predictions, weights, tf.Reduction.MEAN)) + .toThrowError(e); + }); + + it('throws when passed label as a non-tensor', () => { + const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); + const weights = tf.tensor2d([3, 6, 5, 0, 4, 2], [2, 3]); + + 
const e = new RegExp( + 'Argument \'predictions\' passed to \'absoluteDifference\' ' + + 'must be a Tensor'); + expect( + () => tf.losses.absoluteDifference( + label, {} as tf.Tensor, weights, tf.Reduction.MEAN)) + .toThrowError(e); + }); + + it('throws when passed weights as a non-tensor', () => { + const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); + const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); + + const e = + /Argument 'weights' passed to 'absoluteDifference' must be a Tensor/; + expect( + () => tf.losses.absoluteDifference( + label, predictions, {} as tf.Tensor, tf.Reduction.MEAN)) + .toThrowError(e); + }); + + it('accepts a tensor-like object', async () => { + const predictions = [1, 2, 3]; + const label = [0.3, -0.6, -0.1]; + const weights = [0.1, 0.2, 0.3]; + + const y = tf.losses.absoluteDifference( + label, predictions, weights, tf.Reduction.NONE); + + expect(y.shape).toEqual([3]); + expectArraysClose(await y.data(), [ + Math.abs(1 - 0.3) * 0.1, Math.abs(2 - (-0.6)) * 0.2, + Math.abs(3 - (-0.1)) * 0.3 + ]); + }); +}); diff --git a/tfjs-core/src/ops/compute_weighted_loss.ts b/tfjs-core/src/ops/compute_weighted_loss.ts new file mode 100644 index 00000000000..2c587d3a380 --- /dev/null +++ b/tfjs-core/src/ops/compute_weighted_loss.ts @@ -0,0 +1,81 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ +import {Tensor} from '../tensor'; +import {convertToTensor} from '../tensor_util_env'; +import {TensorLike} from '../types'; + +import {cast} from './array_ops'; +import {div} from './div'; +import {Reduction} from './loss_ops_utils'; +import {mul} from './mul'; +import {notEqual} from './not_equal'; +import {op} from './operation'; +import {mean, sum} from './reduction_ops'; +import {ones, scalar} from './tensor_ops'; + +/** + * Computes the weighted loss between two tensors. + * + * @param losses Tensor of shape `[batch_size, d1, ... dN]`. + * @param weights Tensor whose rank is either 0, or the same rank as + * `losses`, and must be broadcastable to `losses` (i.e., all + * dimensions must be either `1`, or the same as the corresponding + * `losses` dimension). + */ +/** @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'} */ +function computeWeightedLoss_<T extends Tensor, O extends Tensor>( + losses: T|TensorLike, weights?: Tensor|TensorLike, + reduction = Reduction.SUM_BY_NONZERO_WEIGHTS): O { + const $losses = convertToTensor(losses, 'losses', 'computeWeightedLoss'); + let $weights: Tensor = null; + if (weights != null) { + $weights = convertToTensor(weights, 'weights', 'computeWeightedLoss'); + } + + const weightedLoss = ($weights == null) ? $losses : mul($losses, $weights); + + if (reduction === Reduction.NONE) { + return weightedLoss as O; + } + if (reduction === Reduction.SUM) { + return sum(weightedLoss); + } + if (reduction === Reduction.MEAN) { + if ($weights == null) { + return mean(weightedLoss); + } else { + const broadcastFactor = $losses.size / $weights.size; + const result = div(sum(weightedLoss), sum($weights)); + return broadcastFactor > 1 ? 
div(result, scalar(broadcastFactor)) : + result as O; + } + } + if (reduction === Reduction.SUM_BY_NONZERO_WEIGHTS) { + if ($weights == null) { + return div(sum(weightedLoss), scalar($losses.size)); + } else { + const broadcastedWeights = mul($weights, ones($losses.shape)); + + const numNonZeros = + cast(sum(notEqual(broadcastedWeights, scalar(0))), 'float32'); + return div(sum(weightedLoss), numNonZeros); + } + } + + throw Error(`Unknown reduction: ${reduction}`); +} +export const computeWeightedLoss = op({computeWeightedLoss_}); diff --git a/tfjs-core/src/ops/compute_weighted_loss_test.ts b/tfjs-core/src/ops/compute_weighted_loss_test.ts new file mode 100644 index 00000000000..763579f2c75 --- /dev/null +++ b/tfjs-core/src/ops/compute_weighted_loss_test.ts @@ -0,0 +1,237 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + +import * as tf from '../index'; +import {ALL_ENVS, describeWithFlags} from '../jasmine_util'; +import {expectArraysClose} from '../test_util'; + +describeWithFlags('computeWeightedLoss', ALL_ENVS, () => { + it('1D - no weights', async () => { + const losses = tf.tensor1d([1, 2, 3]); + + const y = tf.losses.computeWeightedLoss(losses); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), (1 + 2 + 3) / 3); + }); + + it('1D - no weights - Reduction.NONE', async () => { + const losses = tf.tensor1d([1, 2, 3]); + + const y = + tf.losses.computeWeightedLoss(losses, undefined, tf.Reduction.NONE); + + expect(y.shape).toEqual([3]); + expectArraysClose(await y.data(), [1, 2, 3]); + }); + + it('1D - no weights - Reduction.MEAN', async () => { + const losses = tf.tensor1d([1, 2, 3]); + + const y = + tf.losses.computeWeightedLoss(losses, undefined, tf.Reduction.MEAN); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), (1 + 2 + 3) / 3); + }); + + it('1D - no weights - Reduction.SUM', async () => { + const losses = tf.tensor1d([1, 2, 3]); + + const y = + tf.losses.computeWeightedLoss(losses, undefined, tf.Reduction.SUM); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), (1 + 2 + 3)); + }); + + it('1D - weights', async () => { + const losses = tf.tensor1d([1, 2, 3]); + const weights = tf.tensor1d([0.1, 0, 0.3]); + + const y = tf.losses.computeWeightedLoss(losses, weights); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), (1 * 0.1 + 2 * 0 + 3 * 0.3) / 2); + }); + + it('2D - weights - broadcast', async () => { + const losses = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]); + const weights = tf.tensor2d([[0.1, 0.2, 0.3]]); + + const y = tf.losses.computeWeightedLoss(losses, weights); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), 0.06666667); + }); + + it('1D - weights - Reduction.NONE', async 
() => { + const losses = tf.tensor1d([1, 2, 3]); + const weights = tf.tensor1d([0.1, 0.2, 0.3]); + + const y = tf.losses.computeWeightedLoss(losses, weights, tf.Reduction.NONE); + + expect(y.shape).toEqual([3]); + expectArraysClose(await y.data(), [1 * 0.1, 2 * 0.2, 3 * 0.3]); + }); + + it('1D - weights - Reduction.MEAN', async () => { + const losses = tf.tensor1d([1, 2, 3]); + const weights = tf.tensor1d([0.1, 0.2, 0.3]); + + const y = tf.losses.computeWeightedLoss(losses, weights, tf.Reduction.MEAN); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), (1 * 0.1 + 2 * 0.2 + 3 * 0.3) / 0.6); + }); + + it('1D - weights - Reduction.SUM', async () => { + const losses = tf.tensor1d([1, 2, 3]); + const weights = tf.tensor1d([0.1, 0.2, 0.3]); + + const y = tf.losses.computeWeightedLoss(losses, weights, tf.Reduction.SUM); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), (1 * 0.1 + 2 * 0.2 + 3 * 0.3)); + }); + + it('2D - no weights', async () => { + const losses = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); + + const y = tf.losses.computeWeightedLoss(losses); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), (4 + 8 + 12 + 8 + 1 + 3) / 6); + }); + + it('2D - weights', async () => { + const losses = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); + const weights = tf.tensor2d([1, 0, 2, -5, 0, 6], [2, 3]); + + const y = tf.losses.computeWeightedLoss(losses, weights); + + expect(y.shape).toEqual([]); + expectArraysClose( + await y.data(), + (4 * 1 + 8 * 0 + 12 * 2 + (8 * -5) + 1 * 0 + 3 * 6) / 4); + }); + + it('2D - no weights - Reduction.MEAN', async () => { + const losses = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); + + const y = + tf.losses.computeWeightedLoss(losses, undefined, tf.Reduction.MEAN); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), (4 + 8 + 12 + 8 + 1 + 3) / 6); + }); + + it('2D - weights - Reduction.MEAN', async () => { + const losses = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); + const weights 
= tf.tensor2d([1, 0, 2, -5, 0, 6], [2, 3]); + + const y = tf.losses.computeWeightedLoss(losses, weights, tf.Reduction.MEAN); + + expect(y.shape).toEqual([]); + expectArraysClose( + await y.data(), + (4 * 1 + 8 * 0 + 12 * 2 + (8 * -5) + 1 * 0 + 3 * 6) / 4); + }); + + it('2D - weights - broadcast - MEAN', async () => { + const losses = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]); + const weights = tf.tensor2d([[0.1, 0.2, 0.3]]); + + const y = tf.losses.computeWeightedLoss(losses, weights, tf.Reduction.MEAN); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), (0.3 + 0.1 + 0.2) / (3 * 0.6)); + }); + + it('2D - no weights - Reduction.SUM', async () => { + const losses = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); + + const y = + tf.losses.computeWeightedLoss(losses, undefined, tf.Reduction.SUM); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), (4 + 8 + 12 + 8 + 1 + 3)); + }); + + it('2D - weights - Reduction.SUM', async () => { + const losses = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); + const weights = tf.tensor2d([1, 0, 2, -5, 0, 6], [2, 3]); + + const y = tf.losses.computeWeightedLoss(losses, weights, tf.Reduction.SUM); + + expect(y.shape).toEqual([]); + expectArraysClose( + await y.data(), (4 * 1 + 8 * 0 + 12 * 2 + (8 * -5) + 1 * 0 + 3 * 6)); + }); + + it('2D - no weights - Reduction.NONE', async () => { + const losses = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); + + const y = + tf.losses.computeWeightedLoss(losses, undefined, tf.Reduction.NONE); + + expect(y.shape).toEqual([2, 3]); + expectArraysClose(await y.data(), [4, 8, 12, 8, 1, 3]); + }); + + it('2D - weights - Reduction.NONE', async () => { + const losses = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); + const weights = tf.tensor2d([1, 0, 2, -5, 0, 6], [2, 3]); + + const y = tf.losses.computeWeightedLoss(losses, weights, tf.Reduction.NONE); + + expect(y.shape).toEqual([2, 3]); + expectArraysClose( + await y.data(), [4 * 1, 8 * 0, 12 * 2, (8 * -5), 1 * 0, 3 * 6]); + 
}); + + it('throws when passed losses as a non-tensor', () => { + const weights = tf.tensor2d([1, 0, 2, -5, 0, 6], [2, 3]); + + const e = + /Argument 'losses' passed to 'computeWeightedLoss' must be a Tensor/; + expect( + () => tf.losses.computeWeightedLoss( + {} as tf.Tensor, weights, tf.Reduction.NONE)) + .toThrowError(e); + }); + + it('throws when passed weights as a non-tensor', () => { + const losses = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); + + const e = + /Argument 'weights' passed to 'computeWeightedLoss' must be a Tensor/; + expect( + () => tf.losses.computeWeightedLoss( + losses, {} as tf.Tensor, tf.Reduction.NONE)) + .toThrowError(e); + }); + + it('accepts a tensor-like object', async () => { + const losses = [1, 2, 3]; + const weights = [0.1, 0, 0.3]; + const y = tf.losses.computeWeightedLoss(losses, weights); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), (1 * 0.1 + 2 * 0 + 3 * 0.3) / 2); + }); +}); diff --git a/tfjs-core/src/ops/cosine_distance.ts b/tfjs-core/src/ops/cosine_distance.ts new file mode 100644 index 00000000000..fa0213418c7 --- /dev/null +++ b/tfjs-core/src/ops/cosine_distance.ts @@ -0,0 +1,62 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ +import {Tensor} from '../tensor'; +import {convertToTensor} from '../tensor_util_env'; +import {TensorLike} from '../types'; +import {assertShapesMatch} from '../util'; +import {computeWeightedLoss} from './compute_weighted_loss'; +import {Reduction} from './loss_ops_utils'; +import {mul} from './mul'; +import {op} from './operation'; +import {sum} from './reduction_ops'; +import {sub} from './sub'; +import {scalar} from './tensor_ops'; + +/** + * Computes the cosine distance loss between two tensors. + * + * @param labels The ground truth output tensor, same dimensions as + * 'predictions'. + * @param predictions The predicted outputs. + * @param axis The dimension along which the cosine distance is computed. + * @param weights Tensor whose rank is either 0, or the same rank as + * `labels`, and must be broadcastable to `labels` (i.e., all dimensions + * must be either `1`, or the same as the corresponding `losses` + * dimension). + * @param reduction Type of reduction to apply to loss. 
Should be of type + * `Reduction` + */ +/** @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'} */ +function cosineDistance_<T extends Tensor, O extends Tensor>( + labels: T|TensorLike, predictions: T|TensorLike, axis: number, + weights?: Tensor|TensorLike, + reduction = Reduction.SUM_BY_NONZERO_WEIGHTS): O { + const $labels = convertToTensor(labels, 'labels', 'cosineDistance'); + const $predictions = + convertToTensor(predictions, 'predictions', 'cosineDistance'); + let $weights: Tensor = null; + if (weights != null) { + $weights = convertToTensor(weights, 'weights', 'cosineDistance'); + } + assertShapesMatch( + $labels.shape, $predictions.shape, 'Error in cosineDistance: '); + + const one = scalar(1); + const losses = sub(one, sum(mul($labels, $predictions), axis, true)); + return computeWeightedLoss(losses, $weights, reduction); +} +export const cosineDistance = op({cosineDistance_}); diff --git a/tfjs-core/src/ops/cosine_distance_test.ts b/tfjs-core/src/ops/cosine_distance_test.ts new file mode 100644 index 00000000000..43ce3b92d19 --- /dev/null +++ b/tfjs-core/src/ops/cosine_distance_test.ts @@ -0,0 +1,198 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ +import * as tf from '../index'; +import {ALL_ENVS, describeWithFlags} from '../jasmine_util'; +import {expectArraysClose} from '../test_util'; + +describeWithFlags('cosineDistance', ALL_ENVS, () => { + it('1D', async () => { + const predictions = tf.tensor1d([1, 2, 3]); + const label = tf.tensor1d([0.3, -0.6, -0.1]); + + const y = tf.losses.cosineDistance(label, predictions, 0); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), 1 - (1 * 0.3 + 2 * -0.6 + 3 * -0.1)); + }); + + it('1D - weighted - Reduction.SUM_BY_NONZERO_WEIGHTS', async () => { + const predictions = tf.tensor1d([1, 2, 3]); + const label = tf.tensor1d([0.3, -0.6, -0.1]); + const weights = tf.scalar(0.1); + + const y = tf.losses.cosineDistance(label, predictions, 0, weights); + + expect(y.shape).toEqual([]); + expectArraysClose( + await y.data(), (1 - (1 * 0.3 + 2 * -0.6 + 3 * -0.1)) * 0.1); + }); + + it('1D - weighted - Reduction.NONE', async () => { + const predictions = tf.tensor1d([1, 2, 3]); + const label = tf.tensor1d([0.3, -0.6, -0.1]); + const weights = tf.scalar(0.1); + + const y = tf.losses.cosineDistance( + label, predictions, 0, weights, tf.Reduction.NONE); + + expect(y.shape).toEqual([1]); + expectArraysClose( + await y.data(), [(1 - (1 * 0.3 + 2 * -0.6 + 3 * -0.1)) * 0.1]); + }); + + it('1D - Reduction.MEAN', async () => { + const predictions = tf.tensor1d([1, 2, 3]); + const label = tf.tensor1d([0.3, -0.6, -0.1]); + + const y = tf.losses.cosineDistance( + label, predictions, 0, undefined, tf.Reduction.MEAN); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), (1 - (1 * 0.3 + 2 * -0.6 + 3 * -0.1))); + }); + + it('1D - weighted - Reduction.MEAN', async () => { + const predictions = tf.tensor1d([1, 2, 3]); + const label = tf.tensor1d([0.3, -0.6, -0.1]); + const weights = tf.scalar(0.1); + + const y = tf.losses.cosineDistance( + label, predictions, 0, weights, 
tf.Reduction.MEAN); + + expect(y.shape).toEqual([]); + expectArraysClose( + await y.data(), ((1 - (1 * 0.3 + 2 * -0.6 + 3 * -0.1)) * 0.1) / 0.1); + }); + + it('2D', async () => { + const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); + const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); + + const y = tf.losses.cosineDistance(label, predictions, 1); + + expect(y.shape).toEqual([]); + expectArraysClose( + await y.data(), + ((1 - (4 * 1 + 8 * 9 + 12 * 2)) + (1 - (8 * -5 + 1 * -2 + 3 * 6))) / 2); + }); + + it('2D - weighted - Reduction.SUM_BY_NONZERO_WEIGHTS', async () => { + const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); + const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); + const weights = tf.tensor2d([3, 0], [2, 1]); + + const y = tf.losses.cosineDistance(label, predictions, 1, weights); + + expect(y.shape).toEqual([]); + expectArraysClose( + await y.data(), + ((1 - (4 * 1 + 8 * 9 + 12 * 2)) * 3 + + (1 - (8 * -5 + 1 * -2 + 3 * 6)) * 0) / + 1); + }); + + it('2D - weighted - Reduction.NONE', async () => { + const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); + const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); + const weights = tf.tensor2d([3, 0], [2, 1]); + + const y = tf.losses.cosineDistance( + label, predictions, 1, weights, tf.Reduction.NONE); + + expect(y.shape).toEqual([2, 1]); + expectArraysClose(await y.data(), [ + (1 - (4 * 1 + 8 * 9 + 12 * 2)) * 3, (1 - (8 * -5 + 1 * -2 + 3 * 6)) * 0 + ]); + }); + + it('2D - Reduction.MEAN', async () => { + const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); + const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); + + const y = tf.losses.cosineDistance( + label, predictions, 1, undefined, tf.Reduction.MEAN); + + expect(y.shape).toEqual([]); + expectArraysClose( + await y.data(), + ((1 - (4 * 1 + 8 * 9 + 12 * 2)) + (1 - (8 * -5 + 1 * -2 + 3 * 6))) / 2); + }); + + it('2D - weighted - Reduction.MEAN', async () => { + const predictions = tf.tensor2d([4, 8, 12, 8, 1, 
3], [2, 3]); + const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); + const weights = tf.tensor2d([3, 0], [2, 1]); + + const y = tf.losses.cosineDistance( + label, predictions, 1, weights, tf.Reduction.MEAN); + + expect(y.shape).toEqual([]); + expectArraysClose( + await y.data(), + ((1 - (4 * 1 + 8 * 9 + 12 * 2)) * 3 + + (1 - (8 * -5 + 1 * -2 + 3 * 6)) * 0) / + 3); + }); + + it('throws when passed label as a non-tensor', () => { + const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); + const weights = tf.tensor2d([3, 6, 5, 0, 4, 2], [2, 3]); + + const e = /Argument 'labels' passed to 'cosineDistance' must be a Tensor/; + expect( + () => tf.losses.cosineDistance( + {} as tf.Tensor, predictions, 0, weights, tf.Reduction.MEAN)) + .toThrowError(e); + }); + + it('throws when passed label as a non-tensor', () => { + const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); + const weights = tf.tensor2d([3, 6, 5, 0, 4, 2], [2, 3]); + + const e = new RegExp( + 'Argument \'predictions\' passed to \'cosineDistance\' ' + + 'must be a Tensor'); + expect( + () => tf.losses.cosineDistance( + label, {} as tf.Tensor, 0, weights, tf.Reduction.MEAN)) + .toThrowError(e); + }); + + it('throws when passed weights as a non-tensor', () => { + const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); + const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); + + const e = /Argument 'weights' passed to 'cosineDistance' must be a Tensor/; + expect( + () => tf.losses.cosineDistance( + label, predictions, 0, {} as tf.Tensor, tf.Reduction.MEAN)) + .toThrowError(e); + }); + + it('accepts a tensor-like object', async () => { + const predictions = [1, 2, 3]; + const label = [0.3, -0.6, -0.1]; + const weights = 0.1; + + const y = tf.losses.cosineDistance( + label, predictions, 0, weights, tf.Reduction.NONE); + + expect(y.shape).toEqual([1]); + expectArraysClose( + await y.data(), [(1 - (1 * 0.3 + 2 * -0.6 + 3 * -0.1)) * 0.1]); + }); +}); diff --git a/tfjs-core/src/ops/hinge_loss.ts 
b/tfjs-core/src/ops/hinge_loss.ts new file mode 100644 index 00000000000..8b6c8ffdf41 --- /dev/null +++ b/tfjs-core/src/ops/hinge_loss.ts @@ -0,0 +1,61 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ +import {Tensor} from '../tensor'; +import {convertToTensor} from '../tensor_util_env'; +import {TensorLike} from '../types'; +import {assertShapesMatch} from '../util'; +import {computeWeightedLoss} from './compute_weighted_loss'; +import {Reduction} from './loss_ops_utils'; +import {mul} from './mul'; +import {op} from './operation'; +import {relu} from './relu'; +import {sub} from './sub'; +import {scalar} from './tensor_ops'; + +/** + * Computes the Hinge loss between two tensors. + * + * @param labels The ground truth output tensor, same dimensions as + * 'predictions'. + * @param predictions The predicted outputs. + * @param weights Tensor whose rank is either 0, or the same rank as + * `labels`, and must be broadcastable to `labels` (i.e., all dimensions + * must be either `1`, or the same as the corresponding `losses` + * dimension). + * @param reduction Type of reduction to apply to loss. 
Should be of type + * `Reduction` + */ +/** @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'} */ +function hingeLoss_( + labels: T|TensorLike, predictions: T|TensorLike, + weights?: Tensor|TensorLike, + reduction = Reduction.SUM_BY_NONZERO_WEIGHTS): O { + let $labels = convertToTensor(labels, 'labels', 'hingeLoss'); + const $predictions = convertToTensor(predictions, 'predictions', 'hingeLoss'); + let $weights: Tensor = null; + if (weights != null) { + $weights = convertToTensor(weights, 'weights', 'hingeLoss'); + } + assertShapesMatch($labels.shape, $predictions.shape, 'Error in hingeLoss: '); + + const one = scalar(1); + // Convert binary labels to (-1, 1) + $labels = sub(mul(scalar(2), $labels), one); + const losses = relu(sub(one, mul($labels, $predictions))); + return computeWeightedLoss(losses, $weights, reduction); +} +export const hingeLoss = op({hingeLoss_}); diff --git a/tfjs-core/src/ops/hinge_loss_test.ts b/tfjs-core/src/ops/hinge_loss_test.ts new file mode 100644 index 00000000000..6208b84ae39 --- /dev/null +++ b/tfjs-core/src/ops/hinge_loss_test.ts @@ -0,0 +1,181 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + +import * as tf from '../index'; +import {ALL_ENVS, describeWithFlags} from '../jasmine_util'; +import {expectArraysClose} from '../test_util'; + +describeWithFlags('hingeLoss', ALL_ENVS, () => { + it('1D', async () => { + const predictions = tf.tensor1d([0, 0, 1, 1]); + const label = tf.tensor1d([0, 1, 0, 1]); + + const y = tf.losses.hingeLoss(label, predictions); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), 1.0); + }); + + it('1D - weighted - Reduction.SUM_BY_NONZERO_WEIGHTS', async () => { + const predictions = tf.tensor1d([0, 0, 1, 1]); + const label = tf.tensor1d([0, 1, 0, 1]); + const weights = tf.tensor1d([0.1, 0.2, 0.3, 0.4]); + + const y = tf.losses.hingeLoss(label, predictions, weights); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), 0.225); + }); + + it('1D - weighted - Reduction.NONE', async () => { + const predictions = tf.tensor1d([0, 0, 1, 1]); + const label = tf.tensor1d([0, 1, 0, 1]); + const weights = tf.tensor1d([0.1, 0.2, 0.3, 0.4]); + + const y = + tf.losses.hingeLoss(label, predictions, weights, tf.Reduction.NONE); + + expect(y.shape).toEqual([4]); + expectArraysClose(await y.data(), [0.1, 0.2, 0.6, 0.0]); + }); + + it('1D - Reduction.MEAN', async () => { + const predictions = tf.tensor1d([0, 0, 1, 1]); + const label = tf.tensor1d([0, 1, 0, 1]); + + const y = + tf.losses.hingeLoss(label, predictions, undefined, tf.Reduction.MEAN); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), 1.0); + }); + + it('1D - weighted - Reduction.MEAN', async () => { + const predictions = tf.tensor1d([0, 0, 1, 1]); + const label = tf.tensor1d([0, 1, 0, 1]); + const weights = tf.tensor1d([0.1, 0.2, 0.3, 0.4]); + + const y = + tf.losses.hingeLoss(label, predictions, weights, tf.Reduction.MEAN); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), 0.9); + }); + + it('2D', async () => { + const 
predictions = tf.tensor2d([0, 0, 0, 1, 1, 1], [2, 3]); + const label = tf.tensor2d([0, 1, 0, 1, 0, 1], [2, 3]); + + const y = tf.losses.hingeLoss(label, predictions); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), 0.8333333); + }); + + it('2D - weighted - Reduction.SUM_BY_NONZERO_WEIGHTS', async () => { + const predictions = tf.tensor2d([0, 0, 0, 1, 1, 1], [2, 3]); + const label = tf.tensor2d([0, 1, 0, 1, 0, 1], [2, 3]); + const weights = tf.tensor2d([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], [2, 3]); + + const y = tf.losses.hingeLoss(label, predictions, weights); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), 0.26666668); + }); + + it('2D - weighted - Reduction.NONE', async () => { + const predictions = tf.tensor2d([0, 0, 0, 1, 1, 1], [2, 3]); + const label = tf.tensor2d([0, 1, 0, 1, 0, 1], [2, 3]); + const weights = tf.tensor2d([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], [2, 3]); + + const y = + tf.losses.hingeLoss(label, predictions, weights, tf.Reduction.NONE); + + expect(y.shape).toEqual([2, 3]); + expectArraysClose(await y.data(), [0.1, 0.2, 0.3, 0, 1, 0]); + }); + + it('2D - Reduction.MEAN', async () => { + const predictions = tf.tensor2d([0, 0, 0, 1, 1, 1], [2, 3]); + const label = tf.tensor2d([0, 1, 0, 1, 0, 1], [2, 3]); + + const y = + tf.losses.hingeLoss(label, predictions, undefined, tf.Reduction.MEAN); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), 0.8333333); + }); + + it('2D - weighted - Reduction.MEAN', async () => { + const predictions = tf.tensor2d([0, 0, 0, 1, 1, 1], [2, 3]); + const label = tf.tensor2d([0, 1, 0, 1, 0, 1], [2, 3]); + const weights = tf.tensor2d([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], [2, 3]); + + const y = + tf.losses.hingeLoss(label, predictions, weights, tf.Reduction.MEAN); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), 0.76190484); + }); + + it('throws when passed label as a non-tensor', () => { + const predictions = tf.tensor2d([1, 0, 1, 0, 1, 0], [2, 3]); + const 
weights = tf.tensor2d([1, 0, 1, 0, 1, 0], [2, 3]); + + const e = /Argument 'labels' passed to 'hingeLoss' must be a Tensor/; + expect( + () => tf.losses.hingeLoss( + {} as tf.Tensor, predictions, weights, tf.Reduction.MEAN)) + .toThrowError(e); + }); + + it('throws when passed label as a non-tensor', () => { + const label = tf.tensor2d([1, 0, 1, 0, 1, 0], [2, 3]); + const weights = tf.tensor2d([1, 0, 1, 0, 1, 0], [2, 3]); + + const e = new RegExp( + 'Argument \'predictions\' passed to \'hingeLoss\' ' + + 'must be a Tensor'); + expect( + () => tf.losses.hingeLoss( + label, {} as tf.Tensor, weights, tf.Reduction.MEAN)) + .toThrowError(e); + }); + + it('throws when passed weights as a non-tensor', () => { + const predictions = tf.tensor2d([1, 0, 1, 0, 1, 0], [2, 3]); + const label = tf.tensor2d([1, 0, 1, 0, 1, 0], [2, 3]); + + const e = /Argument 'weights' passed to 'hingeLoss' must be a Tensor/; + expect( + () => tf.losses.hingeLoss( + label, predictions, {} as tf.Tensor, tf.Reduction.MEAN)) + .toThrowError(e); + }); + + it('accepts a tensor-like object', async () => { + const predictions = [0, 0, 1, 1]; + const label = [0, 1, 0, 1]; + const weights = [0.1, 0.2, 0.3, 0.4]; + + const y = + tf.losses.hingeLoss(label, predictions, weights, tf.Reduction.NONE); + + expect(y.shape).toEqual([4]); + expectArraysClose(await y.data(), [0.1, 0.2, 0.6, 0.0]); + }); +}); diff --git a/tfjs-core/src/ops/huber_loss.ts b/tfjs-core/src/ops/huber_loss.ts new file mode 100644 index 00000000000..1f83e2d1f1d --- /dev/null +++ b/tfjs-core/src/ops/huber_loss.ts @@ -0,0 +1,69 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + +import {Tensor} from '../tensor'; +import {convertToTensor} from '../tensor_util_env'; +import {TensorLike} from '../types'; +import {assertShapesMatch} from '../util'; +import {add} from './add'; +import {computeWeightedLoss} from './compute_weighted_loss'; +import {Reduction} from './loss_ops_utils'; +import {minimum} from './minimum'; +import {mul} from './mul'; +import {op} from './operation'; +import {square} from './square'; +import {sub} from './sub'; +import {scalar} from './tensor_ops'; +import {abs} from './unary_ops'; + +/** + * Computes the huber loss between two tensors. + * + * @param labels The ground truth output tensor, same dimensions as + * 'predictions'. + * @param predictions The predicted outputs. + * @param weights Tensor whose rank is either 0, or the same rank as + * `labels`, and must be broadcastable to `labels` (i.e., all dimensions + * must be either `1`, or the same as the corresponding `losses` + * dimension). + * @param delta Point where huber loss changes from quadratic to linear. + * @param reduction Type of reduction to apply to loss. Should be of type + * `Reduction`. 
+ */ +/** @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'} */ +function huberLoss_( + labels: T|TensorLike, predictions: T|TensorLike, + weights?: Tensor|TensorLike, delta = 1.0, + reduction = Reduction.SUM_BY_NONZERO_WEIGHTS): O { + const $labels = convertToTensor(labels, 'labels', 'huberLoss'); + const $predictions = convertToTensor(predictions, 'predictions', 'huberLoss'); + let $weights: Tensor = null; + if (weights != null) { + $weights = convertToTensor(weights, 'weights', 'huberLoss'); + } + assertShapesMatch($labels.shape, $predictions.shape, 'Error in huberLoss: '); + + const deltaScalar = scalar(delta); + const error = abs(sub($predictions, $labels)); + const quadratic = minimum(error, deltaScalar); + const linear = sub(error, quadratic); + + const losses = + add(mul(scalar(0.5), square(quadratic)), mul(deltaScalar, linear)); + return computeWeightedLoss(losses, $weights, reduction); +} +export const huberLoss = op({huberLoss_}); diff --git a/tfjs-core/src/ops/huber_loss_test.ts b/tfjs-core/src/ops/huber_loss_test.ts new file mode 100644 index 00000000000..cb55f7b1ab9 --- /dev/null +++ b/tfjs-core/src/ops/huber_loss_test.ts @@ -0,0 +1,191 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ +import * as tf from '../index'; +import {ALL_ENVS, describeWithFlags} from '../jasmine_util'; +import {expectArraysClose} from '../test_util'; + +describeWithFlags('huberLoss', ALL_ENVS, () => { + it('1D', async () => { + const labels = tf.tensor1d([1, 2, 3]); + const predictions = tf.tensor1d([0.3, 0.6, 0.1]); + + const y = tf.losses.huberLoss(labels, predictions); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), 1.1816667); + }); + + it('1D - delta', async () => { + const labels = tf.tensor1d([1, 2, 3]); + const predictions = tf.tensor1d([0.3, 0.6, 0.1]); + const delta = 0.4; + + const y = tf.losses.huberLoss(labels, predictions, undefined, delta); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), 0.58666664); + }); + + it('1D - weighted - Reduction.SUM_BY_NONZERO_WEIGHTS', async () => { + const labels = tf.tensor1d([1, 2, 3]); + const predictions = tf.tensor1d([0.3, 0.6, 0.1]); + const weights = tf.tensor1d([0.1, 0.2, 0.3]); + + const y = tf.losses.huberLoss(labels, predictions, weights); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), 0.30816665); + }); + + it('1D - weighted - Reduction.NONE', async () => { + const labels = tf.tensor1d([1, 2, 3]); + const predictions = tf.tensor1d([0.3, 0.6, 0.1]); + const weights = tf.tensor1d([0.1, 0.2, 0.3]); + + const y = tf.losses.huberLoss( + labels, predictions, weights, undefined, tf.Reduction.NONE); + + expect(y.shape).toEqual([3]); + expectArraysClose(await y.data(), [0.0245, 0.17999999, 0.72]); + }); + + it('1D - Reduction.MEAN', async () => { + const labels = tf.tensor1d([1, 2, 3]); + const predictions = tf.tensor1d([0.3, 0.6, 0.1]); + + const y = tf.losses.huberLoss( + labels, predictions, undefined, undefined, tf.Reduction.MEAN); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), 1.1816667); + }); + + it('1D - weighted - Reduction.MEAN', async () => { 
+ const labels = tf.tensor1d([1, 2, 3]); + const predictions = tf.tensor1d([0.3, 0.6, 0.1]); + const weights = tf.tensor1d([0.1, 0.2, 0.3]); + + const y = tf.losses.huberLoss( + labels, predictions, weights, undefined, tf.Reduction.MEAN); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), 1.5408332); + }); + + it('2D', async () => { + const labels = tf.tensor2d([0.4, 0.8, 0.12, 0.8, 0.1, 0.3], [2, 3]); + const predictions = tf.tensor2d([0.1, 0.7, 0.1, 0.5, 0.05, 0.15], [2, 3]); + + const y = tf.losses.huberLoss(labels, predictions); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), 0.01795); + }); + + it('2D - weighted - Reduction.SUM_BY_NONZERO_WEIGHTS', async () => { + const labels = tf.tensor2d([0.4, 0.8, 0.12, 0.8, 0.1, 0.3], [2, 3]); + const predictions = tf.tensor2d([0.1, 0.7, 0.1, 0.5, 0.05, 0.15], [2, 3]); + const weights = tf.tensor2d([3, 0, 5, 0, 4, 2], [2, 3]); + + const y = tf.losses.huberLoss(labels, predictions, weights); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), 0.040875003); + }); + + it('2D - weighted - Reduction.NONE', async () => { + const labels = tf.tensor2d([0.4, 0.8, 0.12, 0.8, 0.1, 0.3], [2, 3]); + const predictions = tf.tensor2d([0.1, 0.7, 0.1, 0.5, 0.05, 0.15], [2, 3]); + const weights = tf.tensor2d([3, 0, 5, 0, 4, 2], [2, 3]); + + const y = tf.losses.huberLoss( + labels, predictions, weights, undefined, tf.Reduction.NONE); + + expect(y.shape).toEqual([2, 3]); + expectArraysClose(await y.data(), [0.135, 0., 0.001, 0., 0.005, 0.0225]); + }); + + it('2D - Reduction.MEAN', async () => { + const labels = tf.tensor2d([0.4, 0.8, 0.12, 0.8, 0.1, 0.3], [2, 3]); + const predictions = tf.tensor2d([0.1, 0.7, 0.1, 0.5, 0.05, 0.15], [2, 3]); + + const y = tf.losses.huberLoss( + labels, predictions, undefined, undefined, tf.Reduction.MEAN); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), 0.01795); + }); + + it('2D - weighted - Reduction.MEAN', async () => { + const 
labels = tf.tensor2d([0.4, 0.8, 0.12, 0.8, 0.1, 0.3], [2, 3]); + const predictions = tf.tensor2d([0.1, 0.7, 0.1, 0.5, 0.05, 0.15], [2, 3]); + const weights = tf.tensor2d([3, 0, 5, 0, 4, 2], [2, 3]); + + const y = tf.losses.huberLoss( + labels, predictions, weights, undefined, tf.Reduction.MEAN); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), 0.011678572); + }); + + it('throws when passed label as a non-tensor', () => { + const predictions = tf.tensor2d([0.1, 0.7, 0.1, 0.5, 0.05, 0.15], [2, 3]); + const weights = tf.tensor2d([3, 6, 5, 0, 4, 2], [2, 3]); + + const e = /Argument 'labels' passed to 'huberLoss' must be a Tensor/; + expect( + () => tf.losses.huberLoss( + {} as tf.Tensor, predictions, weights, tf.Reduction.MEAN)) + .toThrowError(e); + }); + + it('throws when passed label as a non-tensor', () => { + const labels = tf.tensor2d([0.4, 0.8, 0.12, 0.8, 0.1, 0.3], [2, 3]); + const weights = tf.tensor2d([3, 6, 5, 0, 4, 2], [2, 3]); + + const e = new RegExp( + 'Argument \'predictions\' passed to \'huberLoss\' ' + + 'must be a Tensor'); + expect( + () => tf.losses.huberLoss( + labels, {} as tf.Tensor, weights, tf.Reduction.MEAN)) + .toThrowError(e); + }); + + it('throws when passed weights as a non-tensor', () => { + const labels = tf.tensor2d([0.4, 0.8, 0.12, 0.8, 0.1, 0.3], [2, 3]); + const predictions = tf.tensor2d([0.1, 0.7, 0.1, 0.5, 0.05, 0.15], [2, 3]); + + const e = /Argument 'weights' passed to 'huberLoss' must be a Tensor/; + expect( + () => tf.losses.huberLoss( + labels, predictions, {} as tf.Tensor, tf.Reduction.MEAN)) + .toThrowError(e); + }); + + it('accepts a tensor-like object', async () => { + const labels = [1, 2, 3]; + const predictions = [0.3, 0.6, 0.1]; + const weights = [0.1, 0.2, 0.3]; + + const y = tf.losses.huberLoss( + labels, predictions, weights, undefined, tf.Reduction.NONE); + + expect(y.shape).toEqual([3]); + expectArraysClose(await y.data(), [0.0245, 0.17999999, 0.72]); + }); +}); diff --git 
a/tfjs-core/src/ops/log_loss.ts b/tfjs-core/src/ops/log_loss.ts new file mode 100644 index 00000000000..acbe547002b --- /dev/null +++ b/tfjs-core/src/ops/log_loss.ts @@ -0,0 +1,67 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + +import {Tensor} from '../tensor'; +import {convertToTensor} from '../tensor_util_env'; +import {TensorLike} from '../types'; +import {assertShapesMatch} from '../util'; +import {add} from './add'; +import {computeWeightedLoss} from './compute_weighted_loss'; +import {Reduction} from './loss_ops_utils'; +import {mul} from './mul'; +import {op} from './operation'; +import {sub} from './sub'; +import {scalar} from './tensor_ops'; +import {log, neg} from './unary_ops'; + +/** + * Computes the log loss between two tensors. + * + * @param labels The ground truth output tensor, same dimensions as + * 'predictions'. + * @param predictions The predicted outputs. + * @param weights Tensor whose rank is either 0, or the same rank as + * `labels`, and must be broadcastable to `labels` (i.e., all dimensions + * must be either `1`, or the same as the corresponding `losses` + * dimension). + * @param epsilon A small increment to avoid taking log of zero + * @param reduction Type of reduction to apply to loss. 
Should be of type + * `Reduction` + */ +/** @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'} */ +function logLoss_( + labels: T|TensorLike, predictions: T|TensorLike, + weights?: Tensor|TensorLike, epsilon = 1e-7, + reduction = Reduction.SUM_BY_NONZERO_WEIGHTS): O { + const $labels = convertToTensor(labels, 'labels', 'logLoss'); + const $predictions = convertToTensor(predictions, 'predictions', 'logLoss'); + let $weights: Tensor = null; + if (weights != null) { + $weights = convertToTensor(weights, 'weights', 'logLoss'); + } + assertShapesMatch($labels.shape, $predictions.shape, 'Error in logLoss: '); + + const one = scalar(1); + const epsilonScalar = scalar(epsilon); + + const l1 = neg(mul($labels, log(add($predictions, epsilonScalar)))); + const l2 = + mul(sub(one, $labels), log(add(sub(one, $predictions), epsilonScalar))); + const losses = sub(l1, l2); + return computeWeightedLoss(losses, $weights, reduction); +} +export const logLoss = op({logLoss_}); diff --git a/tfjs-core/src/ops/log_loss_test.ts b/tfjs-core/src/ops/log_loss_test.ts new file mode 100644 index 00000000000..9a94939f1e4 --- /dev/null +++ b/tfjs-core/src/ops/log_loss_test.ts @@ -0,0 +1,191 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ +import * as tf from '../index'; +import {ALL_ENVS, describeWithFlags} from '../jasmine_util'; +import {expectArraysClose} from '../test_util'; + +describeWithFlags('logLoss', ALL_ENVS, () => { + it('1D', async () => { + const labels = tf.tensor1d([1, 2, 3]); + const predictions = tf.tensor1d([0.3, 0.6, 0.1]); + + const y = tf.losses.logLoss(labels, predictions); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), 2.668788); + }); + + it('1D - Check for negative values', async () => { + const labels = tf.tensor1d([1, 2, 3]); + const predictions = tf.tensor1d([0.3, -0.6, -0.1]); + + const y = tf.losses.logLoss(labels, predictions); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), NaN); + }); + + it('1D - weighted - Reduction.SUM_BY_NONZERO_WEIGHTS', async () => { + const labels = tf.tensor1d([1, 2, 3]); + const predictions = tf.tensor1d([0.3, 0.6, 0.1]); + const weights = tf.tensor1d([0.1, 0.2, 0.3]); + + const y = tf.losses.logLoss(labels, predictions, weights); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), 0.7168596); + }); + + it('1D - weighted - Reduction.NONE', async () => { + const labels = tf.tensor1d([1, 2, 3]); + const predictions = tf.tensor1d([0.3, 0.6, 0.1]); + const weights = tf.tensor1d([0.1, 0.2, 0.3]); + + const y = tf.losses.logLoss( + labels, predictions, weights, undefined, tf.Reduction.NONE); + + expect(y.shape).toEqual([3]); + expectArraysClose(await y.data(), [0.12039725, 0.02107204, 2.0091095]); + }); + + it('1D - Reduction.MEAN', async () => { + const labels = tf.tensor1d([1, 2, 3]); + const predictions = tf.tensor1d([0.3, 0.6, 0.1]); + + const y = tf.losses.logLoss( + labels, predictions, undefined, undefined, tf.Reduction.MEAN); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), 2.668788); + }); + + it('1D - weighted - Reduction.MEAN', async () => { + const labels = 
tf.tensor1d([1, 2, 3]); + const predictions = tf.tensor1d([0.3, 0.6, 0.1]); + const weights = tf.tensor1d([0.1, 0.2, 0.3]); + + const y = tf.losses.logLoss( + labels, predictions, weights, undefined, tf.Reduction.MEAN); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), 3.5842977); + }); + + it('2D', async () => { + const labels = tf.tensor2d([0.4, 0.8, 0.12, 0.8, 0.1, 0.3], [2, 3]); + const predictions = tf.tensor2d([0.1, 0.7, 0.1, 0.5, 0.05, 0.15], [2, 3]); + + const y = tf.losses.logLoss(labels, predictions); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), 0.60019904); + }); + + it('2D - weighted - Reduction.SUM_BY_NONZERO_WEIGHTS', async () => { + const labels = tf.tensor2d([0.4, 0.8, 0.12, 0.8, 0.1, 0.3], [2, 3]); + const predictions = tf.tensor2d([0.1, 0.7, 0.1, 0.5, 0.05, 0.15], [2, 3]); + const weights = tf.tensor2d([3, 0, 5, 0, 4, 2], [2, 3]); + + const y = tf.losses.logLoss(labels, predictions, weights); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), 1.8866577); + }); + + it('2D - weighted - Reduction.NONE', async () => { + const labels = tf.tensor2d([0.4, 0.8, 0.12, 0.8, 0.1, 0.3], [2, 3]); + const predictions = tf.tensor2d([0.1, 0.7, 0.1, 0.5, 0.05, 0.15], [2, 3]); + const weights = tf.tensor2d([3, 0, 5, 0, 4, 2], [2, 3]); + + const y = tf.losses.logLoss( + labels, predictions, weights, undefined, tf.Reduction.NONE); + + expect(y.shape).toEqual([2, 3]); + expectArraysClose( + await y.data(), [2.9527497, 0., 1.8451363, 0., 1.3829476, 1.3657978]); + }); + + it('2D - Reduction.MEAN', async () => { + const labels = tf.tensor2d([0.4, 0.8, 0.12, 0.8, 0.1, 0.3], [2, 3]); + const predictions = tf.tensor2d([0.1, 0.7, 0.1, 0.5, 0.05, 0.15], [2, 3]); + + const y = tf.losses.logLoss( + labels, predictions, undefined, undefined, tf.Reduction.MEAN); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), 0.60019904); + }); + + it('2D - weighted - Reduction.MEAN', async () => { + const labels = 
tf.tensor2d([0.4, 0.8, 0.12, 0.8, 0.1, 0.3], [2, 3]); + const predictions = tf.tensor2d([0.1, 0.7, 0.1, 0.5, 0.05, 0.15], [2, 3]); + const weights = tf.tensor2d([3, 0, 5, 0, 4, 2], [2, 3]); + + const y = tf.losses.logLoss( + labels, predictions, weights, undefined, tf.Reduction.MEAN); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), 0.53904504); + }); + + it('throws when passed label as a non-tensor', () => { + const predictions = tf.tensor2d([0.1, 0.7, 0.1, 0.5, 0.05, 0.15], [2, 3]); + const weights = tf.tensor2d([3, 6, 5, 0, 4, 2], [2, 3]); + + const e = /Argument 'labels' passed to 'logLoss' must be a Tensor/; + expect( + () => tf.losses.logLoss( + {} as tf.Tensor, predictions, weights, tf.Reduction.MEAN)) + .toThrowError(e); + }); + + it('throws when passed label as a non-tensor', () => { + const labels = tf.tensor2d([0.4, 0.8, 0.12, 0.8, 0.1, 0.3], [2, 3]); + const weights = tf.tensor2d([3, 6, 5, 0, 4, 2], [2, 3]); + + const e = new RegExp( + 'Argument \'predictions\' passed to \'logLoss\' ' + + 'must be a Tensor'); + expect( + () => tf.losses.logLoss( + labels, {} as tf.Tensor, weights, tf.Reduction.MEAN)) + .toThrowError(e); + }); + + it('throws when passed weights as a non-tensor', () => { + const labels = tf.tensor2d([0.4, 0.8, 0.12, 0.8, 0.1, 0.3], [2, 3]); + const predictions = tf.tensor2d([0.1, 0.7, 0.1, 0.5, 0.05, 0.15], [2, 3]); + + const e = /Argument 'weights' passed to 'logLoss' must be a Tensor/; + expect( + () => tf.losses.logLoss( + labels, predictions, {} as tf.Tensor, tf.Reduction.MEAN)) + .toThrowError(e); + }); + + it('accepts a tensor-like object', async () => { + const labels = [1, 2, 3]; + const predictions = [0.3, 0.6, 0.1]; + const weights = [0.1, 0.2, 0.3]; + + const y = tf.losses.logLoss( + labels, predictions, weights, undefined, tf.Reduction.NONE); + + expect(y.shape).toEqual([3]); + expectArraysClose(await y.data(), [0.12039725, 0.02107204, 2.0091095]); + }); +}); diff --git a/tfjs-core/src/ops/loss_ops.ts 
b/tfjs-core/src/ops/loss_ops.ts deleted file mode 100644 index 62ba2921858..00000000000 --- a/tfjs-core/src/ops/loss_ops.ts +++ /dev/null @@ -1,495 +0,0 @@ -/** - * @license - * Copyright 2018 Google Inc. All Rights Reserved. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * ============================================================================= - */ - -import {customGrad} from '../gradients'; -import {Tensor} from '../tensor'; -import {GradSaveFunc} from '../tensor_types'; -import {convertToTensor} from '../tensor_util_env'; -import {TensorLike} from '../types'; -import {assertShapesMatch} from '../util'; -import {expandShapeToKeepDim} from './axis_util'; -import {minimum} from './minimum'; -import {op} from './operation'; -import {ones, scalar} from './tensor_ops'; - -export enum Reduction { - NONE, - MEAN, - SUM, - SUM_BY_NONZERO_WEIGHTS -} - -/** - * Computes the weighted loss between two tensors. - * - * @param losses Tensor of shape `[batch_size, d1, ... dN]`. - * @param weights Tensor whose rank is either 0, or the same rank as - * `losses`, and must be broadcastable to `losses` (i.e., all - * dimensions must be either `1`, or the same as the corresponding - * `losses` dimension). 
- */ -/** @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'} */ -function computeWeightedLoss_( - losses: T|TensorLike, weights?: Tensor|TensorLike, - reduction = Reduction.SUM_BY_NONZERO_WEIGHTS): O { - const $losses = convertToTensor(losses, 'losses', 'computeWeightedLoss'); - let $weights: Tensor = null; - if (weights != null) { - $weights = convertToTensor(weights, 'weights', 'computeWeightedLoss'); - } - - const weightedLoss = ($weights == null) ? $losses : $losses.mul($weights); - - if (reduction === Reduction.NONE) { - return weightedLoss as O; - } - if (reduction === Reduction.SUM) { - return weightedLoss.sum(); - } - if (reduction === Reduction.MEAN) { - if ($weights == null) { - return weightedLoss.mean(); - } else { - const broadcastFactor = $losses.size / $weights.size; - const result = weightedLoss.sum().div($weights.sum()); - return broadcastFactor > 1 ? result.div(scalar(broadcastFactor)) : - result as O; - } - } - if (reduction === Reduction.SUM_BY_NONZERO_WEIGHTS) { - if ($weights == null) { - return weightedLoss.sum().div(scalar($losses.size)); - } else { - const broadcastedWeights = $weights.mul(ones($losses.shape)); - - const numNonZeros = - broadcastedWeights.notEqual(scalar(0)).sum().toFloat(); - return weightedLoss.sum().div(numNonZeros); - } - } - - throw Error(`Unknown reduction: ${reduction}`); -} - -/** - * Computes the absolute difference loss between two tensors. - * - * @param labels The ground truth output tensor, same dimensions as - * 'predictions'. - * @param predictions The predicted outputs. - * @param weights Tensor whose rank is either 0, or the same rank as - * `labels`, and must be broadcastable to `labels` (i.e., all dimensions - * must be either `1`, or the same as the corresponding `losses` - * dimension). - * @param reduction Type of reduction to apply to loss. 
Should be of type - * `Reduction` - */ -/** @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'} */ -function absoluteDifference_( - labels: T|TensorLike, predictions: T|TensorLike, - weights?: Tensor|TensorLike, - reduction = Reduction.SUM_BY_NONZERO_WEIGHTS): O { - const $labels = convertToTensor(labels, 'labels', 'absoluteDifference'); - const $predictions = - convertToTensor(predictions, 'predictions', 'absoluteDifference'); - let $weights: Tensor = null; - if (weights != null) { - $weights = convertToTensor(weights, 'weights', 'absoluteDifference'); - } - assertShapesMatch( - $labels.shape, $predictions.shape, 'Error in absoluteDifference: '); - - const losses = $labels.sub($predictions).abs(); - return computeWeightedLoss(losses, $weights, reduction); -} - -/** - * Computes the mean squared error between two tensors. - * - * @param labels The ground truth output tensor, same dimensions as - * 'predictions'. - * @param predictions The predicted outputs. - * @param weights Tensor whose rank is either 0, or the same rank as - * `labels`, and must be broadcastable to `labels` (i.e., all dimensions - * must be either `1`, or the same as the corresponding `losses` - * dimension). - * @param reduction Type of reduction to apply to loss. 
Should be of type - * `Reduction` - */ -/** @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'} */ -function meanSquaredError_( - labels: T|TensorLike, predictions: T|TensorLike, - weights?: Tensor|TensorLike, - reduction = Reduction.SUM_BY_NONZERO_WEIGHTS): O { - const $labels = convertToTensor(labels, 'labels', 'meanSquaredError'); - const $predictions = - convertToTensor(predictions, 'predictions', 'meanSquaredError'); - let $weights: Tensor = null; - if (weights != null) { - $weights = convertToTensor(weights, 'weights', 'meanSquaredError'); - } - assertShapesMatch( - $labels.shape, $predictions.shape, 'Error in meanSquaredError: '); - - const losses = $labels.squaredDifference($predictions); - return computeWeightedLoss(losses, $weights, reduction); -} - -/** - * Computes the cosine distance loss between two tensors. - * - * @param labels The ground truth output tensor, same dimensions as - * 'predictions'. - * @param predictions The predicted outputs. - * @param axis The dimension along which the cosine distance is computed. - * @param weights Tensor whose rank is either 0, or the same rank as - * `labels`, and must be broadcastable to `labels` (i.e., all dimensions - * must be either `1`, or the same as the corresponding `losses` - * dimension). - * @param reduction Type of reduction to apply to loss. 
Should be of type - * `Reduction` - */ -/** @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'} */ -function cosineDistance_( - labels: T|TensorLike, predictions: T|TensorLike, axis: number, - weights?: Tensor|TensorLike, - reduction = Reduction.SUM_BY_NONZERO_WEIGHTS): O { - const $labels = convertToTensor(labels, 'labels', 'cosineDistance'); - const $predictions = - convertToTensor(predictions, 'predictions', 'cosineDistance'); - let $weights: Tensor = null; - if (weights != null) { - $weights = convertToTensor(weights, 'weights', 'cosineDistance'); - } - assertShapesMatch( - $labels.shape, $predictions.shape, 'Error in cosineDistance: '); - - const one = scalar(1); - const losses = one.sub($labels.mul($predictions).sum(axis, true)); - return computeWeightedLoss(losses, $weights, reduction); -} - -/** - * Computes the Hinge loss between two tensors. - * - * @param labels The ground truth output tensor, same dimensions as - * 'predictions'. - * @param predictions The predicted outputs. - * @param weights Tensor whose rank is either 0, or the same rank as - * `labels`, and must be broadcastable to `labels` (i.e., all dimensions - * must be either `1`, or the same as the corresponding `losses` - * dimension). - * @param reduction Type of reduction to apply to loss. 
Should be of type - * `Reduction` - */ -/** @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'} */ -function hingeLoss_( - labels: T|TensorLike, predictions: T|TensorLike, - weights?: Tensor|TensorLike, - reduction = Reduction.SUM_BY_NONZERO_WEIGHTS): O { - let $labels = convertToTensor(labels, 'labels', 'hingeLoss'); - const $predictions = convertToTensor(predictions, 'predictions', 'hingeLoss'); - let $weights: Tensor = null; - if (weights != null) { - $weights = convertToTensor(weights, 'weights', 'hingeLoss'); - } - assertShapesMatch($labels.shape, $predictions.shape, 'Error in hingeLoss: '); - - const one = scalar(1); - // Convert binary labels to (-1, 1) - $labels = scalar(2).mul($labels).sub(one); - const losses = one.sub($labels.mul($predictions)).relu(); - return computeWeightedLoss(losses, $weights, reduction); -} - -/** - * Computes the log loss between two tensors. - * - * @param labels The ground truth output tensor, same dimensions as - * 'predictions'. - * @param predictions The predicted outputs. - * @param weights Tensor whose rank is either 0, or the same rank as - * `labels`, and must be broadcastable to `labels` (i.e., all dimensions - * must be either `1`, or the same as the corresponding `losses` - * dimension). - * @param epsilon A small increment to avoid taking log of zero - * @param reduction Type of reduction to apply to loss. 
Should be of type - * `Reduction` - */ -/** @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'} */ -function logLoss_( - labels: T|TensorLike, predictions: T|TensorLike, - weights?: Tensor|TensorLike, epsilon = 1e-7, - reduction = Reduction.SUM_BY_NONZERO_WEIGHTS): O { - const $labels = convertToTensor(labels, 'labels', 'logLoss'); - const $predictions = convertToTensor(predictions, 'predictions', 'logLoss'); - let $weights: Tensor = null; - if (weights != null) { - $weights = convertToTensor(weights, 'weights', 'logLoss'); - } - assertShapesMatch($labels.shape, $predictions.shape, 'Error in logLoss: '); - - const one = scalar(1); - const epsilonScalar = scalar(epsilon); - const losses = $labels.mul($predictions.add(epsilonScalar).log()) - .neg() - .sub(one.sub($labels).mul( - one.sub($predictions).add(epsilonScalar).log())); - return computeWeightedLoss(losses, $weights, reduction); -} - -function sigmoidCrossEntropyWithLogits_( - labels: T|TensorLike, logits: T|TensorLike): O { - const $labels = - convertToTensor(labels, 'labels', 'sigmoidCrossEntropyWithLogits'); - const $logits = - convertToTensor(logits, 'logits', 'sigmoidCrossEntropyWithLogits'); - assertShapesMatch( - $labels.shape, $logits.shape, 'Error in sigmoidCrossEntropyWithLogits: '); - - /** - * Implementation Details: - * - * For brevity, let `x = logits`, `z = labels`. 
The logistic loss is - * z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)) - * = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x))) - * = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x))) - * = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)) - * = (1 - z) * x + log(1 + exp(-x)) - * = x - x * z + log(1 + exp(-x)) - * - * For x < 0, to avoid overflow in exp(-x), we reformulate the above - * x - x * z + log(1 + exp(-x)) - * = log(exp(x)) - x * z + log(1 + exp(-x)) - * = - x * z + log(1 + exp(x)) - * - * Hence, to ensure stability and avoid overflow, the implementation uses - * this equivalent formulation: - * max(x, 0) - x * z + log(1 + exp(-abs(x))) - */ - const maxOutput = $logits.relu(); - const outputXTarget = $logits.mul($labels); - const sigmoidOutput = $logits.abs().neg().exp().log1p(); - - return maxOutput.sub(outputXTarget).add(sigmoidOutput); -} - -/** - * Computes the sigmoid cross entropy loss between two tensors. - * - * If labelSmoothing is nonzero, smooth the labels towards 1/2: - * - * newMulticlassLabels = multiclassLabels * (1 - labelSmoothing) - * + 0.5 * labelSmoothing - * - * @param multiClassLabels The ground truth output tensor of shape - * [batch_size, num_classes], same dimensions as 'predictions'. - * @param logits The predicted outputs. - * @param weights Tensor whose rank is either 0, or the same rank as - * `labels`, and must be broadcastable to `labels` (i.e., all dimensions - * must be either `1`, or the same as the corresponding `losses` - * dimension). - * @param labelSmoothing If greater than 0, then smooth the labels. - * @param reduction Type of reduction to apply to loss. 
Should be of type - * `Reduction` - */ -/** @doc { heading: 'Training', subheading: 'Losses', namespace: 'losses' } */ -function sigmoidCrossEntropy_( - multiClassLabels: T|TensorLike, logits: T|TensorLike, - weights?: Tensor|TensorLike, labelSmoothing = 0, - reduction = Reduction.SUM_BY_NONZERO_WEIGHTS): O { - let $multiClassLabels = convertToTensor( - multiClassLabels, 'multiClassLabels', 'sigmoidCrossEntropy'); - const $logits = convertToTensor(logits, 'logits', 'sigmoidCrossEntropy'); - let $weights: Tensor = null; - if (weights != null) { - $weights = convertToTensor(weights, 'weights', 'sigmoidCrossEntropy'); - } - assertShapesMatch( - $multiClassLabels.shape, $logits.shape, 'Error in sigmoidCrossEntropy: '); - - if (labelSmoothing > 0) { - const labelSmoothingScalar = scalar(labelSmoothing); - const one = scalar(1); - const half = scalar(0.5); - - $multiClassLabels = $multiClassLabels.mul(one.sub(labelSmoothingScalar)) - .add(half.mul(labelSmoothingScalar)); - } - const losses = sigmoidCrossEntropyWithLogits_($multiClassLabels, $logits); - - return computeWeightedLoss(losses, $weights, reduction); -} - -/** - * Computes the huber loss between two tensors. - * - * @param labels The ground truth output tensor, same dimensions as - * 'predictions'. - * @param predictions The predicted outputs. - * @param weights Tensor whose rank is either 0, or the same rank as - * `labels`, and must be broadcastable to `labels` (i.e., all dimensions - * must be either `1`, or the same as the corresponding `losses` - * dimension). - * @param delta Point where huber loss changes from quadratic to linear. - * @param reduction Type of reduction to apply to loss. Should be of type - * `Reduction`. 
- */ -/** @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'} */ -function huberLoss_( - labels: T|TensorLike, predictions: T|TensorLike, - weights?: Tensor|TensorLike, delta = 1.0, - reduction = Reduction.SUM_BY_NONZERO_WEIGHTS): O { - const $labels = convertToTensor(labels, 'labels', 'huberLoss'); - const $predictions = convertToTensor(predictions, 'predictions', 'huberLoss'); - let $weights: Tensor = null; - if (weights != null) { - $weights = convertToTensor(weights, 'weights', 'huberLoss'); - } - assertShapesMatch($labels.shape, $predictions.shape, 'Error in huberLoss: '); - - const deltaScalar = scalar(delta); - const error = $predictions.sub($labels).abs(); - const quadratic = minimum(error, deltaScalar); - const linear = error.sub(quadratic); - - const losses = - scalar(0.5).mul(quadratic.square()).add(deltaScalar.mul(linear)); - return computeWeightedLoss(losses, $weights, reduction); -} - -/** - * Computes softmax cross entropy between logits and labels. - * - * Measures the probability error in discrete classification tasks in which - * the classes are mutually exclusive (each entry is in exactly one class). - * For example, each CIFAR-10 image is labeled with one and only one label: an - * image can be a dog or a truck, but not both. - * - * `NOTE`: While the classes are mutually exclusive, their probabilities need - * not be. All that is required is that each row of labels is a valid - * probability distribution. If they are not, the computation of the gradient - * will be incorrect. - * - * `WARNING`: This op expects unscaled logits, since it performs a softmax on - * logits internally for efficiency. Do not call this op with the output of - * softmax, as it will produce incorrect results. - * - * logits and labels must have the same shape, e.g. [batch_size, num_classes] - * and the same dtype. - * @param labels The labels array. - * @param logits The logits array. - * @param dim The dimension softmax would be performed on. 
Defaults to `-1` - * which indicates the last dimension. - */ -function softmaxCrossEntropyWithLogits_( - labels: T, logits: T, dim = -1): O { - if (dim === -1) { - dim = logits.rank - 1; - } - - if (dim !== logits.rank - 1) { - throw Error( - `Softmax cross entropy along a non-last dimension is not yet ` + - `supported. Labels / logits was rank ${logits.rank} ` + - `and dim was ${dim}`); - } - // Use a custom gradient for numerical stability. - const customOp = - customGrad((labels: Tensor, logits: Tensor, save: GradSaveFunc) => { - // Reference: - // 1. http://cs231n.github.io/linear-classify/#softmax - // 2. https://blog.feedly.com/tricks-of-the-trade-logsumexp/ - const keepDims = true; - const lse = logits.logSumExp([dim], keepDims); - const logResult = logits.toFloat().sub(lse); - save([labels, logResult]); - - const costVector = logResult.mul(labels).neg(); - const value: O = costVector.sum([dim]); - - const gradFunc = (dy: O, saved: Tensor[]) => { - const [labels, logResult] = saved; - const dyShape = expandShapeToKeepDim(dy.shape, [dim]); - return [ - dy.reshape(dyShape).mul(labels.toFloat().sub(logResult.exp())), - dy.reshape(dyShape).mul(logResult.exp().sub(labels.toFloat())), - ]; - }; - return {value, gradFunc}; - }); - - return customOp(labels, logits); -} - -/** - * Computes the softmax cross entropy loss between two tensors. - * - * If labelSmoothing is nonzero, smooth the labels towards 1/2: - * - * newOnehotLabels = onehotLabels * (1 - labelSmoothing) - * + labelSmoothing / numClasses - * - * @param onehotLabels One hot encoded labels - * [batch_size, num_classes], same dimensions as 'predictions'. - * @param logits The predicted outputs. - * @param weights Tensor whose rank is either 0, or 1, and must be - * broadcastable to `loss` of shape [batch_size] - * @param labelSmoothing If greater than 0, then smooth the labels. - * @param reduction Type of reduction to apply to loss. 
Should be of type - * `Reduction` - */ -/** @doc { heading: 'Training', subheading: 'Losses', namespace: 'losses' } */ -function softmaxCrossEntropy_( - onehotLabels: T|TensorLike, logits: T|TensorLike, - weights?: Tensor|TensorLike, labelSmoothing = 0, - reduction = Reduction.SUM_BY_NONZERO_WEIGHTS): O { - let $onehotLabels = - convertToTensor(onehotLabels, 'onehotLabels', 'softmaxCrossEntropy'); - const $logits = convertToTensor(logits, 'logits', 'softmaxCrossEntropy'); - let $weights: Tensor = null; - - if (weights != null) { - $weights = convertToTensor(weights, 'weights', 'softmaxCrossEntropy'); - } - - assertShapesMatch( - $onehotLabels.shape, $logits.shape, 'Error in softmaxCrossEntropy: '); - - if (labelSmoothing > 0) { - const labelSmoothingScalar = scalar(labelSmoothing); - const one = scalar(1); - const numClasses = scalar($onehotLabels.shape[1]); - - $onehotLabels = $onehotLabels.mul(one.sub(labelSmoothingScalar)) - .add(labelSmoothingScalar.div(numClasses)); - } - - const losses = softmaxCrossEntropyWithLogits_($onehotLabels, $logits); - - return computeWeightedLoss(losses, $weights, reduction); -} - -export const absoluteDifference = op({absoluteDifference_}); -export const computeWeightedLoss = op({computeWeightedLoss_}); -export const cosineDistance = op({cosineDistance_}); -export const hingeLoss = op({hingeLoss_}); -export const huberLoss = op({huberLoss_}); -export const logLoss = op({logLoss_}); -export const meanSquaredError = op({meanSquaredError_}); -export const sigmoidCrossEntropy = op({sigmoidCrossEntropy_}); -export const softmaxCrossEntropy = op({softmaxCrossEntropy_}); diff --git a/tfjs-core/src/ops/loss_ops_test.ts b/tfjs-core/src/ops/loss_ops_test.ts deleted file mode 100644 index 448a44dce57..00000000000 --- a/tfjs-core/src/ops/loss_ops_test.ts +++ /dev/null @@ -1,1652 +0,0 @@ -/** - * @license - * Copyright 2018 Google Inc. All Rights Reserved. 
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * ============================================================================= - */ - -import * as tf from '../index'; -import {ALL_ENVS, describeWithFlags} from '../jasmine_util'; -import {expectArraysClose} from '../test_util'; - -describeWithFlags('computeWeightedLoss', ALL_ENVS, () => { - it('1D - no weights', async () => { - const losses = tf.tensor1d([1, 2, 3]); - - const y = tf.losses.computeWeightedLoss(losses); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), (1 + 2 + 3) / 3); - }); - - it('1D - no weights - Reduction.NONE', async () => { - const losses = tf.tensor1d([1, 2, 3]); - - const y = - tf.losses.computeWeightedLoss(losses, undefined, tf.Reduction.NONE); - - expect(y.shape).toEqual([3]); - expectArraysClose(await y.data(), [1, 2, 3]); - }); - - it('1D - no weights - Reduction.MEAN', async () => { - const losses = tf.tensor1d([1, 2, 3]); - - const y = - tf.losses.computeWeightedLoss(losses, undefined, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), (1 + 2 + 3) / 3); - }); - - it('1D - no weights - Reduction.SUM', async () => { - const losses = tf.tensor1d([1, 2, 3]); - - const y = - tf.losses.computeWeightedLoss(losses, undefined, tf.Reduction.SUM); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), (1 + 2 + 3)); - }); - - it('1D - weights', async () => { - const losses = tf.tensor1d([1, 2, 3]); - const weights = 
tf.tensor1d([0.1, 0, 0.3]); - - const y = tf.losses.computeWeightedLoss(losses, weights); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), (1 * 0.1 + 2 * 0 + 3 * 0.3) / 2); - }); - - it('2D - weights - broadcast', async () => { - const losses = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]); - const weights = tf.tensor2d([[0.1, 0.2, 0.3]]); - - const y = tf.losses.computeWeightedLoss(losses, weights); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 0.06666667); - }); - - it('1D - weights - Reduction.NONE', async () => { - const losses = tf.tensor1d([1, 2, 3]); - const weights = tf.tensor1d([0.1, 0.2, 0.3]); - - const y = tf.losses.computeWeightedLoss(losses, weights, tf.Reduction.NONE); - - expect(y.shape).toEqual([3]); - expectArraysClose(await y.data(), [1 * 0.1, 2 * 0.2, 3 * 0.3]); - }); - - it('1D - weights - Reduction.MEAN', async () => { - const losses = tf.tensor1d([1, 2, 3]); - const weights = tf.tensor1d([0.1, 0.2, 0.3]); - - const y = tf.losses.computeWeightedLoss(losses, weights, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), (1 * 0.1 + 2 * 0.2 + 3 * 0.3) / 0.6); - }); - - it('1D - weights - Reduction.SUM', async () => { - const losses = tf.tensor1d([1, 2, 3]); - const weights = tf.tensor1d([0.1, 0.2, 0.3]); - - const y = tf.losses.computeWeightedLoss(losses, weights, tf.Reduction.SUM); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), (1 * 0.1 + 2 * 0.2 + 3 * 0.3)); - }); - - it('2D - no weights', async () => { - const losses = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); - - const y = tf.losses.computeWeightedLoss(losses); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), (4 + 8 + 12 + 8 + 1 + 3) / 6); - }); - - it('2D - weights', async () => { - const losses = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); - const weights = tf.tensor2d([1, 0, 2, -5, 0, 6], [2, 3]); - - const y = tf.losses.computeWeightedLoss(losses, weights); - - 
expect(y.shape).toEqual([]); - expectArraysClose( - await y.data(), - (4 * 1 + 8 * 0 + 12 * 2 + (8 * -5) + 1 * 0 + 3 * 6) / 4); - }); - - it('2D - no weights - Reduction.MEAN', async () => { - const losses = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); - - const y = - tf.losses.computeWeightedLoss(losses, undefined, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), (4 + 8 + 12 + 8 + 1 + 3) / 6); - }); - - it('2D - weights - Reduction.MEAN', async () => { - const losses = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); - const weights = tf.tensor2d([1, 0, 2, -5, 0, 6], [2, 3]); - - const y = tf.losses.computeWeightedLoss(losses, weights, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose( - await y.data(), - (4 * 1 + 8 * 0 + 12 * 2 + (8 * -5) + 1 * 0 + 3 * 6) / 4); - }); - - it('2D - weights - broadcast - MEAN', async () => { - const losses = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]); - const weights = tf.tensor2d([[0.1, 0.2, 0.3]]); - - const y = tf.losses.computeWeightedLoss(losses, weights, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), (0.3 + 0.1 + 0.2) / (3 * 0.6)); - }); - - it('2D - no weights - Reduction.SUM', async () => { - const losses = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); - - const y = - tf.losses.computeWeightedLoss(losses, undefined, tf.Reduction.SUM); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), (4 + 8 + 12 + 8 + 1 + 3)); - }); - - it('2D - weights - Reduction.SUM', async () => { - const losses = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); - const weights = tf.tensor2d([1, 0, 2, -5, 0, 6], [2, 3]); - - const y = tf.losses.computeWeightedLoss(losses, weights, tf.Reduction.SUM); - - expect(y.shape).toEqual([]); - expectArraysClose( - await y.data(), (4 * 1 + 8 * 0 + 12 * 2 + (8 * -5) + 1 * 0 + 3 * 6)); - }); - - it('2D - no weights - Reduction.NONE', async () => { - const losses = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 
3]); - - const y = - tf.losses.computeWeightedLoss(losses, undefined, tf.Reduction.NONE); - - expect(y.shape).toEqual([2, 3]); - expectArraysClose(await y.data(), [4, 8, 12, 8, 1, 3]); - }); - - it('2D - weights - Reduction.NONE', async () => { - const losses = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); - const weights = tf.tensor2d([1, 0, 2, -5, 0, 6], [2, 3]); - - const y = tf.losses.computeWeightedLoss(losses, weights, tf.Reduction.NONE); - - expect(y.shape).toEqual([2, 3]); - expectArraysClose( - await y.data(), [4 * 1, 8 * 0, 12 * 2, (8 * -5), 1 * 0, 3 * 6]); - }); - - it('throws when passed losses as a non-tensor', () => { - const weights = tf.tensor2d([1, 0, 2, -5, 0, 6], [2, 3]); - - const e = - /Argument 'losses' passed to 'computeWeightedLoss' must be a Tensor/; - expect( - () => tf.losses.computeWeightedLoss( - {} as tf.Tensor, weights, tf.Reduction.NONE)) - .toThrowError(e); - }); - - it('throws when passed weights as a non-tensor', () => { - const losses = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); - - const e = - /Argument 'weights' passed to 'computeWeightedLoss' must be a Tensor/; - expect( - () => tf.losses.computeWeightedLoss( - losses, {} as tf.Tensor, tf.Reduction.NONE)) - .toThrowError(e); - }); - - it('accepts a tensor-like object', async () => { - const losses = [1, 2, 3]; - const weights = [0.1, 0, 0.3]; - const y = tf.losses.computeWeightedLoss(losses, weights); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), (1 * 0.1 + 2 * 0 + 3 * 0.3) / 2); - }); -}); - -describeWithFlags('absoluteDifference', ALL_ENVS, () => { - it('1D', async () => { - const predictions = tf.tensor1d([1, 2, 3]); - const label = tf.tensor1d([0.3, -0.6, -0.1]); - - const y = tf.losses.absoluteDifference(label, predictions); - - expect(y.shape).toEqual([]); - expectArraysClose( - await y.data(), - (Math.abs(1 - 0.3) + Math.abs(2 - (-0.6)) + Math.abs(3 - (-0.1))) / 3); - }); - - it('1D - weighted - Reduction.SUM_BY_NONZERO_WEIGHTS', async () => { - const 
predictions = tf.tensor1d([1, 2, 3]); - const label = tf.tensor1d([0.3, -0.6, -0.1]); - const weights = tf.tensor1d([0.1, 0.2, 0.3]); - - const y = tf.losses.absoluteDifference(label, predictions, weights); - - expect(y.shape).toEqual([]); - expectArraysClose( - await y.data(), - (Math.abs(1 - 0.3) * 0.1 + Math.abs(2 - (-0.6)) * 0.2 + - Math.abs(3 - (-0.1)) * 0.3) / - 3); - }); - - it('1D - weighted - Reduction.NONE', async () => { - const predictions = tf.tensor1d([1, 2, 3]); - const label = tf.tensor1d([0.3, -0.6, -0.1]); - const weights = tf.tensor1d([0.1, 0.2, 0.3]); - - const y = tf.losses.absoluteDifference( - label, predictions, weights, tf.Reduction.NONE); - - expect(y.shape).toEqual([3]); - expectArraysClose(await y.data(), [ - Math.abs(1 - 0.3) * 0.1, Math.abs(2 - (-0.6)) * 0.2, - Math.abs(3 - (-0.1)) * 0.3 - ]); - }); - - it('1D - Reduction.MEAN', async () => { - const predictions = tf.tensor1d([1, 2, 3]); - const label = tf.tensor1d([0.3, -0.6, -0.1]); - - const y = tf.losses.absoluteDifference( - label, predictions, undefined, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose( - await y.data(), - (Math.abs(1 - 0.3) + Math.abs(2 - (-0.6)) + Math.abs(3 - (-0.1))) / 3); - }); - - it('1D - weighted - Reduction.MEAN', async () => { - const predictions = tf.tensor1d([1, 2, 3]); - const label = tf.tensor1d([0.3, -0.6, -0.1]); - const weights = tf.tensor1d([0.1, 0.2, 0.3]); - - const y = tf.losses.absoluteDifference( - label, predictions, weights, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose( - await y.data(), - ((Math.abs(1 - 0.3) * 0.1) + (Math.abs(2 - (-0.6)) * 0.2) + - (Math.abs(3 - (-0.1)) * 0.3)) / - 0.6); - }); - - it('2D', async () => { - const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); - const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); - - const y = tf.losses.absoluteDifference(label, predictions); - - expect(y.shape).toEqual([]); - expectArraysClose( - await y.data(), - 
(Math.abs(4 - 1) + Math.abs(8 - 9) + Math.abs(12 - 2) + - Math.abs(8 - (-5)) + Math.abs(1 - (-2)) + Math.abs(3 - 6)) / - 6); - }); - - it('2D - weighted - Reduction.SUM_BY_NONZERO_WEIGHTS', async () => { - const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); - const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); - const weights = tf.tensor2d([3, 0, 5, 0, 4, 2], [2, 3]); - - const y = tf.losses.absoluteDifference(label, predictions, weights); - - expect(y.shape).toEqual([]); - expectArraysClose( - await y.data(), - (Math.abs(4 - 1) * 3 + Math.abs(8 - 9) * 0 + Math.abs(12 - 2) * 5 + - Math.abs(8 - (-5)) * 0 + Math.abs(1 - (-2)) * 4 + - Math.abs(3 - 6) * 2) / - 4); - }); - - it('2D - weighted - Reduction.NONE', async () => { - const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); - const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); - const weights = tf.tensor2d([3, 6, 5, 0, 4, 2], [2, 3]); - - const y = tf.losses.absoluteDifference( - label, predictions, weights, tf.Reduction.NONE); - - expect(y.shape).toEqual([2, 3]); - expectArraysClose(await y.data(), [ - Math.abs(4 - 1) * 3, Math.abs(8 - 9) * 6, Math.abs(12 - 2) * 5, - Math.abs(8 - (-5)) * 0, Math.abs(1 - (-2)) * 4, Math.abs(3 - 6) * 2 - ]); - }); - - it('2D - Reduction.MEAN', async () => { - const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); - const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); - - const y = tf.losses.absoluteDifference( - label, predictions, undefined, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose( - await y.data(), - (Math.abs(4 - 1) + Math.abs(8 - 9) + Math.abs(12 - 2) + - Math.abs(8 - (-5)) + Math.abs(1 - (-2)) + Math.abs(3 - 6)) / - 6); - }); - - it('2D - weighted - Reduction.MEAN', async () => { - const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); - const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); - const weights = tf.tensor2d([3, 6, 5, 0, 4, 2], [2, 3]); - - const y = tf.losses.absoluteDifference( - 
label, predictions, weights, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose( - await y.data(), - (Math.abs(4 - 1) * 3 + Math.abs(8 - 9) * 6 + Math.abs(12 - 2) * 5 + - Math.abs(8 - (-5)) * 0 + Math.abs(1 - (-2)) * 4 + - Math.abs(3 - 6) * 2) / - 20); - }); - - it('throws when passed label as a non-tensor', () => { - const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); - const weights = tf.tensor2d([3, 6, 5, 0, 4, 2], [2, 3]); - - const e = - /Argument 'labels' passed to 'absoluteDifference' must be a Tensor/; - expect( - () => tf.losses.absoluteDifference( - {} as tf.Tensor, predictions, weights, tf.Reduction.MEAN)) - .toThrowError(e); - }); - - it('throws when passed label as a non-tensor', () => { - const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); - const weights = tf.tensor2d([3, 6, 5, 0, 4, 2], [2, 3]); - - const e = new RegExp( - 'Argument \'predictions\' passed to \'absoluteDifference\' ' + - 'must be a Tensor'); - expect( - () => tf.losses.absoluteDifference( - label, {} as tf.Tensor, weights, tf.Reduction.MEAN)) - .toThrowError(e); - }); - - it('throws when passed weights as a non-tensor', () => { - const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); - const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); - - const e = - /Argument 'weights' passed to 'absoluteDifference' must be a Tensor/; - expect( - () => tf.losses.absoluteDifference( - label, predictions, {} as tf.Tensor, tf.Reduction.MEAN)) - .toThrowError(e); - }); - - it('accepts a tensor-like object', async () => { - const predictions = [1, 2, 3]; - const label = [0.3, -0.6, -0.1]; - const weights = [0.1, 0.2, 0.3]; - - const y = tf.losses.absoluteDifference( - label, predictions, weights, tf.Reduction.NONE); - - expect(y.shape).toEqual([3]); - expectArraysClose(await y.data(), [ - Math.abs(1 - 0.3) * 0.1, Math.abs(2 - (-0.6)) * 0.2, - Math.abs(3 - (-0.1)) * 0.3 - ]); - }); -}); - -describeWithFlags('meanSquaredError', ALL_ENVS, () => { - it('1D', 
async () => { - const predictions = tf.tensor1d([1, 2, 3]); - const label = tf.tensor1d([0.3, -0.6, -0.1]); - - const y = tf.losses.meanSquaredError(label, predictions); - - expect(y.shape).toEqual([]); - expectArraysClose( - await y.data(), - ((1 - 0.3) * (1 - 0.3) + (2 - (-0.6)) * (2 - (-0.6)) + - (3 - (-0.1)) * (3 - (-0.1))) / - 3); - }); - - it('1D - weighted - Reduction.SUM_BY_NONZERO_WEIGHTS', async () => { - const predictions = tf.tensor1d([1, 2, 3]); - const label = tf.tensor1d([0.3, -0.6, -0.1]); - const weights = tf.tensor1d([0.1, 0.2, 0.3]); - - const y = tf.losses.meanSquaredError(label, predictions, weights); - - expect(y.shape).toEqual([]); - expectArraysClose( - await y.data(), - ((1 - 0.3) * (1 - 0.3) * 0.1 + (2 - (-0.6)) * (2 - (-0.6)) * 0.2 + - (3 - (-0.1)) * (3 - (-0.1)) * 0.3) / - 3); - }); - - it('1D - weighted - Reduction.NONE', async () => { - const predictions = tf.tensor1d([1, 2, 3]); - const label = tf.tensor1d([0.3, -0.6, -0.1]); - const weights = tf.tensor1d([0.1, 0.2, 0.3]); - - const y = tf.losses.meanSquaredError( - label, predictions, weights, tf.Reduction.NONE); - - expect(y.shape).toEqual([3]); - expectArraysClose(await y.data(), [ - (1 - 0.3) * (1 - 0.3) * 0.1, (2 - (-0.6)) * (2 - (-0.6)) * 0.2, - (3 - (-0.1)) * (3 - (-0.1)) * 0.3 - ]); - }); - - it('1D - Reduction.MEAN', async () => { - const predictions = tf.tensor1d([1, 2, 3]); - const label = tf.tensor1d([0.3, -0.6, -0.1]); - - const y = tf.losses.meanSquaredError( - label, predictions, undefined, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose( - await y.data(), - ((1 - 0.3) * (1 - 0.3) + (2 - (-0.6)) * (2 - (-0.6)) + - (3 - (-0.1)) * (3 - (-0.1))) / - 3); - }); - - it('1D - weighted - Reduction.MEAN', async () => { - const predictions = tf.tensor1d([1, 2, 3]); - const label = tf.tensor1d([0.3, -0.6, -0.1]); - const weights = tf.tensor1d([0.1, 0.2, 0.3]); - - const y = tf.losses.meanSquaredError( - label, predictions, weights, tf.Reduction.MEAN); - - 
expect(y.shape).toEqual([]); - expectArraysClose( - await y.data(), - (((1 - 0.3) * (1 - 0.3) * 0.1) + ((2 - (-0.6)) * (2 - (-0.6)) * 0.2) + - ((3 - (-0.1)) * (3 - (-0.1)) * 0.3)) / - 0.6); - }); - - it('2D', async () => { - const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); - const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); - - const y = tf.losses.meanSquaredError(label, predictions); - - expect(y.shape).toEqual([]); - expectArraysClose( - await y.data(), - ((4 - 1) * (4 - 1) + (8 - 9) * (8 - 9) + (12 - 2) * (12 - 2) + - (8 - (-5)) * (8 - (-5)) + (1 - (-2)) * (1 - (-2)) + - (3 - 6) * (3 - 6)) / - 6); - }); - - it('2D - weighted - Reduction.SUM_BY_NONZERO_WEIGHTS', async () => { - const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); - const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); - const weights = tf.tensor2d([3, 0, 5, 0, 4, 2], [2, 3]); - - const y = tf.losses.meanSquaredError(label, predictions, weights); - - expect(y.shape).toEqual([]); - expectArraysClose( - await y.data(), - ((4 - 1) * (4 - 1) * 3 + (8 - 9) * (8 - 9) * 0 + - (12 - 2) * (12 - 2) * 5 + (8 - (-5)) * (8 - (-5)) * 0 + - (1 - (-2)) * (1 - (-2)) * 4 + (3 - 6) * (3 - 6) * 2) / - 4); - }); - - it('2D - weighted - Reduction.NONE', async () => { - const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); - const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); - const weights = tf.tensor2d([3, 6, 5, 0, 4, 2], [2, 3]); - - const y = tf.losses.meanSquaredError( - label, predictions, weights, tf.Reduction.NONE); - - expect(y.shape).toEqual([2, 3]); - expectArraysClose(await y.data(), [ - (4 - 1) * (4 - 1) * 3, (8 - 9) * (8 - 9) * 6, (12 - 2) * (12 - 2) * 5, - (8 - (-5)) * (8 - (-5)) * 0, (1 - (-2)) * (1 - (-2)) * 4, - (3 - 6) * (3 - 6) * 2 - ]); - }); - - it('2D - Reduction.MEAN', async () => { - const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); - const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); - - const y = tf.losses.meanSquaredError( - 
label, predictions, undefined, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose( - await y.data(), - ((4 - 1) * (4 - 1) + (8 - 9) * (8 - 9) + (12 - 2) * (12 - 2) + - (8 - (-5)) * (8 - (-5)) + (1 - (-2)) * (1 - (-2)) + - (3 - 6) * (3 - 6)) / - 6); - }); - - it('2D - weighted - Reduction.MEAN', async () => { - const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); - const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); - const weights = tf.tensor2d([3, 6, 5, 0, 4, 2], [2, 3]); - - const y = tf.losses.meanSquaredError( - label, predictions, weights, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose( - await y.data(), - ((4 - 1) * (4 - 1) * 3 + (8 - 9) * (8 - 9) * 6 + - (12 - 2) * (12 - 2) * 5 + (8 - (-5)) * (8 - (-5)) * 0 + - (1 - (-2)) * (1 - (-2)) * 4 + (3 - 6) * (3 - 6) * 2) / - 20); - }); - - it('throws when passed label as a non-tensor', () => { - const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); - const weights = tf.tensor2d([3, 6, 5, 0, 4, 2], [2, 3]); - - const e = /Argument 'labels' passed to 'meanSquaredError' must be a Tensor/; - expect( - () => tf.losses.meanSquaredError( - {} as tf.Tensor, predictions, weights, tf.Reduction.MEAN)) - .toThrowError(e); - }); - - it('throws when passed label as a non-tensor', () => { - const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); - const weights = tf.tensor2d([3, 6, 5, 0, 4, 2], [2, 3]); - - const e = new RegExp( - 'Argument \'predictions\' passed to \'meanSquaredError\' ' + - 'must be a Tensor'); - expect( - () => tf.losses.meanSquaredError( - label, {} as tf.Tensor, weights, tf.Reduction.MEAN)) - .toThrowError(e); - }); - - it('throws when passed weights as a non-tensor', () => { - const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); - const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); - - const e = - /Argument 'weights' passed to 'meanSquaredError' must be a Tensor/; - expect( - () => tf.losses.meanSquaredError( - label, 
predictions, {} as tf.Tensor, tf.Reduction.MEAN)) - .toThrowError(e); - }); - - it('accepts a tensor-like object', async () => { - const predictions = [1, 2, 3]; - const label = [0.3, -0.6, -0.1]; - const weights = [0.1, 0.2, 0.3]; - - const y = tf.losses.meanSquaredError( - label, predictions, weights, tf.Reduction.NONE); - - expect(y.shape).toEqual([3]); - expectArraysClose(await y.data(), [ - (1 - 0.3) * (1 - 0.3) * 0.1, (2 - (-0.6)) * (2 - (-0.6)) * 0.2, - (3 - (-0.1)) * (3 - (-0.1)) * 0.3 - ]); - }); -}); - -describeWithFlags('cosineDistance', ALL_ENVS, () => { - it('1D', async () => { - const predictions = tf.tensor1d([1, 2, 3]); - const label = tf.tensor1d([0.3, -0.6, -0.1]); - - const y = tf.losses.cosineDistance(label, predictions, 0); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 1 - (1 * 0.3 + 2 * -0.6 + 3 * -0.1)); - }); - - it('1D - weighted - Reduction.SUM_BY_NONZERO_WEIGHTS', async () => { - const predictions = tf.tensor1d([1, 2, 3]); - const label = tf.tensor1d([0.3, -0.6, -0.1]); - const weights = tf.scalar(0.1); - - const y = tf.losses.cosineDistance(label, predictions, 0, weights); - - expect(y.shape).toEqual([]); - expectArraysClose( - await y.data(), (1 - (1 * 0.3 + 2 * -0.6 + 3 * -0.1)) * 0.1); - }); - - it('1D - weighted - Reduction.NONE', async () => { - const predictions = tf.tensor1d([1, 2, 3]); - const label = tf.tensor1d([0.3, -0.6, -0.1]); - const weights = tf.scalar(0.1); - - const y = tf.losses.cosineDistance( - label, predictions, 0, weights, tf.Reduction.NONE); - - expect(y.shape).toEqual([1]); - expectArraysClose( - await y.data(), [(1 - (1 * 0.3 + 2 * -0.6 + 3 * -0.1)) * 0.1]); - }); - - it('1D - Reduction.MEAN', async () => { - const predictions = tf.tensor1d([1, 2, 3]); - const label = tf.tensor1d([0.3, -0.6, -0.1]); - - const y = tf.losses.cosineDistance( - label, predictions, 0, undefined, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), (1 - (1 * 0.3 + 2 * -0.6 + 
3 * -0.1))); - }); - - it('1D - weighted - Reduction.MEAN', async () => { - const predictions = tf.tensor1d([1, 2, 3]); - const label = tf.tensor1d([0.3, -0.6, -0.1]); - const weights = tf.scalar(0.1); - - const y = tf.losses.cosineDistance( - label, predictions, 0, weights, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose( - await y.data(), ((1 - (1 * 0.3 + 2 * -0.6 + 3 * -0.1)) * 0.1) / 0.1); - }); - - it('2D', async () => { - const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); - const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); - - const y = tf.losses.cosineDistance(label, predictions, 1); - - expect(y.shape).toEqual([]); - expectArraysClose( - await y.data(), - ((1 - (4 * 1 + 8 * 9 + 12 * 2)) + (1 - (8 * -5 + 1 * -2 + 3 * 6))) / 2); - }); - - it('2D - weighted - Reduction.SUM_BY_NONZERO_WEIGHTS', async () => { - const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); - const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); - const weights = tf.tensor2d([3, 0], [2, 1]); - - const y = tf.losses.cosineDistance(label, predictions, 1, weights); - - expect(y.shape).toEqual([]); - expectArraysClose( - await y.data(), - ((1 - (4 * 1 + 8 * 9 + 12 * 2)) * 3 + - (1 - (8 * -5 + 1 * -2 + 3 * 6)) * 0) / - 1); - }); - - it('2D - weighted - Reduction.NONE', async () => { - const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); - const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); - const weights = tf.tensor2d([3, 0], [2, 1]); - - const y = tf.losses.cosineDistance( - label, predictions, 1, weights, tf.Reduction.NONE); - - expect(y.shape).toEqual([2, 1]); - expectArraysClose(await y.data(), [ - (1 - (4 * 1 + 8 * 9 + 12 * 2)) * 3, (1 - (8 * -5 + 1 * -2 + 3 * 6)) * 0 - ]); - }); - - it('2D - Reduction.MEAN', async () => { - const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); - const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); - - const y = tf.losses.cosineDistance( - label, predictions, 1, undefined, 
tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose( - await y.data(), - ((1 - (4 * 1 + 8 * 9 + 12 * 2)) + (1 - (8 * -5 + 1 * -2 + 3 * 6))) / 2); - }); - - it('2D - weighted - Reduction.MEAN', async () => { - const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); - const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); - const weights = tf.tensor2d([3, 0], [2, 1]); - - const y = tf.losses.cosineDistance( - label, predictions, 1, weights, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose( - await y.data(), - ((1 - (4 * 1 + 8 * 9 + 12 * 2)) * 3 + - (1 - (8 * -5 + 1 * -2 + 3 * 6)) * 0) / - 3); - }); - - it('throws when passed label as a non-tensor', () => { - const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); - const weights = tf.tensor2d([3, 6, 5, 0, 4, 2], [2, 3]); - - const e = /Argument 'labels' passed to 'cosineDistance' must be a Tensor/; - expect( - () => tf.losses.cosineDistance( - {} as tf.Tensor, predictions, 0, weights, tf.Reduction.MEAN)) - .toThrowError(e); - }); - - it('throws when passed label as a non-tensor', () => { - const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); - const weights = tf.tensor2d([3, 6, 5, 0, 4, 2], [2, 3]); - - const e = new RegExp( - 'Argument \'predictions\' passed to \'cosineDistance\' ' + - 'must be a Tensor'); - expect( - () => tf.losses.cosineDistance( - label, {} as tf.Tensor, 0, weights, tf.Reduction.MEAN)) - .toThrowError(e); - }); - - it('throws when passed weights as a non-tensor', () => { - const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); - const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); - - const e = /Argument 'weights' passed to 'cosineDistance' must be a Tensor/; - expect( - () => tf.losses.cosineDistance( - label, predictions, 0, {} as tf.Tensor, tf.Reduction.MEAN)) - .toThrowError(e); - }); - - it('accepts a tensor-like object', async () => { - const predictions = [1, 2, 3]; - const label = [0.3, -0.6, -0.1]; - const weights 
= 0.1; - - const y = tf.losses.cosineDistance( - label, predictions, 0, weights, tf.Reduction.NONE); - - expect(y.shape).toEqual([1]); - expectArraysClose( - await y.data(), [(1 - (1 * 0.3 + 2 * -0.6 + 3 * -0.1)) * 0.1]); - }); -}); - -describeWithFlags('hingeLoss', ALL_ENVS, () => { - it('1D', async () => { - const predictions = tf.tensor1d([0, 0, 1, 1]); - const label = tf.tensor1d([0, 1, 0, 1]); - - const y = tf.losses.hingeLoss(label, predictions); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 1.0); - }); - - it('1D - weighted - Reduction.SUM_BY_NONZERO_WEIGHTS', async () => { - const predictions = tf.tensor1d([0, 0, 1, 1]); - const label = tf.tensor1d([0, 1, 0, 1]); - const weights = tf.tensor1d([0.1, 0.2, 0.3, 0.4]); - - const y = tf.losses.hingeLoss(label, predictions, weights); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 0.225); - }); - - it('1D - weighted - Reduction.NONE', async () => { - const predictions = tf.tensor1d([0, 0, 1, 1]); - const label = tf.tensor1d([0, 1, 0, 1]); - const weights = tf.tensor1d([0.1, 0.2, 0.3, 0.4]); - - const y = - tf.losses.hingeLoss(label, predictions, weights, tf.Reduction.NONE); - - expect(y.shape).toEqual([4]); - expectArraysClose(await y.data(), [0.1, 0.2, 0.6, 0.0]); - }); - - it('1D - Reduction.MEAN', async () => { - const predictions = tf.tensor1d([0, 0, 1, 1]); - const label = tf.tensor1d([0, 1, 0, 1]); - - const y = - tf.losses.hingeLoss(label, predictions, undefined, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 1.0); - }); - - it('1D - weighted - Reduction.MEAN', async () => { - const predictions = tf.tensor1d([0, 0, 1, 1]); - const label = tf.tensor1d([0, 1, 0, 1]); - const weights = tf.tensor1d([0.1, 0.2, 0.3, 0.4]); - - const y = - tf.losses.hingeLoss(label, predictions, weights, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 0.9); - }); - - it('2D', async () => { - const 
predictions = tf.tensor2d([0, 0, 0, 1, 1, 1], [2, 3]); - const label = tf.tensor2d([0, 1, 0, 1, 0, 1], [2, 3]); - - const y = tf.losses.hingeLoss(label, predictions); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 0.8333333); - }); - - it('2D - weighted - Reduction.SUM_BY_NONZERO_WEIGHTS', async () => { - const predictions = tf.tensor2d([0, 0, 0, 1, 1, 1], [2, 3]); - const label = tf.tensor2d([0, 1, 0, 1, 0, 1], [2, 3]); - const weights = tf.tensor2d([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], [2, 3]); - - const y = tf.losses.hingeLoss(label, predictions, weights); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 0.26666668); - }); - - it('2D - weighted - Reduction.NONE', async () => { - const predictions = tf.tensor2d([0, 0, 0, 1, 1, 1], [2, 3]); - const label = tf.tensor2d([0, 1, 0, 1, 0, 1], [2, 3]); - const weights = tf.tensor2d([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], [2, 3]); - - const y = - tf.losses.hingeLoss(label, predictions, weights, tf.Reduction.NONE); - - expect(y.shape).toEqual([2, 3]); - expectArraysClose(await y.data(), [0.1, 0.2, 0.3, 0, 1, 0]); - }); - - it('2D - Reduction.MEAN', async () => { - const predictions = tf.tensor2d([0, 0, 0, 1, 1, 1], [2, 3]); - const label = tf.tensor2d([0, 1, 0, 1, 0, 1], [2, 3]); - - const y = - tf.losses.hingeLoss(label, predictions, undefined, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 0.8333333); - }); - - it('2D - weighted - Reduction.MEAN', async () => { - const predictions = tf.tensor2d([0, 0, 0, 1, 1, 1], [2, 3]); - const label = tf.tensor2d([0, 1, 0, 1, 0, 1], [2, 3]); - const weights = tf.tensor2d([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], [2, 3]); - - const y = - tf.losses.hingeLoss(label, predictions, weights, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 0.76190484); - }); - - it('throws when passed label as a non-tensor', () => { - const predictions = tf.tensor2d([1, 0, 1, 0, 1, 0], [2, 3]); - const 
weights = tf.tensor2d([1, 0, 1, 0, 1, 0], [2, 3]); - - const e = /Argument 'labels' passed to 'hingeLoss' must be a Tensor/; - expect( - () => tf.losses.hingeLoss( - {} as tf.Tensor, predictions, weights, tf.Reduction.MEAN)) - .toThrowError(e); - }); - - it('throws when passed label as a non-tensor', () => { - const label = tf.tensor2d([1, 0, 1, 0, 1, 0], [2, 3]); - const weights = tf.tensor2d([1, 0, 1, 0, 1, 0], [2, 3]); - - const e = new RegExp( - 'Argument \'predictions\' passed to \'hingeLoss\' ' + - 'must be a Tensor'); - expect( - () => tf.losses.hingeLoss( - label, {} as tf.Tensor, weights, tf.Reduction.MEAN)) - .toThrowError(e); - }); - - it('throws when passed weights as a non-tensor', () => { - const predictions = tf.tensor2d([1, 0, 1, 0, 1, 0], [2, 3]); - const label = tf.tensor2d([1, 0, 1, 0, 1, 0], [2, 3]); - - const e = /Argument 'weights' passed to 'hingeLoss' must be a Tensor/; - expect( - () => tf.losses.hingeLoss( - label, predictions, {} as tf.Tensor, tf.Reduction.MEAN)) - .toThrowError(e); - }); - - it('accepts a tensor-like object', async () => { - const predictions = [0, 0, 1, 1]; - const label = [0, 1, 0, 1]; - const weights = [0.1, 0.2, 0.3, 0.4]; - - const y = - tf.losses.hingeLoss(label, predictions, weights, tf.Reduction.NONE); - - expect(y.shape).toEqual([4]); - expectArraysClose(await y.data(), [0.1, 0.2, 0.6, 0.0]); - }); -}); - -describeWithFlags('logLoss', ALL_ENVS, () => { - it('1D', async () => { - const labels = tf.tensor1d([1, 2, 3]); - const predictions = tf.tensor1d([0.3, 0.6, 0.1]); - - const y = tf.losses.logLoss(labels, predictions); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 2.668788); - }); - - it('1D - Check for negative values', async () => { - const labels = tf.tensor1d([1, 2, 3]); - const predictions = tf.tensor1d([0.3, -0.6, -0.1]); - - const y = tf.losses.logLoss(labels, predictions); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), NaN); - }); - - it('1D - weighted - 
Reduction.SUM_BY_NONZERO_WEIGHTS', async () => { - const labels = tf.tensor1d([1, 2, 3]); - const predictions = tf.tensor1d([0.3, 0.6, 0.1]); - const weights = tf.tensor1d([0.1, 0.2, 0.3]); - - const y = tf.losses.logLoss(labels, predictions, weights); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 0.7168596); - }); - - it('1D - weighted - Reduction.NONE', async () => { - const labels = tf.tensor1d([1, 2, 3]); - const predictions = tf.tensor1d([0.3, 0.6, 0.1]); - const weights = tf.tensor1d([0.1, 0.2, 0.3]); - - const y = tf.losses.logLoss( - labels, predictions, weights, undefined, tf.Reduction.NONE); - - expect(y.shape).toEqual([3]); - expectArraysClose(await y.data(), [0.12039725, 0.02107204, 2.0091095]); - }); - - it('1D - Reduction.MEAN', async () => { - const labels = tf.tensor1d([1, 2, 3]); - const predictions = tf.tensor1d([0.3, 0.6, 0.1]); - - const y = tf.losses.logLoss( - labels, predictions, undefined, undefined, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 2.668788); - }); - - it('1D - weighted - Reduction.MEAN', async () => { - const labels = tf.tensor1d([1, 2, 3]); - const predictions = tf.tensor1d([0.3, 0.6, 0.1]); - const weights = tf.tensor1d([0.1, 0.2, 0.3]); - - const y = tf.losses.logLoss( - labels, predictions, weights, undefined, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 3.5842977); - }); - - it('2D', async () => { - const labels = tf.tensor2d([0.4, 0.8, 0.12, 0.8, 0.1, 0.3], [2, 3]); - const predictions = tf.tensor2d([0.1, 0.7, 0.1, 0.5, 0.05, 0.15], [2, 3]); - - const y = tf.losses.logLoss(labels, predictions); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 0.60019904); - }); - - it('2D - weighted - Reduction.SUM_BY_NONZERO_WEIGHTS', async () => { - const labels = tf.tensor2d([0.4, 0.8, 0.12, 0.8, 0.1, 0.3], [2, 3]); - const predictions = tf.tensor2d([0.1, 0.7, 0.1, 0.5, 0.05, 0.15], [2, 3]); - const weights 
= tf.tensor2d([3, 0, 5, 0, 4, 2], [2, 3]); - - const y = tf.losses.logLoss(labels, predictions, weights); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 1.8866577); - }); - - it('2D - weighted - Reduction.NONE', async () => { - const labels = tf.tensor2d([0.4, 0.8, 0.12, 0.8, 0.1, 0.3], [2, 3]); - const predictions = tf.tensor2d([0.1, 0.7, 0.1, 0.5, 0.05, 0.15], [2, 3]); - const weights = tf.tensor2d([3, 0, 5, 0, 4, 2], [2, 3]); - - const y = tf.losses.logLoss( - labels, predictions, weights, undefined, tf.Reduction.NONE); - - expect(y.shape).toEqual([2, 3]); - expectArraysClose( - await y.data(), [2.9527497, 0., 1.8451363, 0., 1.3829476, 1.3657978]); - }); - - it('2D - Reduction.MEAN', async () => { - const labels = tf.tensor2d([0.4, 0.8, 0.12, 0.8, 0.1, 0.3], [2, 3]); - const predictions = tf.tensor2d([0.1, 0.7, 0.1, 0.5, 0.05, 0.15], [2, 3]); - - const y = tf.losses.logLoss( - labels, predictions, undefined, undefined, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 0.60019904); - }); - - it('2D - weighted - Reduction.MEAN', async () => { - const labels = tf.tensor2d([0.4, 0.8, 0.12, 0.8, 0.1, 0.3], [2, 3]); - const predictions = tf.tensor2d([0.1, 0.7, 0.1, 0.5, 0.05, 0.15], [2, 3]); - const weights = tf.tensor2d([3, 0, 5, 0, 4, 2], [2, 3]); - - const y = tf.losses.logLoss( - labels, predictions, weights, undefined, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 0.53904504); - }); - - it('throws when passed label as a non-tensor', () => { - const predictions = tf.tensor2d([0.1, 0.7, 0.1, 0.5, 0.05, 0.15], [2, 3]); - const weights = tf.tensor2d([3, 6, 5, 0, 4, 2], [2, 3]); - - const e = /Argument 'labels' passed to 'logLoss' must be a Tensor/; - expect( - () => tf.losses.logLoss( - {} as tf.Tensor, predictions, weights, tf.Reduction.MEAN)) - .toThrowError(e); - }); - - it('throws when passed label as a non-tensor', () => { - const labels = tf.tensor2d([0.4, 
0.8, 0.12, 0.8, 0.1, 0.3], [2, 3]); - const weights = tf.tensor2d([3, 6, 5, 0, 4, 2], [2, 3]); - - const e = new RegExp( - 'Argument \'predictions\' passed to \'logLoss\' ' + - 'must be a Tensor'); - expect( - () => tf.losses.logLoss( - labels, {} as tf.Tensor, weights, tf.Reduction.MEAN)) - .toThrowError(e); - }); - - it('throws when passed weights as a non-tensor', () => { - const labels = tf.tensor2d([0.4, 0.8, 0.12, 0.8, 0.1, 0.3], [2, 3]); - const predictions = tf.tensor2d([0.1, 0.7, 0.1, 0.5, 0.05, 0.15], [2, 3]); - - const e = /Argument 'weights' passed to 'logLoss' must be a Tensor/; - expect( - () => tf.losses.logLoss( - labels, predictions, {} as tf.Tensor, tf.Reduction.MEAN)) - .toThrowError(e); - }); - - it('accepts a tensor-like object', async () => { - const labels = [1, 2, 3]; - const predictions = [0.3, 0.6, 0.1]; - const weights = [0.1, 0.2, 0.3]; - - const y = tf.losses.logLoss( - labels, predictions, weights, undefined, tf.Reduction.NONE); - - expect(y.shape).toEqual([3]); - expectArraysClose(await y.data(), [0.12039725, 0.02107204, 2.0091095]); - }); -}); - -describeWithFlags('huberLoss', ALL_ENVS, () => { - it('1D', async () => { - const labels = tf.tensor1d([1, 2, 3]); - const predictions = tf.tensor1d([0.3, 0.6, 0.1]); - - const y = tf.losses.huberLoss(labels, predictions); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 1.1816667); - }); - - it('1D - delta', async () => { - const labels = tf.tensor1d([1, 2, 3]); - const predictions = tf.tensor1d([0.3, 0.6, 0.1]); - const delta = 0.4; - - const y = tf.losses.huberLoss(labels, predictions, undefined, delta); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 0.58666664); - }); - - it('1D - weighted - Reduction.SUM_BY_NONZERO_WEIGHTS', async () => { - const labels = tf.tensor1d([1, 2, 3]); - const predictions = tf.tensor1d([0.3, 0.6, 0.1]); - const weights = tf.tensor1d([0.1, 0.2, 0.3]); - - const y = tf.losses.huberLoss(labels, predictions, weights); - - 
expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 0.30816665); - }); - - it('1D - weighted - Reduction.NONE', async () => { - const labels = tf.tensor1d([1, 2, 3]); - const predictions = tf.tensor1d([0.3, 0.6, 0.1]); - const weights = tf.tensor1d([0.1, 0.2, 0.3]); - - const y = tf.losses.huberLoss( - labels, predictions, weights, undefined, tf.Reduction.NONE); - - expect(y.shape).toEqual([3]); - expectArraysClose(await y.data(), [0.0245, 0.17999999, 0.72]); - }); - - it('1D - Reduction.MEAN', async () => { - const labels = tf.tensor1d([1, 2, 3]); - const predictions = tf.tensor1d([0.3, 0.6, 0.1]); - - const y = tf.losses.huberLoss( - labels, predictions, undefined, undefined, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 1.1816667); - }); - - it('1D - weighted - Reduction.MEAN', async () => { - const labels = tf.tensor1d([1, 2, 3]); - const predictions = tf.tensor1d([0.3, 0.6, 0.1]); - const weights = tf.tensor1d([0.1, 0.2, 0.3]); - - const y = tf.losses.huberLoss( - labels, predictions, weights, undefined, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 1.5408332); - }); - - it('2D', async () => { - const labels = tf.tensor2d([0.4, 0.8, 0.12, 0.8, 0.1, 0.3], [2, 3]); - const predictions = tf.tensor2d([0.1, 0.7, 0.1, 0.5, 0.05, 0.15], [2, 3]); - - const y = tf.losses.huberLoss(labels, predictions); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 0.01795); - }); - - it('2D - weighted - Reduction.SUM_BY_NONZERO_WEIGHTS', async () => { - const labels = tf.tensor2d([0.4, 0.8, 0.12, 0.8, 0.1, 0.3], [2, 3]); - const predictions = tf.tensor2d([0.1, 0.7, 0.1, 0.5, 0.05, 0.15], [2, 3]); - const weights = tf.tensor2d([3, 0, 5, 0, 4, 2], [2, 3]); - - const y = tf.losses.huberLoss(labels, predictions, weights); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 0.040875003); - }); - - it('2D - weighted - Reduction.NONE', async () => { - 
const labels = tf.tensor2d([0.4, 0.8, 0.12, 0.8, 0.1, 0.3], [2, 3]); - const predictions = tf.tensor2d([0.1, 0.7, 0.1, 0.5, 0.05, 0.15], [2, 3]); - const weights = tf.tensor2d([3, 0, 5, 0, 4, 2], [2, 3]); - - const y = tf.losses.huberLoss( - labels, predictions, weights, undefined, tf.Reduction.NONE); - - expect(y.shape).toEqual([2, 3]); - expectArraysClose(await y.data(), [0.135, 0., 0.001, 0., 0.005, 0.0225]); - }); - - it('2D - Reduction.MEAN', async () => { - const labels = tf.tensor2d([0.4, 0.8, 0.12, 0.8, 0.1, 0.3], [2, 3]); - const predictions = tf.tensor2d([0.1, 0.7, 0.1, 0.5, 0.05, 0.15], [2, 3]); - - const y = tf.losses.huberLoss( - labels, predictions, undefined, undefined, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 0.01795); - }); - - it('2D - weighted - Reduction.MEAN', async () => { - const labels = tf.tensor2d([0.4, 0.8, 0.12, 0.8, 0.1, 0.3], [2, 3]); - const predictions = tf.tensor2d([0.1, 0.7, 0.1, 0.5, 0.05, 0.15], [2, 3]); - const weights = tf.tensor2d([3, 0, 5, 0, 4, 2], [2, 3]); - - const y = tf.losses.huberLoss( - labels, predictions, weights, undefined, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 0.011678572); - }); - - it('throws when passed label as a non-tensor', () => { - const predictions = tf.tensor2d([0.1, 0.7, 0.1, 0.5, 0.05, 0.15], [2, 3]); - const weights = tf.tensor2d([3, 6, 5, 0, 4, 2], [2, 3]); - - const e = /Argument 'labels' passed to 'huberLoss' must be a Tensor/; - expect( - () => tf.losses.huberLoss( - {} as tf.Tensor, predictions, weights, tf.Reduction.MEAN)) - .toThrowError(e); - }); - - it('throws when passed label as a non-tensor', () => { - const labels = tf.tensor2d([0.4, 0.8, 0.12, 0.8, 0.1, 0.3], [2, 3]); - const weights = tf.tensor2d([3, 6, 5, 0, 4, 2], [2, 3]); - - const e = new RegExp( - 'Argument \'predictions\' passed to \'huberLoss\' ' + - 'must be a Tensor'); - expect( - () => tf.losses.huberLoss( - labels, {} as 
tf.Tensor, weights, tf.Reduction.MEAN)) - .toThrowError(e); - }); - - it('throws when passed weights as a non-tensor', () => { - const labels = tf.tensor2d([0.4, 0.8, 0.12, 0.8, 0.1, 0.3], [2, 3]); - const predictions = tf.tensor2d([0.1, 0.7, 0.1, 0.5, 0.05, 0.15], [2, 3]); - - const e = /Argument 'weights' passed to 'huberLoss' must be a Tensor/; - expect( - () => tf.losses.huberLoss( - labels, predictions, {} as tf.Tensor, tf.Reduction.MEAN)) - .toThrowError(e); - }); - - it('accepts a tensor-like object', async () => { - const labels = [1, 2, 3]; - const predictions = [0.3, 0.6, 0.1]; - const weights = [0.1, 0.2, 0.3]; - - const y = tf.losses.huberLoss( - labels, predictions, weights, undefined, tf.Reduction.NONE); - - expect(y.shape).toEqual([3]); - expectArraysClose(await y.data(), [0.0245, 0.17999999, 0.72]); - }); -}); - -describeWithFlags('sigmoidCrossEntropy', ALL_ENVS, () => { - it('All wrong', async () => { - const label = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]); - const predictions = tf.tensor2d( - [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]], - [3, 3]); - - const y = tf.losses.sigmoidCrossEntropy(label, predictions); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 6.6667123); - }); - - it('All right', async () => { - const label = tf.tensor2d([[1, 0, 0], [0, 1, 0], [0, 0, 1]], [3, 3]); - const predictions = tf.tensor2d( - [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]], - [3, 3]); - - const y = tf.losses.sigmoidCrossEntropy(label, predictions); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 0); - }); - - it('Weighted - Reduction.SUM_BY_NONZERO_WEIGHTS', async () => { - const label = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]); - const predictions = tf.tensor2d( - [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]], - [3, 3]); - - const weights = tf.tensor2d([[0.1, 0.2, 0.3]]); - - const y = tf.losses.sigmoidCrossEntropy(label, 
predictions, weights); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 1.3333424); - }); - - it('Weighted - Reduction.NONE', async () => { - const label = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]); - const predictions = tf.tensor2d( - [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]], - [3, 3]); - const weights = tf.tensor2d([[0.1, 0.2, 0.3]]); - - const y = tf.losses.sigmoidCrossEntropy( - label, predictions, weights, undefined, tf.Reduction.NONE); - - expect(y.shape).toEqual([3, 3]); - expectArraysClose(await y.data(), [ - 1.0000046, 9.0797803e-06, 3.0000138e+00, 1.0000046e+00, 2.0000093e+00, - 1.3619671e-05, 4.5398901e-06, 2.0000093e+00, 3.0000138e+00 - ]); - }); - - it('Reduction.MEAN', async () => { - const label = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]); - const predictions = tf.tensor2d( - [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]], - [3, 3]); - - const y = tf.losses.sigmoidCrossEntropy( - label, predictions, undefined, undefined, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 6.6667123); - }); - - it('Weighted - Reduction.MEAN', async () => { - const label = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]); - const predictions = tf.tensor2d( - [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]], - [3, 3]); - const weights = tf.tensor2d([[0.1, 0.2, 0.3]]); - - const y = tf.losses.sigmoidCrossEntropy( - label, predictions, weights, undefined, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose( - await y.data(), - 6.666712284088135, - ); - }); - - it('Label Smoothing - Weighted - Reduction.MEAN', async () => { - const label = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]); - const predictions = tf.tensor2d( - [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]], - [3, 3]); - const weights = tf.tensor2d([[0.1, 0.2, 0.3]]); - const labelSmoothing = 0.3; - - const y = 
tf.losses.sigmoidCrossEntropy( - label, predictions, weights, labelSmoothing, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 6.1667128); - }); - - it('throws when multiClassLabels and logits are of different shapes', () => { - const multiClassLabels = - tf.tensor2d([10, 10, 10, 10, 10, 10, 10, 10, 10], [3, 3]); - const logits = tf.tensor2d([10, 10, 10, 10, 10, 10], [2, 3]); - - const e = new RegExp( - 'Error in sigmoidCrossEntropy: Shapes 3,3 and 2,3 must match'); - expect(() => tf.losses.sigmoidCrossEntropy(multiClassLabels, logits)) - .toThrowError(e); - }); - - it('throws when passed multiClassLabels as a non-tensor', () => { - const predictions = tf.tensor2d( - [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]], - [3, 3]); - const weights = tf.tensor2d([[0.1, 0.2, 0.3]]); - - const e = new RegExp( - 'Argument \'multiClassLabels\' passed to \'sigmoidCrossEntropy\' ' + - 'must be a Tensor'); - - expect( - () => tf.losses.sigmoidCrossEntropy( - {} as tf.Tensor, predictions, weights, tf.Reduction.MEAN)) - .toThrowError(e); - }); - - it('throws when passed logits as a non-tensor', () => { - const label = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]); - const weights = tf.tensor2d([[0.1, 0.2, 0.3]]); - - const e = new RegExp( - 'Argument \'logits\' passed to \'sigmoidCrossEntropy\' ' + - 'must be a Tensor'); - expect( - () => tf.losses.sigmoidCrossEntropy( - label, {} as tf.Tensor, weights, tf.Reduction.MEAN)) - .toThrowError(e); - }); - - it('throws when passed weights as a non-tensor', () => { - const label = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]); - const predictions = tf.tensor2d( - [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]], - [3, 3]); - - const e = - /Argument 'weights' passed to 'sigmoidCrossEntropy' must be a Tensor/; - expect( - () => tf.losses.sigmoidCrossEntropy( - label, predictions, {} as tf.Tensor, tf.Reduction.MEAN)) - .toThrowError(e); - }); -}); 
- -describeWithFlags('softmaxCrossEntropy', ALL_ENVS, () => { - it('All wrong', async () => { - const label = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]); - const predictions = tf.tensor2d( - [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]], - [3, 3]); - - const y = tf.losses.softmaxCrossEntropy(label, predictions); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 20); - }); - - it('All right', async () => { - const label = tf.tensor2d([[1, 0, 0], [0, 1, 0], [0, 0, 1]], [3, 3]); - const predictions = tf.tensor2d( - [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]], - [3, 3]); - - const y = tf.losses.softmaxCrossEntropy(label, predictions); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 0); - }); - - it('Weighted - Reduction.SUM_BY_NONZERO_WEIGHTS', async () => { - const label = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]); - const predictions = tf.tensor2d( - [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]], - [3, 3]); - - const weights = - tf.tensor2d([[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]); - - const y = tf.losses.softmaxCrossEntropy(label, predictions, weights); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 4); - }); - - it('Weighted - Reduction.NONE', async () => { - const label = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]); - const predictions = tf.tensor2d( - [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]], - [3, 3]); - const weights = tf.tensor1d([0.1, 0.2, 0.3]); - - const y = tf.losses.softmaxCrossEntropy( - label, predictions, weights, undefined, tf.Reduction.NONE); - - expect(y.shape).toEqual([3]); - expectArraysClose(await y.data(), [2, 4, 6]); - }); - - it('Reduction.MEAN', async () => { - const label = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]); - const predictions = tf.tensor2d( - [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]], - [3, 3]); - 
- const y = tf.losses.softmaxCrossEntropy( - label, predictions, undefined, undefined, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 20); - }); - - it('Weighted - Reduction.MEAN', async () => { - const label = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]); - const predictions = tf.tensor2d( - [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]], - [3, 3]); - const weights = tf.tensor1d([0.1, 0.2, 0.3]); - - const y = tf.losses.softmaxCrossEntropy( - label, predictions, weights, undefined, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose( - await y.data(), - 20, - ); - }); - - it('Label Smoothing - Weighted - Reduction.MEAN', async () => { - const label = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]); - const predictions = tf.tensor2d( - [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]], - [3, 3]); - const weights = tf.tensor2d([[0.1, 0.2, 0.3]]); - const labelSmoothing = 0.3; - - const y = tf.losses.softmaxCrossEntropy( - label, predictions, weights, labelSmoothing, tf.Reduction.MEAN); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 18); - }); - - it('throws when multiClassLabels and logits are of different shapes', () => { - const multiClassLabels = - tf.tensor2d([10, 10, 10, 10, 10, 10, 10, 10, 10], [3, 3]); - const logits = tf.tensor2d([10, 10, 10, 10, 10, 10], [2, 3]); - - const e = new RegExp( - 'Error in softmaxCrossEntropy: Shapes 3,3 and 2,3 must match'); - expect(() => tf.losses.softmaxCrossEntropy(multiClassLabels, logits)) - .toThrowError(e); - }); - - it('throws when passed multiClassLabels as a non-tensor', () => { - const predictions = tf.tensor2d( - [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]], - [3, 3]); - const weights = tf.tensor2d([[0.1, 0.2, 0.3]]); - - const e = new RegExp( - 'Argument \'onehotLabels\' passed to \'softmaxCrossEntropy\' ' + - 'must be a Tensor'); - - expect( - () => 
tf.losses.softmaxCrossEntropy( - {} as tf.Tensor, predictions, weights, tf.Reduction.MEAN)) - .toThrowError(e); - }); - - it('throws when passed logits as a non-tensor', () => { - const label = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]); - const weights = tf.tensor2d([[0.1, 0.2, 0.3]]); - - const e = new RegExp( - 'Argument \'logits\' passed to \'softmaxCrossEntropy\' ' + - 'must be a Tensor'); - expect( - () => tf.losses.softmaxCrossEntropy( - label, {} as tf.Tensor, weights, tf.Reduction.MEAN)) - .toThrowError(e); - }); - - it('throws when passed weights as a non-tensor', () => { - const label = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]); - const predictions = tf.tensor2d( - [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]], - [3, 3]); - - const e = - /Argument 'weights' passed to 'softmaxCrossEntropy' must be a Tensor/; - expect( - () => tf.losses.softmaxCrossEntropy( - label, predictions, {} as tf.Tensor, tf.Reduction.MEAN)) - .toThrowError(e); - }); - - it('accepts a tensor-like object', async () => { - const label = [[0, 0, 1], [1, 0, 0], [0, 1, 0]]; - const predictions = - [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]]; - - const y = tf.losses.softmaxCrossEntropy(label, predictions); - - expect(y.shape).toEqual([]); - expectArraysClose(await y.data(), 20); - }); -}); diff --git a/tfjs-core/src/ops/loss_ops_utils.ts b/tfjs-core/src/ops/loss_ops_utils.ts new file mode 100644 index 00000000000..d24bd18b46e --- /dev/null +++ b/tfjs-core/src/ops/loss_ops_utils.ts @@ -0,0 +1,23 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + +export enum Reduction { + NONE, + MEAN, + SUM, + SUM_BY_NONZERO_WEIGHTS +} diff --git a/tfjs-core/src/ops/mean_squared_error.ts b/tfjs-core/src/ops/mean_squared_error.ts new file mode 100644 index 00000000000..82780129819 --- /dev/null +++ b/tfjs-core/src/ops/mean_squared_error.ts @@ -0,0 +1,57 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + +import {Tensor} from '../tensor'; +import {convertToTensor} from '../tensor_util_env'; +import {TensorLike} from '../types'; +import {assertShapesMatch} from '../util'; +import {computeWeightedLoss} from './compute_weighted_loss'; +import {Reduction} from './loss_ops_utils'; +import {op} from './operation'; +import {squaredDifference} from './squared_difference'; +/** + * Computes the mean squared error between two tensors. 
+ * + * @param labels The ground truth output tensor, same dimensions as + * 'predictions'. + * @param predictions The predicted outputs. + * @param weights Tensor whose rank is either 0, or the same rank as + * `labels`, and must be broadcastable to `labels` (i.e., all dimensions + * must be either `1`, or the same as the corresponding `losses` + * dimension). + * @param reduction Type of reduction to apply to loss. Should be of type + * `Reduction` + */ +/** @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'} */ +function meanSquaredError_( + labels: T|TensorLike, predictions: T|TensorLike, + weights?: Tensor|TensorLike, + reduction = Reduction.SUM_BY_NONZERO_WEIGHTS): O { + const $labels = convertToTensor(labels, 'labels', 'meanSquaredError'); + const $predictions = + convertToTensor(predictions, 'predictions', 'meanSquaredError'); + let $weights: Tensor = null; + if (weights != null) { + $weights = convertToTensor(weights, 'weights', 'meanSquaredError'); + } + assertShapesMatch( + $labels.shape, $predictions.shape, 'Error in meanSquaredError: '); + + const losses = squaredDifference($labels, $predictions); + return computeWeightedLoss(losses, $weights, reduction); +} +export const meanSquaredError = op({meanSquaredError_}); diff --git a/tfjs-core/src/ops/mean_squared_error_test.ts b/tfjs-core/src/ops/mean_squared_error_test.ts new file mode 100644 index 00000000000..1a3390cc6e0 --- /dev/null +++ b/tfjs-core/src/ops/mean_squared_error_test.ts @@ -0,0 +1,227 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ +import * as tf from '../index'; +import {ALL_ENVS, describeWithFlags} from '../jasmine_util'; +import {expectArraysClose} from '../test_util'; + +describeWithFlags('meanSquaredError', ALL_ENVS, () => { + it('1D', async () => { + const predictions = tf.tensor1d([1, 2, 3]); + const label = tf.tensor1d([0.3, -0.6, -0.1]); + + const y = tf.losses.meanSquaredError(label, predictions); + + expect(y.shape).toEqual([]); + expectArraysClose( + await y.data(), + ((1 - 0.3) * (1 - 0.3) + (2 - (-0.6)) * (2 - (-0.6)) + + (3 - (-0.1)) * (3 - (-0.1))) / + 3); + }); + + it('1D - weighted - Reduction.SUM_BY_NONZERO_WEIGHTS', async () => { + const predictions = tf.tensor1d([1, 2, 3]); + const label = tf.tensor1d([0.3, -0.6, -0.1]); + const weights = tf.tensor1d([0.1, 0.2, 0.3]); + + const y = tf.losses.meanSquaredError(label, predictions, weights); + + expect(y.shape).toEqual([]); + expectArraysClose( + await y.data(), + ((1 - 0.3) * (1 - 0.3) * 0.1 + (2 - (-0.6)) * (2 - (-0.6)) * 0.2 + + (3 - (-0.1)) * (3 - (-0.1)) * 0.3) / + 3); + }); + + it('1D - weighted - Reduction.NONE', async () => { + const predictions = tf.tensor1d([1, 2, 3]); + const label = tf.tensor1d([0.3, -0.6, -0.1]); + const weights = tf.tensor1d([0.1, 0.2, 0.3]); + + const y = tf.losses.meanSquaredError( + label, predictions, weights, tf.Reduction.NONE); + + expect(y.shape).toEqual([3]); + expectArraysClose(await y.data(), [ + (1 - 0.3) * (1 - 0.3) * 0.1, (2 - (-0.6)) * (2 - (-0.6)) * 0.2, + (3 - (-0.1)) * (3 - 
(-0.1)) * 0.3 + ]); + }); + + it('1D - Reduction.MEAN', async () => { + const predictions = tf.tensor1d([1, 2, 3]); + const label = tf.tensor1d([0.3, -0.6, -0.1]); + + const y = tf.losses.meanSquaredError( + label, predictions, undefined, tf.Reduction.MEAN); + + expect(y.shape).toEqual([]); + expectArraysClose( + await y.data(), + ((1 - 0.3) * (1 - 0.3) + (2 - (-0.6)) * (2 - (-0.6)) + + (3 - (-0.1)) * (3 - (-0.1))) / + 3); + }); + + it('1D - weighted - Reduction.MEAN', async () => { + const predictions = tf.tensor1d([1, 2, 3]); + const label = tf.tensor1d([0.3, -0.6, -0.1]); + const weights = tf.tensor1d([0.1, 0.2, 0.3]); + + const y = tf.losses.meanSquaredError( + label, predictions, weights, tf.Reduction.MEAN); + + expect(y.shape).toEqual([]); + expectArraysClose( + await y.data(), + (((1 - 0.3) * (1 - 0.3) * 0.1) + ((2 - (-0.6)) * (2 - (-0.6)) * 0.2) + + ((3 - (-0.1)) * (3 - (-0.1)) * 0.3)) / + 0.6); + }); + + it('2D', async () => { + const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); + const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); + + const y = tf.losses.meanSquaredError(label, predictions); + + expect(y.shape).toEqual([]); + expectArraysClose( + await y.data(), + ((4 - 1) * (4 - 1) + (8 - 9) * (8 - 9) + (12 - 2) * (12 - 2) + + (8 - (-5)) * (8 - (-5)) + (1 - (-2)) * (1 - (-2)) + + (3 - 6) * (3 - 6)) / + 6); + }); + + it('2D - weighted - Reduction.SUM_BY_NONZERO_WEIGHTS', async () => { + const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); + const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); + const weights = tf.tensor2d([3, 0, 5, 0, 4, 2], [2, 3]); + + const y = tf.losses.meanSquaredError(label, predictions, weights); + + expect(y.shape).toEqual([]); + expectArraysClose( + await y.data(), + ((4 - 1) * (4 - 1) * 3 + (8 - 9) * (8 - 9) * 0 + + (12 - 2) * (12 - 2) * 5 + (8 - (-5)) * (8 - (-5)) * 0 + + (1 - (-2)) * (1 - (-2)) * 4 + (3 - 6) * (3 - 6) * 2) / + 4); + }); + + it('2D - weighted - Reduction.NONE', async () => { + const 
predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); + const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); + const weights = tf.tensor2d([3, 6, 5, 0, 4, 2], [2, 3]); + + const y = tf.losses.meanSquaredError( + label, predictions, weights, tf.Reduction.NONE); + + expect(y.shape).toEqual([2, 3]); + expectArraysClose(await y.data(), [ + (4 - 1) * (4 - 1) * 3, (8 - 9) * (8 - 9) * 6, (12 - 2) * (12 - 2) * 5, + (8 - (-5)) * (8 - (-5)) * 0, (1 - (-2)) * (1 - (-2)) * 4, + (3 - 6) * (3 - 6) * 2 + ]); + }); + + it('2D - Reduction.MEAN', async () => { + const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); + const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); + + const y = tf.losses.meanSquaredError( + label, predictions, undefined, tf.Reduction.MEAN); + + expect(y.shape).toEqual([]); + expectArraysClose( + await y.data(), + ((4 - 1) * (4 - 1) + (8 - 9) * (8 - 9) + (12 - 2) * (12 - 2) + + (8 - (-5)) * (8 - (-5)) + (1 - (-2)) * (1 - (-2)) + + (3 - 6) * (3 - 6)) / + 6); + }); + + it('2D - weighted - Reduction.MEAN', async () => { + const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); + const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); + const weights = tf.tensor2d([3, 6, 5, 0, 4, 2], [2, 3]); + + const y = tf.losses.meanSquaredError( + label, predictions, weights, tf.Reduction.MEAN); + + expect(y.shape).toEqual([]); + expectArraysClose( + await y.data(), + ((4 - 1) * (4 - 1) * 3 + (8 - 9) * (8 - 9) * 6 + + (12 - 2) * (12 - 2) * 5 + (8 - (-5)) * (8 - (-5)) * 0 + + (1 - (-2)) * (1 - (-2)) * 4 + (3 - 6) * (3 - 6) * 2) / + 20); + }); + + it('throws when passed label as a non-tensor', () => { + const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); + const weights = tf.tensor2d([3, 6, 5, 0, 4, 2], [2, 3]); + + const e = /Argument 'labels' passed to 'meanSquaredError' must be a Tensor/; + expect( + () => tf.losses.meanSquaredError( + {} as tf.Tensor, predictions, weights, tf.Reduction.MEAN)) + .toThrowError(e); + }); + + it('throws when 
passed label as a non-tensor', () => { + const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); + const weights = tf.tensor2d([3, 6, 5, 0, 4, 2], [2, 3]); + + const e = new RegExp( + 'Argument \'predictions\' passed to \'meanSquaredError\' ' + + 'must be a Tensor'); + expect( + () => tf.losses.meanSquaredError( + label, {} as tf.Tensor, weights, tf.Reduction.MEAN)) + .toThrowError(e); + }); + + it('throws when passed weights as a non-tensor', () => { + const predictions = tf.tensor2d([4, 8, 12, 8, 1, 3], [2, 3]); + const label = tf.tensor2d([1, 9, 2, -5, -2, 6], [2, 3]); + + const e = + /Argument 'weights' passed to 'meanSquaredError' must be a Tensor/; + expect( + () => tf.losses.meanSquaredError( + label, predictions, {} as tf.Tensor, tf.Reduction.MEAN)) + .toThrowError(e); + }); + + it('accepts a tensor-like object', async () => { + const predictions = [1, 2, 3]; + const label = [0.3, -0.6, -0.1]; + const weights = [0.1, 0.2, 0.3]; + + const y = tf.losses.meanSquaredError( + label, predictions, weights, tf.Reduction.NONE); + + expect(y.shape).toEqual([3]); + expectArraysClose(await y.data(), [ + (1 - 0.3) * (1 - 0.3) * 0.1, (2 - (-0.6)) * (2 - (-0.6)) * 0.2, + (3 - (-0.1)) * (3 - (-0.1)) * 0.3 + ]); + }); +}); diff --git a/tfjs-core/src/ops/ops.ts b/tfjs-core/src/ops/ops.ts index 81c1166dc94..f4a236ade57 100644 --- a/tfjs-core/src/ops/ops.ts +++ b/tfjs-core/src/ops/ops.ts @@ -131,7 +131,6 @@ export * from './in_top_k'; export {op} from './operation'; -import * as losses from './loss_ops'; import * as spectral from './spectral_ops'; import * as fused from './fused_ops'; import * as signal from './signal_ops'; @@ -164,5 +163,27 @@ const linalg = { qr }; +// losses namespace; +import {absoluteDifference} from './absolute_difference'; +import {computeWeightedLoss} from './compute_weighted_loss'; +import {cosineDistance} from './cosine_distance'; +import {hingeLoss} from './hinge_loss'; +import {huberLoss} from './huber_loss'; +import {logLoss} from './log_loss'; 
+import {meanSquaredError} from './mean_squared_error'; +import {sigmoidCrossEntropy} from './sigmoid_cross_entropy'; +import {softmaxCrossEntropy} from './softmax_cross_entropy'; +const losses = { + absoluteDifference, + computeWeightedLoss, + cosineDistance, + hingeLoss, + huberLoss, + logLoss, + meanSquaredError, + sigmoidCrossEntropy, + softmaxCrossEntropy +}; + // Second level exports. export {image, linalg, losses, spectral, fused, signal}; diff --git a/tfjs-core/src/ops/sigmoid_cross_entropy.ts b/tfjs-core/src/ops/sigmoid_cross_entropy.ts new file mode 100644 index 00000000000..c2b57368bf2 --- /dev/null +++ b/tfjs-core/src/ops/sigmoid_cross_entropy.ts @@ -0,0 +1,117 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + +import {Tensor} from '../tensor'; +import {convertToTensor} from '../tensor_util_env'; +import {TensorLike} from '../types'; +import {assertShapesMatch} from '../util'; + +import {add} from './add'; +import {computeWeightedLoss} from './compute_weighted_loss'; +import {Reduction} from './loss_ops_utils'; +import {mul} from './mul'; +import {op} from './operation'; +import {relu} from './relu'; +import {sub} from './sub'; +import {scalar} from './tensor_ops'; +import {abs, exp, log1p, neg} from './unary_ops'; + +function sigmoidCrossEntropyWithLogits_( + labels: T|TensorLike, logits: T|TensorLike): O { + const $labels = + convertToTensor(labels, 'labels', 'sigmoidCrossEntropyWithLogits'); + const $logits = + convertToTensor(logits, 'logits', 'sigmoidCrossEntropyWithLogits'); + assertShapesMatch( + $labels.shape, $logits.shape, 'Error in sigmoidCrossEntropyWithLogits: '); + + /** + * Implementation Details: + * + * For brevity, let `x = logits`, `z = labels`. 
The logistic loss is + * z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)) + * = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x))) + * = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x))) + * = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)) + * = (1 - z) * x + log(1 + exp(-x)) + * = x - x * z + log(1 + exp(-x)) + * + * For x < 0, to avoid overflow in exp(-x), we reformulate the above + * x - x * z + log(1 + exp(-x)) + * = log(exp(x)) - x * z + log(1 + exp(-x)) + * = - x * z + log(1 + exp(x)) + * + * Hence, to ensure stability and avoid overflow, the implementation uses + * this equivalent formulation: + * max(x, 0) - x * z + log(1 + exp(-abs(x))) + */ + const maxOutput = relu($logits); + const outputXTarget = mul($logits, $labels); + const sigmoidOutput = log1p(exp(neg(abs($logits)))); + + return add(sub(maxOutput, outputXTarget), sigmoidOutput); +} + +/** + * Computes the sigmoid cross entropy loss between two tensors. + * + * If labelSmoothing is nonzero, smooth the labels towards 1/2: + * + * newMulticlassLabels = multiclassLabels * (1 - labelSmoothing) + * + 0.5 * labelSmoothing + * + * @param multiClassLabels The ground truth output tensor of shape + * [batch_size, num_classes], same dimensions as 'predictions'. + * @param logits The predicted outputs. + * @param weights Tensor whose rank is either 0, or the same rank as + * `labels`, and must be broadcastable to `labels` (i.e., all dimensions + * must be either `1`, or the same as the corresponding `losses` + * dimension). + * @param labelSmoothing If greater than 0, then smooth the labels. + * @param reduction Type of reduction to apply to loss. 
Should be of type + * `Reduction` + */ +/** @doc { heading: 'Training', subheading: 'Losses', namespace: 'losses' } */ +function sigmoidCrossEntropy_( + multiClassLabels: T|TensorLike, logits: T|TensorLike, + weights?: Tensor|TensorLike, labelSmoothing = 0, + reduction = Reduction.SUM_BY_NONZERO_WEIGHTS): O { + let $multiClassLabels = convertToTensor( + multiClassLabels, 'multiClassLabels', 'sigmoidCrossEntropy'); + const $logits = convertToTensor(logits, 'logits', 'sigmoidCrossEntropy'); + let $weights: Tensor = null; + if (weights != null) { + $weights = convertToTensor(weights, 'weights', 'sigmoidCrossEntropy'); + } + assertShapesMatch( + $multiClassLabels.shape, $logits.shape, 'Error in sigmoidCrossEntropy: '); + + if (labelSmoothing > 0) { + const labelSmoothingScalar = scalar(labelSmoothing); + const one = scalar(1); + const half = scalar(0.5); + + $multiClassLabels = + add(mul($multiClassLabels, sub(one, labelSmoothingScalar)), + mul(half, labelSmoothingScalar)); + } + const losses = sigmoidCrossEntropyWithLogits_($multiClassLabels, $logits); + + return computeWeightedLoss(losses, $weights, reduction); +} + +export const sigmoidCrossEntropy = op({sigmoidCrossEntropy_}); diff --git a/tfjs-core/src/ops/sigmoid_cross_entropy_test.ts b/tfjs-core/src/ops/sigmoid_cross_entropy_test.ts new file mode 100644 index 00000000000..743569cc106 --- /dev/null +++ b/tfjs-core/src/ops/sigmoid_cross_entropy_test.ts @@ -0,0 +1,175 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ +import * as tf from '../index'; +import {ALL_ENVS, describeWithFlags} from '../jasmine_util'; +import {expectArraysClose} from '../test_util'; + +describeWithFlags('sigmoidCrossEntropy', ALL_ENVS, () => { + it('All wrong', async () => { + const label = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]); + const predictions = tf.tensor2d( + [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]], + [3, 3]); + + const y = tf.losses.sigmoidCrossEntropy(label, predictions); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), 6.6667123); + }); + + it('All right', async () => { + const label = tf.tensor2d([[1, 0, 0], [0, 1, 0], [0, 0, 1]], [3, 3]); + const predictions = tf.tensor2d( + [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]], + [3, 3]); + + const y = tf.losses.sigmoidCrossEntropy(label, predictions); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), 0); + }); + + it('Weighted - Reduction.SUM_BY_NONZERO_WEIGHTS', async () => { + const label = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]); + const predictions = tf.tensor2d( + [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]], + [3, 3]); + + const weights = tf.tensor2d([[0.1, 0.2, 0.3]]); + + const y = tf.losses.sigmoidCrossEntropy(label, predictions, weights); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), 1.3333424); + }); + + it('Weighted - Reduction.NONE', async () => { + const label = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]); + const predictions = tf.tensor2d( + [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]], + [3, 3]); + const weights = tf.tensor2d([[0.1, 0.2, 0.3]]); + + const y = tf.losses.sigmoidCrossEntropy( + label, predictions, weights, undefined, 
tf.Reduction.NONE); + + expect(y.shape).toEqual([3, 3]); + expectArraysClose(await y.data(), [ + 1.0000046, 9.0797803e-06, 3.0000138e+00, 1.0000046e+00, 2.0000093e+00, + 1.3619671e-05, 4.5398901e-06, 2.0000093e+00, 3.0000138e+00 + ]); + }); + + it('Reduction.MEAN', async () => { + const label = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]); + const predictions = tf.tensor2d( + [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]], + [3, 3]); + + const y = tf.losses.sigmoidCrossEntropy( + label, predictions, undefined, undefined, tf.Reduction.MEAN); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), 6.6667123); + }); + + it('Weighted - Reduction.MEAN', async () => { + const label = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]); + const predictions = tf.tensor2d( + [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]], + [3, 3]); + const weights = tf.tensor2d([[0.1, 0.2, 0.3]]); + + const y = tf.losses.sigmoidCrossEntropy( + label, predictions, weights, undefined, tf.Reduction.MEAN); + + expect(y.shape).toEqual([]); + expectArraysClose( + await y.data(), + 6.666712284088135, + ); + }); + + it('Label Smoothing - Weighted - Reduction.MEAN', async () => { + const label = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]); + const predictions = tf.tensor2d( + [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]], + [3, 3]); + const weights = tf.tensor2d([[0.1, 0.2, 0.3]]); + const labelSmoothing = 0.3; + + const y = tf.losses.sigmoidCrossEntropy( + label, predictions, weights, labelSmoothing, tf.Reduction.MEAN); + + expect(y.shape).toEqual([]); + expectArraysClose(await y.data(), 6.1667128); + }); + + it('throws when multiClassLabels and logits are of different shapes', () => { + const multiClassLabels = + tf.tensor2d([10, 10, 10, 10, 10, 10, 10, 10, 10], [3, 3]); + const logits = tf.tensor2d([10, 10, 10, 10, 10, 10], [2, 3]); + + const e = new RegExp( + 'Error in sigmoidCrossEntropy: Shapes 
3,3 and 2,3 must match'); + expect(() => tf.losses.sigmoidCrossEntropy(multiClassLabels, logits)) + .toThrowError(e); + }); + + it('throws when passed multiClassLabels as a non-tensor', () => { + const predictions = tf.tensor2d( + [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]], + [3, 3]); + const weights = tf.tensor2d([[0.1, 0.2, 0.3]]); + + const e = new RegExp( + 'Argument \'multiClassLabels\' passed to \'sigmoidCrossEntropy\' ' + + 'must be a Tensor'); + + expect( + () => tf.losses.sigmoidCrossEntropy( + {} as tf.Tensor, predictions, weights, tf.Reduction.MEAN)) + .toThrowError(e); + }); + + it('throws when passed logits as a non-tensor', () => { + const label = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]); + const weights = tf.tensor2d([[0.1, 0.2, 0.3]]); + + const e = new RegExp( + 'Argument \'logits\' passed to \'sigmoidCrossEntropy\' ' + + 'must be a Tensor'); + expect( + () => tf.losses.sigmoidCrossEntropy( + label, {} as tf.Tensor, weights, tf.Reduction.MEAN)) + .toThrowError(e); + }); + + it('throws when passed weights as a non-tensor', () => { + const label = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]); + const predictions = tf.tensor2d( + [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]], + [3, 3]); + + const e = + /Argument 'weights' passed to 'sigmoidCrossEntropy' must be a Tensor/; + expect( + () => tf.losses.sigmoidCrossEntropy( + label, predictions, {} as tf.Tensor, tf.Reduction.MEAN)) + .toThrowError(e); + }); +}); diff --git a/tfjs-core/src/ops/softmax_cross_entropy.ts b/tfjs-core/src/ops/softmax_cross_entropy.ts new file mode 100644 index 00000000000..b7cd8e11605 --- /dev/null +++ b/tfjs-core/src/ops/softmax_cross_entropy.ts @@ -0,0 +1,152 @@ +/** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ +import {customGrad} from '../gradients'; +import {Tensor} from '../tensor'; +import {GradSaveFunc} from '../tensor_types'; +import {convertToTensor} from '../tensor_util_env'; +import {TensorLike} from '../types'; +import {assertShapesMatch} from '../util'; + +import {add} from './add'; +import {cast, reshape} from './array_ops'; +import {expandShapeToKeepDim} from './axis_util'; +import {computeWeightedLoss} from './compute_weighted_loss'; +import {div} from './div'; +import {Reduction} from './loss_ops_utils'; +import {mul} from './mul'; +import {op} from './operation'; +import {logSumExp, sum} from './reduction_ops'; +import {sub} from './sub'; +import {scalar} from './tensor_ops'; +import {exp, neg} from './unary_ops'; + +/** + * Computes softmax cross entropy between logits and labels. + * + * Measures the probability error in discrete classification tasks in which + * the classes are mutually exclusive (each entry is in exactly one class). + * For example, each CIFAR-10 image is labeled with one and only one label: an + * image can be a dog or a truck, but not both. + * + * `NOTE`: While the classes are mutually exclusive, their probabilities need + * not be. All that is required is that each row of labels is a valid + * probability distribution. If they are not, the computation of the gradient + * will be incorrect. + * + * `WARNING`: This op expects unscaled logits, since it performs a softmax on + * logits internally for efficiency. 
Do not call this op with the output of + * softmax, as it will produce incorrect results. + * + * logits and labels must have the same shape, e.g. [batch_size, num_classes] + * and the same dtype. + * @param labels The labels array. + * @param logits The logits array. + * @param dim The dimension softmax would be performed on. Defaults to `-1` + * which indicates the last dimension. + */ +function softmaxCrossEntropyWithLogits_( + labels: T, logits: T, dim = -1): O { + if (dim === -1) { + dim = logits.rank - 1; + } + + if (dim !== logits.rank - 1) { + throw Error( + `Softmax cross entropy along a non-last dimension is not yet ` + + `supported. Labels / logits was rank ${logits.rank} ` + + `and dim was ${dim}`); + } + // Use a custom gradient for numerical stability. + const customOp = + customGrad((labels: Tensor, logits: Tensor, save: GradSaveFunc) => { + // Reference: + // 1. http://cs231n.github.io/linear-classify/#softmax + // 2. https://blog.feedly.com/tricks-of-the-trade-logsumexp/ + const keepDims = true; + const lse = logSumExp(logits, [dim], keepDims); + const logResult = sub(cast(logits, 'float32'), lse); + save([labels, logResult]); + + const costVector = neg(mul(logResult, labels)); + const value: O = sum(costVector, [dim]); + + const gradFunc = (dy: O, saved: Tensor[]) => { + const [labels, logResult] = saved; + const dyShape = expandShapeToKeepDim(dy.shape, [dim]); + return [ + mul(reshape(dy, dyShape), + sub(cast(labels, 'float32'), exp(logResult))), + mul(reshape(dy, dyShape), + sub(exp(logResult), cast(labels, 'float32'))), + ]; + }; + return {value, gradFunc}; + }); + + return customOp(labels, logits); +} + +/** + * Computes the softmax cross entropy loss between two tensors. 
+ *
+ * If labelSmoothing is nonzero, smooth the labels towards 1/2:
+ *
+ *   newOnehotLabels = onehotLabels * (1 - labelSmoothing)
+ *                         + labelSmoothing / numClasses
+ *
+ * @param onehotLabels One hot encoded labels
+ *    [batch_size, num_classes], same dimensions as 'predictions'.
+ * @param logits The predicted outputs.
+ * @param weights Tensor whose rank is either 0, or 1, and must be
+ *    broadcastable to `loss` of shape [batch_size]
+ * @param labelSmoothing If greater than 0, then smooth the labels.
+ * @param reduction Type of reduction to apply to loss. Should be of type
+ *    `Reduction`
+ */
+/** @doc { heading: 'Training', subheading: 'Losses', namespace: 'losses' } */
+function softmaxCrossEntropy_<T extends Tensor, O extends Tensor>(
+    onehotLabels: T|TensorLike, logits: T|TensorLike,
+    weights?: Tensor|TensorLike, labelSmoothing = 0,
+    reduction = Reduction.SUM_BY_NONZERO_WEIGHTS): O {
+  let $onehotLabels =
+      convertToTensor(onehotLabels, 'onehotLabels', 'softmaxCrossEntropy');
+  const $logits = convertToTensor(logits, 'logits', 'softmaxCrossEntropy');
+  let $weights: Tensor = null;
+
+  if (weights != null) {
+    $weights = convertToTensor(weights, 'weights', 'softmaxCrossEntropy');
+  }
+
+  assertShapesMatch(
+      $onehotLabels.shape, $logits.shape, 'Error in softmaxCrossEntropy: ');
+
+  if (labelSmoothing > 0) {
+    const labelSmoothingScalar = scalar(labelSmoothing);
+    const one = scalar(1);
+    const numClasses = scalar($onehotLabels.shape[1]);
+
+    $onehotLabels =
+        add(mul($onehotLabels, sub(one, labelSmoothingScalar)),
+            div(labelSmoothingScalar, numClasses));
+  }
+
+  const losses = softmaxCrossEntropyWithLogits_($onehotLabels, $logits);
+
+  return computeWeightedLoss(losses, $weights, reduction);
+}
+
+export const softmaxCrossEntropy = op({softmaxCrossEntropy_});
diff --git a/tfjs-core/src/ops/softmax_cross_entropy_test.ts b/tfjs-core/src/ops/softmax_cross_entropy_test.ts
new file mode 100644
index 00000000000..b4b01e9d00b
--- /dev/null
+++ b/tfjs-core/src/ops/softmax_cross_entropy_test.ts
@@
-0,0 +1,185 @@
+/**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+import * as tf from '../index';
+import {ALL_ENVS, describeWithFlags} from '../jasmine_util';
+import {expectArraysClose} from '../test_util';
+
+describeWithFlags('softmaxCrossEntropy', ALL_ENVS, () => {
+  it('All wrong', async () => {
+    const label = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]);
+    const predictions = tf.tensor2d(
+        [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]],
+        [3, 3]);
+
+    const y = tf.losses.softmaxCrossEntropy(label, predictions);
+
+    expect(y.shape).toEqual([]);  // default reduction yields a scalar loss
+    expectArraysClose(await y.data(), 20);
+  });
+
+  it('All right', async () => {
+    const label = tf.tensor2d([[1, 0, 0], [0, 1, 0], [0, 0, 1]], [3, 3]);
+    const predictions = tf.tensor2d(
+        [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]],
+        [3, 3]);
+
+    const y = tf.losses.softmaxCrossEntropy(label, predictions);
+
+    expect(y.shape).toEqual([]);
+    expectArraysClose(await y.data(), 0);  // correct one-hot predictions drive loss to ~0
+  });
+
+  it('Weighted - Reduction.SUM_BY_NONZERO_WEIGHTS', async () => {
+    const label = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]);
+    const predictions = tf.tensor2d(
+        [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]],
+        [3, 3]);
+
+    const weights =
+        tf.tensor2d([[0.1, 0.2, 0.3], [0.1, 0.2, 0.3],
[0.1, 0.2, 0.3]]);
+
+    const y = tf.losses.softmaxCrossEntropy(label, predictions, weights);
+
+    expect(y.shape).toEqual([]);
+    expectArraysClose(await y.data(), 4);
+  });
+
+  it('Weighted - Reduction.NONE', async () => {
+    const label = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]);
+    const predictions = tf.tensor2d(
+        [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]],
+        [3, 3]);
+    const weights = tf.tensor1d([0.1, 0.2, 0.3]);
+
+    const y = tf.losses.softmaxCrossEntropy(
+        label, predictions, weights, undefined, tf.Reduction.NONE);
+
+    expect(y.shape).toEqual([3]);  // NONE keeps the per-example loss vector
+    expectArraysClose(await y.data(), [2, 4, 6]);
+  });
+
+  it('Reduction.MEAN', async () => {
+    const label = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]);
+    const predictions = tf.tensor2d(
+        [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]],
+        [3, 3]);
+
+    const y = tf.losses.softmaxCrossEntropy(
+        label, predictions, undefined, undefined, tf.Reduction.MEAN);
+
+    expect(y.shape).toEqual([]);
+    expectArraysClose(await y.data(), 20);
+  });
+
+  it('Weighted - Reduction.MEAN', async () => {
+    const label = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]);
+    const predictions = tf.tensor2d(
+        [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]],
+        [3, 3]);
+    const weights = tf.tensor1d([0.1, 0.2, 0.3]);
+
+    const y = tf.losses.softmaxCrossEntropy(
+        label, predictions, weights, undefined, tf.Reduction.MEAN);
+
+    expect(y.shape).toEqual([]);
+    expectArraysClose(
+        await y.data(),
+        20,
+    );
+  });
+
+  it('Label Smoothing - Weighted - Reduction.MEAN', async () => {
+    const label = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]);
+    const predictions = tf.tensor2d(
+        [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]],
+        [3, 3]);
+    const weights = tf.tensor2d([[0.1, 0.2, 0.3]]);
+    const labelSmoothing = 0.3;
+
+    const y = tf.losses.softmaxCrossEntropy(
+        label, predictions, weights, labelSmoothing, tf.Reduction.MEAN);
+
+    expect(y.shape).toEqual([]);
+    expectArraysClose(await y.data(), 18);  // smoothing lowers the loss below the unsmoothed 20
+  });
+
+  it('throws when multiClassLabels and logits are of different shapes', () => {
+    const multiClassLabels =
+        tf.tensor2d([10, 10, 10, 10, 10, 10, 10, 10, 10], [3, 3]);
+    const logits = tf.tensor2d([10, 10, 10, 10, 10, 10], [2, 3]);
+
+    const e = new RegExp(
+        'Error in softmaxCrossEntropy: Shapes 3,3 and 2,3 must match');
+    expect(() => tf.losses.softmaxCrossEntropy(multiClassLabels, logits))
+        .toThrowError(e);
+  });
+
+  it('throws when passed multiClassLabels as a non-tensor', () => {
+    const predictions = tf.tensor2d(
+        [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]],
+        [3, 3]);
+    const weights = tf.tensor2d([[0.1, 0.2, 0.3]]);
+
+    const e = new RegExp(
+        'Argument \'onehotLabels\' passed to \'softmaxCrossEntropy\' ' +
+        'must be a Tensor');
+
+    expect(
+        () => tf.losses.softmaxCrossEntropy(
+            {} as tf.Tensor, predictions, weights, tf.Reduction.MEAN))
+        .toThrowError(e);
+  });
+
+  it('throws when passed logits as a non-tensor', () => {
+    const label = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]);
+    const weights = tf.tensor2d([[0.1, 0.2, 0.3]]);
+
+    const e = new RegExp(
+        'Argument \'logits\' passed to \'softmaxCrossEntropy\' ' +
+        'must be a Tensor');
+    expect(
+        () => tf.losses.softmaxCrossEntropy(
+            label, {} as tf.Tensor, weights, tf.Reduction.MEAN))
+        .toThrowError(e);
+  });
+
+  it('throws when passed weights as a non-tensor', () => {
+    const label = tf.tensor2d([[0, 0, 1], [1, 0, 0], [0, 1, 0]], [3, 3]);
+    const predictions = tf.tensor2d(
+        [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]],
+        [3, 3]);
+
+    const e =
+        /Argument 'weights' passed to 'softmaxCrossEntropy' must be a Tensor/;
+    expect(
+        () => tf.losses.softmaxCrossEntropy(
+            label, predictions, {} as tf.Tensor, tf.Reduction.MEAN))
+        .toThrowError(e);
+  });
+
+  it('accepts a tensor-like object', async () => {
+    const label = [[0, 0, 1], [1, 0, 0], [0, 1, 0]];
+    const
predictions =
+        [[10.0, -10.0, -10.0], [-10.0, 10.0, -10.0], [-10.0, -10.0, 10.0]];
+
+    const y = tf.losses.softmaxCrossEntropy(label, predictions);
+
+    expect(y.shape).toEqual([]);
+    expectArraysClose(await y.data(), 20);  // same result as the 'All wrong' Tensor case
+  });
+});
diff --git a/tfjs-core/src/tests.ts b/tfjs-core/src/tests.ts
index 43a1524980f..3cc1ebe433a 100644
--- a/tfjs-core/src/tests.ts
+++ b/tfjs-core/src/tests.ts
@@ -38,6 +38,7 @@ import './io/router_registry_test';
 import './io/weights_loader_test';
 import './jasmine_util_test';
 import './kernel_registry_test';
+import './ops/absolute_difference_test';
 import './ops/add_n_test';
 import './ops/add_test';
 import './ops/arithmetic_test';
@@ -56,6 +57,7 @@ import './ops/broadcast_util_test';
 import './ops/clone_test';
 import './ops/compare_ops_test';
 import './ops/complex_ops_test';
+import './ops/compute_weighted_loss_test';
 import './ops/concat_test';
 import './ops/concat_util_test';
 import './ops/confusion_matrix_test';
@@ -67,6 +69,7 @@ import './ops/conv2d_transpose_test';
 import './ops/conv3d_test';
 import './ops/conv3d_transpose_test';
 import './ops/conv_util_test';
+import './ops/cosine_distance_test';
 import './ops/crop_and_resize_test';
 import './ops/cumsum_test';
 import './ops/depth_to_space_test';
@@ -82,20 +85,23 @@ import './ops/gather_nd_test';
 import './ops/gram_schmidt_test';
 import './ops/greater_equal_test';
 import './ops/greater_test';
+import './ops/hinge_loss_test';
+import './ops/huber_loss_test';
 import './ops/in_top_k_test';
 import './ops/leaky_relu_test';
 import './ops/less_equal_test';
 import './ops/less_test';
 import './ops/local_response_normalization_test';
+import './ops/log_loss_test';
 import './ops/logical_and_test';
 import './ops/logical_not_test';
 import './ops/logical_or_test';
 import './ops/logical_xor_test';
-import './ops/loss_ops_test';
 import './ops/mat_mul_test';
 import './ops/max_pool_3d_test';
 import './ops/max_pool_test';
 import './ops/max_pool_with_argmax_test';
+import
'./ops/mean_squared_error_test'; import './ops/moving_average_test'; import './ops/multi_rnn_cell_test'; import './ops/multinomial_test'; @@ -120,9 +126,11 @@ import './ops/reverse_test'; import './ops/scatter_nd_test'; import './ops/segment_ops_test'; import './ops/selu_test'; +import './ops/sigmoid_cross_entropy_test'; import './ops/signal_ops_test'; import './ops/slice_test'; import './ops/slice_util_test'; +import './ops/softmax_cross_entropy_test'; import './ops/softmax_test'; import './ops/space_to_batch_nd_test'; import './ops/sparse_to_dense_test';