Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
41 changes: 41 additions & 0 deletions tfjs-core/src/gradients/Elu_grad.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
import {ENGINE, ForwardFunc} from '../engine';
import {Elu, EluGrad, EluGradInputs} from '../kernel_names';
import {GradConfig} from '../kernel_registry';
import {Tensor} from '../tensor';
import {NamedTensorMap} from '../tensor_types';

/**
 * Gradient config for the Elu kernel. The forward pass saves its output
 * tensor y (outputsToSave: [true]); the derivative is delegated to the
 * backend's EluGrad kernel, which computes it from dy and y.
 */
export const eluGradConfig: GradConfig = {
  kernelName: Elu,
  outputsToSave: [true],
  gradFunc: (dy: Tensor, saved: Tensor[]) => {
    const [y] = saved;

    const eluGradInputs: EluGradInputs = {dy, y};

    // Backend-level derivative: eluDer(dy, y).
    const derKernelFunc: ForwardFunc<Tensor> = (backend) =>
        backend.eluDer(dy, y);

    return {
      x: () => ENGINE.runKernelFunc(
          derKernelFunc, eluGradInputs as {} as NamedTensorMap,
          null /* grad */, EluGrad)
    };
  }
};
47 changes: 47 additions & 0 deletions tfjs-core/src/gradients/Prelu_grad.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
import {Prelu} from '../kernel_names';
import {GradConfig} from '../kernel_registry';
import {reshape} from '../ops/array_ops';
import {getReductionAxes} from '../ops/broadcast_util';
import {greater} from '../ops/greater';
import {where} from '../ops/logical_ops';
import {mul} from '../ops/mul';
import {sum} from '../ops/reduction_ops';
import {zerosLike} from '../ops/tensor_ops';
import {Tensor} from '../tensor';

/**
 * Gradient config for the Prelu kernel (prelu(x) = x > 0 ? x : alpha * x).
 * Saves both inputs: x decides which branch was taken, alpha scales the
 * negative branch.
 */
export const preluGradConfig: GradConfig = {
  kernelName: Prelu,
  inputsToSave: ['x', 'alpha'],
  gradFunc: (dy: Tensor, saved: Tensor[]) => {
    const [x, alpha] = saved;
    const positive = greater(x, 0);

    // d/dx: gradient passes through where x > 0, scaled by alpha elsewhere.
    const gradForX = () => where(positive, dy, mul(dy, alpha));

    // d/dalpha: zero where x > 0, dy * x elsewhere. alpha may have been
    // broadcast against x, so sum over the broadcast axes and reshape back
    // to alpha's shape.
    const gradForAlpha = () => {
      let alphaGrad = where(positive, zerosLike(dy), mul(dy, x));
      const broadcastAxes = getReductionAxes(alpha.shape, dy.shape);
      if (broadcastAxes.length > 0) {
        alphaGrad = sum(alphaGrad, broadcastAxes);
      }
      return reshape(alphaGrad, alpha.shape);
    };

    return {x: gradForX, alpha: gradForAlpha};
  }
};
34 changes: 34 additions & 0 deletions tfjs-core/src/gradients/Relu6_grad.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
import {Relu6} from '../kernel_names';
import {GradConfig} from '../kernel_registry';
import {cast} from '../ops/array_ops';
import {lessEqual} from '../ops/less_equal';
import {mul} from '../ops/mul';
import {step} from '../ops/unary_ops';
import {Tensor} from '../tensor';

/**
 * Gradient config for the Relu6 kernel (relu6(x) = min(max(x, 0), 6)).
 * The derivative is 1 where 0 < x <= 6 and 0 elsewhere.
 */
export const relu6GradConfig: GradConfig = {
  kernelName: Relu6,
  inputsToSave: ['x'],
  gradFunc: (dy: Tensor, saved: Tensor[]) => {
    const [x] = saved;
    // step(x) zeroes out x <= 0; lessEqual(x, 6) zeroes out x > 6.
    const passBand = mul(lessEqual(x, 6), step(x));

    return {x: () => mul(dy, cast(passBand, 'float32'))};
  }
};
4 changes: 3 additions & 1 deletion tfjs-core/src/gradients/Relu_grad.ts
Original file line number Diff line number Diff line change
Expand Up @@ -16,14 +16,16 @@
*/
import {Relu} from '../kernel_names';
import {GradConfig} from '../kernel_registry';
import {cast} from '../ops/array_ops';
import {mul} from '../ops/mul';
import {step} from '../ops/unary_ops';
import {Tensor} from '../tensor';

/**
 * Gradient config for the Relu kernel. step(x) is 1 where x > 0 and 0
 * elsewhere, i.e. exactly the ReLU derivative.
 */
export const reluGradConfig: GradConfig = {
  kernelName: Relu,
  inputsToSave: ['x'],
  gradFunc: (dy: Tensor, saved: Tensor[]) => {
    const [x] = saved;
    const gradForX = () => {
      const derivative = cast(step(x), 'float32');
      return mul(dy, derivative);
    };
    return {x: gradForX};
  }
};
48 changes: 48 additions & 0 deletions tfjs-core/src/gradients/Selu_grad.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
import {Selu} from '../kernel_names';
import {GradConfig} from '../kernel_registry';
import {cast} from '../ops/array_ops';
import {greater} from '../ops/greater';
import {where} from '../ops/logical_ops';
import {mul} from '../ops/mul';
import {SELU_SCALE, SELU_SCALEALPHA} from '../ops/selu_util';
import {scalar} from '../ops/tensor_ops';
import {exp} from '../ops/unary_ops';
import {Tensor} from '../tensor';

/**
 * Gradient config for the Selu kernel. The derivative is SELU_SCALE on the
 * positive side and SELU_SCALEALPHA * exp(x) on the non-positive side.
 */
export const seluGradConfig: GradConfig = {
  kernelName: Selu,
  inputsToSave: ['x'],
  gradFunc: (dy: Tensor, saved: Tensor[]) => {
    const [x] = saved;
    return {
      x: () => {
        const positive = greater(x, scalar(0));

        const scaleAlpha = scalar(SELU_SCALEALPHA);
        const scale = scalar(SELU_SCALE);

        // Positive branch: d/dx (scale * x) = scale.
        const posDer = mul(dy, scale);
        // Non-positive branch: d/dx (scaleAlpha * (exp(x) - 1))
        //   = scaleAlpha * exp(x).
        const negDer = mul(mul(dy, scaleAlpha), exp(cast(x, 'float32')));

        return where(positive, posDer, negDer);
      }
    };
  }
};
15 changes: 15 additions & 0 deletions tfjs-core/src/kernel_names.ts
Original file line number Diff line number Diff line change
Expand Up @@ -193,6 +193,12 @@ export type DiagInputs = Pick<NamedTensorInfoMap, 'x'>;
export const Div = 'Div';
export type DivInputs = BinaryInputs;

// Elu forward kernel: takes a single input tensor 'x'.
export const Elu = 'Elu';
export type EluInputs = Pick<NamedTensorInfoMap, 'x'>;

// Elu gradient kernel: takes the upstream gradient 'dy' and the saved
// forward output 'y' (see Elu_grad.ts).
export const EluGrad = 'EluGrad';
export type EluGradInputs = Pick<NamedTensorInfoMap, 'dy'|'y'>;

export const Equal = 'Equal';
export type EqualInputs = BinaryInputs;

Expand Down Expand Up @@ -364,15 +370,24 @@ export type PoolInputs = Pick<NamedTensorInfoMap, 'input'>;
export const Pow = 'Pow';
export type PowInputs = BinaryInputs;

// Prelu kernel: input tensor 'x' plus the learned negative-slope 'alpha'.
export const Prelu = 'Prelu';
export type PreluInputs = Pick<NamedTensorInfoMap, 'x'|'alpha'>;

export const Real = 'Real';
export type RealInputs = Pick<NamedTensorInfoMap, 'input'>;

export const Relu = 'Relu';
export type ReluInputs = Pick<NamedTensorInfoMap, 'x'>;

export const Relu6 = 'Relu6';
export type Relu6Inputs = Pick<NamedTensorInfoMap, 'x'>;

export const SelectV2 = 'SelectV2';
export type SelectV2Inputs = Pick<NamedTensorInfoMap, 'condition'|'t'|'e'>;

export const Selu = 'Selu';
export type SeluInputs = Pick<NamedTensorInfoMap, 'x'>;

export const SpaceToBatchND = 'SpaceToBatchND';
export type SpaceToBatchNDInputs = Pick<NamedTensorInfoMap, 'x'>;
export interface SpaceToBatchNDAttrs {
Expand Down
54 changes: 54 additions & 0 deletions tfjs-core/src/ops/elu.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/

import {ENGINE, ForwardFunc} from '../engine';
import {Elu, EluInputs} from '../kernel_names';
import {Tensor} from '../tensor';
import {NamedTensorMap} from '../tensor_types';
import {convertToTensor} from '../tensor_util_env';
import {TensorLike} from '../types';

import {op} from './operation';

/**
 * Computes exponential linear element-wise: `x > 0 ? x : (e ^ x) - 1`.
 *
 * ```js
 * const x = tf.tensor1d([-1, 1, -3, 2]);
 *
 * x.elu().print(); // or tf.elu(x)
 * ```
 * @param x The input tensor.
 */
/** @doc {heading: 'Operations', subheading: 'Basic math'} */
function elu_<T extends Tensor>(x: T|TensorLike): T {
  const $x = convertToTensor(x, 'x', 'elu');

  // Saves the forward OUTPUT y (not the input): the gradient is computed
  // from y by the EluGrad kernel (see gradients/Elu_grad.ts, which declares
  // outputsToSave: [true]).
  const forward: ForwardFunc<Tensor> = (backend, save) => {
    const y = backend.elu($x);
    save([y]);
    return y;
  };

  const inputs: EluInputs = {x: $x};

  return ENGINE.runKernelFunc(
             forward, inputs as {} as NamedTensorMap, null /* grad */, Elu) as
      T;
}

export const elu = op({elu_});
73 changes: 73 additions & 0 deletions tfjs-core/src/ops/elu_test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/

import * as tf from '../index';
import {ALL_ENVS, describeWithFlags} from '../jasmine_util';
import {expectArraysClose} from '../test_util';

// Tests for tf.elu: elu(x) = x for x > 0, exp(x) - 1 for x <= 0.
describeWithFlags('elu', ALL_ENVS, () => {
  it('calculate elu', async () => {
    const a = tf.tensor1d([1, -1, 0]);
    const result = tf.elu(a);

    expect(result.shape).toEqual(a.shape);
    // exp(-1) - 1 ≈ -0.6321.
    expectArraysClose(await result.data(), [1, -0.6321, 0]);
  });

  it('elu propagates NaN', async () => {
    const a = tf.tensor1d([1, NaN]);
    const result = tf.elu(a);
    expect(result.shape).toEqual(a.shape);
    expectArraysClose(await result.data(), [1, NaN]);
  });

  it('derivative', async () => {
    const x = tf.tensor1d([1, 3, -2]);
    const dy = tf.tensor1d([5, 50, 500]);
    const gradients = tf.grad(a => tf.elu(a))(x, dy);

    expect(gradients.shape).toEqual(x.shape);
    expect(gradients.dtype).toEqual('float32');
    // d/dx elu = 1 for x > 0, exp(x) for x <= 0.
    expectArraysClose(await gradients.data(), [5, 50, 500 * Math.exp(-2)]);
  });

  it('gradient with clones', async () => {
    const x = tf.tensor1d([1, 3, -2]);
    const dy = tf.tensor1d([5, 50, 500]);
    const gradients = tf.grad(a => tf.elu(a.clone()).clone())(x, dy);

    expect(gradients.shape).toEqual(x.shape);
    expect(gradients.dtype).toEqual('float32');
    expectArraysClose(await gradients.data(), [5, 50, 500 * Math.exp(-2)]);
  });

  it('throws when passed a non-tensor', () => {
    expect(() => tf.elu({} as tf.Tensor))
        .toThrowError(/Argument 'x' passed to 'elu' must be a Tensor/);
  });

  it('accepts a tensor-like object', async () => {
    const result = tf.elu([1, -1, 0]);
    // Fix: the original asserted result.shape against itself (a tautology);
    // assert the concrete expected shape of the 3-element input instead.
    expect(result.shape).toEqual([3]);
    expectArraysClose(await result.data(), [1, -0.6321, 0]);
  });

  it('throws for string tensor', () => {
    expect(() => tf.elu('q'))
        .toThrowError(/Argument 'x' passed to 'elu' must be numeric/);
  });
});
4 changes: 3 additions & 1 deletion tfjs-core/src/ops/fused_ops.ts
Original file line number Diff line number Diff line change
Expand Up @@ -32,10 +32,12 @@ import {conv2DBackpropInput} from './conv2d_backprop_input';
import {depthwiseConv2d as unfusedDepthwiseConv2d} from './depthwise_conv2d';
import {depthwiseConv2dNativeBackpropFilter} from './depthwise_conv2d_native_backprop_filter';
import {depthwiseConv2dNativeBackpropInput} from './depthwise_conv2d_native_backprop_input';
import {elu} from './elu';
import {Activation, shouldFuse} from './fused_util';
import {matMul as unfusedMatMul} from './mat_mul';
import {prelu} from './prelu';
import {relu} from './relu';
import {elu, prelu, relu6} from './relu_ops';
import {relu6} from './relu6';

// Returns gradient for fused activation.
const getFusedDyActivation =
Expand Down
Loading