Merged
Changes from all commits
4 changes: 3 additions & 1 deletion tfjs-core/src/backends/backend_util.ts
@@ -16,7 +16,9 @@
 */

import {ENGINE} from '../engine';
-import {scalar, tensor1d, zeros} from '../ops/tensor_ops';
+import {scalar} from '../ops/scalar';
+import {tensor1d} from '../ops/tensor1d';
+import {zeros} from '../ops/zeros';
import {Tensor} from '../tensor';
import {Rank} from '../types';
import {DataType, ShapeMap} from '../types';
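Note: this first hunk sets the pattern for the rest of the PR: the tensor_ops barrel import is split into one import per op module. A minimal sketch of what calling code inside tfjs-core looks like against the new layout (the snippet is illustrative, not part of the diff):

```ts
// Each creation op now lives in its own module, so bundlers can tree-shake
// ops a program never touches instead of pulling in the whole barrel.
import {scalar} from '../ops/scalar';
import {tensor1d} from '../ops/tensor1d';
import {zeros} from '../ops/zeros';

// Call sites are unchanged; only the import paths move.
const threshold = scalar(0.5);                 // rank-0 tensor
const indices = tensor1d([0, 2, 4], 'int32');  // rank-1 tensor
const padding = zeros([2, 3]);                 // 2x3 tensor of zeros
```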
3 changes: 2 additions & 1 deletion tfjs-core/src/backends/non_max_suppression_impl.ts
@@ -19,7 +19,8 @@
* Implementation of the NonMaxSuppression kernel shared between webgl and cpu.
*/

-import {scalar, tensor1d} from '../ops/tensor_ops';
+import {scalar} from '../ops/scalar';
+import {tensor1d} from '../ops/tensor1d';
import {Tensor1D} from '../tensor';
import {NamedTensorMap} from '../tensor_types';
import {TypedArray} from '../types';
2 changes: 1 addition & 1 deletion tfjs-core/src/backends/topk_impl.ts
@@ -17,7 +17,7 @@

/** An implementation of the TopK kernel shared between webgl and cpu. */

-import {tensor} from '../ops/tensor_ops';
+import {tensor} from '../ops/tensor';
import {Tensor} from '../tensor';
import {NumericDataType, TypedArray} from '../types';
import {getTypedArrayFromDType} from '../util';
2 changes: 1 addition & 1 deletion tfjs-core/src/gradients/Acos_grad.ts
@@ -20,9 +20,9 @@ import {GradConfig} from '../kernel_registry';
import {cast} from '../ops/cast';
import {div} from '../ops/div';
import {neg} from '../ops/neg';
+import {scalar} from '../ops/scalar';
import {square} from '../ops/square';
import {sub} from '../ops/sub';
-import {scalar} from '../ops/tensor_ops';
import {sqrt} from '../ops/unary_ops';
import {Tensor} from '../tensor';

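Note: the imports above are exactly the ops the Acos gradient composes. The file body is not shown in this diff; as a hedged sketch based on the standard derivative d/dx acos(x) = -1 / sqrt(1 - x^2), the grad function amounts to something like:

```ts
import {cast} from '../ops/cast';
import {div} from '../ops/div';
import {neg} from '../ops/neg';
import {scalar} from '../ops/scalar';
import {square} from '../ops/square';
import {sub} from '../ops/sub';
import {sqrt} from '../ops/unary_ops';
import {Tensor} from '../tensor';

// dy * d/dx acos(x) = -dy / sqrt(1 - x^2); the cast keeps the math in float32.
const acosGradSketch = (dy: Tensor, x: Tensor): Tensor =>
    neg(div(dy, sqrt(sub(scalar(1), square(cast(x, 'float32'))))));
```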
2 changes: 1 addition & 1 deletion tfjs-core/src/gradients/ArgMax_grad.ts
@@ -17,7 +17,7 @@

import {ArgMax} from '../kernel_names';
import {GradConfig} from '../kernel_registry';
-import {zerosLike} from '../ops/tensor_ops';
+import {zerosLike} from '../ops/zeros_like';
import {Tensor} from '../tensor';

export const argMaxGradConfig: GradConfig = {
2 changes: 1 addition & 1 deletion tfjs-core/src/gradients/ArgMin_grad.ts
@@ -17,7 +17,7 @@

import {ArgMin} from '../kernel_names';
import {GradConfig} from '../kernel_registry';
-import {zerosLike} from '../ops/tensor_ops';
+import {zerosLike} from '../ops/zeros_like';
import {Tensor} from '../tensor';

export const argMinGradConfig: GradConfig = {
2 changes: 1 addition & 1 deletion tfjs-core/src/gradients/Asin_grad.ts
@@ -19,9 +19,9 @@ import {Asin} from '../kernel_names';
import {GradConfig} from '../kernel_registry';
import {cast} from '../ops/cast';
import {div} from '../ops/div';
+import {scalar} from '../ops/scalar';
import {square} from '../ops/square';
import {sub} from '../ops/sub';
-import {scalar} from '../ops/tensor_ops';
import {sqrt} from '../ops/unary_ops';
import {Tensor} from '../tensor';

2 changes: 1 addition & 1 deletion tfjs-core/src/gradients/Asinh_grad.ts
@@ -20,8 +20,8 @@ import {GradConfig} from '../kernel_registry';
import {add} from '../ops/add';
import {cast} from '../ops/cast';
import {div} from '../ops/div';
+import {scalar} from '../ops/scalar';
import {square} from '../ops/square';
-import {scalar} from '../ops/tensor_ops';
import {sqrt} from '../ops/unary_ops';
import {Tensor} from '../tensor';

2 changes: 1 addition & 1 deletion tfjs-core/src/gradients/Atanh_grad.ts
@@ -21,7 +21,7 @@ import {cast} from '../ops/cast';
import {div} from '../ops/div';
import {square} from '../ops/square';
import {sub} from '../ops/sub';
-import {scalar} from '../ops/tensor_ops';
+import {scalar} from '../ops/scalar';
import {Tensor} from '../tensor';

export const atanhGradConfig: GradConfig = {
2 changes: 1 addition & 1 deletion tfjs-core/src/gradients/Ceil_grad.ts
@@ -17,7 +17,7 @@

import {Ceil} from '../kernel_names';
import {GradConfig} from '../kernel_registry';
-import {zerosLike} from '../ops/tensor_ops';
+import {zerosLike} from '../ops/zeros_like';
import {Tensor} from '../tensor';

export const ceilGradConfig: GradConfig = {
2 changes: 1 addition & 1 deletion tfjs-core/src/gradients/ClipByValue_grad.ts
@@ -20,8 +20,8 @@ import {GradConfig, NamedAttrMap} from '../kernel_registry';
import {greaterEqual} from '../ops/greater_equal';
import {lessEqual} from '../ops/less_equal';
import {logicalAnd} from '../ops/logical_and';
-import {zerosLike} from '../ops/tensor_ops';
import {where} from '../ops/where';
+import {zerosLike} from '../ops/zeros_like';
import {Tensor} from '../tensor';

export const clipByValueGradConfig: GradConfig = {
2 changes: 1 addition & 1 deletion tfjs-core/src/gradients/Floor_grad.ts
@@ -17,7 +17,7 @@

import {Floor} from '../kernel_names';
import {GradConfig} from '../kernel_registry';
-import {zerosLike} from '../ops/tensor_ops';
+import {zerosLike} from '../ops/zeros_like';
import {Tensor} from '../tensor';

export const floorGradConfig: GradConfig = {
2 changes: 1 addition & 1 deletion tfjs-core/src/gradients/FusedBatchNorm_grad.ts
@@ -20,9 +20,9 @@ import {add} from '../ops/add';
import {getReductionAxes} from '../ops/broadcast_util';
import {mul} from '../ops/mul';
import {reshape} from '../ops/reshape';
+import {scalar} from '../ops/scalar';
import {sub} from '../ops/sub';
import {sum} from '../ops/sum';
-import {scalar} from '../ops/tensor_ops';
import {tile} from '../ops/tile';
import {rsqrt} from '../ops/unary_ops';
import {Tensor} from '../tensor';
2 changes: 1 addition & 1 deletion tfjs-core/src/gradients/GreaterEqual_grad.ts
@@ -16,7 +16,7 @@
*/
import {GreaterEqual} from '../kernel_names';
import {GradConfig} from '../kernel_registry';
-import {zerosLike} from '../ops/tensor_ops';
+import {zerosLike} from '../ops/zeros_like';
import {Tensor} from '../tensor';

export const greaterEqualGradConfig: GradConfig = {
2 changes: 1 addition & 1 deletion tfjs-core/src/gradients/OneHot_grad.ts
@@ -17,7 +17,7 @@

import {OneHot} from '../kernel_names';
import {GradConfig} from '../kernel_registry';
-import {zeros} from '../ops/tensor_ops';
+import {zeros} from '../ops/zeros';
import {Tensor} from '../tensor';

export const oneHotGradConfig: GradConfig = {
28 changes: 28 additions & 0 deletions tfjs-core/src/gradients/OnesLike_grad.ts
@@ -0,0 +1,28 @@
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/

import {OnesLike} from '../kernel_names';
import {GradConfig} from '../kernel_registry';
import {zerosLike} from '../ops/zeros_like';
import {Tensor} from '../tensor';

export const onesLikeGradConfig: GradConfig = {
kernelName: OnesLike,
gradFunc: (dy: Tensor) => {
return {x: () => zerosLike(dy)};
}
};
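Note: a grad config like the new file above only takes effect once it is registered with the gradient registry. A rough sketch of the wiring, assuming the registerGradient helper from kernel_registry and the usual central registration list (neither is shown in this diff):

```ts
import {registerGradient} from '../kernel_registry';

import {onesLikeGradConfig} from './OnesLike_grad';

// After registration the engine looks the config up by kernel name and uses
// zerosLike(dy) as the gradient whenever OnesLike appears on the tape.
registerGradient(onesLikeGradConfig);
```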
3 changes: 2 additions & 1 deletion tfjs-core/src/gradients/Pow_grad.ts
@@ -23,10 +23,11 @@ import {log} from '../ops/log';
import {mul} from '../ops/mul';
import {pow} from '../ops/pow';
import {reshape} from '../ops/reshape';
+import {scalar} from '../ops/scalar';
import {sub} from '../ops/sub';
import {sum} from '../ops/sum';
-import {scalar, zerosLike} from '../ops/tensor_ops';
import {where} from '../ops/where';
+import {zerosLike} from '../ops/zeros_like';
import {Tensor} from '../tensor';

export const powGradConfig: GradConfig = {
2 changes: 1 addition & 1 deletion tfjs-core/src/gradients/Prelu_grad.ts
@@ -21,8 +21,8 @@ import {greater} from '../ops/greater';
import {mul} from '../ops/mul';
import {reshape} from '../ops/reshape';
import {sum} from '../ops/sum';
-import {zerosLike} from '../ops/tensor_ops';
import {where} from '../ops/where';
+import {zerosLike} from '../ops/zeros_like';
import {Tensor} from '../tensor';

export const preluGradConfig: GradConfig = {
2 changes: 1 addition & 1 deletion tfjs-core/src/gradients/SelectV2_grad.ts
@@ -20,7 +20,7 @@ import {GradConfig} from '../kernel_registry';
import {cast} from '../ops/cast';
import {logicalNot} from '../ops/logical_not';
import {mul} from '../ops/mul';
-import {zerosLike} from '../ops/tensor_ops';
+import {zerosLike} from '../ops/zeros_like';
import {Tensor} from '../tensor';

export const selectV2PoolGradConfig: GradConfig = {
2 changes: 1 addition & 1 deletion tfjs-core/src/gradients/Selu_grad.ts
@@ -20,8 +20,8 @@ import {cast} from '../ops/cast';
import {exp} from '../ops/exp';
import {greater} from '../ops/greater';
import {mul} from '../ops/mul';
+import {scalar} from '../ops/scalar';
import {SELU_SCALE, SELU_SCALEALPHA} from '../ops/selu_util';
-import {scalar} from '../ops/tensor_ops';
import {where} from '../ops/where';
import {Tensor} from '../tensor';

2 changes: 1 addition & 1 deletion tfjs-core/src/gradients/Sign_grad.ts
@@ -17,7 +17,7 @@

import {Sign} from '../kernel_names';
import {GradConfig} from '../kernel_registry';
-import {zerosLike} from '../ops/tensor_ops';
+import {zerosLike} from '../ops/zeros_like';
import {Tensor} from '../tensor';

export const signGradConfig: GradConfig = {
2 changes: 1 addition & 1 deletion tfjs-core/src/gradients/SquaredDifference_grad.ts
@@ -18,8 +18,8 @@
import {SquaredDifference} from '../kernel_names';
import {GradConfig} from '../kernel_registry';
import {mul} from '../ops/mul';
+import {scalar} from '../ops/scalar';
import {sub} from '../ops/sub';
-import {scalar} from '../ops/tensor_ops';
import {Tensor} from '../tensor';

export const squaredDifferenceGradConfig: GradConfig = {
2 changes: 1 addition & 1 deletion tfjs-core/src/gradients/Sum_grad.ts
@@ -18,8 +18,8 @@
import {Sum, SumAttrs} from '../kernel_names';
import {GradConfig, NamedAttrMap} from '../kernel_registry';
import {mul} from '../ops/mul';
+import {ones} from '../ops/ones';
import {reshape} from '../ops/reshape';
-import {ones} from '../ops/tensor_ops';
import {Tensor} from '../tensor';
import {parseAxisParam} from '../util';

2 changes: 1 addition & 1 deletion tfjs-core/src/gradients/Tanh_grad.ts
@@ -18,9 +18,9 @@
import {Tanh} from '../kernel_names';
import {GradConfig} from '../kernel_registry';
import {mul} from '../ops/mul';
+import {scalar} from '../ops/scalar';
import {square} from '../ops/square';
import {sub} from '../ops/sub';
-import {scalar} from '../ops/tensor_ops';
import {Tensor} from '../tensor';

export const tanhGradConfig: GradConfig = {
2 changes: 1 addition & 1 deletion tfjs-core/src/gradients/Tile_grad.ts
@@ -19,7 +19,7 @@ import {Tile, TileAttrs} from '../kernel_names';
import {GradConfig, NamedAttrMap} from '../kernel_registry';
import {add} from '../ops/add';
import {slice} from '../ops/slice';
-import {zerosLike} from '../ops/tensor_ops';
+import {zerosLike} from '../ops/zeros_like';
import {Tensor} from '../tensor';

export const tileGradConfig: GradConfig = {
4 changes: 3 additions & 1 deletion tfjs-core/src/gradients/UnsortedSegmentSum_grad.ts
@@ -22,8 +22,10 @@ import {gather} from '../ops/gather';
import {greaterEqual} from '../ops/greater_equal';
import {logicalAnd} from '../ops/logical_and';
import {maximum} from '../ops/maximum';
-import {ones, scalar, zerosLike} from '../ops/tensor_ops';
+import {ones} from '../ops/ones';
+import {scalar} from '../ops/scalar';
import {where} from '../ops/where';
+import {zerosLike} from '../ops/zeros_like';
import {Tensor, Tensor1D} from '../tensor';

export const unsortedSegmentSumGradConfig: GradConfig = {
28 changes: 28 additions & 0 deletions tfjs-core/src/gradients/ZerosLike_grad.ts
@@ -0,0 +1,28 @@
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/

import {ZerosLike} from '../kernel_names';
import {GradConfig} from '../kernel_registry';
import {zerosLike} from '../ops/zeros_like';
import {Tensor} from '../tensor';

export const zerosLikeGradConfig: GradConfig = {
kernelName: ZerosLike,
gradFunc: (dy: Tensor) => {
return {x: () => zerosLike(dy)};
}
};
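Note: both new gradient files encode the same fact, namely that the outputs of onesLike and zerosLike do not depend on the input values, so the gradient flowing back to x is all zeros. A small sanity check of that behavior against the public API (assuming a registered backend):

```ts
import * as tf from '@tensorflow/tfjs-core';

// onesLike ignores the values of x, so d(sum(onesLike(x)))/dx should be 0.
const g = tf.grad((x: tf.Tensor) => tf.onesLike(x).sum());
g(tf.tensor1d([2, 3, 4])).print();  // [0, 0, 0]
```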
29 changes: 13 additions & 16 deletions tfjs-core/src/io/io_utils.ts
@@ -17,7 +17,7 @@

import {complex} from '../ops/complex';

-import {tensor} from '../ops/tensor_ops';
+import {tensor} from '../ops/tensor';
import {NamedTensor, NamedTensorMap} from '../tensor_types';
import {TypedArray} from '../types';
import {sizeFromShape} from '../util';
@@ -128,16 +128,14 @@ export function decodeWeights(
if (quantization.dtype === 'uint8' || quantization.dtype === 'uint16') {
if (!('min' in quantization && 'scale' in quantization)) {
throw new Error(
-`Weight ${spec.name} with quantization ${quantization.dtype} ` +
-`doesn't have corresponding metadata min and scale.`
-);
+`Weight ${spec.name} with quantization ${quantization.dtype} ` +
+`doesn't have corresponding metadata min and scale.`);
}
} else if (quantization.dtype === 'float16') {
if (dtype !== 'float32') {
throw new Error(
-`Weight ${spec.name} is quantized with ${quantization.dtype} ` +
-`which only supports weights of type float32 not ${dtype}.`
-);
+`Weight ${spec.name} is quantized with ${quantization.dtype} ` +
+`which only supports weights of type float32 not ${dtype}.`);
}
} else {
throw new Error(
@@ -166,16 +164,14 @@
values = float16Decode(quantizedArray as Uint16Array);
} else {
throw new Error(
-`Unsupported quantization type ${quantization.dtype} ` +
-`for weight type float32.`
-);
+`Unsupported quantization type ${quantization.dtype} ` +
+`for weight type float32.`);
}
} else if (dtype === 'int32') {
if (quantization.dtype !== 'uint8' && quantization.dtype !== 'uint16') {
throw new Error(
-`Unsupported quantization type ${quantization.dtype} ` +
-`for weight type int32.`
-);
+`Unsupported quantization type ${quantization.dtype} ` +
+`for weight type int32.`);
}
values = new Int32Array(quantizedArray.length);
for (let i = 0; i < quantizedArray.length; i++) {
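Note: the min/scale check above guards the metadata needed for affine dequantization of uint8/uint16 weights. A standalone sketch of what decodeWeights does with those two numbers (helper name hypothetical):

```ts
// Each quantized value q is mapped back to approximately q * scale + min.
function dequantizeAffine(
    quantized: Uint8Array|Uint16Array, scale: number, min: number): Float32Array {
  const out = new Float32Array(quantized.length);
  for (let i = 0; i < quantized.length; i++) {
    out[i] = quantized[i] * scale + min;
  }
  return out;
}

// Example: with scale = 0.1 and min = -2, the byte 20 decodes to 20 * 0.1 - 2 = 0.
```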
Expand Down Expand Up @@ -480,7 +476,8 @@ function computeFloat16OffsetTable(): Uint32Array {
* the Uint16Array of Float16 bytes to a Float32Array.
*/
export function getFloat16Decoder(): (buffer: Uint16Array) => Float32Array {
// Algorithm is based off of http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf
// Algorithm is based off of
// http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf

// Cache lookup tables
const mantisaTable = computeFloat16MantisaTable();
@@ -493,8 +490,8 @@ export function getFloat16Decoder(): (buffer: Uint16Array) => Float32Array {
for (let index = 0; index < quantizedArray.length; index++) {
const float16Bits = quantizedArray[index];
const float32Bits =
-mantisaTable[offsetTable[float16Bits >> 10] + (float16Bits & 0x3ff)] +
-exponentTable[float16Bits >> 10];
+mantisaTable[offsetTable[float16Bits >> 10] + (float16Bits & 0x3ff)] +
+exponentTable[float16Bits >> 10];
bufferUint32View[index] = float32Bits;
}
return new Float32Array(buffer);
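Note: the hunks above only reflow a comment and indentation around the table-based float16 decoder. For context, here is a table-free reference of the same half-to-single conversion (1 sign bit, 5 exponent bits, 10 mantissa bits); a sketch for illustration, not the implementation the PR touches:

```ts
function float16ToFloat32(h: number): number {
  const sign = (h & 0x8000) >> 15;
  const exp = (h & 0x7c00) >> 10;
  const frac = h & 0x03ff;
  let value: number;
  if (exp === 0) {
    // Zero or subnormal: no implicit leading 1, exponent fixed at -14.
    value = frac * Math.pow(2, -24);
  } else if (exp === 0x1f) {
    // Infinity or NaN.
    value = frac !== 0 ? NaN : Infinity;
  } else {
    // Normal number: implicit leading 1, exponent bias of 15.
    value = (1 + frac / 1024) * Math.pow(2, exp - 15);
  }
  return sign === 1 ? -value : value;
}

console.log(float16ToFloat32(0x3c00));  // 1
console.log(float16ToFloat32(0xc000));  // -2
```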