Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 1 addition & 2 deletions tfjs-backend-cpu/run_tests.ts
Original file line number Diff line number Diff line change
Expand Up @@ -38,14 +38,13 @@ const cpuTests = 'src/**/*_test.ts';
const runner = new jasmineCtor();
runner.loadConfig({spec_files: [cpuTests, coreTests], random: false});

// customInclude takes higher priority than TEST_FILTERS: only when
// customInclude returns false will TEST_FILTERS be considered.
const TEST_FILTERS: TestFilter[] = [];
// Predicate deciding whether a named test runs on this backend.
// Returns false only for web-worker tests, which cannot execute here;
// every other test is included.
const customInclude = (testName: string) => {
  return !testName.includes('computation in worker');
};
Expand Down
101 changes: 101 additions & 0 deletions tfjs-backend-cpu/src/kernels/Dilation2D.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,101 @@
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/

import {backend_util, Dilation2D, Dilation2DAttrs, Dilation2DInputs, KernelConfig, TypedArray, util} from '@tensorflow/tfjs-core';

import {MathBackendCPU} from '../backend_cpu';

/**
 * CPU kernel for grayscale morphological dilation (Dilation2D).
 *
 * For each output position and channel, slides the 3D filter over the
 * strided/dilated input window and emits the maximum of
 * `input + filter` over all in-bounds taps.
 *
 * Follows the TF C++ implementation:
 * https://github.com/tensorflow/tensorflow/blob/d9a3a849edc198e90172bc58eb293de457f9d986/tensorflow/core/kernels/dilation_ops.cc
 */
export const dilation2dConfig: KernelConfig = {
  kernelName: Dilation2D,
  backendName: 'cpu',
  kernelFunc: ({inputs, backend, attrs}) => {
    const {x, filter} = inputs as Dilation2DInputs;
    const {strides, pad, dilations} = attrs as {} as Dilation2DAttrs;
    const cpuBackend = backend as MathBackendCPU;

    // Materialize input and filter as nested JS arrays so the main loop can
    // index them directly as [batch][row][col][channel] / [row][col][channel].
    const xNested =
        util.toNestedArray(
            x.shape, cpuBackend.data.get(x.dataId).values as TypedArray) as
        number[][][][];
    const filterNested =
        util.toNestedArray(
            filter.shape,
            cpuBackend.data.get(filter.dataId).values as TypedArray) as
        number[][][];

    const convInfo = backend_util.computeDilation2DInfo(
        x.shape as [number, number, number, number],
        filter.shape as [number, number, number], strides, pad,
        'NHWC' /* dataFormat */, dilations);
    const {
      batchSize,
      inHeight,
      inWidth,
      inChannels,
      outHeight,
      outWidth,
      padInfo,
      strideHeight,
      strideWidth,
      filterHeight,
      filterWidth,
      dilationHeight,
      dilationWidth,
      outShape
    } = convInfo;

    const output =
        util.makeZerosNestedTypedArray(outShape, x.dtype) as number[][][][];

    for (let b = 0; b < batchSize; ++b) {
      for (let outRow = 0; outRow < outHeight; ++outRow) {
        // Top-left input coordinate covered by this output position
        // (may be negative inside the padded region).
        const rowStart = outRow * strideHeight - padInfo.top;
        for (let outCol = 0; outCol < outWidth; ++outCol) {
          const colStart = outCol * strideWidth - padInfo.left;
          for (let c = 0; c < inChannels; ++c) {
            let maxVal = Number.MIN_SAFE_INTEGER;
            for (let fRow = 0; fRow < filterHeight; ++fRow) {
              const inRow = rowStart + fRow * dilationHeight;
              if (inRow < 0 || inRow >= inHeight) {
                continue;  // Row falls in padding; no contribution.
              }
              for (let fCol = 0; fCol < filterWidth; ++fCol) {
                const inCol = colStart + fCol * dilationWidth;
                if (inCol < 0 || inCol >= inWidth) {
                  continue;  // Column falls in padding; no contribution.
                }
                const candidate = xNested[b][inRow][inCol][c] +
                    filterNested[fRow][fCol][c];
                if (candidate > maxVal) {
                  maxVal = candidate;
                }
              }
            }
            output[b][outRow][outCol][c] = maxVal;
          }
        }
      }
    }

    const dataId = cpuBackend.write(
        util.toTypedArray(output, x.dtype, false /* debug mode */), outShape,
        x.dtype);

    return {dataId, shape: outShape, dtype: x.dtype};
  }
};
121 changes: 121 additions & 0 deletions tfjs-backend-cpu/src/kernels/Dilation2DBackpropFilter.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,121 @@
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/

import {backend_util, Dilation2DAttrs, Dilation2DBackpropFilter, Tensor3D, Tensor4D, TypedArray, util} from '@tensorflow/tfjs-core';
import {KernelConfig} from '@tensorflow/tfjs-core';

import {MathBackendCPU} from '../backend_cpu';

/**
 * CPU kernel computing the filter gradient of Dilation2D.
 *
 * The forward op selects, per output position and channel, the argmax tap
 * (h, w) of `input + filter`; this kernel routes the incoming gradient `dy`
 * back to that tap's filter coordinate.
 *
 * This implementation follows the TF C++ implementation:
 * https://github.com/tensorflow/tensorflow/blob/d9a3a849edc198e90172bc58eb293de457f9d986/tensorflow/core/kernels/dilation_ops.cc
 */
export const dilation2dBackpropFilterConfig: KernelConfig = {
  kernelName: Dilation2DBackpropFilter,
  backendName: 'cpu',
  kernelFunc: ({inputs, backend, attrs}) => {
    const {x, filter, dy} =
        inputs as {x: Tensor4D, filter: Tensor3D, dy: Tensor4D};
    const {strides, pad, dilations} = attrs as {} as Dilation2DAttrs;
    const cpuBackend = backend as MathBackendCPU;

    // Nested-array views for direct [b][row][col][channel] indexing.
    const $x =
        util.toNestedArray(
            x.shape, cpuBackend.data.get(x.dataId).values as TypedArray) as
        number[][][][];

    const $filter = util.toNestedArray(
                        filter.shape,
                        cpuBackend.data.get(filter.dataId).values as
                            TypedArray) as number[][][];

    const {
      batchSize,
      inHeight,
      inWidth,
      inChannels,
      outHeight,
      outWidth,
      padInfo,
      strideHeight,
      strideWidth,
      filterHeight,
      filterWidth,
      dilationHeight,
      dilationWidth,
      outShape
    } =
        backend_util.computeDilation2DInfo(
            x.shape as [number, number, number, number],
            filter.shape as [number, number, number], strides, pad,
            'NHWC' /* dataFormat */, dilations);

    util.assert(
        dy.rank === outShape.length,
        () => `Error in ${Dilation2DBackpropFilter}, dy ` +
            `must have the same rank as output ${outShape.length}, but got ` +
            `${dy.rank}`);

    const $dy =
        util.toNestedArray(
            outShape, cpuBackend.data.get(dy.dataId).values as TypedArray) as
        number[][][][];

    // The computed filter gradients have the same dimensions as the filter:
    // [filterHeight, filterWidth, depth]
    const gradients = util.makeZerosNestedTypedArray(
                          filter.shape, filter.dtype) as number[][][];

    // In the case of multiple argmax branches, we only back-propagate along
    // the last branch, i.e., the one with largest value of
    // `h * filter_cols + w`, similarly to the max-pooling backward routines.
    for (let b = 0; b < batchSize; ++b) {
      for (let hOut = 0; hOut < outHeight; ++hOut) {
        const hBeg = hOut * strideHeight - padInfo.top;
        for (let wOut = 0; wOut < outWidth; ++wOut) {
          const wBeg = wOut * strideWidth - padInfo.left;
          for (let d = 0; d < inChannels; ++d) {
            let curVal = Number.MIN_SAFE_INTEGER;
            // Default argmax of (0, 0) is kept when no tap is in bounds,
            // matching the TF C++ kernel's initialization.
            let hMax = 0;
            let wMax = 0;
            for (let h = 0; h < filterHeight; ++h) {
              const hIn = hBeg + h * dilationHeight;
              if (hIn >= 0 && hIn < inHeight) {
                for (let w = 0; w < filterWidth; ++w) {
                  const wIn = wBeg + w * dilationWidth;
                  if (wIn >= 0 && wIn < inWidth) {
                    const val = $x[b][hIn][wIn][d] + $filter[h][w][d];
                    if (val > curVal) {
                      curVal = val;
                      hMax = h;
                      wMax = w;
                    }
                  }
                }
              }
            }
            gradients[hMax][wMax][d] += $dy[b][hOut][wOut][d];
          }
        }
      }
    }

    // Serialize with the filter's dtype — the dtype the gradients buffer was
    // created with and the dtype of the returned tensor (previously this
    // inconsistently used x.dtype).
    const dataId = cpuBackend.write(
        util.toTypedArray(gradients, filter.dtype, false /* debug mode */),
        filter.shape, filter.dtype);

    return {dataId, shape: filter.shape, dtype: filter.dtype};
  }
};
121 changes: 121 additions & 0 deletions tfjs-backend-cpu/src/kernels/Dilation2DBackpropInput.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,121 @@
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/

import {backend_util, Dilation2DAttrs, Dilation2DBackpropInput, Tensor3D, Tensor4D, TypedArray, util} from '@tensorflow/tfjs-core';
import {KernelConfig} from '@tensorflow/tfjs-core';

import {MathBackendCPU} from '../backend_cpu';

/**
 * CPU kernel computing the input gradient of Dilation2D.
 *
 * The forward op selects, per output position and channel, the argmax tap of
 * `input + filter`; this kernel routes the incoming gradient `dy` back to
 * that tap's input coordinate.
 */
export const dilation2dBackpropInputConfig: KernelConfig = {
  kernelName: Dilation2DBackpropInput,
  backendName: 'cpu',
  kernelFunc: ({inputs, backend, attrs}) => {
    const {x, filter, dy} =
        inputs as {x: Tensor4D, filter: Tensor3D, dy: Tensor4D};
    const {strides, pad, dilations} = attrs as {} as Dilation2DAttrs;
    const cpuBackend = backend as MathBackendCPU;

    // Nested-array views of the stored tensor data for direct
    // [b][row][col][channel] indexing.
    const $x =
        util.toNestedArray(
            x.shape, cpuBackend.data.get(x.dataId).values as TypedArray) as
        number[][][][];

    const $filter = util.toNestedArray(
                        filter.shape,
                        cpuBackend.data.get(filter.dataId).values as
                            TypedArray) as number[][][];

    const {
      batchSize,
      inHeight,
      inWidth,
      inChannels,
      outHeight,
      outWidth,
      padInfo,
      strideHeight,
      strideWidth,
      filterHeight,
      filterWidth,
      dilationHeight,
      dilationWidth,
      outShape
    } =
        backend_util.computeDilation2DInfo(
            x.shape as [number, number, number, number],
            filter.shape as [number, number, number], strides, pad,
            'NHWC' /* dataFormat */, dilations);

    util.assert(
        dy.rank === outShape.length,
        () => `Error in ${Dilation2DBackpropInput}, dy ` +
            `must have the same rank as output ${outShape.length}, but got ` +
            `${dy.rank}`);

    const $dy =
        util.toNestedArray(
            outShape, cpuBackend.data.get(dy.dataId).values as TypedArray) as
        number[][][][];

    // The computed gradients have the same dimensions as the input:
    // [batch, inputHeight, inputCols, inChannel]
    const gradients =
        util.makeZerosNestedTypedArray(x.shape, x.dtype) as number[][][][];

    // In the case of multiple argmax branches, we only back-propagate along the
    // last branch, i.e., the one with largest value of `h * filter_cols + w`,
    // similarly to the max-pooling backward routines.
    // This implementation follows the TF c++ implementation:
    // https://github.com/tensorflow/tensorflow/blob/d9a3a849edc198e90172bc58eb293de457f9d986/tensorflow/core/kernels/dilation_ops.cc
    for (let b = 0; b < batchSize; ++b) {
      for (let hOut = 0; hOut < outHeight; ++hOut) {
        // Top-left input coordinate covered by this output position
        // (negative inside the padded region).
        const hBeg = hOut * strideHeight - padInfo.top;
        for (let wOut = 0; wOut < outWidth; ++wOut) {
          const wBeg = wOut * strideWidth - padInfo.left;
          for (let d = 0; d < inChannels; ++d) {
            let curVal = Number.MIN_SAFE_INTEGER;
            // Default argmax coordinates are clamped into the input so the
            // accumulation below stays in bounds even if no tap is valid,
            // mirroring the TF C++ kernel's initialization.
            let hInMax = (hBeg < 0) ? 0 : hBeg;
            let wInMax = (wBeg < 0) ? 0 : wBeg;
            for (let h = 0; h < filterHeight; ++h) {
              const hIn = hBeg + h * dilationHeight;
              if (hIn >= 0 && hIn < inHeight) {
                for (let w = 0; w < filterWidth; ++w) {
                  const wIn = wBeg + w * dilationWidth;
                  if (wIn >= 0 && wIn < inWidth) {
                    const val = $x[b][hIn][wIn][d] + $filter[h][w][d];
                    if (val > curVal) {
                      curVal = val;
                      hInMax = hIn;
                      wInMax = wIn;
                    }
                  }
                }
              }
            }
            gradients[b][hInMax][wInMax][d] += $dy[b][hOut][wOut][d];
          }
        }
      }
    }

    const dataId = cpuBackend.write(
        util.toTypedArray(gradients, x.dtype, false /* debug mode */), x.shape,
        x.dtype);

    return {dataId, shape: x.shape, dtype: x.dtype};
  }
};
9 changes: 7 additions & 2 deletions tfjs-backend-cpu/src/register_all_kernels.ts
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,9 @@
// the contents of this file and import only the kernels that are needed.
import {KernelConfig, registerKernel} from '@tensorflow/tfjs-core';

import {dilation2dConfig} from './kernels/Dilation2D';
import {dilation2dBackpropFilterConfig} from './kernels/Dilation2DBackpropFilter';
import {dilation2dBackpropInputConfig} from './kernels/Dilation2DBackpropInput';
import {divConfig} from './kernels/Div';
import {maxConfig} from './kernels/Max';
import {maxPoolWithArgmaxConfig} from './kernels/MaxPoolWithArgmax';
Expand All @@ -29,8 +32,10 @@ import {transposeConfig} from './kernels/Transpose';

// List all kernel configs here
const kernelConfigs: KernelConfig[] = [
nonMaxSuppressionV5Config, squareConfig, squaredDifferenceConfig, divConfig,
transposeConfig, maxPoolWithArgmaxConfig, maxConfig
dilation2dConfig, dilation2dBackpropInputConfig,
dilation2dBackpropFilterConfig, nonMaxSuppressionV5Config, squareConfig,
squaredDifferenceConfig, divConfig, transposeConfig, maxPoolWithArgmaxConfig,
maxConfig
];

for (const kernelConfig of kernelConfigs) {
Expand Down
2 changes: 1 addition & 1 deletion tfjs-backend-wasm/src/setup_test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -363,7 +363,7 @@ const TEST_FILTERS: TestFilter[] = [
startsWith: 'onesLike',
// Complex numbers not supported yet.
excludes: ['complex'],
},
}
];

const customInclude = (testName: string) => {
Expand Down
Loading