134 changes: 1 addition & 133 deletions tfjs-backend-cpu/src/backend_cpu.ts
@@ -16,7 +16,7 @@
*/

import * as tf from '@tensorflow/tfjs-core';
import {backend_util, BackendTimingInfo, buffer, DataStorage, DataType, DataValues, engine, env, kernel_impls, KernelBackend, max, NumericDataType, Rank, Scalar, ShapeMap, slice_util, Tensor, Tensor1D, Tensor2D, Tensor3D, Tensor4D, Tensor5D, TensorBuffer, TensorInfo, TypedArray, upcastType, util} from '@tensorflow/tfjs-core';
import {backend_util, BackendTimingInfo, DataStorage, DataType, DataValues, engine, env, kernel_impls, KernelBackend, max, NumericDataType, Rank, Scalar, ShapeMap, slice_util, Tensor, Tensor1D, Tensor2D, Tensor3D, Tensor4D, Tensor5D, TensorBuffer, TensorInfo, TypedArray, upcastType, util} from '@tensorflow/tfjs-core';

const nonMaxSuppressionV3Impl = kernel_impls.nonMaxSuppressionV3Impl;
const split = kernel_impls.split;
@@ -25,7 +25,6 @@ const topkImpl = kernel_impls.topkImpl;
const whereImpl = kernel_impls.whereImpl;
import * as seedrandom from 'seedrandom';
import {assertNotComplex} from './cpu_util';
import {maxPoolPositions, pool} from './utils/pool_utils';

interface DataId {}

@@ -1529,128 +1528,6 @@ export class MathBackendCPU extends KernelBackend {
.slice(sliceBeginCoords, sliceSize) as T;
}

maxPool(x: Tensor4D, convInfo: backend_util.Conv2DInfo): Tensor4D {
assertNotComplex(x, 'maxPool');
const xValues = this.readSync(x.dataId) as TypedArray;
return pool(xValues, x.shape, x.dtype, x.strides, convInfo, 'max')
.toTensor() as Tensor4D;
}

maxPoolBackprop(
dy: Tensor4D, x: Tensor4D, y: Tensor4D,
convInfo: backend_util.Conv2DInfo): Tensor4D {
assertNotComplex([x, y], 'maxPoolBackprop');

const xValues = this.readSync(x.dataId) as TypedArray;
const maxPosBuf = buffer(
convInfo.outShape, x.dtype,
maxPoolPositions(xValues, x.shape, x.dtype, convInfo).values);
const strideHeight = convInfo.strideHeight;
const strideWidth = convInfo.strideWidth;
const dilationHeight = convInfo.dilationHeight;
const dilationWidth = convInfo.dilationWidth;
const effectiveFilterHeight = convInfo.effectiveFilterHeight;
const effectiveFilterWidth = convInfo.effectiveFilterWidth;
const padLeft = effectiveFilterWidth - 1 - convInfo.padInfo.left;
const padTop = effectiveFilterHeight - 1 - convInfo.padInfo.top;
const dx = tf.buffer<Rank.R4>(x.shape, 'float32');

const dyBuf = this.bufferSync(dy);

for (let b = 0; b < convInfo.batchSize; ++b) {
for (let d = 0; d < convInfo.inChannels; ++d) {
for (let dxR = 0; dxR < convInfo.inHeight; ++dxR) {
for (let dxC = 0; dxC < convInfo.inWidth; ++dxC) {
// Shader code begins.
const dyRCorner = dxR - padTop;
const dyCCorner = dxC - padLeft;
let dotProd = 0;
for (let wR = 0; wR < effectiveFilterHeight; wR += dilationHeight) {
const dyR = (dyRCorner + wR) / strideHeight;
if (dyR < 0 || dyR >= convInfo.outHeight ||
Math.floor(dyR) !== dyR) {
continue;
}
for (let wC = 0; wC < effectiveFilterWidth; wC += dilationWidth) {
const dyC = (dyCCorner + wC) / strideWidth;
if (dyC < 0 || dyC >= convInfo.outWidth ||
Math.floor(dyC) !== dyC) {
continue;
}
const maxPos = effectiveFilterHeight * effectiveFilterWidth -
1 - (maxPosBuf.get(b, dyR, dyC, d) as number);
const curPos = wR * effectiveFilterWidth + wC;

const mask = maxPos === curPos ? 1 : 0;
if (mask === 0) {
continue;
}

const pixel = dyBuf.get(b, dyR, dyC, d);
dotProd += pixel * mask;
}
}
dx.set(dotProd, b, dxR, dxC, d);
}
}
}
}
return dx.toTensor();
}

avgPoolBackprop(dy: Tensor4D, x: Tensor4D, convInfo: backend_util.Conv2DInfo):
Tensor4D {
assertNotComplex([dy, x], 'avgPoolBackprop');

const strideHeight = convInfo.strideHeight;
const strideWidth = convInfo.strideWidth;
const filterHeight = convInfo.filterHeight;
const filterWidth = convInfo.filterWidth;
const dilationHeight = convInfo.dilationHeight;
const dilationWidth = convInfo.dilationWidth;
const effectiveFilterHeight = convInfo.effectiveFilterHeight;
const effectiveFilterWidth = convInfo.effectiveFilterWidth;
const padLeft = effectiveFilterWidth - 1 - convInfo.padInfo.left;
const padTop = effectiveFilterHeight - 1 - convInfo.padInfo.top;
const dx = tf.buffer<Rank.R4>(x.shape, 'float32');

const avgMultiplier = 1 / (filterHeight * filterWidth);

const dyBuf = this.bufferSync(dy);

for (let b = 0; b < convInfo.batchSize; ++b) {
for (let d = 0; d < convInfo.inChannels; ++d) {
for (let dxR = 0; dxR < convInfo.inHeight; ++dxR) {
for (let dxC = 0; dxC < convInfo.inWidth; ++dxC) {
// Shader code begins.
const dyRCorner = dxR - padTop;
const dyCCorner = dxC - padLeft;
let dotProd = 0;
for (let wR = 0; wR < effectiveFilterHeight; wR += dilationHeight) {
const dyR = (dyRCorner + wR) / strideHeight;
if (dyR < 0 || dyR >= convInfo.outHeight ||
Math.floor(dyR) !== dyR) {
continue;
}
for (let wC = 0; wC < effectiveFilterWidth; wC += dilationWidth) {
const dyC = (dyCCorner + wC) / strideWidth;
if (dyC < 0 || dyC >= convInfo.outWidth ||
Math.floor(dyC) !== dyC) {
continue;
}

const pixel = dyBuf.get(b, dyR, dyC, d);
dotProd += pixel;
}
}
dx.set(dotProd * avgMultiplier, b, dxR, dxC, d);
}
}
}
}
return dx.toTensor();
}

private pool3d(
x: Tensor5D, convInfo: backend_util.Conv3DInfo,
poolType: 'max'|'avg'): Tensor5D {
@@ -2005,15 +1882,6 @@ export class MathBackendCPU extends KernelBackend {
return dx.toTensor();
}

avgPool(x: Tensor4D, convInfo: backend_util.Conv2DInfo): Tensor4D {
assertNotComplex(x, 'avgPool');
assertNotComplex(x, 'maxPool');
const xValues = this.readSync(x.dataId) as TypedArray;
return pool(xValues, x.shape, x.dtype, x.strides, convInfo, 'avg')
.toTensor()
.toFloat() as Tensor4D;
}

resizeBilinear(
x: Tensor4D, newHeight: number, newWidth: number,
alignCorners: boolean): Tensor4D {
61 changes: 61 additions & 0 deletions tfjs-backend-cpu/src/kernels/AvgPool.ts
@@ -0,0 +1,61 @@
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
import {AvgPool, AvgPoolAttrs, AvgPoolInputs, backend_util, KernelConfig, KernelFunc, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';

import {MathBackendCPU} from '../backend_cpu';
import {assertNotComplex} from '../cpu_util';
import {pool} from '../utils/pool_utils';
import {identity} from './Identity';

export function avgPool(
args:
{inputs: AvgPoolInputs, backend: MathBackendCPU, attrs: AvgPoolAttrs}):
TensorInfo {
const {inputs, backend, attrs} = args;
const {x} = inputs;
assertNotComplex(x, 'avgPool');
const {filterSize, strides, pad, dimRoundingMode} = attrs;
const dilations = 1;

util.assert(
backend_util.eitherStridesOrDilationsAreOne(strides, dilations),
() => 'Error in avgPool: Either strides or dilations must be 1. ' +
`Got strides ${strides} and dilations '${dilations}'`);

const convInfo = backend_util.computePool2DInfo(
x.shape as [number, number, number, number], filterSize, strides,
dilations, pad, dimRoundingMode);
let res: TensorInfo;

if (convInfo.filterWidth === 1 && convInfo.filterHeight === 1 &&
util.arraysEqual(convInfo.inShape, convInfo.outShape)) {
res = identity({inputs: {x}, backend});
} else {
const xValues = backend.data.get(x.dataId).values as TypedArray;
const strides = util.computeStrides(x.shape);
const buffer = pool(xValues, x.shape, x.dtype, strides, convInfo, 'avg');
res = backend.makeTensorInfo(
convInfo.outShape, x.dtype, buffer.values as TypedArray);
}
return res;
}

export const avgPoolConfig: KernelConfig = {
kernelName: AvgPool,
backendName: 'cpu',
kernelFunc: avgPool as {} as KernelFunc
};
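
Note (not part of the diff): a minimal sketch of how this modular kernel is reached in practice. `tf.avgPool` dispatches to whatever kernel is registered under the `AvgPool` name for the active backend, so with the CPU backend active the call below lands in the `avgPool` function above. The `@tensorflow/tfjs` bundle import is an assumption made to keep the example self-contained; inside this package the config is instead registered via `registerKernel(avgPoolConfig)`.

```ts
import * as tf from '@tensorflow/tfjs';  // bundles tfjs-core plus the CPU backend

async function runAvgPool() {
  await tf.setBackend('cpu');

  // 1x2x2x1 input; a 2x2 'valid' average pool collapses it to one output value.
  const x = tf.tensor4d([1, 2, 3, 4], [1, 2, 2, 1]);
  const y = tf.avgPool(x, 2, 1, 'valid');
  y.print();  // mean of 1..4 => 2.5
}

runAvgPool();
```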
92 changes: 92 additions & 0 deletions tfjs-backend-cpu/src/kernels/AvgPoolBackprop.ts
@@ -0,0 +1,92 @@
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
import {AvgPoolBackprop, AvgPoolBackpropAttrs, AvgPoolBackpropInputs, backend_util, buffer, KernelConfig, KernelFunc, Rank, TensorInfo} from '@tensorflow/tfjs-core';

import {MathBackendCPU} from '../backend_cpu';
import {assertNotComplex} from '../cpu_util';

export function avgPoolBackprop(args: {
inputs: AvgPoolBackpropInputs,
backend: MathBackendCPU,
attrs: AvgPoolBackpropAttrs
}): TensorInfo {
const {inputs, backend, attrs} = args;
const {dy, input} = inputs;
const x = input;
assertNotComplex([dy, input], 'avgPoolBackprop');
const {filterSize, strides, pad} = attrs;

const convInfo = backend_util.computePool2DInfo(
x.shape as [number, number, number, number], filterSize, strides,
1 /* dilations */, pad);
const strideHeight = convInfo.strideHeight;
const strideWidth = convInfo.strideWidth;
const filterHeight = convInfo.filterHeight;
const filterWidth = convInfo.filterWidth;
const dilationHeight = convInfo.dilationHeight;
const dilationWidth = convInfo.dilationWidth;
const effectiveFilterHeight = convInfo.effectiveFilterHeight;
const effectiveFilterWidth = convInfo.effectiveFilterWidth;
const padLeft = effectiveFilterWidth - 1 - convInfo.padInfo.left;
const padTop = effectiveFilterHeight - 1 - convInfo.padInfo.top;
const dx =
buffer<Rank.R4>(x.shape as [number, number, number, number], 'float32');

const avgMultiplier = 1 / (filterHeight * filterWidth);

const dyData = backend.data.get(dy.dataId).values as Float32Array;
const dyBuf = buffer<Rank.R4>(
dy.shape as [number, number, number, number], 'float32', dyData);

for (let b = 0; b < convInfo.batchSize; ++b) {
for (let d = 0; d < convInfo.inChannels; ++d) {
for (let dxR = 0; dxR < convInfo.inHeight; ++dxR) {
for (let dxC = 0; dxC < convInfo.inWidth; ++dxC) {
// Shader code begins.
const dyRCorner = dxR - padTop;
const dyCCorner = dxC - padLeft;
let dotProd = 0;
for (let wR = 0; wR < effectiveFilterHeight; wR += dilationHeight) {
const dyR = (dyRCorner + wR) / strideHeight;
if (dyR < 0 || dyR >= convInfo.outHeight ||
Math.floor(dyR) !== dyR) {
continue;
}
for (let wC = 0; wC < effectiveFilterWidth; wC += dilationWidth) {
const dyC = (dyCCorner + wC) / strideWidth;
if (dyC < 0 || dyC >= convInfo.outWidth ||
Math.floor(dyC) !== dyC) {
continue;
}

const pixel = dyBuf.get(b, dyR, dyC, d);
dotProd += pixel;
}
}
dx.set(dotProd * avgMultiplier, b, dxR, dxC, d);
}
}
}
}
return backend.makeTensorInfo(dx.shape, dx.dtype, dx.values);
}

export const avgPoolBackpropConfig: KernelConfig = {
kernelName: AvgPoolBackprop,
backendName: 'cpu',
kernelFunc: avgPoolBackprop as {} as KernelFunc
};
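
Sanity-check sketch (again outside the PR, same `@tensorflow/tfjs` assumption): since the kernel writes `dotProd * avgMultiplier` with `avgMultiplier = 1 / (filterHeight * filterWidth)`, an input position that feeds exactly one output window receives the incoming gradient scaled by that factor, which `tf.grad` makes easy to verify.

```ts
import * as tf from '@tensorflow/tfjs';

async function checkAvgPoolGrad() {
  await tf.setBackend('cpu');

  // A 2x2 'valid' average pool over a 2x2 input yields a single output,
  // so each input element should receive a gradient of 1/4.
  const x = tf.ones([1, 2, 2, 1]);
  const pool = (t: tf.Tensor) => tf.avgPool(t as tf.Tensor4D, 2, 1, 'valid');
  const dx = tf.grad(pool)(x);
  dx.print();  // every entry is 0.25 = 1 / (filterHeight * filterWidth)
}

checkAvgPoolGrad();
```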
61 changes: 61 additions & 0 deletions tfjs-backend-cpu/src/kernels/MaxPool.ts
@@ -0,0 +1,61 @@
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
import {backend_util, KernelConfig, KernelFunc, MaxPool, MaxPoolAttrs, MaxPoolInputs, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';

import {MathBackendCPU} from '../backend_cpu';
import {assertNotComplex} from '../cpu_util';
import {pool} from '../utils/pool_utils';
import {identity} from './Identity';

export function maxPool(
args:
{inputs: MaxPoolInputs, backend: MathBackendCPU, attrs: MaxPoolAttrs}):
TensorInfo {
const {inputs, backend, attrs} = args;
const {x} = inputs;
assertNotComplex(x, 'maxPool');
const {filterSize, strides, pad, dimRoundingMode} = attrs;
const dilations = 1;

util.assert(
backend_util.eitherStridesOrDilationsAreOne(strides, dilations),
() => 'Error in maxPool: Either strides or dilations must be 1. ' +
`Got strides ${strides} and dilations '${dilations}'`);

const convInfo = backend_util.computePool2DInfo(
x.shape as [number, number, number, number], filterSize, strides,
dilations, pad, dimRoundingMode);
let res: TensorInfo;

if (convInfo.filterWidth === 1 && convInfo.filterHeight === 1 &&
util.arraysEqual(convInfo.inShape, convInfo.outShape)) {
res = identity({inputs: {x}, backend});
} else {
const xValues = backend.data.get(x.dataId).values as TypedArray;
const strides = util.computeStrides(x.shape);
const buffer = pool(xValues, x.shape, x.dtype, strides, convInfo, 'max');
res = backend.makeTensorInfo(
convInfo.outShape, x.dtype, buffer.values as TypedArray);
}
return res;
}

export const maxPoolConfig: KernelConfig = {
kernelName: MaxPool,
backendName: 'cpu',
kernelFunc: maxPool as {} as KernelFunc
};
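
One behavior worth flagging (sketch only, same bundle assumption as above): when `filterWidth === 1 && filterHeight === 1` and `inShape` equals `outShape`, the kernel takes the `identity` branch instead of running `pool`, so a 1x1 max pool with stride 1 is effectively a copy.

```ts
import * as tf from '@tensorflow/tfjs';

async function demoMaxPoolPaths() {
  await tf.setBackend('cpu');

  const x = tf.tensor4d([4, -1, 0, 7], [1, 2, 2, 1]);

  // 1x1 filter, stride 1, 'same' padding: input and output shapes match,
  // so the CPU kernel short-circuits to identity and forwards the values.
  const copied = tf.maxPool(x, 1, 1, 'same');
  copied.print();  // same values as x

  // 2x2 'valid' window: the regular pooling path picks the window max.
  const pooled = tf.maxPool(x, 2, 1, 'valid');
  pooled.print();  // single value: 7
}

demoMaxPoolPaths();
```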