20 changes: 20 additions & 0 deletions tfjs-backend-wasm/src/cc/BUILD
@@ -156,6 +156,8 @@ tfjs_cc_library(
":FusedBatchNorm",
":FusedConv2D",
":FusedDepthwiseConv2D",
":Gather",
":GatherNd",
":Greater",
":GreaterEqual",
":Less",
@@ -387,6 +389,24 @@ tfjs_unit_test(
],
)

tfjs_cc_library(
name = "Gather",
srcs = ["kernels/Gather.cc"],
deps = [
":backend",
":util",
],
)

tfjs_cc_library(
name = "GatherNd",
srcs = ["kernels/GatherNd.cc"],
deps = [
":backend",
":util",
],
)

tfjs_cc_library(
name = "Greater",
srcs = ["kernels/Greater.cc"],
86 changes: 86 additions & 0 deletions tfjs-backend-wasm/src/cc/kernels/Gather.cc
@@ -0,0 +1,86 @@
/* Copyright 2019 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ===========================================================================*/

#ifdef __EMSCRIPTEN__
#include <emscripten.h>
#endif

#include "src/cc/backend.h"
#include "src/cc/util.h"

namespace {

template <typename T>
void gather_impl(const T* x_ptr, const std::vector<size_t>& x_strides,
const int32_t* indices_ptr, const size_t axis,
const size_t out_size, const std::vector<size_t>& out_strides,
T* out_buf_ptr) {
for (size_t i = 0; i < out_size; ++i) {
auto loc = tfjs::util::offset_to_loc(i, out_strides);
const size_t new_loc = loc[axis];
loc[axis] = indices_ptr[new_loc];

const size_t original_index = tfjs::util::loc_to_offset(loc, x_strides);

*out_buf_ptr = x_ptr[original_index];

out_buf_ptr++;
}
}
} // namespace

namespace tfjs {
namespace wasm {
extern "C" {
#ifdef __EMSCRIPTEN__
EMSCRIPTEN_KEEPALIVE
#endif

void Gather(const size_t x_id, const DType dtype, const int32_t* x_strides_ptr,
const size_t strides_size, const size_t indices_id,
const size_t axis, const int32_t* out_strides_ptr,
const size_t out_id) {
auto& x_info = backend::get_tensor_info(x_id);
auto& indices_info = backend::get_tensor_info(indices_id);

const int* indices_buf = indices_info.i32();
auto& out_info = backend::get_tensor_info_out(out_id);
const size_t out_size = out_info.size;

const auto x_strides =
std::vector<size_t>(x_strides_ptr, x_strides_ptr + strides_size);
const auto out_strides =
std::vector<size_t>(out_strides_ptr, out_strides_ptr + strides_size);

switch (dtype) {
case DType::float32:
gather_impl<float>(x_info.f32(), x_strides, indices_buf, axis, out_size,
out_strides, out_info.f32_write());
break;
case DType::int32:
gather_impl<int32_t>(x_info.i32(), x_strides, indices_buf, axis, out_size,
out_strides, out_info.i32_write());
break;
case DType::boolean:
gather_impl<bool>(x_info.b(), x_strides, indices_buf, axis, out_size,
out_strides, out_info.b_write());
break;
default:
util::warn("Gather for tensor id %d failed. Unknown dtype %d", x_id,
dtype);
}
}
} // extern "C"
} // namespace wasm
} // namespace tfjs
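The kernel above walks the output buffer linearly: each output offset is decoded into a coordinate using the output strides, the coordinate along `axis` is replaced by the looked-up entry of `indices`, and the result is re-encoded into an input offset using the input strides. Below is a minimal standalone sketch of that index arithmetic, assuming helpers that mirror `tfjs::util::offset_to_loc` / `loc_to_offset` (strides of length rank - 1, omitting the trailing stride of 1); it is a reference sketch, not the tfjs-core API.

// Reference sketch of gather_impl's index arithmetic (assumed helper names).
function offsetToLoc(offset: number, strides: number[]): number[] {
  const loc: number[] = [];
  for (const stride of strides) {
    loc.push(Math.floor(offset / stride));
    offset %= stride;
  }
  loc.push(offset);  // innermost dimension has an implicit stride of 1
  return loc;
}

function locToOffset(loc: number[], strides: number[]): number {
  let offset = loc[loc.length - 1];
  for (let i = 0; i < strides.length; ++i) {
    offset += loc[i] * strides[i];
  }
  return offset;
}

function gatherRef(
    x: Float32Array, xStrides: number[], indices: Int32Array, axis: number,
    outSize: number, outStrides: number[]): Float32Array {
  const out = new Float32Array(outSize);
  for (let i = 0; i < outSize; ++i) {
    const loc = offsetToLoc(i, outStrides);
    loc[axis] = indices[loc[axis]];          // substitute the gathered index
    out[i] = x[locToOffset(loc, xStrides)];  // read back from the source tensor
  }
  return out;
}

For example, gatherRef(new Float32Array([10, 11, 20, 21, 30, 31]), [2], Int32Array.from([2, 0]), 0, 4, [2]) returns [30, 31, 10, 11], i.e. rows 2 and 0 of a [3, 2] tensor.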
90 changes: 90 additions & 0 deletions tfjs-backend-wasm/src/cc/kernels/GatherNd.cc
@@ -0,0 +1,90 @@
/* Copyright 2019 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ===========================================================================*/

#ifdef __EMSCRIPTEN__
#include <emscripten.h>
#endif

#include "src/cc/backend.h"
#include "src/cc/util.h"

namespace {

template <typename T>
void gathernd_impl(const T* x_ptr, const int32_t* indices_ptr,
const size_t num_slices, const size_t slice_rank,
const size_t slice_size,
const std::vector<int32_t>& strides_ptr, T* out_buf_ptr) {
for (size_t i = 0; i < num_slices; ++i) {
size_t flattened_index = 0;
for (size_t j = 0; j < slice_rank; ++j) {
flattened_index += (*indices_ptr * strides_ptr[j]);

indices_ptr++;
}

x_ptr += flattened_index * slice_size;

for (size_t k = 0; k < slice_size; ++k) {
*out_buf_ptr = *x_ptr;

out_buf_ptr++;
x_ptr++;
}

x_ptr -= ((flattened_index + 1) * slice_size);
}
}
} // namespace

namespace tfjs {
namespace wasm {
extern "C" {
#ifdef __EMSCRIPTEN__
EMSCRIPTEN_KEEPALIVE
#endif

void GatherNd(const size_t x_id, const DType dtype, const size_t indices_id,
const size_t num_slices, const size_t slice_rank,
const size_t slice_size, const int32_t* strides_ptr,
const size_t out_id) {
auto& x_info = backend::get_tensor_info(x_id);
auto& indices_info = backend::get_tensor_info(indices_id);
  const auto strides =
      std::vector<int32_t>(strides_ptr, strides_ptr + slice_rank);

const int* indices_buf = indices_info.i32();
auto& out_info = backend::get_tensor_info_out(out_id);

switch (dtype) {
case DType::float32:
gathernd_impl<float>(x_info.f32(), indices_buf, num_slices, slice_rank,
slice_size, strides, out_info.f32_write());
break;
case DType::int32:
gathernd_impl<int32_t>(x_info.i32(), indices_buf, num_slices, slice_rank,
slice_size, strides, out_info.i32_write());
break;
case DType::boolean:
gathernd_impl<bool>(x_info.b(), indices_buf, num_slices, slice_rank,
slice_size, strides, out_info.b_write());
break;
default:
util::warn("GatherNd for tensor id %d failed. Unknown dtype %d",
indices_id, dtype);
}
}
} // extern "C"
} // namespace wasm
} // namespace tfjs
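Here each row of `indices` is an n-dimensional coordinate into the leading `slice_rank` dimensions of `x`; the row is flattened against the supplied strides and a contiguous block of `slice_size` elements is copied to the output. A reference sketch under the same conventions follows (it computes offsets per slice, whereas the kernel advances raw pointers).

// Hypothetical reference version of gathernd_impl.
function gatherNdRef(
    x: Float32Array, indices: Int32Array, numSlices: number, sliceRank: number,
    sliceSize: number, strides: number[]): Float32Array {
  const out = new Float32Array(numSlices * sliceSize);
  for (let i = 0; i < numSlices; ++i) {
    // Flatten the i-th index row, e.g. [r, c] -> r * strides[0] + c * strides[1].
    let flatIndex = 0;
    for (let j = 0; j < sliceRank; ++j) {
      flatIndex += indices[i * sliceRank + j] * strides[j];
    }
    // Copy one contiguous slice of sliceSize elements from x.
    for (let k = 0; k < sliceSize; ++k) {
      out[i * sliceSize + k] = x[flatIndex * sliceSize + k];
    }
  }
  return out;
}

With x of shape [2, 2, 2] laid out as [0..7] and indices [[1, 0], [0, 1]] (numSlices = 2, sliceRank = 2, sliceSize = 2, strides = [2, 1]), this yields [4, 5, 2, 3].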
91 changes: 91 additions & 0 deletions tfjs-backend-wasm/src/kernels/Gather.ts
@@ -0,0 +1,91 @@
/**
* @license
* Copyright 2019 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/

import {NamedAttrMap, NamedTensorInfoMap, registerKernel, TensorInfo, util} from '@tensorflow/tfjs-core';

import {BackendWasm} from '../backend_wasm';
import {CppDType} from './types';

interface GatherInputs extends NamedTensorInfoMap {
x: TensorInfo;
indices: TensorInfo;
}

interface GatherAttrs extends NamedAttrMap {
axis: number;
}

let wasmGather:
(xId: number, dtype: CppDType, xStrides: Uint8Array, stridesSize: number,
indicesId: number, axis: number, outStrides: Uint8Array, outId: number) =>
void;

function setup(backend: BackendWasm): void {
wasmGather = backend.wasm.cwrap('Gather', null /*void*/, [
'number', // xId
'number', // dtype
'array', // xStrides
'number', // stridesSize
'number', // indicesId
'number', // axis
'array', // outStrides
'number' // outId
]);
}

function gather(
args: {backend: BackendWasm, inputs: GatherInputs, attrs: GatherAttrs}):
TensorInfo {
const {backend, inputs, attrs} = args;
const {x, indices} = inputs;
const {axis} = attrs;

const newShape = x.shape.slice();
newShape[axis] = util.sizeFromShape(indices.shape);
const stridesSize = x.shape.length - 1;

const out = backend.makeOutput(newShape, x.dtype);
if (util.sizeFromShape(x.shape) === 0) {
return out;
}

const xData = backend.dataIdMap.get(x.dataId);
const xId = xData.id;

const indicesData = backend.dataIdMap.get(indices.dataId);
const indicesId = indicesData.id;

const outId = backend.dataIdMap.get(out.dataId).id;

const xStridesBytes =
new Uint8Array(new Int32Array(util.computeStrides(x.shape)).buffer);
const outStridesBytes =
new Uint8Array(new Int32Array(util.computeStrides(newShape)).buffer);

wasmGather(
xId, CppDType[x.dtype], xStridesBytes, stridesSize, indicesId, axis,
outStridesBytes, outId);

return out;
}

registerKernel({
kernelName: 'Gather',
backendName: 'wasm',
setupFunc: setup,
kernelFunc: gather
});
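On the JS side the strides are marshalled as raw bytes: Emscripten's 'array' argument type in cwrap copies a Uint8Array into wasm memory, so the Int32Array strides are reinterpreted as a byte view over the same buffer and read back as int32_t* in C++. A small worked example with assumed shapes (computeStrides drops the trailing stride of 1):

// Hypothetical shapes: x of shape [5, 3], indices of shape [4], axis 0.
const xShape = [5, 3];
const indicesSize = 4;                   // util.sizeFromShape(indices.shape)
const axis = 0;

const newShape = xShape.slice();
newShape[axis] = indicesSize;            // output shape [4, 3]
const stridesSize = xShape.length - 1;   // 1: a single stride per tensor here

// util.computeStrides([5, 3]) === [3]; each int32 stride becomes 4 raw bytes
// for the 'array' cwrap argument.
const xStridesBytes = new Uint8Array(new Int32Array([3]).buffer);
const outStridesBytes = new Uint8Array(new Int32Array([3]).buffer);  // computeStrides([4, 3])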
82 changes: 82 additions & 0 deletions tfjs-backend-wasm/src/kernels/GatherNd.ts
@@ -0,0 +1,82 @@
/**
* @license
* Copyright 2019 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/

import {gather_util, NamedTensorInfoMap, registerKernel, Tensor, TensorInfo} from '@tensorflow/tfjs-core';

import {BackendWasm} from '../backend_wasm';
import {CppDType} from './types';

interface GatherNdInputs extends NamedTensorInfoMap {
x: TensorInfo;
indices: TensorInfo;
}

let wasmGatherNd: (
xId: number, dtype: CppDType, indicesId: number, numSlices: number,
sliceRank: number, sliceSize: number, strides: Uint8Array, outId: number) =>
void;

function setup(backend: BackendWasm): void {
wasmGatherNd = backend.wasm.cwrap('GatherNd', null /*void*/, [
'number', // xId
'number', // dtype
'number', // indicesId
'number', // numSlices
'number', // sliceRank
'number', // sliceSize
'array', // strides
'number' // outId
]);
}

function gatherNd(args: {backend: BackendWasm, inputs: GatherNdInputs}):
TensorInfo {
const {backend, inputs} = args;
const {x, indices} = inputs;

const [resultShape, numSlices, sliceSize, strides] =
gather_util.prepareAndValidate(x as Tensor, indices as Tensor);

const out = backend.makeOutput(resultShape, x.dtype);
if (numSlices === 0) {
return out;
}

const indicesShape = indices.shape;
const sliceRank = indicesShape[indicesShape.length - 1];

const xData = backend.dataIdMap.get(x.dataId);
const xId = xData.id;
const indicesData = backend.dataIdMap.get(indices.dataId);
const indicesId = indicesData.id;

const stridesBytes = new Uint8Array(new Int32Array(strides).buffer);

const outId = backend.dataIdMap.get(out.dataId).id;
wasmGatherNd(
xId, CppDType[x.dtype], indicesId, numSlices, sliceRank, sliceSize,
stridesBytes, outId);

return out;
}

registerKernel({
kernelName: 'GatherNd',
backendName: 'wasm',
setupFunc: setup,
kernelFunc: gatherNd
});
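gather_util.prepareAndValidate (imported above from tfjs-core) does the slice bookkeeping: numSlices is the number of index rows, sliceRank the length of each row (the last dimension of indices.shape), and sliceSize the number of elements copied per row. A worked example with hypothetical shapes:

// Hypothetical shapes: x of shape [3, 4, 2], indices of shape [5, 2].
const indicesShape = [5, 2];
const sliceRank = indicesShape[indicesShape.length - 1];  // 2 dims of x are indexed
const numSlices = 5;                                      // one slice per index row
const sliceSize = 2;                                      // product of the un-indexed dims of x
// resultShape is [5, 2] (outer dims of indices plus the un-indexed dims of x), and
// the strides passed to the kernel would be [4, 1] under the row-major, slice-unit
// convention the GatherNd.cc loop above assumes.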
2 changes: 2 additions & 0 deletions tfjs-backend-wasm/src/kernels/all_kernels.ts
@@ -37,6 +37,8 @@ import './FloorDiv';
import './FusedBatchNorm';
import './FusedConv2D';
import './FusedDepthwiseConv2D';
import './Gather';
import './GatherNd';
import './Greater';
import './GreaterEqual';
import './LogicalAnd';