Eliminate a division by 0 in 3D convolutions. #49355

Merged · 3 commits · May 24, 2021
Changes from 2 commits
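This PR adds shape validation to the 3D-convolution backprop kernels in conv_grad_ops_3d.cc and rejects zero-element operands before the sharding math runs. The standalone sketch below (placeholder values, not code from TensorFlow) illustrates the failure mode being guarded: when every operand is empty, work_unit_size is 0, and the ceil-division that computes shard_size divides by zero.

#include <cstdint>
#include <iostream>

int main() {
  // With an empty input, filter_sizes, or out_backprop tensor, the three
  // buffer sizes that make up one work unit can all collapse to zero.
  const int64_t size_A = 0;  // placeholder: input slice buffer
  const int64_t size_B = 0;  // placeholder: out_backprop buffer
  const int64_t size_C = 0;  // placeholder: filter buffer
  const int64_t work_unit_size = size_A + size_B + size_C;
  const int64_t target_working_set_size = 1 << 20;  // placeholder constant

  // The added OP_REQUIRES check rejects this case up front...
  if (work_unit_size <= 0) {
    std::cerr << "InvalidArgument: input, filter_sizes and out_backprop "
                 "tensors must all have at least 1 element\n";
    return 1;
  }

  // ...because this ceil-division (the expression guarded in the PR) has
  // undefined behavior when work_unit_size == 0.
  const int64_t shard_size =
      (target_working_set_size + work_unit_size - 1) / work_unit_size;
  std::cout << "shard_size = " << shard_size << "\n";
  return 0;
}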
98 changes: 98 additions & 0 deletions tensorflow/core/kernels/conv_grad_ops_3d.cc
@@ -239,6 +239,28 @@ class Conv3DBackpropInputOp : public OpKernel {
input_shape = context->input(0).shape();
}

OP_REQUIRES(context, input_shape.dims() == 5,
errors::InvalidArgument("input tensor must have 5 dimensions"));
OP_REQUIRES(
context, filter_shape.dims() == 5,
errors::InvalidArgument("filter_sizes tensor must have 5 dimensions"));
OP_REQUIRES(
context, out_backprop_shape.dims() == 5,
errors::InvalidArgument("out_backprop tensor must have 5 dimensions"));
OP_REQUIRES(
context, input_shape.dim_size(4) == filter_shape.dim_size(3),
errors::InvalidArgument("input and filter_sizes must have the same "
"number of channels. Got ",
input_shape.dim_size(4), " for input and ",
filter_shape.dim_size(3), " for filter_sizes"));
OP_REQUIRES(
context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4),
errors::InvalidArgument("out_backprop and filter_sizes must have the "
"same number of channels. Got ",
out_backprop_shape.dim_size(4),
" for out_backprop and ",
filter_shape.dim_size(4), " for filter_sizes"));

ConvBackpropDimensions dims;
OP_REQUIRES_OK(context, ConvBackpropComputeDimensions(
"Conv3DBackpropInputOp", /*num_spatial_dims=*/3,
@@ -346,6 +368,28 @@ class Conv3DCustomBackpropInputOp : public OpKernel {
input_shape = context->input(0).shape();
}

OP_REQUIRES(context, input_shape.dims() == 5,
errors::InvalidArgument("input tensor must have 5 dimensions"));
OP_REQUIRES(
context, filter_shape.dims() == 5,
errors::InvalidArgument("filter_sizes tensor must have 5 dimensions"));
OP_REQUIRES(
context, out_backprop_shape.dims() == 5,
errors::InvalidArgument("out_backprop tensor must have 5 dimensions"));
OP_REQUIRES(
context, input_shape.dim_size(4) == filter_shape.dim_size(3),
errors::InvalidArgument("input and filter_sizes must have the same "
"number of channels. Got ",
input_shape.dim_size(4), " for input and ",
filter_shape.dim_size(3), " for filter_sizes"));
OP_REQUIRES(
context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4),
errors::InvalidArgument("out_backprop and filter_sizes must have the "
"same number of channels. Got ",
out_backprop_shape.dim_size(4),
" for out_backprop and ",
filter_shape.dim_size(4), " for filter_sizes"));

ConvBackpropDimensions dims;
OP_REQUIRES_OK(context, ConvBackpropComputeDimensions(
"Conv3DBackpropInputOp", /*num_spatial_dims=*/3,
@@ -416,6 +460,11 @@ class Conv3DCustomBackpropInputOp : public OpKernel {
// contraction compared to sharding and matmuls.
const bool use_parallel_contraction = dims.batch_size == 1;

OP_REQUIRES(
context, work_unit_size > 0,
errors::InvalidArgument("input, filter_sizes and out_backprop tensors "
"must all have at least 1 element"));

const size_t shard_size =
use_parallel_contraction
? 1
@@ -696,6 +745,28 @@ class Conv3DBackpropFilterOp : public OpKernel {
filter_shape = context->input(1).shape();
}

OP_REQUIRES(context, input_shape.dims() == 5,
errors::InvalidArgument("input tensor must have 5 dimensions"));
OP_REQUIRES(
context, filter_shape.dims() == 5,
errors::InvalidArgument("filter_sizes tensor must have 5 dimensions"));
OP_REQUIRES(
context, out_backprop_shape.dims() == 5,
errors::InvalidArgument("out_backprop tensor must have 5 dimensions"));
OP_REQUIRES(
context, input_shape.dim_size(4) == filter_shape.dim_size(3),
errors::InvalidArgument("input and filter_sizes must have the same "
"number of channels. Got ",
input_shape.dim_size(4), " for input and ",
filter_shape.dim_size(3), " for filter_sizes"));
OP_REQUIRES(
context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4),
errors::InvalidArgument("out_backprop and filter_sizes must have the "
"same number of channels. Got ",
out_backprop_shape.dim_size(4),
" for out_backprop and ",
filter_shape.dim_size(4), " for filter_sizes"));

ConvBackpropDimensions dims;
OP_REQUIRES_OK(context,
ConvBackpropComputeDimensions(
@@ -808,6 +879,28 @@ class Conv3DCustomBackpropFilterOp : public OpKernel {
filter_shape = context->input(1).shape();
}

OP_REQUIRES(context, input_shape.dims() == 5,
errors::InvalidArgument("input tensor must have 5 dimensions"));
OP_REQUIRES(
context, filter_shape.dims() == 5,
errors::InvalidArgument("filter_sizes tensor must have 5 dimensions"));
OP_REQUIRES(
context, out_backprop_shape.dims() == 5,
errors::InvalidArgument("out_backprop tensor must have 5 dimensions"));
OP_REQUIRES(
context, input_shape.dim_size(4) == filter_shape.dim_size(3),
errors::InvalidArgument("input and filter_sizes must have the same "
"number of channels. Got ",
input_shape.dim_size(4), " for input and ",
filter_shape.dim_size(3), " for filter_sizes"));
OP_REQUIRES(
context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4),
errors::InvalidArgument("out_backprop and filter_sizes must have the "
"same number of channels. Got ",
out_backprop_shape.dim_size(4),
" for out_backprop and ",
filter_shape.dim_size(4), " for filter_sizes"));

ConvBackpropDimensions dims;
OP_REQUIRES_OK(context,
ConvBackpropComputeDimensions(
@@ -880,6 +973,11 @@ class Conv3DCustomBackpropFilterOp : public OpKernel {

const int64 work_unit_size = size_A + size_B + size_C;

OP_REQUIRES(
context, work_unit_size > 0,
errors::InvalidArgument("input, filter_sizes and out_backprop tensors "
"must all have at least 1 element"));

const size_t shard_size =
(target_working_set_size + work_unit_size - 1) / work_unit_size;

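For reference, the dimension indices in the repeated checks follow Conv3D's default layouts: input and out_backprop are NDHWC ([batch, depth, height, width, channels]) and the filter is [depth, height, width, in_channels, out_channels], so dim_size(4) on the input and dim_size(3) on the filter both name the input channel count. A condensed restatement of what the five duplicated blocks enforce (ValidateShapes is a hypothetical helper, not code from this PR):

#include <array>
#include <cstdint>
#include <iostream>
#include <optional>
#include <string>

// Rank 5 is implied by the fixed-size array, mirroring the dims() == 5 checks.
using Shape = std::array<int64_t, 5>;

std::optional<std::string> ValidateShapes(const Shape& input,
                                          const Shape& filter,
                                          const Shape& out_backprop) {
  // input[4] and filter[3] are input channels; filter[4] and
  // out_backprop[4] are output channels.
  if (input[4] != filter[3])
    return "input and filter_sizes must have the same number of channels";
  if (out_backprop[4] != filter[4])
    return "out_backprop and filter_sizes must have the same number of channels";
  return std::nullopt;  // shapes agree
}

int main() {
  const Shape input{1, 8, 8, 8, 3};         // NDHWC
  const Shape filter{2, 2, 2, 3, 16};       // [d, h, w, in, out]
  const Shape out_backprop{1, 7, 7, 7, 16}; // NDHWC
  if (auto err = ValidateShapes(input, filter, out_backprop))
    std::cerr << "InvalidArgument: " << *err << "\n";
  return 0;
}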