2 changes: 1 addition & 1 deletion kernels/portable/cpu/op_any.cpp
@@ -105,7 +105,7 @@ Tensor& any_dims_out(
           in, dim_list, out, [&](const auto begin, const auto end) {
             for (const auto out_ix : c10::irange(begin, end)) {
               bool any = false;
-              if (in_not_empty) {
+              if (plan.has_value()) {
                 any = plan->execute<CTYPE_IN, bool>(
                     [](CTYPE_IN v) { return static_cast<bool>(v); },
                     [](bool outv, bool acc) { return acc || outv; },
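Note: the guard now keys off the plan's presence instead of a separate in_not_empty flag, matching the std::optional-based construction introduced in op_mean.cpp below. A minimal standalone sketch of the pattern, assuming an illustrative Plan type in place of MapReduceOverDimListPlan:

// Sketch of the optional-plan guard shared by these kernels.
// `Plan` is illustrative; the real type is MapReduceOverDimListPlan.
#include <optional>

struct Plan {
  int execute() const { return 1; }  // stands in for plan->execute<...>(...)
};

int reduce_one_output(bool input_nonempty) {
  std::optional<Plan> plan;
  if (input_nonempty) {
    plan.emplace();  // only construct a plan when there is data to reduce
  }
  int acc = 0;  // identity value: false for any, 0 for a sum
  if (plan.has_value()) {
    acc = plan->execute();
  }
  return acc;
}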
9 changes: 6 additions & 3 deletions kernels/portable/cpu/op_mean.cpp
@@ -45,7 +45,10 @@ Tensor& mean_dim_out(
       InvalidArgument,
       out);
 
-  MapReduceOverDimListPlan plan(in, dim_list);
+  std::optional<MapReduceOverDimListPlan> plan;
+  if (in.numel() > 0) {
+    plan.emplace(in, dim_list);
+  }
   // @lint-ignore CLANGTIDY facebook-hte-CArray
   static constexpr const char op_name[] = "mean.out";
   ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, op_name, CTYPE_IN, [&] {
@@ -56,8 +59,8 @@ Tensor& mean_dim_out(
         in, dim_list, out, [&](const auto begin, const auto end) {
           for (const auto out_ix : c10::irange(begin, end)) {
             CTYPE_OUT sum = 0;
-            if (in.numel() > 0) {
-              sum = plan.execute<CTYPE_IN, CTYPE_OUT>(
+            if (plan.has_value()) {
+              sum = plan->execute<CTYPE_IN, CTYPE_OUT>(
                   [](CTYPE_IN v) { return static_cast<CTYPE_OUT>(v); },
                   [](CTYPE_OUT outv, CTYPE_OUT acc) { return acc + outv; },
                   out_ix);
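The kernel presumably divides the accumulated sum by the number of reduced elements after this hunk; for an empty reduction that is 0.0f / 0, which IEEE 754 defines as NaN, exactly what the new op_mean_test.cpp case below expects. A tiny check of the arithmetic:

#include <cmath>
#include <cstdio>

int main() {
  float sum = 0.0f;  // identity accumulator from the guarded branch
  long count = 0;    // no elements were reduced
  float mean = sum / static_cast<float>(count);  // 0.0f / 0.0f yields NaN
  std::printf("%d\n", std::isnan(mean));         // prints 1
  return 0;
}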
2 changes: 1 addition & 1 deletion kernels/portable/cpu/op_var.cpp
@@ -32,7 +32,7 @@ void compute_variance(
     for (const auto out_ix : c10::irange(out.numel())) {
       out_data[out_ix] = NAN;
     }
-  } else {
+  } else if (in.numel() > 0) {
     MapReduceOverDimListPlan plan(in, dim_list);
     const bool success = parallel_for_each_reduce_over_dim_list_output_index(
         in, dim_list, out, [&](const auto begin, const auto end) {
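With this change an empty input no longer constructs a MapReduceOverDimListPlan, which would now trip the ET_CHECK_MSG added in reduce_util.h below. A condensed sketch of the resulting control flow; the exact NaN predicate is not visible in this hunk, so the denominator test here is an assumption based on variance semantics:

#include <cmath>

// Illustrative shape of compute_variance's branching after this change.
void compute_variance_sketch(long in_numel, long denom, float* out, long out_numel) {
  if (denom <= 0) {
    // variance is undefined: fill the (possibly nonempty) output with NaN
    for (long i = 0; i < out_numel; ++i) {
      out[i] = NAN;
    }
  } else if (in_numel > 0) {
    // safe to build a MapReduceOverDimListPlan and run the reduction
  }
  // empty input whose output is also empty: nothing to write, fall through
}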
13 changes: 9 additions & 4 deletions kernels/portable/cpu/util/reduce_util.h
@@ -543,6 +543,9 @@ class MapReduceOverDimListPlan {
       const MapOp& map_fun,
       const ReduceOp& reduce_fun,
       const size_t out_ix) const {
+    ET_CHECK_MSG(
+        plan_.get_input_tensor().numel() > 0, "Input tensor must be nonempty");
+
     const size_t init_index =
         get_init_index(plan_.get_input_tensor(), plan_.get_dim_list(), out_ix);
 
@@ -834,10 +837,12 @@ template <typename Func>
     const Func& func) {
 #ifdef ET_USE_THREADPOOL
   const ssize_t reduction_size = get_reduced_dim_product(in, dim_list);
-  const auto grain_size = std::max(
-      static_cast<ssize_t>(1),
-      static_cast<ssize_t>(executorch::extension::internal::GRAIN_SIZE) /
-          reduction_size);
+  const auto grain_size = reduction_size == 0
+      ? 1
+      : std::max(
+            static_cast<ssize_t>(1),
+            static_cast<ssize_t>(executorch::extension::internal::GRAIN_SIZE) /
+                reduction_size);
 #else // ET_USE_THREADPOOL
   const auto grain_size = 1;
 #endif // ET_USE_THREADPOOL
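The old expression divided GRAIN_SIZE by reduction_size unconditionally; when the reduced dimensions have product zero that is integer division by zero, which is undefined behavior in C++. The new ternary short-circuits to a grain size of 1 instead. A standalone sketch of the guarded computation, with an assumed GRAIN_SIZE value for illustration:

#include <algorithm>
#include <cstdio>

long safe_grain_size(long reduction_size) {
  // Illustrative constant; the real value is
  // executorch::extension::internal::GRAIN_SIZE.
  constexpr long kGrainSize = 32768;
  return reduction_size == 0
      ? 1
      : std::max(1L, kGrainSize / reduction_size);
}

int main() {
  std::printf("%ld %ld %ld\n",
      safe_grain_size(0),        // 1: empty reduction, no division
      safe_grain_size(1024),     // 32: kGrainSize / 1024
      safe_grain_size(100000));  // 1: quotient 0, clamped by std::max
  return 0;
}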
28 changes: 28 additions & 0 deletions kernels/test/op_any_test.cpp
@@ -148,3 +148,31 @@ TEST_F(OpAnyOutTest, SmokeTest) {
   op_any_out(self, dim, keepdim, out);
   EXPECT_TENSOR_CLOSE(out, out_expected);
 }
+
+TEST_F(OpAnyOutTest, EmptyInput) {
+  TensorFactory<ScalarType::Float> tf;
+  TensorFactory<ScalarType::Bool> tfBool;
+
+  Tensor x = tf.make({2, 0, 3}, {});
+  optional<ArrayRef<int64_t>> dim_list = ArrayRef<int64_t>{};
+  Tensor out = tfBool.make({2, 0, 3}, {});
+
+  op_any_dims_out(x, dim_list, /*keepdim=*/true, out);
+  EXPECT_TENSOR_CLOSE(out, tfBool.zeros({2, 0, 3}));
+
+  out = tfBool.ones({2, 0, 3});
+  op_any_dims_out(x, dim_list, /*keepdim=*/false, out);
+  EXPECT_TENSOR_CLOSE(out, tfBool.zeros({2, 0, 3}));
+
+  int64_t dims1[1] = {1};
+  dim_list = ArrayRef<int64_t>{dims1, 1};
+  out = tfBool.ones({2, 3});
+  op_any_dims_out(x, dim_list, /*keepdim=*/false, out);
+  EXPECT_TENSOR_CLOSE(out, tfBool.zeros({2, 3}));
+
+  int64_t dims2[1] = {2};
+  dim_list = ArrayRef<int64_t>{dims2, 1};
+  out = tfBool.make({2, 0, 1}, {});
+  op_any_dims_out(x, dim_list, /*keepdim=*/true, out);
+  EXPECT_TENSOR_CLOSE(out, tfBool.make({2, 0, 1}, {}));
+}
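Only the dims1 = {1} case above produces a nonempty output from the empty input: each of its six output elements reduces over the size-0 middle dimension, so any() returns false, the identity of logical OR. The same identity-element convention drives the tests below: 0 for sum, and NaN for mean and var. A fold over an empty range makes the convention concrete:

#include <cstdio>

// any() as a left fold of logical OR, starting from its identity, false.
bool any_of(const bool* begin, const bool* end) {
  bool acc = false;  // identity element of ||
  for (const bool* p = begin; p != end; ++p) {
    acc = acc || *p;
  }
  return acc;  // an empty range yields the identity: false
}

int main() {
  std::printf("%d\n", any_of(nullptr, nullptr));  // empty range prints 0
  return 0;
}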
27 changes: 27 additions & 0 deletions kernels/test/op_mean_test.cpp
@@ -551,3 +551,30 @@ TEST_F(OpMeanOutTest, DTypeOutFloatNAN) {
   Tensor ret = op_mean_dtype_out(x, ScalarType::Float, out);
   EXPECT_TENSOR_CLOSE(out, expected_result);
 }
+
+TEST_F(OpMeanOutTest, EmptyInput) {
+  TensorFactory<ScalarType::Float> tf;
+
+  Tensor x = tf.make({2, 0, 3}, {});
+  optional<ScalarType> dtype = ScalarType::Float;
+  optional<ArrayRef<int64_t>> dim_list = ArrayRef<int64_t>{};
+  Tensor out = tf.zeros({1, 1, 1});
+  op_mean_out(x, dim_list, /*keepdim=*/true, dtype, out);
+  EXPECT_TENSOR_CLOSE(out, tf.make({1, 1, 1}, {NAN}));
+
+  out = tf.zeros({});
+  op_mean_out(x, dim_list, /*keepdim=*/false, dtype, out);
+  EXPECT_TENSOR_CLOSE(out, tf.make({}, {NAN}));
+
+  int64_t dims1[1] = {1};
+  dim_list = ArrayRef<int64_t>{dims1, 1};
+  out = tf.zeros({2, 3});
+  op_mean_out(x, dim_list, /*keepdim=*/false, dtype, out);
+  EXPECT_TENSOR_CLOSE(out, tf.make({2, 3}, {NAN, NAN, NAN, NAN, NAN, NAN}));
+
+  int64_t dims2[1] = {2};
+  dim_list = ArrayRef<int64_t>{dims2, 1};
+  out = tf.make({2, 0, 1}, {});
+  op_mean_out(x, dim_list, /*keepdim=*/true, dtype, out);
+  EXPECT_TENSOR_CLOSE(out, tf.make({2, 0, 1}, {}));
+}
27 changes: 27 additions & 0 deletions kernels/test/op_sum_test.cpp
@@ -490,3 +490,30 @@ TEST_F(OpSumOutTest, InfinityAndNANTest) {
   }));
   // clang-format on
 }
+
+TEST_F(OpSumOutTest, EmptyInput) {
+  TensorFactory<ScalarType::Float> tf;
+
+  Tensor x = tf.make({2, 0, 3}, {});
+  optional<ScalarType> dtype = ScalarType::Float;
+  optional<ArrayRef<int64_t>> dim_list = ArrayRef<int64_t>{};
+  Tensor out = tf.ones({1, 1, 1});
+  op_sum_intlist_out(x, dim_list, /*keepdim=*/true, dtype, out);
+  EXPECT_TENSOR_CLOSE(out, tf.zeros({1, 1, 1}));
+
+  out = tf.ones({});
+  op_sum_intlist_out(x, dim_list, /*keepdim=*/false, dtype, out);
+  EXPECT_TENSOR_CLOSE(out, tf.zeros({}));
+
+  int64_t dims1[1] = {1};
+  dim_list = ArrayRef<int64_t>{dims1, 1};
+  out = tf.ones({2, 3});
+  op_sum_intlist_out(x, dim_list, /*keepdim=*/false, dtype, out);
+  EXPECT_TENSOR_CLOSE(out, tf.zeros({2, 3}));
+
+  int64_t dims2[1] = {2};
+  dim_list = ArrayRef<int64_t>{dims2, 1};
+  out = tf.make({2, 0, 1}, {});
+  op_sum_intlist_out(x, dim_list, /*keepdim=*/true, dtype, out);
+  EXPECT_TENSOR_CLOSE(out, tf.make({2, 0, 1}, {}));
+}
27 changes: 27 additions & 0 deletions kernels/test/op_var_test.cpp
@@ -468,3 +468,30 @@ TEST_F(OpVarCorrectionOutTest, SmokeTest) {
   ET_FORALL_FLOATHBF16_TYPES(TEST_ENTRY);
 #undef TEST_ENTRY
 }
+
+TEST_F(OpVarOutTest, EmptyInput) {
+  TensorFactory<ScalarType::Float> tf;
+
+  Tensor x = tf.make({2, 0, 3}, {});
+  bool unbiased = true;
+  optional<ArrayRef<int64_t>> dim_list = ArrayRef<int64_t>{};
+  Tensor out = tf.zeros({1, 1, 1});
+  op_var_out(x, dim_list, unbiased, /*keepdim=*/true, out);
+  EXPECT_TENSOR_CLOSE(out, tf.make({1, 1, 1}, {NAN}));
+
+  out = tf.zeros({});
+  op_var_out(x, dim_list, unbiased, /*keepdim=*/false, out);
+  EXPECT_TENSOR_CLOSE(out, tf.make({}, {NAN}));
+
+  int64_t dims1[1] = {1};
+  dim_list = ArrayRef<int64_t>{dims1, 1};
+  out = tf.zeros({2, 3});
+  op_var_out(x, dim_list, unbiased, /*keepdim=*/false, out);
+  EXPECT_TENSOR_CLOSE(out, tf.make({2, 3}, {NAN, NAN, NAN, NAN, NAN, NAN}));
+
+  int64_t dims2[1] = {2};
+  dim_list = ArrayRef<int64_t>{dims2, 1};
+  out = tf.make({2, 0, 1}, {});
+  op_var_out(x, dim_list, unbiased, /*keepdim=*/true, out);
+  EXPECT_TENSOR_CLOSE(out, tf.make({2, 0, 1}, {}));
+}