Refactor exhaustive testing of unary float32 functions into a library.
PiperOrigin-RevId: 634942375
tensorflower-gardener committed May 23, 2024
1 parent 14f88cc commit 53e02fd
Showing 3 changed files with 83 additions and 19 deletions.
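
In short: the test body of exhaustive_unary_test_f32_or_smaller.cc moves into a testonly cc_library whose main() now comes from a new exhaustive_test_main.cc, presumably so the same exhaustive tests can be linked against alternative mains; the new GetEupVersion() hook in the third file points the same way.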
32 changes: 26 additions & 6 deletions third_party/xla/xla/tests/exhaustive/BUILD
@@ -37,14 +37,31 @@ cc_library(
"//xla/tests:client_library_test_base",
"//xla/tests:literal_test_util",
"//xla/tests:test_macros_header",
"//xla/tests:xla_internal_test_main",
"@com_google_absl//absl/strings",
],
)

+ cc_library(
+     name = "exhaustive_unary_test_f32_or_smaller_lib",
+     testonly = True,
+     srcs = ["exhaustive_unary_test_f32_or_smaller.cc"],
+     tags = ["no_pip"],
+     deps = [
+         ":exhaustive_op_test_utils",
+         "//xla:util",
+         "//xla/client:xla_builder",
+         "//xla/tests:client_library_test_base",
+         "@com_google_absl//absl/flags:flag",
+     ],
+ )

xla_test(
name = "exhaustive_unary_test_f32_or_smaller",
srcs = ["exhaustive_unary_test_f32_or_smaller.cc"],
srcs = ["exhaustive_test_main.cc"],
backends = [
"gpu",
"cpu",
],
real_hardware_only = True, # Very slow on the interpreter.
shard_count = 50,
tags = [
@@ -53,10 +70,9 @@ xla_test(
"no_oss",
],
deps = [
":exhaustive_op_test_utils",
"//xla:util",
"//xla/client:xla_builder",
"//xla/tests:client_library_test_base",
":exhaustive_unary_test_f32_or_smaller_lib",
"@local_tsl//tsl/platform",
"@local_tsl//tsl/platform:test",
],
)

@@ -80,6 +96,7 @@ xla_test(
"//xla:util",
"//xla/client:xla_builder",
"//xla/tests:client_library_test_base",
"//xla/tests:xla_internal_test_main",
],
)

@@ -103,6 +120,7 @@ xla_test(
"//xla/tests:client_library_test_base",
"//xla/tests:literal_test_util",
"//xla/tests:test_macros_header",
"//xla/tests:xla_internal_test_main",
"@com_google_absl//absl/types:span",
],
)
@@ -122,6 +140,7 @@ xla_test(
],
deps = [
":exhaustive_op_test_utils",
"//xla/tests:xla_internal_test_main",
],
)

@@ -140,5 +159,6 @@ xla_test(
],
deps = [
":exhaustive_op_test_utils",
"//xla/tests:xla_internal_test_main",
],
)
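
Note that the first hunk also drops //xla/tests:xla_internal_test_main from exhaustive_op_test_utils: tests that previously inherited main() transitively through the shared utils library must now depend on it directly, which is why it is added to each remaining xla_test target above.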
33 changes: 33 additions & 0 deletions third_party/xla/xla/tests/exhaustive/exhaustive_test_main.cc
@@ -0,0 +1,33 @@
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// Provides main() for the exhaustive op tests: initializes GoogleTest and
// runs all registered tests. Also defines the default (stub) EUP version,
// which the tests query via GetEupVersion().

#include "tsl/platform/test.h"

namespace xla {
namespace exhaustive_op_test {
static int eup_version = 0;
int GetEupVersion() { return eup_version; }
} // namespace exhaustive_op_test
} // namespace xla

GTEST_API_ int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
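
The stub GetEupVersion() above, together with the extern declaration added to the test file below, is the seam this refactor creates: whichever main() gets linked in decides the EUP version the tests see. A minimal sketch of an alternative main (entirely hypothetical, not part of this commit) that could be linked against :exhaustive_unary_test_f32_or_smaller_lib instead:

// Hypothetical alternative main (illustrative sketch only).
#include "tsl/platform/test.h"

namespace xla {
namespace exhaustive_op_test {
// Replaces the stub in exhaustive_test_main.cc; exactly one definition of
// GetEupVersion() can be linked into a given test binary. In practice the
// value might come from a flag or a hardware query.
int GetEupVersion() { return 2; }
}  // namespace exhaustive_op_test
}  // namespace xla

GTEST_API_ int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}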
37 changes: 24 additions & 13 deletions third_party/xla/xla/tests/exhaustive/exhaustive_unary_test_f32_or_smaller.cc
@@ -20,6 +20,7 @@ limitations under the License.
#include <cmath>
#include <limits>
#include <random>
+ #include <string>
#include <tuple>
#include <utility>

@@ -35,6 +36,8 @@ limitations under the License.
namespace xla {
namespace exhaustive_op_test {

+ extern int GetEupVersion();

using Eigen::half;

template <typename T, size_t N>
@@ -198,6 +201,13 @@ class Exhaustive32BitOrLessUnaryTest
special_input_bounder_ = true;
}

+ bool IsGpu(const std::string& platform) const { return platform == "CUDA"; }
+ bool IsCpu(const std::string& platform) const { return platform == "Host"; }
+ bool IsTpu(const std::string& platform) const {
+   return !IsGpu(platform) && !IsCpu(platform);
+ }
+ int EupVersion() { return xla::exhaustive_op_test::GetEupVersion(); }

protected:
using typename ExhaustiveUnaryTest<T>::NativeT;

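These helpers encode XLA's platform-name convention ("CUDA" for the GPU plugin, "Host" for CPU) and classify anything else as TPU, which reduces the scattered string comparisons below to two mechanical rewrites. A standalone sketch of the equivalences (illustration only, not part of the commit):

#include <cassert>
#include <string>

// Standalone copies of the helpers above, for illustration.
static bool IsGpu(const std::string& p) { return p == "CUDA"; }
static bool IsCpu(const std::string& p) { return p == "Host"; }
static bool IsTpu(const std::string& p) { return !IsGpu(p) && !IsCpu(p); }

int main() {
  for (const std::string& p : {"CUDA", "Host", "TPU"}) {
    // Old TPU-only check: p != "Host" && p != "CUDA".
    assert(IsTpu(p) == (p != "Host" && p != "CUDA"));
    // Old CPU/GPU check: p == "CUDA" || p == "Host".
    assert(!IsTpu(p) == (p == "CUDA" || p == "Host"));
  }
  return 0;
}
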
@@ -264,6 +274,7 @@ class Exhaustive32BitOrLessUnaryTest
float input_lower_bounder_;
float input_upper_bounder_;
bool special_input_bounder_;
+ int tpu_version_;
};

using ExhaustiveF32UnaryTest = Exhaustive32BitOrLessUnaryTest<F32>;
@@ -402,7 +413,7 @@ UNARY_TEST_FLOAT_32_BITS_OR_LESS(Acosh, {
NativeT eps = std::numeric_limits<NativeT>::epsilon();
return ErrorSpec{.abs_err = 1e-7, .rel_err = 50 * eps};
};
if (platform_ != "Host" && platform_ != "CUDA") {
if (IsTpu(platform_)) {
error_spec_gen = +[](NativeT x) {
NativeT eps = std::numeric_limits<NativeT>::epsilon();
return ErrorSpec{2e-4, eps};
@@ -412,7 +423,7 @@ UNARY_TEST_FLOAT_32_BITS_OR_LESS(Acosh, {
})
UNARY_TEST_FLOAT_32_BITS_OR_LESS(Asinh, {
auto error_spec_gen = GetDefaultSpecGenerator();
if (platform_ != "Host" && platform_ != "CUDA") {
if (IsTpu(platform_)) {
error_spec_gen = +[](NativeT x) {
NativeT eps = std::numeric_limits<NativeT>::epsilon();
return ErrorSpec{.abs_err = 2e-4, .rel_err = eps};
@@ -422,7 +433,7 @@ UNARY_TEST_FLOAT_32_BITS_OR_LESS(Asinh, {
})
UNARY_TEST_FLOAT_32_BITS_OR_LESS(Atanh, {
auto error_spec_gen = GetDefaultSpecGenerator();
if (platform_ != "Host" && platform_ != "CUDA") {
if (IsTpu(platform_)) {
error_spec_gen = +[](NativeT x) {
NativeT eps = std::numeric_limits<NativeT>::epsilon();
return ErrorSpec{.abs_err = 1e-4, .rel_err = eps};
@@ -461,7 +472,7 @@ UNARY_TEST_FLOAT_32_BITS_OR_LESS(Atan, {

UNARY_TEST_FLOAT_32_BITS_OR_LESS(Cosh, {
auto error_spec_gen = GetDefaultSpecGenerator();
if (platform_ != "Host" && platform_ != "CUDA") {
if (IsTpu(platform_)) {
error_spec_gen = +[](NativeT x) {
NativeT eps = std::numeric_limits<NativeT>::epsilon();
// Cosh is always greater than or equal to 1, so an absolute
@@ -474,7 +485,7 @@ UNARY_TEST_FLOAT_32_BITS_OR_LESS(Cosh, {

UNARY_TEST_FLOAT_32_BITS_OR_LESS(Sinh, {
auto error_spec_gen = GetDefaultSpecGenerator();
if (platform_ != "Host" && platform_ != "CUDA") {
if (IsTpu(platform_)) {
error_spec_gen = +[](NativeT x) {
NativeT eps = std::numeric_limits<NativeT>::epsilon();
return ErrorSpec{.abs_err = 1e-5, .rel_err = 100 * eps};
@@ -486,7 +497,7 @@ UNARY_TEST_FLOAT_32_BITS_OR_LESS(Sinh, {
UNARY_TEST_FLOAT_32_BITS_OR_LESS(TanhBounderTestUpperBound, {
SetBounder(8, 9);
ErrorSpecGen error_spec_gen = GetDefaultSpecGenerator();
if (platform_ == "CUDA" || platform_ == "Host") {
if (!IsTpu(platform_)) {
error_spec_gen =
+[](NativeT x) { return ErrorSpec{.abs_err = 0, .rel_err = 0}; };
}
@@ -498,7 +509,7 @@ UNARY_TEST_FLOAT_32_BITS_OR_LESS(TanhBounderTestUpperBound, {
UNARY_TEST_FLOAT_32_BITS_OR_LESS(TanhBounderTestLowerBound, {
SetBounder(-9, -8);
ErrorSpecGen error_spec_gen = GetDefaultSpecGenerator();
if (platform_ == "CUDA" || platform_ == "Host") {
if (IsTpu(platform_)) {
error_spec_gen = +[](NativeT x) { return ErrorSpec{0, 0}; };
}
Run(
@@ -508,7 +519,7 @@

UNARY_TEST_FLOAT_32_BITS_OR_LESS(TanhNormalTest, {
ErrorSpecGen error_spec_gen = GetDefaultSpecGenerator();
if (platform_ != "CUDA" && platform_ != "Host") {
if (IsTpu(platform_)) {
error_spec_gen = +[](NativeT x) {
// The range of tanh is [-1:1], so no point in giving a relative
// tolerance when we have an absolute one.
@@ -548,7 +559,7 @@ UNARY_TEST_FLOAT_32_BITS_OR_LESS(Tan, {
UNARY_TEST_FLOAT_32_BITS_OR_LESS(Erf, { Run(Erf, std::erf); })
UNARY_TEST_FLOAT_32_BITS_OR_LESS(Erfc, {
auto error_spec_gen = GetDefaultSpecGenerator();
if (platform_ != "Host" && platform_ != "CUDA") {
if (IsTpu(platform_)) {
error_spec_gen = +[](NativeT x) {
NativeT min = std::numeric_limits<NativeT>::min();
NativeT eps = std::numeric_limits<NativeT>::epsilon();
@@ -559,7 +570,7 @@ UNARY_TEST_FLOAT_32_BITS_OR_LESS(Erfc, {
})
UNARY_TEST_FLOAT_32_BITS_OR_LESS(ErfInv, {
auto error_spec_gen = GetDefaultSpecGenerator();
if (platform_ != "Host" && platform_ != "CUDA") {
if (IsTpu(platform_)) {
error_spec_gen = +[](NativeT x) {
NativeT eps = std::numeric_limits<NativeT>::epsilon();
return ErrorSpec{.abs_err = 5e-5, .rel_err = 2 * eps};
@@ -573,7 +584,7 @@ UNARY_TEST_FLOAT_32_BITS_OR_LESS(Digamma, {
NativeT eps = std::numeric_limits<NativeT>::epsilon();
return ErrorSpec{2e-5, 10 * eps};
};
if (platform_ != "Host" && platform_ != "CUDA") {
if (IsTpu(platform_)) {
error_spec_gen = +[](NativeT x) {
NativeT eps = std::numeric_limits<NativeT>::epsilon();
return ErrorSpec{.abs_err = 2e-4, .rel_err = 10 * eps};
@@ -587,12 +598,12 @@ UNARY_TEST_FLOAT_32_BITS_OR_LESS(Lgamma, {
NativeT eps = std::numeric_limits<NativeT>::epsilon();
return ErrorSpec{.abs_err = 1e-5, .rel_err = 150 * eps};
};
if (platform_ == "CUDA") {
if (IsGpu(platform_)) {
error_spec_gen = +[](NativeT x) {
NativeT eps = std::numeric_limits<NativeT>::epsilon();
return ErrorSpec{.abs_err = 1e-5, .rel_err = 5000 * eps};
};
} else if (platform_ != "Host") {
} else if (IsTpu(platform_)) {
error_spec_gen = +[](NativeT x) {
NativeT eps = std::numeric_limits<NativeT>::epsilon();
return ErrorSpec{.abs_err = 5e-4, .rel_err = 5000 * eps};