
Commit

Remove GOOGLE_CUDA and TENSORFLOW_USE_ROCM defines from topk_specializer.cc

PiperOrigin-RevId: 636891351
thomasjoerg authored and tensorflower-gardener committed May 24, 2024
1 parent b08e206 commit d526a0c
Showing 2 changed files with 0 additions and 17 deletions.
third_party/xla/xla/service/gpu/BUILD: 6 changes (0 additions, 6 deletions)
@@ -5447,9 +5447,6 @@ cc_library(
     name = "topk_specializer",
     srcs = ["topk_specializer.cc"],
     hdrs = ["topk_specializer.h"],
-    local_defines = if_cuda_is_configured(["GOOGLE_CUDA=1"]) + if_rocm_is_configured([
-        "TENSORFLOW_USE_ROCM=1",
-    ]),
     deps = [
         "//xla:executable_run_options",
         "//xla:shape_util",
@@ -5518,9 +5515,6 @@ xla_cc_test(
 xla_cc_test(
     name = "topk_test",
     srcs = ["topk_test.cc"],
-    local_defines = if_cuda_is_configured(["GOOGLE_CUDA=1"]) + if_rocm_is_configured([
-        "TENSORFLOW_USE_ROCM=1",
-    ]),
     use_gpu = True,
     deps = [
         ":topk_specializer",
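
For reference, the local_defines pattern deleted above relies on the if_cuda_is_configured and if_rocm_is_configured Bazel macros, which expand to the given list when the corresponding GPU toolchain is configured and to an empty list otherwise. A minimal sketch of that pattern follows; the load paths, target name, and source name are illustrative assumptions, not part of this commit:

load("@local_config_cuda//cuda:build_defs.bzl", "if_cuda_is_configured")
load("@local_config_rocm//rocm:build_defs.bzl", "if_rocm_is_configured")

cc_library(
    name = "conditional_defines_example",  # hypothetical target
    srcs = ["example.cc"],  # hypothetical source
    # Becomes ["GOOGLE_CUDA=1"] under CUDA, ["TENSORFLOW_USE_ROCM=1"] under
    # ROCm, and [] when no GPU toolchain is configured, so the source can be
    # guarded with "#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM".
    local_defines = if_cuda_is_configured(["GOOGLE_CUDA=1"]) + if_rocm_is_configured([
        "TENSORFLOW_USE_ROCM=1",
    ]),
)
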
third_party/xla/xla/service/gpu/topk_specializer.cc: 11 changes (0 additions, 11 deletions)
@@ -44,7 +44,6 @@ limitations under the License.
 namespace xla {
 namespace gpu {
 
-#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
 namespace {
 
 absl::StatusOr<HloInstruction*> SmallBufferOptimization(
@@ -111,15 +110,5 @@ absl::StatusOr<bool> TopkSpecializer::Run(
   return SpecializeTopkVisitor().RunOnModule(module, execution_threads);
 }
 
-#else  // GOOGLE_CUDA || TENSORFLOW_USE_ROCM
-
-absl::StatusOr<bool> TopkSpecializer::Run(
-    HloModule* module,
-    const absl::flat_hash_set<absl::string_view>& execution_threads) {
-  return false;
-}
-
-#endif  // GOOGLE_CUDA || TENSORFLOW_USE_ROCM
-
 }  // namespace gpu
 }  // namespace xla
