From 0080a8e6cffba7c5d7d52a38f432c7033c5f42ee Mon Sep 17 00:00:00 2001
From: Deven Desai
Date: Tue, 30 Apr 2019 15:46:24 +0000
Subject: [PATCH] Workaround for the duplicate dependency bazel error.

For cases where the same dependency is specified by both CUDA and ROCm,
using `if_cuda` / `if_rocm` to specify that dependency leads to a
"duplicate dependency" bazel error.

Switching from `if_cuda` / `if_rocm` to `if_cuda_is_configured` /
`if_rocm_is_configured` is not an acceptable solution, because the
`*_is_configured` functions are being phased out.

The preferred solution here would be a new
`if_gpu(cuda_arg, rocm_arg, default_arg)` function. That solution (or
something along those lines) is in the works. This workaround is meant to
be a bandage that allows progress to be made while we wait for the real
solution.

This workaround introduces an `if_cuda_or_rocm` function, which should be
used to specify dependencies that are common to both CUDA and ROCm (see
the usage sketch after the patch). While this solution works, it is less
than ideal because it needs to duplicate the logic inside the `if_cuda` /
`if_rocm` functions.
---
 tensorflow/core/kernels/BUILD | 27 +++++++++++++++------------
 tensorflow/tensorflow.bzl     | 31 +++++++++++++++++++++++++++++++
 2 files changed, 46 insertions(+), 12 deletions(-)

diff --git a/tensorflow/core/kernels/BUILD b/tensorflow/core/kernels/BUILD
index 54586bf7293349..65bdb4f149ee55 100644
--- a/tensorflow/core/kernels/BUILD
+++ b/tensorflow/core/kernels/BUILD
@@ -46,6 +46,7 @@ load(
 load("@local_config_sycl//sycl:build_defs.bzl", "if_sycl")
 load("//tensorflow:tensorflow.bzl", "tf_cuda_cc_test")
 load("//tensorflow:tensorflow.bzl", "tf_cuda_cc_tests")
+load("//tensorflow:tensorflow.bzl", "if_cuda_or_rocm")
 load("@local_config_cuda//cuda:build_defs.bzl", "if_cuda_is_configured")
 load(
     "//tensorflow/core:platform/default/build_config.bzl",
@@ -4029,13 +4030,15 @@ tf_kernel_library(
 tf_kernel_library(
     name = "bias_op",
     prefix = "bias_op",
-    deps = NN_DEPS + [":redux_functor"] + if_cuda_is_configured([
+    deps = NN_DEPS + [
+        ":redux_functor",
+    ] + if_cuda_or_rocm([
         ":reduction_ops",
+    ]) + if_cuda([
         "@cub_archive//:cub",
         "//tensorflow/core:stream_executor",
         "//tensorflow/stream_executor/cuda:cuda_stream",
-    ]) + if_rocm_is_configured([
-        ":reduction_ops",
+    ]) + if_rocm([
         "@rocprim_archive//:rocprim",
     ]),
 )
@@ -4069,11 +4072,11 @@ tf_kernel_library(
 tf_kernel_library(
     name = "softmax_op",
     prefix = "softmax_op",
-    deps = NN_DEPS + if_cuda_is_configured([
+    deps = NN_DEPS + if_cuda_or_rocm([
         ":reduction_ops",
+    ]) + if_cuda([
         "@cub_archive//:cub",
-    ]) + if_rocm_is_configured([
-        ":reduction_ops",
+    ]) + if_rocm([
         "@rocprim_archive//:rocprim",
     ]),
 )
@@ -4738,11 +4741,11 @@ tf_kernel_library(
     deps = SPARSE_DEPS + [
         ":bounds_check",
         "//third_party/eigen3",
-    ] + if_cuda_is_configured([
+    ] + if_cuda_or_rocm([
         ":reduction_ops",
+    ]) + if_cuda([
         "@cub_archive//:cub",
-    ]) + if_rocm_is_configured([
-        ":reduction_ops",
+    ]) + if_rocm([
         "@rocprim_archive//:rocprim",
     ]),
 )
@@ -5235,11 +5238,11 @@ tf_kernel_library(
         "//tensorflow/core:framework",
         "//tensorflow/core:lib",
         "//tensorflow/core:lib_internal",
-    ] + if_cuda_is_configured([
+    ] + if_cuda_or_rocm([
         ":reduction_ops",
+    ]) + if_cuda([
         "@cub_archive//:cub",
-    ]) + if_rocm_is_configured([
-        ":reduction_ops",
+    ]) + if_rocm([
         "@rocprim_archive//:rocprim",
     ]),
 )
diff --git a/tensorflow/tensorflow.bzl b/tensorflow/tensorflow.bzl
index 50d69c3d5a6a6a..eae124b27b32f0 100644
--- a/tensorflow/tensorflow.bzl
+++ b/tensorflow/tensorflow.bzl
@@ -2431,3 +2431,34 @@ def tf_pybind_extension(
         restricted_to = restricted_to,
         compatible_with = compatible_with,
     )
+
+def if_cuda_or_rocm(if_true, if_false = []):
+    """Shorthand for select()'ing on whether we're building with either
+    CUDA or ROCm.
+
+    Returns a select statement which evaluates to
+       if_true if we're building with either CUDA or ROCm enabled.
+       if_false, otherwise.
+
+    Sometimes a target has additional CUDA or ROCm specific dependencies.
+    The `if_cuda` / `if_rocm` functions are used to specify these additional
+    dependencies. For example, see the `//tensorflow/core/kernels:bias_op` target.
+
+    If the same additional dependency is needed for both CUDA and ROCm
+    (e.g. the `reduction_ops` dependency of the `bias_op` target above),
+    then specifying that dependency in both `if_cuda` and `if_rocm` will
+    result in both those functions returning a select statement, which contains
+    the same dependency, which then leads to a duplicate dependency bazel error.
+
+    In order to work around this error, any additional dependency that is common
+    to both the CUDA and ROCm platforms should be specified using this function.
+    Doing so will eliminate the cause of the bazel error (i.e. the same
+    dependency showing up in two different select statements).
+
+    """
+    return select({
+        "@local_config_cuda//cuda:using_nvcc": if_true,
+        "@local_config_cuda//cuda:using_clang": if_true,
+        "@local_config_rocm//rocm:using_hipcc": if_true,
+        "//conditions:default": if_false,
+    })
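
Usage sketch (illustrative only, not part of the patch): a minimal example of
how a kernel target is expected to combine the new `if_cuda_or_rocm` with the
existing `if_cuda` / `if_rocm`, modeled on the `softmax_op` target changed
above. The dependency shared by both platforms goes through `if_cuda_or_rocm`;
only the platform-specific pieces remain in `if_cuda` / `if_rocm`.

    tf_kernel_library(
        name = "softmax_op",
        prefix = "softmax_op",
        deps = NN_DEPS + if_cuda_or_rocm([
            ":reduction_ops",  # common to CUDA and ROCm; listing this in both
        ]) + if_cuda([         # if_cuda and if_rocm is what triggered the
            "@cub_archive//:cub",  # "duplicate dependency" bazel error
        ]) + if_rocm([
            "@rocprim_archive//:rocprim",  # ROCm counterpart of cub
        ]),
    )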