diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 603b1db84a5..9e001606a23 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -109,11 +109,11 @@ jobs:
 bash bazel.sh
 if [[ "${{ matrix.ext }}" == *-gpu ]]; then
 echo Installing CUDA
- curl -L https://developer.download.nvidia.com/compute/cuda/11.0.3/local_installers/cuda-repo-rhel7-11-0-local-11.0.3_450.51.06-1.x86_64.rpm -o $HOME/cuda.rpm
- curl -L https://developer.download.nvidia.com/compute/redist/cudnn/v8.0.3/cudnn-11.0-linux-x64-v8.0.3.33.tgz -o $HOME/cudnn.tgz
- curl -L https://developer.download.nvidia.com/compute/redist/nccl/v2.7/nccl_2.7.8-1+cuda11.0_x86_64.txz -o $HOME/nccl.txz
+ curl -L https://developer.download.nvidia.com/compute/cuda/11.2.2/local_installers/cuda-repo-rhel7-11-2-local-11.2.2_460.32.03-1.x86_64.rpm -o $HOME/cuda.rpm
+ curl -L https://developer.download.nvidia.com/compute/redist/cudnn/v8.1.1/cudnn-11.2-linux-x64-v8.1.1.33.tgz -o $HOME/cudnn.tgz
+ curl -L https://developer.download.nvidia.com/compute/redist/nccl/v2.8/nccl_2.8.4-1+cuda11.2_x86_64.txz -o $HOME/nccl.txz
 rpm -i $HOME/cuda.rpm
- pushd /var/cuda-repo-rhel7-11-0-local/; rpm -i --nodeps cuda*.rpm libc*.rpm libn*.rpm; rm *.rpm; popd
+ pushd /var/cuda-repo-rhel7-11-2-local/; rpm -i --nodeps cuda*.rpm libc*.rpm libn*.rpm; rm *.rpm; popd
 ln -sf /usr/local/cuda/lib64/stubs/libcuda.so /usr/local/cuda/lib64/libcuda.so
 ln -sf /usr/local/cuda/lib64/stubs/libnvidia-ml.so /usr/local/cuda/lib64/libnvidia-ml.so
 tar hxvf $HOME/cudnn.tgz -C /usr/local/
@@ -215,12 +215,12 @@ jobs:
 echo Removing some unused stuff to avoid running out of disk space
 rm.exe -Rf "C:/Program Files (x86)/Android" "C:/Program Files/dotnet" "%CONDA%" "%GOROOT_1_10_X64%" "%GOROOT_1_11_X64%" "%GOROOT_1_12_X64%" "%GOROOT_1_13_X64%" "C:\hostedtoolcache\windows\Ruby" "C:\Rust"
 echo Installing CUDA
- curl.exe -L https://developer.download.nvidia.com/compute/cuda/11.0.3/local_installers/cuda_11.0.3_451.82_win10.exe -o cuda.exe
- curl.exe -L https://developer.download.nvidia.com/compute/redist/cudnn/v8.0.3/cudnn-11.0-windows-x64-v8.0.3.33.zip -o cudnn.zip
+ curl.exe -L https://developer.download.nvidia.com/compute/cuda/11.2.2/local_installers/cuda_11.2.2_461.33_win10.exe -o cuda.exe
+ curl.exe -L https://developer.download.nvidia.com/compute/redist/cudnn/v8.1.1/cudnn-11.2-windows-x64-v8.1.1.33.zip -o cudnn.zip
 cuda.exe -s
 mkdir cuda
 unzip.exe cudnn.zip
- cp.exe -a cuda/include cuda/lib cuda/bin "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.0/"
+ cp.exe -a cuda/include cuda/lib cuda/bin "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.2/"
 )
 echo %JAVA_HOME%
 - name: Checkout repository
@@ -229,9 +229,9 @@
 shell: cmd
 run: |
 call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" amd64
- set "CUDA_PATH=%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v11.0"
- set "CUDA_PATH_V11_0=%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v11.0"
- set "PATH=C:\msys64\usr\bin;C:\bazel;C:\Program Files\Git\bin;%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v11.0\bin;%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v11.0\libnvvp;%PATH%"
+ set "CUDA_PATH=%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v11.2"
+ set "CUDA_PATH_V11_2=%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v11.2"
+ set "PATH=C:\msys64\usr\bin;C:\bazel;C:\Program Files\Git\bin;%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v11.2\bin;%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v11.2\libnvvp;%PATH%"
 echo Shorten work paths to prevent
Bazel from reaching MAX_PATH limit set "TEST_TMPDIR=C:\tmp" set "TMPDIR=C:\tmp" diff --git a/tensorflow-core/tensorflow-core-api/.bazelrc b/tensorflow-core/tensorflow-core-api/.bazelrc index d15d83ee9a2..26f501ff451 100644 --- a/tensorflow-core/tensorflow-core-api/.bazelrc +++ b/tensorflow-core/tensorflow-core-api/.bazelrc @@ -1,648 +1,2 @@ -# TensorFlow Bazel configuration file. -# This file tries to group and simplify build options for TensorFlow -# -# ----CONFIG OPTIONS---- -# Android options: -# android: -# android_arm: -# android_arm64: -# android_x86: -# android_x86_64: -# -# iOS options: -# ios: -# ios_armv7: -# ios_arm64: -# ios_i386: -# ios_x86_64: -# ios_fat: -# -# Compiler options: -# cuda_clang: Use clang when building CUDA code. -# c++17: Build with C++17 options (links with libc++) -# c++1z: Build with C++17 options (links with libc++) -# c++17_gcc: Build with C++17 options (links with stdlibc++) -# c++1z_gcc: Build with C++17 options (links with stdlibc++) -# avx_linux: Build with avx instruction set on linux. -# avx2_linux: Build with avx2 instruction set on linux. -# native_arch_linux: Build with instruction sets available to the host machine on linux -# avx_win: Build with avx instruction set on windows -# avx2_win: Build with avx2 instruction set on windows -# -# Other build options: -# short_logs: Only log errors during build, skip warnings. -# verbose_logs: Show all compiler warnings during build. -# monolithic: Build all TF C++ code into a single shared object. -# dynamic_kernels: Try to link all kernels dynamically (experimental). -# libc++: Link against libc++ instead of stdlibc++ -# -# -# TF version options; -# v1: Build TF V1 (without contrib) -# v2: Build TF v2 -# -# Feature and Third party library support options: -# xla: Build TF with XLA -# tpu: Build TF with TPU support -# using_cuda: CUDA is available to build system. -# cuda: Build with full cuda support. -# rocm: Build with AMD GPU support (rocm). -# mkl: Enable full mkl support. -# tensorrt: Enable Tensorrt support. -# ngraph: Enable ngraph support. -# numa: Enable numa using hwloc. -# noaws: Disable AWS S3 storage support -# nogcp: Disable GCS support. -# nohdfs: Disable hadoop hdfs support. -# nonccl: Disable nccl support. -# -# -# Remote build execution options (only configured to work with TF team projects for now.) -# rbe: General RBE options shared by all flavors. -# rbe_linux: General RBE options used on all linux builds. -# rbe_win: General RBE options used on all windows builds. -# -# rbe_cpu_linux: RBE options to build with only CPU support. -# rbe_linux_cuda_nvcc_py*: RBE options to build with GPU support using nvcc. -# -# rbe_linux_py2: Linux Python 2 RBE config. -# rbe_linux_py3: Linux Python 3 RBE config -# -# rbe_win_py37: Windows Python 3.7 RBE config -# rbe_win_py38: Windows Python 3.8 RBE config -# -# tensorflow_testing_rbe_linux: RBE options to use RBE with tensorflow-testing project on linux -# tensorflow_testing_rbe_win: RBE options to use RBE with tensorflow-testing project on windows -# -# Embedded Linux options (experimental and only tested with TFLite build yet) -# elinux: General Embedded Linux options shared by all flavors. -# elinux_aarch64: Embedded Linux options for aarch64 (ARM64) CPU support. -# elinux_armhf: Embedded Linux options for armhf (ARMv7) CPU support. -# -# Release build options (for all operating systems) -# release_common: Common options for all builds on all operating systems. -# release_windows_common: Common options for all builds on Windows. 
-# release_gpu_common: Common options for GPU builds on Linux and Windows. -# release_cpu_linux: Toolchain and CUDA options for Linux CPU builds. -# release_cpu_macos: Toolchain and CUDA options for MacOS CPU builds. -# release_gpu_linux: Toolchain and CUDA options for Linux GPU builds. -# release_gpu_linux_cuda_10_1: Toolchain and CUDA options for CUDA 10.1 Linux GPU builds. -# release_cpu_windows: Toolchain and CUDA options for Windows CPU builds. -# release_gpu_windows: Toolchain and CUDA options for Windows GPU builds. - -# Allow builds using libc++ as a linker library -# This is mostly for OSSFuzz, so we also pass in the flags from environment to clean build file -build:libc++ --action_env=CC -build:libc++ --action_env=CXX -build:libc++ --action_env=CXXFLAGS=-stdlib=libc++ -build:libc++ --action_env=PATH -build:libc++ --define force_libcpp=enabled -build:libc++ --linkopt -fuse-ld=lld - -# Android configs. Bazel needs to have --cpu and --fat_apk_cpu both set to the -# target CPU to build transient dependencies correctly. See -# https://docs.bazel.build/versions/master/user-manual.html#flag--fat_apk_cpu -build:android --crosstool_top=//external:android/crosstool -build:android --host_crosstool_top=@bazel_tools//tools/cpp:toolchain -build:android_arm --config=android -build:android_arm --cpu=armeabi-v7a -build:android_arm --fat_apk_cpu=armeabi-v7a -build:android_arm64 --config=android -build:android_arm64 --cpu=arm64-v8a -build:android_arm64 --fat_apk_cpu=arm64-v8a -build:android_x86 --config=android -build:android_x86 --cpu=x86 -build:android_x86 --fat_apk_cpu=x86 -build:android_x86_64 --config=android -build:android_x86_64 --cpu=x86_64 -build:android_x86_64 --fat_apk_cpu=x86_64 - -# Sets the default Apple platform to macOS. -build --apple_platform_type=macos - -# iOS configs for each architecture and the fat binary builds. -build:ios --apple_platform_type=ios -build:ios --apple_bitcode=embedded --copt=-fembed-bitcode -build:ios --copt=-Wno-c++11-narrowing -build:ios_armv7 --config=ios -build:ios_armv7 --cpu=ios_armv7 -build:ios_arm64 --config=ios -build:ios_arm64 --cpu=ios_arm64 -build:ios_i386 --config=ios -build:ios_i386 --cpu=ios_i386 -build:ios_x86_64 --config=ios -build:ios_x86_64 --cpu=ios_x86_64 -build:ios_fat --config=ios -build:ios_fat --ios_multi_cpus=armv7,arm64,i386,x86_64 - -# Config to use a mostly-static build and disable modular op registration -# support (this will revert to loading TensorFlow with RTLD_GLOBAL in Python). -# By default, TensorFlow will build with a dependence on -# //tensorflow:libtensorflow_framework.so. -build:monolithic --define framework_shared_object=false - -# For projects which use TensorFlow as part of a Bazel build process, putting -# nothing in a bazelrc will default to a monolithic build. The following line -# opts in to modular op registration support by default. -build --define framework_shared_object=true - -# Flags for open source build, always set to be true. -build --define open_source_build=true -test --define open_source_build=true - -# For workaround https://github.com/bazelbuild/bazel/issues/8772 with Bazel >= 0.29.1 -build --java_toolchain=@tf_toolchains//toolchains/java:tf_java_toolchain -build --host_java_toolchain=@tf_toolchains//toolchains/java:tf_java_toolchain - -# Please note that MKL on MacOS or windows is still not supported. -# If you would like to use a local MKL instead of downloading, please set the -# environment variable "TF_MKL_ROOT" every time before build. 
-build:mkl --define=build_with_mkl=true --define=enable_mkl=true -build:mkl --define=tensorflow_mkldnn_contraction_kernel=0 -build:mkl --define=build_with_openmp=true -build:mkl -c opt - -# config to build OneDNN backend with a user specified threadpool. -build:mkl_threadpool --define=build_with_mkl=true --define=enable_mkl=true -build:mkl_threadpool --define=tensorflow_mkldnn_contraction_kernel=0 -build:mkl_threadpool --define=build_with_mkl_opensource=true -build:mkl_threadpool --define=build_with_mkldnn_threadpool=true -build:mkl_threadpool -c opt - -# Config setting to build with oneDNN and without the binary blob -build:mkl_opensource_only --define=build_with_mkl=true --define=enable_mkl=true -build:mkl_opensource_only --define=tensorflow_mkldnn_contraction_kernel=0 -build:mkl_opensource_only --define=build_with_mkl_opensource=true -build:mkl_opensource_only --define=build_with_openmp=true -build:mkl_opensource_only -c opt - -# Config setting to build with oneDNN for Arm. -build:mkl_aarch64 --define=build_with_mkl_aarch64=true --define=enable_mkl=true -build:mkl_aarch64 --define=tensorflow_mkldnn_contraction_kernel=0 -build:mkl_aarch64 --define=build_with_mkl_opensource=true -build:mkl_aarch64 -c opt - -# This config refers to building with CUDA available. It does not necessarily -# mean that we build CUDA op kernels. -build:using_cuda --define=using_cuda=true -build:using_cuda --action_env TF_NEED_CUDA=1 -build:using_cuda --crosstool_top=@local_config_cuda//crosstool:toolchain - -# Enable the mlir generated GPU kernels only for cuda builds. -build --define=tensorflow_enable_mlir_generated_gpu_kernels=0 -# This is a more specific option, so it takes precedence over the line above for cuda builds. -build:using_cuda --define=tensorflow_enable_mlir_generated_gpu_kernels=1 - -# This config refers to building CUDA op kernels with nvcc. -build:cuda --config=using_cuda -build:cuda --define=using_cuda_nvcc=true - -# This config refers to building CUDA op kernels with clang. -build:cuda_clang --config=using_cuda -build:cuda_clang --define=using_cuda_clang=true -build:cuda_clang --define=using_clang=true -build:cuda_clang --action_env TF_CUDA_CLANG=1 - -# dbg config, as a shorthand for '--config=opt -c dbg' -build:dbg --config=opt -c dbg -# for now, disable arm_neon. see: https://github.com/tensorflow/tensorflow/issues/33360 -build:dbg --cxxopt -DTF_LITE_DISABLE_X86_NEON -# AWS SDK must be compiled in release mode. see: https://github.com/tensorflow/tensorflow/issues/37498 -build:dbg --copt -DDEBUG_BUILD - -# Config to build TPU backend -build:tpu --define=with_tpu_support=true - -build:tensorrt --action_env TF_NEED_TENSORRT=1 - -build:rocm --crosstool_top=@local_config_rocm//crosstool:toolchain -build:rocm --define=using_rocm=true --define=using_rocm_hipcc=true -build:rocm --action_env TF_NEED_ROCM=1 - -# Options extracted from configure script -build:ngraph --define=with_ngraph_support=true -build:numa --define=with_numa_support=true - -# Options to disable default on features -build:noaws --define=no_aws_support=true -build:nogcp --define=no_gcp_support=true -build:nohdfs --define=no_hdfs_support=true -build:nonccl --define=no_nccl_support=true - -build:stackdriver_support --define=stackdriver_support=true - -build --define=use_fast_cpp_protos=true -build --define=allow_oversize_protos=true - -build --spawn_strategy=standalone -build -c opt - -# Make Bazel print out all options from rc files. -build --announce_rc - -# Other build flags. 
-build --define=grpc_no_ares=true - -# See https://github.com/bazelbuild/bazel/issues/7362 for information on what -# --incompatible_remove_legacy_whole_archive flag does. -# This flag is set to true in Bazel 1.0 and newer versions. We tried to migrate -# Tensorflow to the default, however test coverage wasn't enough to catch the -# errors. -# There is ongoing work on Bazel team's side to provide support for transitive -# shared libraries. As part of migrating to transitive shared libraries, we -# hope to provide a better mechanism for control over symbol exporting, and -# then tackle this issue again. -# -# TODO: Remove this line once TF doesn't depend on Bazel wrapping all library -# archives in -whole_archive -no_whole_archive. -build --noincompatible_remove_legacy_whole_archive - -# These are bazel 2.0's incompatible flags. Tensorflow needs to use bazel 2.0.0 -# to use cc_shared_library, as part of the Tensorflow Build Improvements RFC: -# https://github.com/tensorflow/community/pull/179 -build --noincompatible_prohibit_aapt1 - -# Modular TF build options -build:dynamic_kernels --define=dynamic_loaded_kernels=true -build:dynamic_kernels --copt=-DAUTOLOAD_DYNAMIC_KERNELS - -# Build TF with C++ 17 features. -build:c++17 --cxxopt=-std=c++1z -build:c++17 --cxxopt=-stdlib=libc++ -build:c++1z --config=c++17 -build:c++17_gcc --cxxopt=-std=c++1z -build:c++1z_gcc --config=c++17_gcc - -# Enable using platform specific build settings, except when cross-compiling for -# mobile platforms. -build --enable_platform_specific_config -build:android --noenable_platform_specific_config -build:ios --noenable_platform_specific_config - -# Suppress C++ compiler warnings, otherwise build logs become 10s of MBs. -build:android --copt=-w -build:ios --copt=-w -build:linux --copt=-w -build:linux --host_copt=-w -build:macos --copt=-w -build:windows --copt=/W0 - -# Tensorflow uses M_* math constants that only get defined by MSVC headers if -# _USE_MATH_DEFINES is defined. -build:windows --copt=/D_USE_MATH_DEFINES -build:windows --host_copt=/D_USE_MATH_DEFINES - -# Default paths for TF_SYSTEM_LIBS -build:linux --define=PREFIX=/usr -build:linux --define=LIBDIR=$(PREFIX)/lib -build:linux --define=INCLUDEDIR=$(PREFIX)/include -build:linux --define=PROTOBUF_INCLUDE_PATH=$(PREFIX)/include -build:macos --define=PREFIX=/usr -build:macos --define=LIBDIR=$(PREFIX)/lib -build:macos --define=INCLUDEDIR=$(PREFIX)/include -build:macos --define=PROTOBUF_INCLUDE_PATH=$(PREFIX)/include -# TF_SYSTEM_LIBS do not work on windows. - -# By default, build TF in C++ 14 mode. -build:android --cxxopt=-std=c++14 -build:android --host_cxxopt=-std=c++14 -build:ios --cxxopt=-std=c++14 -build:ios --host_cxxopt=-std=c++14 -build:linux --cxxopt=-std=c++14 -build:linux --host_cxxopt=-std=c++14 -build:macos --cxxopt=-std=c++14 -build:macos --host_cxxopt=-std=c++14 -build:windows --cxxopt=/std:c++14 -build:windows --host_cxxopt=/std:c++14 - -# On windows, we still link everything into a single DLL. 
-build:windows --config=monolithic - -# On linux, we dynamically link small amount of kernels -build:linux --config=dynamic_kernels - -# Make sure to include as little of windows.h as possible -build:windows --copt=-DWIN32_LEAN_AND_MEAN -build:windows --host_copt=-DWIN32_LEAN_AND_MEAN -build:windows --copt=-DNOGDI -build:windows --host_copt=-DNOGDI - -# MSVC (Windows): Standards-conformant preprocessor mode -# See https://docs.microsoft.com/en-us/cpp/preprocessor/preprocessor-experimental-overview -build:windows --copt=/experimental:preprocessor -build:windows --host_copt=/experimental:preprocessor - -# Misc build options we need for windows. -build:windows --linkopt=/DEBUG -build:windows --host_linkopt=/DEBUG -build:windows --linkopt=/OPT:REF -build:windows --host_linkopt=/OPT:REF -build:windows --linkopt=/OPT:ICF -build:windows --host_linkopt=/OPT:ICF -build:windows --experimental_strict_action_env=true - -# Verbose failure logs when something goes wrong -build:windows --verbose_failures - -# On windows, we never cross compile -build:windows --distinct_host_configuration=false - -# Following option reduces considerably the compilation time on Windows with VS16.4+ -build:windows --copt=/d2ReducedOptimizeHugeFunctions -build:windows --host_copt=/d2ReducedOptimizeHugeFunctions - -# Suppress all warning messages. -build:short_logs --output_filter=DONT_MATCH_ANYTHING -build:verbose_logs --output_filter= -build --config=short_logs - -# Instruction set optimizations -# TODO(gunan): Create a feature in toolchains for avx/avx2 to -# avoid having to define linux/win separately. -build:avx_linux --copt=-mavx -build:avx_linux --host_copt=-mavx -build:avx2_linux --copt=-mavx2 -build:native_arch_linux --copt=-march=native -build:avx_win --copt=/arch=AVX -build:avx2_win --copt=/arch=AVX2 - -# Options to build TensorFlow 1.x or 2.x. 
-build:v1 --define=tf_api_version=1 -build:v2 --define=tf_api_version=2 -build:v1 --action_env=TF2_BEHAVIOR=0 -build:v2 --action_env=TF2_BEHAVIOR=1 -build --config=v2 -test --config=v2 - -# Enable XLA -build:xla --define=with_xla_support=true - -# BEGIN TF REMOTE BUILD EXECUTION OPTIONS -# Options when using remote execution -# WARNING: THESE OPTIONS WONT WORK IF YOU DO NOT HAVE PROPER AUTHENTICATION AND PERMISSIONS - -# Flag to enable remote config -common --experimental_repo_remote_exec - -build:rbe --action_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1 -build:rbe --google_default_credentials -build:rbe --bes_backend=buildeventservice.googleapis.com -build:rbe --bes_results_url="https://source.cloud.google.com/results/invocations" -build:rbe --bes_timeout=600s -build:rbe --define=EXECUTOR=remote -build:rbe --distinct_host_configuration=false -build:rbe --flaky_test_attempts=3 -build:rbe --jobs=200 -build:rbe --remote_executor=grpcs://remotebuildexecution.googleapis.com -build:rbe --remote_timeout=3600 -build:rbe --spawn_strategy=remote,worker,standalone,local -test:rbe --test_env=USER=anon -# Attempt to minimize the amount of data transfer between bazel and the remote -# workers: -build:rbe --remote_download_toplevel - -build:rbe_linux --config=rbe -build:rbe_linux --action_env=PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go/bin" -build:rbe_linux --host_javabase=@bazel_toolchains//configs/ubuntu16_04_clang/1.1:jdk8 -build:rbe_linux --javabase=@bazel_toolchains//configs/ubuntu16_04_clang/1.1:jdk8 -build:rbe_linux --host_java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8 -build:rbe_linux --java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8 - -# Non-rbe settings we should include because we do not run configure -build:rbe_linux --config=xla -build:rbe_linux --config=avx_linux -build:rbe_linux --config=short_logs -# TODO(gunan): Check why we need this specified in rbe, but not in other builds. 
-build:rbe_linux --linkopt=-lrt -build:rbe_linux --host_linkopt=-lrt -build:rbe_linux --linkopt=-lm -build:rbe_linux --host_linkopt=-lm - -build:rbe_cpu_linux --config=rbe_linux -build:rbe_cpu_linux --host_crosstool_top="//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010:toolchain" -build:rbe_cpu_linux --crosstool_top="//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010:toolchain" -build:rbe_cpu_linux --extra_toolchains="//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010:cc-toolchain-k8" -build:rbe_cpu_linux --extra_execution_platforms="@ubuntu16.04-manylinux2010-py3_config_platform//:platform" -build:rbe_cpu_linux --extra_execution_platforms="@ubuntu16.04-manylinux2010-py3_config_platform//:platform" -build:rbe_cpu_linux --host_platform="@ubuntu16.04-manylinux2010-py3_config_platform//:platform" -build:rbe_cpu_linux --platforms="@ubuntu16.04-manylinux2010-py3_config_platform//:platform" - -build:rbe_linux_cuda_base --config=rbe_linux -build:rbe_linux_cuda_base --repo_env=TF_NEED_TENSORRT=1 -build:rbe_linux_cuda_base --repo_env=TF_CUDA_VERSION=10 -build:rbe_linux_cuda_base --repo_env=TF_CUDNN_VERSION=7 -build:rbe_linux_cuda_base --repo_env=REMOTE_GPU_TESTING=1 -build:rbe_linux_cuda_base --repo_env=TF_NEED_CUDA=1 -test:rbe_linux_cuda_base --test_env=LD_LIBRARY_PATH="/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64" - -build:rbe_linux_cuda10.1_nvcc_base --config=rbe_linux_cuda_base -build:rbe_linux_cuda10.1_nvcc_base --define=using_cuda_nvcc=true -build:rbe_linux_cuda10.1_nvcc_base --host_crosstool_top="@ubuntu18.04-gcc7_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_cuda//crosstool:toolchain" -build:rbe_linux_cuda10.1_nvcc_base --crosstool_top="@ubuntu18.04-gcc7_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_cuda//crosstool:toolchain" -build:rbe_linux_cuda10.1_nvcc_base --extra_toolchains="@ubuntu18.04-gcc7_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_cuda//crosstool:toolchain-linux-x86_64" -build:rbe_linux_cuda10.1_nvcc_base --extra_execution_platforms="@ubuntu18.04-gcc7_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_platform//:platform" -build:rbe_linux_cuda10.1_nvcc_base --host_platform="@ubuntu18.04-gcc7_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_platform//:platform" -build:rbe_linux_cuda10.1_nvcc_base --platforms="@ubuntu18.04-gcc7_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_platform//:platform" -build:rbe_linux_cuda10.1_nvcc_base --repo_env=TF_CUDA_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_cuda" -build:rbe_linux_cuda10.1_nvcc_base --repo_env=TF_TENSORRT_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_tensorrt" -build:rbe_linux_cuda10.1_nvcc_base --repo_env=TF_NCCL_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_nccl" -build:rbe_linux_cuda10.1_nvcc_py2.7 --config=rbe_linux_cuda10.1_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_python2.7" -build:rbe_linux_cuda10.1_nvcc_py3.5 --config=rbe_linux_cuda10.1_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_python3.5" -build:rbe_linux_cuda10.1_nvcc_py3.6 --config=rbe_linux_cuda10.1_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_python3.6" -build:rbe_linux_cuda10.1_nvcc_py3.7 --config=rbe_linux_cuda10.1_nvcc_base 
--repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_python3.7" -build:rbe_linux_cuda10.1_nvcc_py3.8 --config=rbe_linux_cuda10.1_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_python3.8" - -build:rbe_linux_cuda11.0_nvcc_base --config=rbe_linux_cuda_base -build:rbe_linux_cuda11.0_nvcc_base --define=using_cuda_nvcc=true -build:rbe_linux_cuda11.0_nvcc_base --host_crosstool_top="@ubuntu18.04-gcc7_manylinux2010-cuda11.0-cudnn8-tensorrt7.1_config_cuda//crosstool:toolchain" -build:rbe_linux_cuda11.0_nvcc_base --crosstool_top="@ubuntu18.04-gcc7_manylinux2010-cuda11.0-cudnn8-tensorrt7.1_config_cuda//crosstool:toolchain" -build:rbe_linux_cuda11.0_nvcc_base --extra_toolchains="@ubuntu18.04-gcc7_manylinux2010-cuda11.0-cudnn8-tensorrt7.1_config_cuda//crosstool:toolchain-linux-x86_64" -build:rbe_linux_cuda11.0_nvcc_base --extra_execution_platforms="@ubuntu18.04-gcc7_manylinux2010-cuda11.0-cudnn8-tensorrt7.1_config_platform//:platform" -build:rbe_linux_cuda11.0_nvcc_base --host_platform="@ubuntu18.04-gcc7_manylinux2010-cuda11.0-cudnn8-tensorrt7.1_config_platform//:platform" -build:rbe_linux_cuda11.0_nvcc_base --platforms="@ubuntu18.04-gcc7_manylinux2010-cuda11.0-cudnn8-tensorrt7.1_config_platform//:platform" -build:rbe_linux_cuda11.0_nvcc_base --repo_env=TF_CUDA_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.0-cudnn8-tensorrt7.1_config_cuda" -build:rbe_linux_cuda11.0_nvcc_base --repo_env=TF_TENSORRT_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.0-cudnn8-tensorrt7.1_config_tensorrt" -build:rbe_linux_cuda11.0_nvcc_base --repo_env=TF_NCCL_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.0-cudnn8-tensorrt7.1_config_nccl" -build:rbe_linux_cuda11.0_nvcc_py2.7 --config=rbe_linux_cuda11.0_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.0-cudnn8-tensorrt7.1_config_python2.7" -build:rbe_linux_cuda11.0_nvcc_py3.5 --config=rbe_linux_cuda11.0_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.0-cudnn8-tensorrt7.1_config_python3.5" -build:rbe_linux_cuda11.0_nvcc_py3.6 --config=rbe_linux_cuda11.0_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.0-cudnn8-tensorrt7.1_config_python3.6" -build:rbe_linux_cuda11.0_nvcc_py3.7 --config=rbe_linux_cuda11.0_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.0-cudnn8-tensorrt7.1_config_python3.7" -build:rbe_linux_cuda11.0_nvcc_py3.8 --config=rbe_linux_cuda11.0_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.0-cudnn8-tensorrt7.1_config_python3.8" - -# Map default to CUDA 11 for PY35 and greater. -build:rbe_linux_cuda_nvcc_py27 --config=rbe_linux_cuda10.1_nvcc_py2.7 -build:rbe_linux_cuda_nvcc_py35 --config=rbe_linux_cuda11.0_nvcc_py3.5 -build:rbe_linux_cuda_nvcc_py36 --config=rbe_linux_cuda11.0_nvcc_py3.6 -build:rbe_linux_cuda_nvcc_py37 --config=rbe_linux_cuda11.0_nvcc_py3.7 -build:rbe_linux_cuda_nvcc_py38 --config=rbe_linux_cuda11.0_nvcc_py3.8 - -# Deprecated configs that people might still use. 
-build:rbe_linux_cuda_nvcc --config=rbe_linux_cuda_nvcc_py36 -build:rbe_gpu_linux --config=rbe_linux_cuda_nvcc - -build:rbe_linux_cuda_clang_base --config=rbe_linux_cuda_base -build:rbe_linux_cuda_clang_base --crosstool_top="@ubuntu16.04-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_cuda//crosstool:toolchain" -build:rbe_linux_cuda_clang_base --extra_toolchains="@ubuntu16.04-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_cuda//crosstool:toolchain-linux-x86_64" -build:rbe_linux_cuda_clang_base --extra_execution_platforms="@ubuntu16.04-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_platform//:platform" -build:rbe_linux_cuda_clang_base --host_platform="@ubuntu16.04-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_platform//:platform" -build:rbe_linux_cuda_clang_base --platforms="@ubuntu16.04-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_platform//:platform" -build:rbe_linux_cuda_clang_base --repo_env=TF_CUDA_CONFIG_REPO="@ubuntu16.04-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_cuda" -build:rbe_linux_cuda_clang_base --repo_env=TF_TENSORRT_CONFIG_REPO="@ubuntu16.04-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_tensorrt" -build:rbe_linux_cuda_clang_base --repo_env=TF_NCCL_CONFIG_REPO="@ubuntu16.04-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_nccl" -build:rbe_linux_cuda_clang_base --define=using_cuda_clang=true -build:rbe_linux_cuda_clang_py27 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu16.04-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_python2.7" -build:rbe_linux_cuda_clang_py35 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu16.04-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_python3.5" -build:rbe_linux_cuda_clang_py36 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu16.04-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_python3.6" -build:rbe_linux_cuda_clang_py37 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu16.04-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_python3.7" -build:rbe_linux_cuda_clang_py38 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu16.04-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_python3.8" - -# ROCm -build:rbe_linux_rocm_base --config=rbe_linux -build:rbe_linux_rocm_base --repo_env=TF_NEED_ROCM=1 -build:rbe_linux_rocm_base --crosstool_top="@ubuntu18.04-gcc7_manylinux2010-rocm_config_rocm//crosstool:toolchain" -build:rbe_linux_rocm_base --extra_toolchains="@ubuntu18.04-gcc7_manylinux2010-rocm_config_rocm//crosstool:toolchain-linux-x86_64" -build:rbe_linux_rocm_base --extra_execution_platforms="@ubuntu18.04-gcc7_manylinux2010-rocm_config_platform//:platform" -build:rbe_linux_rocm_base --host_platform="@ubuntu18.04-gcc7_manylinux2010-rocm_config_platform//:platform" -build:rbe_linux_rocm_base --platforms="@ubuntu18.04-gcc7_manylinux2010-rocm_config_platform//:platform" -build:rbe_linux_rocm_base --action_env=TF_ROCM_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-rocm_config_rocm" -build:rbe_linux_rocm_base --define=using_rocm_hipcc=true -build:rbe_linux_rocm_py2.7 --config=rbe_linux_rocm_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-rocm_config_python2.7" -build:rbe_linux_rocm_py3.5 --config=rbe_linux_rocm_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-rocm_config_python3.5" -build:rbe_linux_rocm_py3.6 --config=rbe_linux_rocm_base 
--repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-rocm_config_python3.6" -build:rbe_linux_rocm_py3.7 --config=rbe_linux_rocm_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-rocm_config_python3.7" -build:rbe_linux_rocm_py3.8 --config=rbe_linux_rocm_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-rocm_config_python3.8" - -# Linux CPU -build:rbe_linux_py2 --config=rbe_linux -build:rbe_linux_py2 --repo_env=PYTHON_BIN_PATH="/usr/bin/python2" -build:rbe_linux_py2 --python_path="/usr/bin/python2" -build:rbe_linux_py2 --repo_env=TF_PYTHON_CONFIG_REPO="@org_tensorflow//third_party/toolchains/preconfig/ubuntu16.04/py" - -build:rbe_linux_py3 --config=rbe_linux -build:rbe_linux_py3 --python_path="/usr/bin/python3" -build:rbe_linux_py3 --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu16.04-manylinux2010-py3_config_python" - -build:rbe_win --config=rbe -build:rbe_win --crosstool_top="@org_tensorflow//third_party/toolchains/preconfig/win/tf_win_08062020:toolchain" -build:rbe_win --extra_toolchains="@org_tensorflow//third_party/toolchains/preconfig/win/tf_win_08062020:cc-toolchain-x64_windows" -build:rbe_win --host_javabase="@org_tensorflow//third_party/toolchains/preconfig/win:windows_jdk8" -build:rbe_win --javabase="@org_tensorflow//third_party/toolchains/preconfig/win:windows_jdk8" -build:rbe_win --extra_execution_platforms="@org_tensorflow//third_party/toolchains/preconfig/win:rbe_windows_ltsc2019" -build:rbe_win --host_platform="@org_tensorflow//third_party/toolchains/preconfig/win:rbe_windows_ltsc2019" -build:rbe_win --platforms="@org_tensorflow//third_party/toolchains/preconfig/win:rbe_windows_ltsc2019" -build:rbe_win --shell_executable=C:\\tools\\msys64\\usr\\bin\\bash.exe - -# TODO(gunan): Remove once we use MSVC 2019 with latest patches. -build:rbe_win --define=override_eigen_strong_inline=true -build:rbe_win --jobs=100 - -build:rbe_win_py37 --config=rbe -build:rbe_win_py37 --repo_env=TF_PYTHON_CONFIG_REPO="@windows_py37_config_python" -build:rbe_win_py37 --python_path=C:\\Python37\\python.exe - -build:rbe_win_py38 --config=rbe -build:rbe_win_py38 --repo_env=PYTHON_BIN_PATH=C:\\Python38\\python.exe -build:rbe_win_py38 --repo_env=PYTHON_LIB_PATH=C:\\Python38\\lib\\site-packages -build:rbe_win_py38 --repo_env=TF_PYTHON_CONFIG_REPO=@org_tensorflow//third_party/toolchains/preconfig/win_1803/py38 -build:rbe_win_py38 --python_path=C:\\Python38\\python.exe - -# These you may need to change for your own GCP project. 
-build:tensorflow_testing_rbe --project_id=tensorflow-testing -common:tensorflow_testing_rbe_linux --remote_instance_name=projects/tensorflow-testing/instances/default_instance -build:tensorflow_testing_rbe_linux --config=tensorflow_testing_rbe -build:tensorflow_testing_rbe_linux --config=rbe -build:tensorflow_testing_rbe_linux --config=rbe_linux - -common:tensorflow_testing_rbe_win --remote_instance_name=projects/tensorflow-testing/instances/windows -build:tensorflow_testing_rbe_win --config=tensorflow_testing_rbe - -# TFLite build configs for generic embedded Linux -build:elinux --crosstool_top=@local_config_embedded_arm//:toolchain -build:elinux --host_crosstool_top=@bazel_tools//tools/cpp:toolchain -build:elinux_aarch64 --config=elinux -build:elinux_aarch64 --cpu=aarch64 -build:elinux_armhf --config=elinux -build:elinux_armhf --cpu=armhf -# END TF REMOTE BUILD EXECUTION OPTIONS - -# Default options should come above this line - -# Options from ./configure -try-import %workspace%/.tf_configure.bazelrc - -# Put user-specific options in .bazelrc.user -try-import %workspace%/.bazelrc.user - -# Here are bazelrc configs for release builds -build:release_common --config=opt -build:release_common --config=v2 -build:release_common --distinct_host_configuration=false -build:release_common --action_env TF_CONFIGURE_IOS="0" - -build:release_cpu_linux --config=release_common -build:release_cpu_linux --config=avx_linux -# We use the same toolchain for CPU/GPU packages. -# Did not add this to the defaults in case this changes. -build:release_cpu_linux --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain - -build:release_cpu_macos --config=release_common -build:release_cpu_macos --config=avx_linux - -build:release_gpu_common --config=release_common -build:release_gpu_common --config=cuda -build:release_gpu_common --config=tensorrt -build:release_gpu_common --action_env CUDA_TOOLKIT_PATH="/usr/local/cuda-11.0" -build:release_gpu_common --action_env=TF_CUDA_VERSION="11" -build:release_gpu_common --action_env=TF_CUDNN_VERSION="8" -build:release_gpu_common --action_env=TF_NEED_TENSORRT="1" -build:release_gpu_common --action_env=TF_CUDA_COMPUTE_CAPABILITIES="sm_35,sm_50,sm_60,sm_70,sm_75,compute_80" -build:release_gpu_common --action_env=TENSORRT_INSTALL_PATH="/usr/local/tensorrt" -build:release_gpu_common --action_env=LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/tensorrt/lib" -build:release_gpu_common --action_env=GCC_HOST_COMPILER_PATH="/usr/bin/gcc-5" - - -build:release_gpu_linux --config=release_gpu_common -build:release_gpu_linux --config=avx_linux -build:release_gpu_linux --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda11:toolchain -build:release_windows_common --config=release_common -build:release_windows_common --define=no_tensorflow_py_deps=true -build:release_windows_common --announce_rc - -build:release_cpu_windows --config=release_windows_common - -build:release_gpu_windows --config=release_windows_common - -build:release_gpu_linux_cuda_10_1 --config=release_gpu_linux -build:release_gpu_linux_cuda_10_1 --action_env CUDA_TOOLKIT_PATH="/usr/local/cuda-10.1" -build:release_gpu_linux_cuda_10_1 --action_env=TF_CUDA_VERSION="10" -build:release_gpu_linux_cuda_10_1 --action_env=TF_CUDNN_VERSION="7" - -# Address sanitizer -# CC=clang bazel build --config asan -build:asan --strip=never -build:asan --copt -fsanitize=address -build:asan --copt 
-DADDRESS_SANITIZER -build:asan --copt -g -build:asan --copt -O3 -build:asan --copt -fno-omit-frame-pointer -build:asan --linkopt -fsanitize=address - -# Memory sanitizer -# CC=clang bazel build --config msan -build:msan --strip=never -build:msan --copt -fsanitize=memory -build:msan --copt -DADDRESS_SANITIZER -build:msan --copt -g -build:msan --copt -O3 -build:msan --copt -fno-omit-frame-pointer -build:msan --linkopt -fsanitize=memory - -# Undefined Behavior Sanitizer -# CC=clang bazel build --config ubsan -build:ubsan --strip=never -build:ubsan --copt -fsanitize=undefined -build:ubsan --copt -g -build:ubsan --copt -O3 -build:ubsan --copt -fno-omit-frame-pointer -build:ubsan --linkopt -fsanitize=undefined -build:ubsan --linkopt -lubsan +build --remote_cache=https://storage.googleapis.com/tensorflow-sigs-jvm +build --remote_upload_local_results=false \ No newline at end of file diff --git a/tensorflow-core/tensorflow-core-api/build.sh b/tensorflow-core/tensorflow-core-api/build.sh index fdddaafa18b..b5d25277ea0 100755 --- a/tensorflow-core/tensorflow-core-api/build.sh +++ b/tensorflow-core/tensorflow-core-api/build.sh @@ -33,7 +33,7 @@ BUILD_FLAGS="$BUILD_FLAGS --experimental_repo_remote_exec --python_path="$PYTHON BUILD_FLAGS="$BUILD_FLAGS --distinct_host_configuration=true" # Build C/C++ API of TensorFlow itself including a target to generate ops for Java -bazel build $BUILD_FLAGS ${BUILD_USER_FLAGS:-} \ +bazel --bazelrc=tensorflow.bazelrc build $BUILD_FLAGS ${BUILD_USER_FLAGS:-} \ @org_tensorflow//tensorflow:tensorflow_cc \ @org_tensorflow//tensorflow/tools/lib_package:jnilicenses_generate \ :java_proto_gen_sources \ diff --git a/tensorflow-core/tensorflow-core-api/tensorflow.bazelrc b/tensorflow-core/tensorflow-core-api/tensorflow.bazelrc new file mode 100644 index 00000000000..7724d2a6e69 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/tensorflow.bazelrc @@ -0,0 +1,625 @@ +# TensorFlow Bazel configuration file. +# This file tries to group and simplify build options for TensorFlow +# +# ----CONFIG OPTIONS---- +# Android options: +# android: +# android_arm: +# android_arm64: +# android_x86: +# android_x86_64: +# +# iOS options: +# ios: +# ios_armv7: +# ios_arm64: +# ios_i386: +# ios_x86_64: +# ios_fat: +# +# Macosx options +# darwin_arm64: +# +# Compiler options: +# cuda_clang: Use clang when building CUDA code. +# c++17: Build with C++17 options (links with libc++) +# c++1z: Build with C++17 options (links with libc++) +# c++17_gcc: Build with C++17 options (links with stdlibc++) +# c++1z_gcc: Build with C++17 options (links with stdlibc++) +# avx_linux: Build with avx instruction set on linux. +# avx2_linux: Build with avx2 instruction set on linux. +# native_arch_linux: Build with instruction sets available to the host machine on linux +# avx_win: Build with avx instruction set on windows +# avx2_win: Build with avx2 instruction set on windows +# +# Other build options: +# short_logs: Only log errors during build, skip warnings. +# verbose_logs: Show all compiler warnings during build. +# monolithic: Build all TF C++ code into a single shared object. +# dynamic_kernels: Try to link all kernels dynamically (experimental). 
+# libc++: Link against libc++ instead of stdlibc++ +# asan: Build with the clang address sanitizer +# msan: Build with the clang memory sanitizer +# ubsan: Build with the clang undefined behavior sanitizer +# dbg: Build with debug info +# +# +# TF version options; +# v1: Build TF V1 (without contrib) +# v2: Build TF v2 +# +# Feature and Third party library support options: +# xla: Build TF with XLA +# tpu: Build TF with TPU support +# cuda: Build with full cuda support. +# rocm: Build with AMD GPU support (rocm). +# mkl: Enable full mkl support. +# tensorrt: Enable Tensorrt support. +# numa: Enable numa using hwloc. +# noaws: Disable AWS S3 storage support +# nogcp: Disable GCS support. +# nohdfs: Disable hadoop hdfs support. +# nonccl: Disable nccl support. +# +# +# Remote build execution options (only configured to work with TF team projects for now.) +# rbe: General RBE options shared by all flavors. +# rbe_linux: General RBE options used on all linux builds. +# rbe_win: General RBE options used on all windows builds. +# +# rbe_cpu_linux: RBE options to build with only CPU support. +# rbe_linux_cuda_nvcc_py*: RBE options to build with GPU support using nvcc. +# +# rbe_linux_py2: Linux Python 2 RBE config. +# rbe_linux_py3: Linux Python 3 RBE config +# +# rbe_win_py37: Windows Python 3.7 RBE config +# rbe_win_py38: Windows Python 3.8 RBE config +# +# tensorflow_testing_rbe_linux: RBE options to use RBE with tensorflow-testing project on linux +# tensorflow_testing_rbe_win: RBE options to use RBE with tensorflow-testing project on windows +# +# Embedded Linux options (experimental and only tested with TFLite build yet) +# elinux: General Embedded Linux options shared by all flavors. +# elinux_aarch64: Embedded Linux options for aarch64 (ARM64) CPU support. +# elinux_armhf: Embedded Linux options for armhf (ARMv7) CPU support. +# +# Release build options (for all operating systems) +# release_base: Common options for all builds on all operating systems. +# release_gpu_base: Common options for GPU builds on Linux and Windows. +# release_cpu_linux: Toolchain and CUDA options for Linux CPU builds. +# release_cpu_macos: Toolchain and CUDA options for MacOS CPU builds. +# release_gpu_linux: Toolchain and CUDA options for Linux GPU builds. +# release_cpu_windows: Toolchain and CUDA options for Windows CPU builds. +# release_gpu_windows: Toolchain and CUDA options for Windows GPU builds. + +# Default build options. These are applied first and unconditionally. + +# For projects which use TensorFlow as part of a Bazel build process, putting +# nothing in a bazelrc will default to a monolithic build. The following line +# opts in to modular op registration support by default. +build --define framework_shared_object=true + +# For workaround https://github.com/bazelbuild/bazel/issues/8772 with Bazel >= 0.29.1 +build --java_toolchain=@tf_toolchains//toolchains/java:tf_java_toolchain +build --host_java_toolchain=@tf_toolchains//toolchains/java:tf_java_toolchain + +build --define=use_fast_cpp_protos=true +build --define=allow_oversize_protos=true + +build --spawn_strategy=standalone +build -c opt + +# Make Bazel print out all options from rc files. +build --announce_rc + +build --define=grpc_no_ares=true + +# See https://github.com/bazelbuild/bazel/issues/7362 for information on what +# --incompatible_remove_legacy_whole_archive flag does. +# This flag is set to true in Bazel 1.0 and newer versions. 
We tried to migrate +# Tensorflow to the default, however test coverage wasn't enough to catch the +# errors. +# There is ongoing work on Bazel team's side to provide support for transitive +# shared libraries. As part of migrating to transitive shared libraries, we +# hope to provide a better mechanism for control over symbol exporting, and +# then tackle this issue again. +# +# TODO: Remove this line once TF doesn't depend on Bazel wrapping all library +# archives in -whole_archive -no_whole_archive. +build --noincompatible_remove_legacy_whole_archive + +build --enable_platform_specific_config + +# Enable XLA support by default. +build --define=with_xla_support=true + +build --config=short_logs + +build --config=v2 + +# Disable AWS/HDFS support by default +build --define=no_aws_support=true +build --define=no_hdfs_support=true + +# Default options should come above this line. + +# Allow builds using libc++ as a linker library +# This is mostly for OSSFuzz, so we also pass in the flags from environment to clean build file +build:libc++ --action_env=CC +build:libc++ --action_env=CXX +build:libc++ --action_env=CXXFLAGS=-stdlib=libc++ +build:libc++ --action_env=PATH +build:libc++ --define force_libcpp=enabled +build:libc++ --linkopt -fuse-ld=lld + +# Android configs. Bazel needs to have --cpu and --fat_apk_cpu both set to the +# target CPU to build transient dependencies correctly. See +# https://docs.bazel.build/versions/master/user-manual.html#flag--fat_apk_cpu +build:android --crosstool_top=//external:android/crosstool +build:android --host_crosstool_top=@bazel_tools//tools/cpp:toolchain +build:android_arm --config=android +build:android_arm --cpu=armeabi-v7a +build:android_arm --fat_apk_cpu=armeabi-v7a +build:android_arm64 --config=android +build:android_arm64 --cpu=arm64-v8a +build:android_arm64 --fat_apk_cpu=arm64-v8a +build:android_x86 --config=android +build:android_x86 --cpu=x86 +build:android_x86 --fat_apk_cpu=x86 +build:android_x86_64 --config=android +build:android_x86_64 --cpu=x86_64 +build:android_x86_64 --fat_apk_cpu=x86_64 + +# Sets the default Apple platform to macOS. +build:macos --apple_platform_type=macos + +# gRPC on MacOS requires this #define +build:macos --copt=-DGRPC_BAZEL_BUILD + +# Settings for MacOS on ARM CPUs. +build:macos_arm64 --cpu=darwin_arm64 + +# iOS configs for each architecture and the fat binary builds. +build:ios --apple_platform_type=ios +build:ios --apple_bitcode=embedded --copt=-fembed-bitcode +build:ios --copt=-Wno-c++11-narrowing +build:ios_armv7 --config=ios +build:ios_armv7 --cpu=ios_armv7 +build:ios_arm64 --config=ios +build:ios_arm64 --cpu=ios_arm64 +build:ios_i386 --config=ios +build:ios_i386 --cpu=ios_i386 +build:ios_x86_64 --config=ios +build:ios_x86_64 --cpu=ios_x86_64 +build:ios_fat --config=ios +build:ios_fat --ios_multi_cpus=armv7,arm64,i386,x86_64 + +# Config to use a mostly-static build and disable modular op registration +# support (this will revert to loading TensorFlow with RTLD_GLOBAL in Python). +# By default, TensorFlow will build with a dependence on +# //tensorflow:libtensorflow_framework.so. +build:monolithic --define framework_shared_object=false + +# Please note that MKL on MacOS or windows is still not supported. +# If you would like to use a local MKL instead of downloading, please set the +# environment variable "TF_MKL_ROOT" every time before build. 
+build:mkl --define=build_with_mkl=true --define=enable_mkl=true +build:mkl --define=tensorflow_mkldnn_contraction_kernel=0 +build:mkl --define=build_with_openmp=true +build:mkl -c opt + +# config to build OneDNN backend with a user specified threadpool. +build:mkl_threadpool --define=build_with_mkl=true --define=enable_mkl=true +build:mkl_threadpool --define=tensorflow_mkldnn_contraction_kernel=0 +build:mkl_threadpool --define=build_with_mkl_opensource=true +build:mkl_threadpool -c opt + +# Config setting to build oneDNN with Compute Library for the Arm Architecture (ACL). +# This build is for the inference regime only. +build:mkl_aarch64 --define=build_with_mkl_aarch64=true --define=enable_mkl=true +build:mkl_aarch64 --define=tensorflow_mkldnn_contraction_kernel=0 +build:mkl_aarch64 --define=build_with_mkl_opensource=true +build:mkl_aarch64 --define=build_with_openmp=true +build:mkl_aarch64 -c opt + +# This config refers to building CUDA op kernels with nvcc. +build:cuda --repo_env TF_NEED_CUDA=1 +build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain +build:cuda --@local_config_cuda//:enable_cuda + +# This config refers to building CUDA op kernels with clang. +build:cuda_clang --config=cuda +build:cuda_clang --repo_env TF_CUDA_CLANG=1 +build:cuda_clang --@local_config_cuda//:cuda_compiler=clang + +# Debug config +build:dbg -c dbg +# Only include debug info for files under tensorflow/, excluding kernels, to +# reduce the size of the debug info in the binary. This is because if the debug +# sections in the ELF binary are too large, errors can occur. See +# https://github.com/tensorflow/tensorflow/issues/48919. +# Users can still include debug info for a specific kernel, e.g. with: +# --config=dbg --per_file_copt=+tensorflow/core/kernels/identity_op.*@-g +build:dbg --per_file_copt=+.*,-tensorflow.*@-g0 +build:dbg --per_file_copt=+tensorflow/core/kernels.*@-g0 +# for now, disable arm_neon. see: https://github.com/tensorflow/tensorflow/issues/33360 +build:dbg --cxxopt -DTF_LITE_DISABLE_X86_NEON +# AWS SDK must be compiled in release mode. see: https://github.com/tensorflow/tensorflow/issues/37498 +build:dbg --copt -DDEBUG_BUILD + +# Config to build TPU backend +build:tpu --define=with_tpu_support=true + +build:tensorrt --repo_env TF_NEED_TENSORRT=1 + +build:rocm --crosstool_top=@local_config_rocm//crosstool:toolchain +build:rocm --define=using_rocm=true --define=using_rocm_hipcc=true +build:rocm --repo_env TF_NEED_ROCM=1 + +# Options extracted from configure script +build:numa --define=with_numa_support=true + +# Options to disable default on features +build:noaws --define=no_aws_support=true +build:nogcp --define=no_gcp_support=true +build:nohdfs --define=no_hdfs_support=true +build:nonccl --define=no_nccl_support=true + +build:stackdriver_support --define=stackdriver_support=true + +# Modular TF build options +build:dynamic_kernels --define=dynamic_loaded_kernels=true +build:dynamic_kernels --copt=-DAUTOLOAD_DYNAMIC_KERNELS + +# Build TF with C++ 17 features. +build:c++17 --cxxopt=-std=c++1z +build:c++17 --cxxopt=-stdlib=libc++ +build:c++1z --config=c++17 +build:c++17_gcc --cxxopt=-std=c++1z +build:c++1z_gcc --config=c++17_gcc + +# Don't trigger --config= when cross-compiling. +build:android --noenable_platform_specific_config +build:ios --noenable_platform_specific_config + +# Suppress C++ compiler warnings, otherwise build logs become 10s of MBs. 
+build:android --copt=-w
+build:ios --copt=-w
+build:linux --copt=-w
+build:linux --host_copt=-w
+build:macos --copt=-w
+build:windows --copt=/W0
+
+# Tensorflow uses M_* math constants that only get defined by MSVC headers if
+# _USE_MATH_DEFINES is defined.
+build:windows --copt=/D_USE_MATH_DEFINES
+build:windows --host_copt=/D_USE_MATH_DEFINES
+
+# Default paths for TF_SYSTEM_LIBS
+build:linux --define=PREFIX=/usr
+build:linux --define=LIBDIR=$(PREFIX)/lib
+build:linux --define=INCLUDEDIR=$(PREFIX)/include
+build:linux --define=PROTOBUF_INCLUDE_PATH=$(PREFIX)/include
+build:macos --define=PREFIX=/usr
+build:macos --define=LIBDIR=$(PREFIX)/lib
+build:macos --define=INCLUDEDIR=$(PREFIX)/include
+build:macos --define=PROTOBUF_INCLUDE_PATH=$(PREFIX)/include
+# TF_SYSTEM_LIBS do not work on windows.
+
+# By default, build TF in C++ 14 mode.
+build:android --cxxopt=-std=c++14
+build:android --host_cxxopt=-std=c++14
+build:ios --cxxopt=-std=c++14
+build:ios --host_cxxopt=-std=c++14
+build:linux --cxxopt=-std=c++14
+build:linux --host_cxxopt=-std=c++14
+build:macos --cxxopt=-std=c++14
+build:macos --host_cxxopt=-std=c++14
+build:windows --cxxopt=/std:c++14
+build:windows --host_cxxopt=/std:c++14
+
+# On windows, we still link everything into a single DLL.
+build:windows --config=monolithic
+
+# On linux, we dynamically link small amount of kernels
+build:linux --config=dynamic_kernels
+
+# Make sure to include as little of windows.h as possible
+build:windows --copt=-DWIN32_LEAN_AND_MEAN
+build:windows --host_copt=-DWIN32_LEAN_AND_MEAN
+build:windows --copt=-DNOGDI
+build:windows --host_copt=-DNOGDI
+
+# MSVC (Windows): Standards-conformant preprocessor mode
+# See https://docs.microsoft.com/en-us/cpp/preprocessor/preprocessor-experimental-overview
+build:windows --copt=/experimental:preprocessor
+build:windows --host_copt=/experimental:preprocessor
+
+# Misc build options we need for windows.
+build:windows --linkopt=/DEBUG
+build:windows --host_linkopt=/DEBUG
+build:windows --linkopt=/OPT:REF
+build:windows --host_linkopt=/OPT:REF
+build:windows --linkopt=/OPT:ICF
+build:windows --host_linkopt=/OPT:ICF
+
+# Verbose failure logs when something goes wrong
+build:windows --verbose_failures
+
+# On windows, we never cross compile
+build:windows --distinct_host_configuration=false
+# On linux, don't cross compile by default
+build:linux --distinct_host_configuration=false
+
+# Configure short or long logs
+build:short_logs --output_filter=DONT_MATCH_ANYTHING
+build:verbose_logs --output_filter=
+
+# Instruction set optimizations
+# TODO(gunan): Create a feature in toolchains for avx/avx2 to
+# avoid having to define linux/win separately.
+build:avx_linux --copt=-mavx
+build:avx_linux --host_copt=-mavx
+build:avx2_linux --copt=-mavx2
+build:native_arch_linux --copt=-march=native
+build:avx_win --copt=/arch=AVX
+build:avx2_win --copt=/arch=AVX2
+
+# Options to build TensorFlow 1.x or 2.x.
+build:v1 --define=tf_api_version=1 --action_env=TF2_BEHAVIOR=0
+build:v2 --define=tf_api_version=2 --action_env=TF2_BEHAVIOR=1
+
+# Disable XLA on mobile.
+build:xla --define=with_xla_support=true # TODO: remove, it's on by default.
+build:android --define=with_xla_support=false +build:ios --define=with_xla_support=false + +# BEGIN TF REMOTE BUILD EXECUTION OPTIONS +# Options when using remote execution +# WARNING: THESE OPTIONS WONT WORK IF YOU DO NOT HAVE PROPER AUTHENTICATION AND PERMISSIONS + +# Flag to enable remote config +common --experimental_repo_remote_exec + +build:rbe --repo_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1 +build:rbe --google_default_credentials +build:rbe --bes_backend=buildeventservice.googleapis.com +build:rbe --bes_results_url="https://source.cloud.google.com/results/invocations" +build:rbe --bes_timeout=600s +build:rbe --define=EXECUTOR=remote +build:rbe --distinct_host_configuration=false +build:rbe --flaky_test_attempts=3 +build:rbe --jobs=200 +build:rbe --remote_executor=grpcs://remotebuildexecution.googleapis.com +build:rbe --remote_timeout=3600 +build:rbe --spawn_strategy=remote,worker,standalone,local +test:rbe --test_env=USER=anon +# Attempt to minimize the amount of data transfer between bazel and the remote +# workers: +build:rbe --remote_download_toplevel + +build:rbe_linux --config=rbe +build:rbe_linux --action_env=PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go/bin" +build:rbe_linux --host_javabase=@bazel_toolchains//configs/ubuntu16_04_clang/1.1:jdk8 +build:rbe_linux --javabase=@bazel_toolchains//configs/ubuntu16_04_clang/1.1:jdk8 +build:rbe_linux --host_java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8 +build:rbe_linux --java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8 + +# Non-rbe settings we should include because we do not run configure +build:rbe_linux --config=avx_linux +# TODO(gunan): Check why we need this specified in rbe, but not in other builds. +build:rbe_linux --linkopt=-lrt +build:rbe_linux --host_linkopt=-lrt +build:rbe_linux --linkopt=-lm +build:rbe_linux --host_linkopt=-lm + +build:rbe_cpu_linux --config=rbe_linux +build:rbe_cpu_linux --host_crosstool_top="@org_tensorflow//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010:toolchain" +build:rbe_cpu_linux --crosstool_top="@org_tensorflow//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010:toolchain" +build:rbe_cpu_linux --extra_toolchains="@org_tensorflow//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010:cc-toolchain-k8" +build:rbe_cpu_linux --extra_execution_platforms="@ubuntu16.04-manylinux2010-py3_config_platform//:platform" +build:rbe_cpu_linux --extra_execution_platforms="@ubuntu16.04-manylinux2010-py3_config_platform//:platform" +build:rbe_cpu_linux --host_platform="@ubuntu16.04-manylinux2010-py3_config_platform//:platform" +build:rbe_cpu_linux --platforms="@ubuntu16.04-manylinux2010-py3_config_platform//:platform" + +build:rbe_linux_cuda_base --config=rbe_linux +build:rbe_linux_cuda_base --config=cuda +build:rbe_linux_cuda_base --config=tensorrt +build:rbe_linux_cuda_base --action_env=TF_CUDA_VERSION=11 +build:rbe_linux_cuda_base --action_env=TF_CUDNN_VERSION=8 +build:rbe_linux_cuda_base --repo_env=REMOTE_GPU_TESTING=1 +test:rbe_linux_cuda_base --test_env=LD_LIBRARY_PATH="/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64" + +build:rbe_linux_cuda11.2_nvcc_base --config=rbe_linux_cuda_base +build:rbe_linux_cuda11.2_nvcc_base --host_crosstool_top="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain" +build:rbe_linux_cuda11.2_nvcc_base --crosstool_top="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain" 
+build:rbe_linux_cuda11.2_nvcc_base --extra_toolchains="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain-linux-x86_64"
+build:rbe_linux_cuda11.2_nvcc_base --extra_execution_platforms="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"
+build:rbe_linux_cuda11.2_nvcc_base --host_platform="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"
+build:rbe_linux_cuda11.2_nvcc_base --platforms="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"
+build:rbe_linux_cuda11.2_nvcc_base --repo_env=TF_CUDA_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda"
+build:rbe_linux_cuda11.2_nvcc_base --repo_env=TF_TENSORRT_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_tensorrt"
+build:rbe_linux_cuda11.2_nvcc_base --repo_env=TF_NCCL_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_nccl"
+build:rbe_linux_cuda11.2_nvcc_py3.6 --config=rbe_linux_cuda11.2_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.6"
+build:rbe_linux_cuda11.2_nvcc_py3.7 --config=rbe_linux_cuda11.2_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.7"
+build:rbe_linux_cuda11.2_nvcc_py3.8 --config=rbe_linux_cuda11.2_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.8"
+build:rbe_linux_cuda11.2_nvcc_py3.9 --config=rbe_linux_cuda11.2_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.9"
+
+# Map default to CUDA 11.2.
+build:rbe_linux_cuda_nvcc_py36 --config=rbe_linux_cuda11.2_nvcc_py3.6
+build:rbe_linux_cuda_nvcc_py37 --config=rbe_linux_cuda11.2_nvcc_py3.7
+build:rbe_linux_cuda_nvcc_py38 --config=rbe_linux_cuda11.2_nvcc_py3.8
+build:rbe_linux_cuda_nvcc_py39 --config=rbe_linux_cuda11.2_nvcc_py3.9
+
+# Deprecated configs that people might still use.
+build:rbe_linux_cuda_nvcc --config=rbe_linux_cuda_nvcc_py36
+build:rbe_gpu_linux --config=rbe_linux_cuda_nvcc
+
+build:rbe_linux_cuda_clang_base --config=rbe_linux_cuda_base
+build:rbe_linux_cuda_clang_base --repo_env TF_CUDA_CLANG=1
+build:rbe_linux_cuda_clang_base --@local_config_cuda//:cuda_compiler=clang
+build:rbe_linux_cuda_clang_base --crosstool_top="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain"
+build:rbe_linux_cuda_clang_base --extra_toolchains="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain-linux-x86_64"
+build:rbe_linux_cuda_clang_base --extra_execution_platforms="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"
+build:rbe_linux_cuda_clang_base --host_platform="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"
+build:rbe_linux_cuda_clang_base --platforms="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_platform//:platform"
+build:rbe_linux_cuda_clang_base --repo_env=TF_CUDA_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda"
+build:rbe_linux_cuda_clang_base --repo_env=TF_TENSORRT_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_tensorrt"
+build:rbe_linux_cuda_clang_base --repo_env=TF_NCCL_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_nccl"
+build:rbe_linux_cuda_clang_py27 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python2.7"
+build:rbe_linux_cuda_clang_py35 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.5"
+build:rbe_linux_cuda_clang_py36 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.6"
+build:rbe_linux_cuda_clang_py37 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.7"
+build:rbe_linux_cuda_clang_py38 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-clang_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_python3.8"
+
+# ROCm
+build:rbe_linux_rocm_base --config=rbe_linux
+build:rbe_linux_rocm_base --repo_env=TF_NEED_ROCM=1
+build:rbe_linux_rocm_base --crosstool_top="@ubuntu18.04-gcc7_manylinux2010-rocm_config_rocm//crosstool:toolchain"
+build:rbe_linux_rocm_base --extra_toolchains="@ubuntu18.04-gcc7_manylinux2010-rocm_config_rocm//crosstool:toolchain-linux-x86_64"
+build:rbe_linux_rocm_base --extra_execution_platforms="@ubuntu18.04-gcc7_manylinux2010-rocm_config_platform//:platform"
+build:rbe_linux_rocm_base --host_platform="@ubuntu18.04-gcc7_manylinux2010-rocm_config_platform//:platform"
+build:rbe_linux_rocm_base --platforms="@ubuntu18.04-gcc7_manylinux2010-rocm_config_platform//:platform"
+build:rbe_linux_rocm_base --action_env=TF_ROCM_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-rocm_config_rocm"
+build:rbe_linux_rocm_base --define=using_rocm_hipcc=true
+build:rbe_linux_rocm_py2.7 --config=rbe_linux_rocm_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-rocm_config_python2.7"
+build:rbe_linux_rocm_py3.5 --config=rbe_linux_rocm_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-rocm_config_python3.5"
+build:rbe_linux_rocm_py3.6 --config=rbe_linux_rocm_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-rocm_config_python3.6"
+build:rbe_linux_rocm_py3.7 --config=rbe_linux_rocm_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-rocm_config_python3.7"
+build:rbe_linux_rocm_py3.8 --config=rbe_linux_rocm_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu18.04-gcc7_manylinux2010-rocm_config_python3.8"
+
+# Linux CPU
+build:rbe_linux_py2 --config=rbe_linux
+build:rbe_linux_py2 --repo_env=PYTHON_BIN_PATH="/usr/bin/python2"
+build:rbe_linux_py2 --python_path="/usr/bin/python2"
+build:rbe_linux_py2 --repo_env=TF_PYTHON_CONFIG_REPO="@org_tensorflow//third_party/toolchains/preconfig/ubuntu16.04/py"
+
+build:rbe_linux_py3 --config=rbe_linux
+build:rbe_linux_py3 --python_path="/usr/bin/python3"
+build:rbe_linux_py3 --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu16.04-manylinux2010-py3_config_python"
+
+build:rbe_win --config=rbe
+build:rbe_win --crosstool_top="@org_tensorflow//third_party/toolchains/preconfig/win/tf_win_06242021:toolchain"
+build:rbe_win --extra_toolchains="@org_tensorflow//third_party/toolchains/preconfig/win/tf_win_06242021:cc-toolchain-x64_windows"
+build:rbe_win --host_javabase="@org_tensorflow//third_party/toolchains/preconfig/win:windows_jdk8"
+build:rbe_win --javabase="@org_tensorflow//third_party/toolchains/preconfig/win:windows_jdk8"
+build:rbe_win --extra_execution_platforms="@org_tensorflow//third_party/toolchains/preconfig/win:rbe_windows_ltsc2019"
+build:rbe_win --host_platform="@org_tensorflow//third_party/toolchains/preconfig/win:rbe_windows_ltsc2019"
+build:rbe_win --platforms="@org_tensorflow//third_party/toolchains/preconfig/win:rbe_windows_ltsc2019"
+build:rbe_win --shell_executable=C:\\tools\\msys64\\usr\\bin\\bash.exe
+build:rbe_win --experimental_strict_action_env=true
+
+# TODO(gunan): Remove once we use MSVC 2019 with latest patches.
+build:rbe_win --define=override_eigen_strong_inline=true
+build:rbe_win --jobs=100
+
+# Don't build the python zip archive in the RBE build.
+build:rbe_win --remote_download_minimal
+build:rbe_win --enable_runfiles
+build:rbe_win --nobuild_python_zip
+
+build:rbe_win_py37 --config=rbe
+build:rbe_win_py37 --repo_env=TF_PYTHON_CONFIG_REPO="@windows_py37_config_python"
+build:rbe_win_py37 --python_path=C:\\Python37\\python.exe
+
+build:rbe_win_py38 --config=rbe
+build:rbe_win_py38 --repo_env=PYTHON_BIN_PATH=C:\\Python38\\python.exe
+build:rbe_win_py38 --repo_env=PYTHON_LIB_PATH=C:\\Python38\\lib\\site-packages
+build:rbe_win_py38 --repo_env=TF_PYTHON_CONFIG_REPO=@org_tensorflow//third_party/toolchains/preconfig/win_1803/py38
+build:rbe_win_py38 --python_path=C:\\Python38\\python.exe
+
+# These you may need to change for your own GCP project.
+build:tensorflow_testing_rbe --project_id=tensorflow-testing
+common:tensorflow_testing_rbe_linux --remote_instance_name=projects/tensorflow-testing/instances/default_instance
+build:tensorflow_testing_rbe_linux --config=tensorflow_testing_rbe
+
+common:tensorflow_testing_rbe_win --remote_instance_name=projects/tensorflow-testing/instances/windows
+build:tensorflow_testing_rbe_win --config=tensorflow_testing_rbe
+
+# TFLite build configs for generic embedded Linux
+build:elinux --crosstool_top=@local_config_embedded_arm//:toolchain
+build:elinux --host_crosstool_top=@bazel_tools//tools/cpp:toolchain
+build:elinux_aarch64 --config=elinux
+build:elinux_aarch64 --cpu=aarch64
+build:elinux_aarch64 --distinct_host_configuration=true
+build:elinux_armhf --config=elinux
+build:elinux_armhf --cpu=armhf
+build:elinux_armhf --distinct_host_configuration=true
+# END TF REMOTE BUILD EXECUTION OPTIONS
+
+# Config-specific options should come above this line.
+
+# Load rc file written by ./configure.
+try-import %workspace%/.tf_configure.bazelrc
+
+# Load rc file with user-specific options.
+try-import %workspace%/.bazelrc.user
+
+# Here are bazelrc configs for release builds
+build:release_base --config=v2
+build:release_base --distinct_host_configuration=false
+test:release_base --flaky_test_attempts=3
+test:release_base --test_size_filters=small,medium
+
+build:release_cpu_linux --config=release_base
+build:release_cpu_linux --config=avx_linux
+build:release_cpu_linux --crosstool_top=@org_tensorflow//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda11.2:toolchain
+test:release_cpu_linux --test_env=LD_LIBRARY_PATH
+
+build:release_cpu_macos --config=release_base
+build:release_cpu_macos --config=avx_linux
+
+build:release_gpu_base --config=cuda
+build:release_gpu_base --action_env=TF_CUDA_VERSION="11"
+build:release_gpu_base --action_env=TF_CUDNN_VERSION="8"
+build:release_gpu_base --repo_env=TF_CUDA_COMPUTE_CAPABILITIES="sm_35,sm_50,sm_60,sm_70,sm_75,compute_80"
+
+build:release_gpu_linux --config=release_cpu_linux
+build:release_gpu_linux --config=release_gpu_base
+build:release_gpu_linux --config=tensorrt
+build:release_gpu_linux --action_env=CUDA_TOOLKIT_PATH="/usr/local/cuda-11.2"
+build:release_gpu_linux --action_env=LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/tensorrt/lib"
+build:release_gpu_linux --action_env=GCC_HOST_COMPILER_PATH="/usr/bin/gcc-5"
+build:release_gpu_linux --crosstool_top=@org_tensorflow//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda11.2:toolchain
+
+build:release_cpu_windows --config=release_base
+build:release_cpu_windows --config=avx_win
+build:release_cpu_windows --define=no_tensorflow_py_deps=true
+# First available in VS 16.4. Speeds Windows compile times by a lot. See
+# https://groups.google.com/a/tensorflow.org/d/topic/build/SsW98Eo7l3o/discussion
+build:release_cpu_windows --copt=/d2ReducedOptimizeHugeFunctions --host_copt=/d2ReducedOptimizeHugeFunctions
+
+build:release_gpu_windows --config=release_cpu_windows
+build:release_gpu_windows --config=release_gpu_base
+
+# Address sanitizer
+# CC=clang bazel build --config asan
+build:asan --strip=never
+build:asan --copt -fsanitize=address
+build:asan --copt -DADDRESS_SANITIZER
+build:asan --copt -g
+build:asan --copt -O3
+build:asan --copt -fno-omit-frame-pointer
+build:asan --linkopt -fsanitize=address
+
+# Memory sanitizer
+# CC=clang bazel build --config msan
+build:msan --strip=never
+build:msan --copt -fsanitize=memory
+build:msan --copt -DMEMORY_SANITIZER
+build:msan --copt -g
+build:msan --copt -O3
+build:msan --copt -fno-omit-frame-pointer
+build:msan --linkopt -fsanitize=memory
+
+# Undefined Behavior Sanitizer
+# CC=clang bazel build --config ubsan
+build:ubsan --strip=never
+build:ubsan --copt -fsanitize=undefined
+build:ubsan --copt -DUNDEFINED_BEHAVIOR_SANITIZER
+build:ubsan --copt -g
+build:ubsan --copt -O3
+build:ubsan --copt -fno-omit-frame-pointer
+build:ubsan --linkopt -fsanitize=undefined
+build:ubsan --linkopt -lubsan
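
For reviewers who want to exercise this .bazelrc locally, the named configs above compose through --config flags. A minimal sketch of typical invocations, assuming a Linux checkout with the required toolchains installed; the target pattern //your/target/... is a placeholder, not a target defined by this change:

    # Release GPU build: --config=release_gpu_linux pulls in release_cpu_linux,
    # release_gpu_base, cuda and tensorrt via the config chaining defined above.
    bazel build --config=release_gpu_linux //your/target/...

    # AddressSanitizer build, matching the "CC=clang bazel build --config asan" hint above.
    CC=clang bazel build --config=asan //your/target/...

    # Remote build execution (needs GCP credentials and project access, per the WARNING above).
    bazel test --config=rbe_linux_cuda_nvcc_py38 --config=tensorflow_testing_rbe_linux //your/target/...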