176 changes: 0 additions & 176 deletions .bazelrc

This file was deleted.

1 change: 0 additions & 1 deletion .bazelversion

This file was deleted.

5 changes: 1 addition & 4 deletions .gitignore
@@ -18,6 +18,7 @@ torch_xla/csrc/generated/
# Below files are not deleted by "setup.py clean".

# Visual Studio Code files
.vscode
.vs

# Files autogenerated by docs/docs_build.sh
@@ -26,7 +27,3 @@ torch_xla/csrc/generated/

# Local terraform state
.terraform*


# Build system temporary files
/bazel-*
5 changes: 5 additions & 0 deletions .gitmodules
@@ -0,0 +1,5 @@
[submodule "third_party/tensorflow"]
path = third_party/tensorflow
url = https://github.com/tensorflow/tensorflow.git
ignore = dirty
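
With TensorFlow tracked as a git submodule again, a fresh checkout has to fetch the submodule before the build scripts can find third_party/tensorflow. A minimal sketch of that step (the --depth flag is an illustrative convenience, not something this change requires):

# Fetch the pinned TensorFlow commit into third_party/tensorflow.
git submodule update --init --depth 1 third_party/tensorflow

# "ignore = dirty" in .gitmodules keeps build artifacts created inside the
# submodule from cluttering `git status` in the parent repository.
git status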

18 changes: 0 additions & 18 deletions .vscode/settings.json

This file was deleted.

61 changes: 0 additions & 61 deletions WORKSPACE

This file was deleted.

50 changes: 37 additions & 13 deletions build_torch_xla_libs.sh
@@ -34,6 +34,7 @@ if [[ "$XLA_BAZEL_VERBOSE" == "1" ]]; then
VERBOSE="-s"
fi

BUILD_STRATEGY="standalone"
SANDBOX_BASE="${XLA_SANDBOX_BASE}"
if [ -z "$XLA_SANDBOX_BASE" ]; then
SANDBOX_BASE="/tmp"
@@ -42,38 +43,61 @@ if [[ "$XLA_SANDBOX_BUILD" == "1" ]]; then
BUILD_STRATEGY="sandboxed --sandbox_base=${SANDBOX_BASE}"
else
# We can remove this after https://github.com/bazelbuild/bazel/issues/15359 is resolved
unset CC
unset CXX
BUILD_STRATEGY="local"
fi

TPUVM_FLAG=
if [[ "$TPUVM_MODE" == "1" ]]; then
OPTS+=(--config=tpu)
TPUVM_FLAG="--define=with_tpu_support=true"
fi

MAX_JOBS=
if [[ ! -z "$BAZEL_JOBS" ]]; then
MAX_JOBS="--jobs=$BAZEL_JOBS"
fi

OPTS+=(--cxxopt="-std=c++17")
if [[ $(basename -- $CC) =~ ^clang ]]; then
OPTS+=(--cxxopt="-Wno-c++11-narrowing")
OPTS+=(--cxxopt="-Wno-c++14-narrowing")
fi

if [[ "$XLA_CUDA" == "1" ]]; then
OPTS+=(--cxxopt="-DXLA_CUDA=1")
OPTS+=(--config=cuda)
fi

if [[ "$XLA_CPU_USE_ACL" == "1" ]]; then
OPTS+=(--config=acl)
OPTS+=("--define=build_with_acl=true")
fi

if [ "$CMD" == "clean" ]; then
pushd $THIRD_PARTY_DIR/tensorflow
bazel clean
exit 0
fi
popd
else
# Overlay the llvm-raw secondary cache. The remote cache should be updated
# nightly with the pinned LLVM archive. Note: these commands are no-ops if there is no match.
sed -i '/.*github.com\/llvm.*,/a "https://storage.googleapis.com/tpu-pytorch/llvm-raw/{commit}.tar.gz".format(commit = LLVM_COMMIT),' \
$THIRD_PARTY_DIR/tensorflow/third_party/llvm/workspace.bzl
sed -i 's/LLVM_COMMIT)]/LLVM_COMMIT),"https:\/\/storage.googleapis.com\/tpu-pytorch\/llvm-raw\/{commit}.tar.gz".format(commit = LLVM_COMMIT)]/g' \
$THIRD_PARTY_DIR/tensorflow/tensorflow/compiler/xla/mlir_hlo/WORKSPACE

cp -r -u -p $THIRD_PARTY_DIR/xla_client $THIRD_PARTY_DIR/tensorflow/tensorflow/compiler/xla/

# TensorFlow and its dependencies may introduce warning flags from newer compilers
# that PyTorch and PyTorch/XLA's default compilers don't recognize. They become errors
# when '-Werror' is used. Therefore, suppress the warnings in .bazelrc or here.
bazel build $MAX_JOBS $VERBOSE --spawn_strategy=$BUILD_STRATEGY --show_progress_rate_limit=20 \
--define framework_shared_object=false -c "$MODE" "${OPTS[@]}" \
$XLA_CUDA_CFG //third_party/xla_client:libxla_computation_client.so
pushd $THIRD_PARTY_DIR/tensorflow
# TensorFlow and its dependencies may introduce warning flags from newer compilers
# that PyTorch and PyTorch/XLA's default compilers don't recognize. They become errors
# when '-Werror' is used. Therefore, suppress the warnings.
TF_EXTRA_FLAGS="--copt=-Wno-unknown-warning-option"
bazel build $MAX_JOBS $VERBOSE $TPUVM_FLAG $TF_EXTRA_FLAGS --spawn_strategy=$BUILD_STRATEGY --show_progress_rate_limit=20 \
--define framework_shared_object=false -c "$MODE" "${OPTS[@]}" \
$XLA_CUDA_CFG //tensorflow/compiler/xla/xla_client:libxla_computation_client.so

mkdir -p torch_xla/lib
chmod 0644 bazel-bin/third_party/xla_client/libxla_computation_client.so
cp bazel-bin/third_party/xla_client/libxla_computation_client.so torch_xla/lib
popd
mkdir -p torch_xla/lib
chmod 0644 $THIRD_PARTY_DIR/tensorflow/bazel-bin/tensorflow/compiler/xla/xla_client/libxla_computation_client.so
cp $THIRD_PARTY_DIR/tensorflow/bazel-bin/tensorflow/compiler/xla/xla_client/libxla_computation_client.so torch_xla/lib
fi
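
The rewritten script keeps the same environment-variable interface (XLA_SANDBOX_BUILD, XLA_SANDBOX_BASE, TPUVM_MODE, XLA_CUDA, XLA_CPU_USE_ACL, BAZEL_JOBS, XLA_BAZEL_VERBOSE); what changes is that bazel now runs inside third_party/tensorflow and builds //tensorflow/compiler/xla/xla_client:libxla_computation_client.so. A hedged usage sketch of how those variables combine follows; the concrete values are illustrative, and the "clean" argument assumes CMD comes from the script's first positional parameter, which is set outside the hunks shown here.

# Sandboxed TPU-VM build with 16 bazel jobs and sandbox scratch space on tmpfs.
# XLA_SANDBOX_BUILD=1 selects --spawn_strategy=sandboxed --sandbox_base=$XLA_SANDBOX_BASE;
# TPUVM_MODE=1 adds --define=with_tpu_support=true.
XLA_SANDBOX_BUILD=1 XLA_SANDBOX_BASE=/dev/shm TPUVM_MODE=1 BAZEL_JOBS=16 ./build_torch_xla_libs.sh

# Non-sandboxed CUDA build: the script falls back to --spawn_strategy=local and
# unsets CC/CXX to work around bazelbuild/bazel#15359.
XLA_CUDA=1 XLA_SANDBOX_BUILD=0 ./build_torch_xla_libs.sh

# Drop bazel state inside the TensorFlow submodule.
./build_torch_xla_libs.sh clean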
10 changes: 4 additions & 6 deletions scripts/apply_patches.sh
@@ -38,9 +38,7 @@ python $CDIR/cond_patch.py
$XDIR/torch_patches \
$PTDIR

# Apply TF patches only if requested, since bazel handles that normally.
if [[ -n "${APPLY_TF_PATCHES}" ]]; then
python $CDIR/cond_patch.py \
$XDIR/tf_patches \
$TFDIR
fi
python $CDIR/cond_patch.py \
$XDIR/tf_patches \
$TFDIR
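
With the APPLY_TF_PATCHES guard gone, the TF patch set is applied on every run rather than being left to bazel. For reference, the underlying step can also be run by hand; a sketch assuming the layout the script's variables point at (cond_patch.py in scripts/, tf_patches at the pytorch/xla root, the submodule at third_party/tensorflow), with paths written out in place of $CDIR/$XDIR/$TFDIR:

# Apply the TensorFlow patch set to the submodule checkout, exactly what
# scripts/apply_patches.sh now does unconditionally.
python scripts/cond_patch.py \
  tf_patches \
  third_party/tensorflow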
