24 changes: 4 additions & 20 deletions test/cpp/CMakeLists.txt
@@ -13,6 +13,8 @@ message("Selected PT/XLA library folder ${PTXLA_LIBDIR}")
project(ptxla_test)

find_package(PythonLibs)
set(Torch_DIR "${PT_DIR}/torch/share/cmake/Torch")
find_package(Torch REQUIRED)

include(ExternalProject)
set_directory_properties(PROPERTIES EP_PREFIX "${GTEST_DIR}")
@@ -40,10 +42,6 @@ include_directories(
"${TFDIR}/bazel-tensorflow/external/protobuf_archive/src"
"${TFDIR}/bazel-tensorflow/external/eigen_archive"
"${TFDIR}/bazel-tensorflow/external/com_google_absl"
"${PT_DIR}"
"${PT_DIR}/torch/csrc"
"${PT_DIR}/torch/include"
"${PT_DIR}/torch/lib/tmp_install/include"
"${PYTHON_INCLUDE_DIR}"
)

@@ -95,32 +93,18 @@ execute_process(COMMAND "ln" "-s" "-f"

find_library(PTXLA_LIB "libptxla.so"
HINTS "${PTXLA_LIBDIR}")
find_library(PT_LIB "libtorch.so"
HINTS "${PT_DIR}/build/lib")
find_library(PTPY_LIB "libtorch_python.so"
HINTS "${PT_DIR}/build/lib")
find_library(C10_LIB "libc10.so"
HINTS "${PT_DIR}/build/lib")
find_library(CAFFE_LIB_DET "libcaffe2_detectron_ops.so"
HINTS "${PT_DIR}/build/lib")
find_library(CAFFE_LIB_TDYN "libcaffe2_module_test_dynamic.so"
HINTS "${PT_DIR}/build/lib")
find_library(CAFFE_LIB_OBS "libcaffe2_observers.so"
HINTS "${PT_DIR}/build/lib")
HINTS "${PT_DIR}/torch/lib")

# Use --unresolved-symbols=ignore-in-shared-libs to get around the
# c10::Half::from_bits undefined symbol error at link time. At runtime
# everything resolves correctly.
target_link_libraries(
test_ptxla
-Wl,--unresolved-symbols=ignore-in-shared-libs
"${TORCH_LIBRARIES}"
"${PTXLA_LIB}"
"${PTXLA_LIBDIR}/torch_xla/lib/libxla_computation_client.so"
"${PTPY_LIB}"
"${PT_LIB}"
"${CAFFE_LIB_DET}"
"${CAFFE_LIB_TDYN}"
"${CAFFE_LIB_OBS}"
"${C10_LIB}"
"${BINARY_DIR}/lib/${CMAKE_FIND_LIBRARY_PREFIXES}gtest.a"
"${PYTHON_LIBRARY}"
-lutil
15 changes: 7 additions & 8 deletions test/cpp/cpp_test_util.cpp
@@ -4,7 +4,6 @@
#include <string>

#include "tensorflow/compiler/xla/xla_client/debug_macros.h"
#include "torch/csrc/autograd/variable.h"
#include "torch_xla/csrc/aten_xla_bridge.h"
#include "torch_xla/csrc/ir_dump_util.h"
#include "torch_xla/csrc/lowering_context.h"
@@ -15,14 +14,9 @@
namespace torch_xla {
namespace cpp_test {

at::Tensor ToTensor(XLATensor& xla_tensor) {
return torch_xla::ToTensor(xla_tensor.ToTensor());
}

at::Tensor ToCpuTensor(const at::Tensor& t) {
at::Tensor tensor = torch_xla::ToTensor(t);
c10::optional<XLATensor> xtensor = bridge::TryGetXlaTensor(tensor);
return xtensor ? xtensor->ToTensor() : tensor;
// t.to() implicitly triggers a sync when t's device is torch::kXLA.
return t.to(torch::kCPU);
}

bool EqualValues(at::Tensor tensor1, at::Tensor tensor2) {
@@ -68,6 +62,11 @@ void ForEachDevice(const std::function<void(const Device&)>& devfn) {
devfn(Device(default_device));
}

void ForEachDevice(const std::function<void(const torch::Device&)>& devfn) {
torch::Device torch_device = bridge::AtenDefaultDevice();
devfn(torch_device);
}

bool CloseValues(at::Tensor tensor1, at::Tensor tensor2, double rtol,
double atol) {
if (tensor1.sizes() != tensor2.sizes() ||
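The simplified ToCpuTensor and the new torch::Device overload of ForEachDevice combine naturally in tests. Below is a minimal sketch of such a test, assuming the gtest harness used in this directory; the test name, tensor shape, and values are illustrative and not part of this PR.

#include <gtest/gtest.h>
#include <torch/torch.h>

#include "cpp_test_util.h"

namespace torch_xla {
namespace cpp_test {

TEST(CppTestUtilSketch, RoundTripThroughXlaDevice) {
  // The new overload hands the test an ATen-level torch::Device.
  ForEachDevice([&](const torch::Device& device) {
    torch::Tensor cpu_input = torch::rand({2, 3});
    // Copy to the XLA device; ToCpuTensor brings it back via
    // t.to(torch::kCPU), which triggers the device sync.
    torch::Tensor xla_input = cpu_input.to(device);
    AllClose(cpu_input, ToCpuTensor(xla_input));
  });
}

}  // namespace cpp_test
}  // namespace torch_xla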
17 changes: 9 additions & 8 deletions test/cpp/cpp_test_util.h
@@ -1,7 +1,7 @@
#pragma once

#include <ATen/ATen.h>
#include <gtest/gtest.h>
#include <torch/torch.h>

#include <cmath>
#include <functional>
@@ -16,14 +16,13 @@
namespace torch_xla {
namespace cpp_test {

// Converts an XLA ATen tensor to a CPU backend tensor. Extracts it first from
// an autograd variable, if needed. Needed because EqualValues and AllClose
// require CPU tensors on both sides. If the input tensor is already a CPU
// tensor, it will be returned.
// Converts an at::Tensor with device=torch::kXLA to an at::Tensor with
// device=torch::kCPU. The input can be a torch::Tensor, which is a Variable,
// or a plain at::Tensor, which knows nothing about autograd. If the input
// tensor is already a CPU tensor, it will be returned. Needed because
// EqualValues and AllClose require CPU tensors on both sides.
at::Tensor ToCpuTensor(const at::Tensor& t);

at::Tensor ToTensor(XLATensor& xla_tensor);

bool EqualValues(at::Tensor tensor1, at::Tensor tensor2);

bool EqualValuesNoElementTypeCheck(at::Tensor tensor1, at::Tensor tensor2);
@@ -38,11 +37,13 @@ static inline void AllClose(at::Tensor tensor, at::Tensor xla_tensor,

static inline void AllClose(at::Tensor tensor, XLATensor& xla_tensor,
double rtol = 1e-5, double atol = 1e-8) {
EXPECT_TRUE(CloseValues(tensor, ToTensor(xla_tensor), rtol, atol));
EXPECT_TRUE(CloseValues(tensor, xla_tensor.ToTensor(), rtol, atol));
}

void ForEachDevice(const std::function<void(const Device&)>& devfn);
Review comment (Contributor): Do we still need this method? I'd try to remove it.

Reply (Collaborator): Please don't, test_tensor.cpp still uses it.

void ForEachDevice(const std::function<void(const torch::Device&)>& devfn);

void WithAllDevices(
DeviceType device_type,
const std::function<void(const std::vector<Device>&,
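The rewritten header comment stresses that ToCpuTensor accepts both autograd Variables and plain ATen tensors. Here is a minimal sketch exercising both call paths on CPU inputs, where the function is documented to return the tensor as-is; the test name, shapes, and values are illustrative assumptions.

#include <ATen/ATen.h>
#include <gtest/gtest.h>
#include <torch/torch.h>

#include "cpp_test_util.h"

namespace torch_xla {
namespace cpp_test {

TEST(CppTestUtilSketch, ToCpuTensorAcceptsVariablesAndPlainTensors) {
  // Variable path: torch::rand returns an autograd-aware torch::Tensor.
  torch::Tensor variable = torch::rand({2, 2});
  EXPECT_TRUE(EqualValues(variable, ToCpuTensor(variable)));

  // Plain path: an at::Tensor that knows nothing about autograd; since it
  // is already a CPU tensor, ToCpuTensor returns it unchanged.
  at::Tensor plain = at::ones({2, 2});
  EXPECT_TRUE(EqualValues(plain, ToCpuTensor(plain)));
}

}  // namespace cpp_test
}  // namespace torch_xla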