Merged
40 changes: 36 additions & 4 deletions .github/workflows/_tpu_ci.yml
@@ -37,25 +37,56 @@ jobs:
sparse-checkout: |
.github/workflows/setup
path: .actions

- name: Setup
if: inputs.has_code_changes == 'true'
uses: ./.actions/.github/workflows/setup
with:
torch-commit: ${{ inputs.torch-commit }}
wheels-artifact: torch-xla-wheels

- name: Install test dependencies
if: inputs.has_code_changes == 'true'
shell: bash
run: |
set -x

# TODO: Add these in setup.py
pip install --upgrade pip
pip install fsspec
pip install rich
# jax and libtpu are needed for pallas tests.
pip install --pre 'torch_xla[pallas]' --index-url https://us-python.pkg.dev/ml-oss-artifacts-published/jax/simple/ --find-links https://storage.googleapis.com/jax-releases/libtpu_releases.html
pip install --pre 'torch_xla[tpu]' --index-url https://us-python.pkg.dev/ml-oss-artifacts-published/jax/simple/ --find-links https://storage.googleapis.com/jax-releases/libtpu_releases.html

# PyTorch/XLA Optional Dependencies
# =================================
#
# Install `JAX` and `libtpu` dependencies for pallas and TPU tests.
#
# Note that we might need to install pre-release versions of both, in
# external artifact repositories.

# Retrieve the PyTorch/XLA ".whl" file.
# This assumes PyTorch/XLA wheels are downloaded in "/tmp/wheels".
WHL=$(ls /tmp/wheels/torch_xla*)

# Links for finding `jax` and `libtpu` versions.
INDEX="https://us-python.pkg.dev/ml-oss-artifacts-published/jax/simple/ "
LINKS="https://storage.googleapis.com/jax-releases/libtpu_releases.html"

pip install "$WHL[pallas]" --pre --index-url $INDEX --find-links $LINKS
pip install "$WHL[tpu]" --pre --index-url $INDEX --find-links $LINKS

pip install --upgrade protobuf
pip install flax

# Flax Pin
# ========
#
# Be careful when bumping the `flax` version, since it can cause tests that
# depend on `jax` to start breaking.
#
# Newer `flax` versions might pull newer `jax` versions, which might be incompatible
# with the current version of PyTorch/XLA.
pip install flax==0.11.2

- name: Run Tests (${{ matrix.test_script }})
if: inputs.has_code_changes == 'true'
env:
@@ -64,6 +95,7 @@ jobs:
run: |
cd pytorch/xla
${{ matrix.test_script }}

- name: Report no code changes
# Only report the first instance
if: inputs.has_code_changes == 'false' && strategy.job-index == 0
32 changes: 32 additions & 0 deletions test/cpp/test_aten_xla_tensor_1.cpp
@@ -356,6 +356,8 @@ TEST_F(AtenXlaTensorTest, TestSiLU) {
}

TEST_F(AtenXlaTensorTest, TestSiLUBackward) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
auto testfn = [&](const std::vector<torch::Tensor>& inputs) -> torch::Tensor {
return torch::silu(inputs[0]);
};
@@ -681,6 +683,8 @@ TEST_F(AtenXlaTensorTest, TestEinsumOuter) {
}

TEST_F(AtenXlaTensorTest, TestEinsumOuterBackward) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
torch::Tensor a =
torch::rand({5}, torch::TensorOptions(torch::kFloat).requires_grad(true));
torch::Tensor b =
@@ -719,6 +723,8 @@ TEST_F(AtenXlaTensorTest, TestEinsumBatchMatMul) {
}

TEST_F(AtenXlaTensorTest, TestEinsumBatchMatMulBackward) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
if (UsingTpu()) {
GTEST_SKIP();
}
@@ -759,6 +765,8 @@ TEST_F(AtenXlaTensorTest, TestEinsumPyTorchLowerBilinear) {
}

TEST_F(AtenXlaTensorTest, TestEinsumPyTorchLowerBilinearBackward) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
torch::Tensor a = torch::rand(
{3, 5, 4}, torch::TensorOptions(torch::kFloat).requires_grad(true));
torch::Tensor l = torch::rand(
@@ -795,6 +803,8 @@ TEST_F(AtenXlaTensorTest, TestEinsumPyTorchLowerDiagonal) {
}

TEST_F(AtenXlaTensorTest, TestEinsumPyTorchLowerDiagonalBackward) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
torch::Tensor input = torch::rand(
{3, 3}, torch::TensorOptions(torch::kFloat).requires_grad(true));
std::string equation = "ii->i";
@@ -827,6 +837,8 @@ TEST_F(AtenXlaTensorTest, TestEinsumPyTorchLowerBatchDiagonal) {
}

TEST_F(AtenXlaTensorTest, TestEinsumPyTorchLowerBatchDiagonalBackward) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
torch::Tensor input = torch::rand(
{4, 3, 3}, torch::TensorOptions(torch::kFloat).requires_grad(true));
std::string equation = "...ii->...i";
@@ -859,6 +871,8 @@ TEST_F(AtenXlaTensorTest, TestEinsumPyTorchLowerBatchPermute) {
}

TEST_F(AtenXlaTensorTest, TestEinsumPyTorchLowerBatchPermuteBackward) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
torch::Tensor input = torch::rand(
{2, 3, 4, 5}, torch::TensorOptions(torch::kFloat).requires_grad(true));
std::string equation = "...ij->...ji";
@@ -892,6 +906,8 @@ TEST_F(AtenXlaTensorTest, TestEinsumPyTorchLowerRepeatedAxis) {
}

TEST_F(AtenXlaTensorTest, TestEinsumPyTorchLowerRepeatedAxisBackward) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
torch::Tensor x = torch::rand(
{2, 3, 3}, torch::TensorOptions(torch::kFloat).requires_grad(true));
torch::Tensor y =
@@ -1036,6 +1052,8 @@ TEST_F(AtenXlaTensorTest, TestUpsampleNearest2D) {
}

TEST_F(AtenXlaTensorTest, TestUpsampleNearest2DBackward) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
int batch_size = 2;
int h = 5;
int w = 5;
@@ -1094,6 +1112,8 @@ TEST_F(AtenXlaTensorTest, TestUpsampleNearest2DWithScale) {
}

TEST_F(AtenXlaTensorTest, TestUpsampleNearest2DBackwardWithScale) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
struct ImageInfo {
int batch_size;
int h;
@@ -1223,6 +1243,8 @@ TEST_F(AtenXlaTensorTest, TestUpsampleBilinear2DWithScale) {
}

TEST_F(AtenXlaTensorTest, TestUpsampleBilinear2DBackward) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
int batch_size = 2;
int h = 5;
int w = 5;
@@ -1245,6 +1267,8 @@ TEST_F(AtenXlaTensorTest, TestUpsampleBilinear2DBackward) {
}

TEST_F(AtenXlaTensorTest, TestUpsampleBilinear2DBackwardWithScale) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
struct ImageInfo {
int batch_size;
int h;
@@ -1610,6 +1634,8 @@ TEST_F(AtenXlaTensorTest, TestTake) {
}

TEST_F(AtenXlaTensorTest, TestTakeBackward) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
auto testfn = [&](const std::vector<torch::Tensor>& inputs) -> torch::Tensor {
return torch::take(inputs[0], inputs[1]);
};
@@ -3499,6 +3525,8 @@ TEST_F(AtenXlaTensorTest, TestPrelu) {
}

TEST_F(AtenXlaTensorTest, TestPreluBackward) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
auto testfn = [&](const std::vector<torch::Tensor>& inputs) -> torch::Tensor {
return torch::prelu(inputs[0], inputs[1]);
};
@@ -3583,6 +3611,8 @@ TEST_F(AtenXlaTensorTest, TestHardSigmoidInPlace) {
}

TEST_F(AtenXlaTensorTest, TestHardSigmoidBackward) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
auto testfn = [&](const std::vector<torch::Tensor>& inputs) -> torch::Tensor {
return torch::hardsigmoid(inputs[0]);
};
@@ -3625,6 +3655,8 @@ TEST_F(AtenXlaTensorTest, TestHardSwishInPlace) {
}

TEST_F(AtenXlaTensorTest, TestHardSwishBackward) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
auto testfn = [&](const std::vector<torch::Tensor>& inputs) -> torch::Tensor {
return torch::hardswish(inputs[0]);
};
4 changes: 4 additions & 0 deletions test/cpp/test_aten_xla_tensor_2.cpp
@@ -1536,6 +1536,8 @@ TEST_F(AtenXlaTensorTest, TestGroupNorm) {
}

TEST_F(AtenXlaTensorTest, TestGroupNormBackward) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
int num_channels = 6;
torch::Tensor input =
torch::rand({20, num_channels, 10, 10},
@@ -1642,6 +1644,8 @@ TEST_F(AtenXlaTensorTest, TestLayerNorm) {
}

TEST_F(AtenXlaTensorTest, TestLayerNormBackward) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
torch::Tensor input = torch::rand(
{2, 3, 3, 3}, torch::TensorOptions(torch::kFloat).requires_grad(true));
double eps = 1e-05;
30 changes: 30 additions & 0 deletions test/cpp/test_aten_xla_tensor_3.cpp
@@ -664,6 +664,8 @@ TEST_F(AtenXlaTensorTest, TestReflectionPad1dRank3) {
}

TEST_F(AtenXlaTensorTest, TestReflectionPad1dBackward) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
std::vector<int64_t> pad{2, 2};
auto testfn = [&](const std::vector<torch::Tensor>& inputs) -> torch::Tensor {
return torch::reflection_pad1d(inputs[0], pad);
@@ -709,6 +711,8 @@ TEST_F(AtenXlaTensorTest, TestReflectionPad2dRank4) {
}

TEST_F(AtenXlaTensorTest, TestReflectionPad2dBackward) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
std::vector<int64_t> pad{2, 3, 1, 2};
auto testfn = [&](const std::vector<torch::Tensor>& inputs) -> torch::Tensor {
return torch::reflection_pad2d(inputs[0], pad);
@@ -754,6 +758,8 @@ TEST_F(AtenXlaTensorTest, TestReflectionPad3dRank4) {
}

TEST_F(AtenXlaTensorTest, TestReflectionPad3dBackward) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
std::vector<int64_t> pad{1, 1, 1, 1, 1, 1};
auto testfn = [&](const std::vector<torch::Tensor>& inputs) -> torch::Tensor {
return torch::reflection_pad3d(inputs[0], pad);
@@ -801,6 +807,8 @@ TEST_F(AtenXlaTensorTest, TestReplicationPad1dZeroPad) {
}

TEST_F(AtenXlaTensorTest, TestReplicationPad1dBackward) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
std::vector<int64_t> pad{2, 3};
auto testfn = [&](const std::vector<torch::Tensor>& inputs) -> torch::Tensor {
return torch::replication_pad1d(inputs[0], pad);
@@ -848,6 +856,8 @@ TEST_F(AtenXlaTensorTest, TestReplicationPad2dZeroPad) {
}

TEST_F(AtenXlaTensorTest, TestReplicationPad2dBackward) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
std::vector<int64_t> pad{2, 3, 1, 1};
auto testfn = [&](const std::vector<torch::Tensor>& inputs) -> torch::Tensor {
return torch::replication_pad2d(inputs[0], pad);
@@ -895,6 +905,8 @@ TEST_F(AtenXlaTensorTest, TestReplicationPad3dZeroPad) {
}

TEST_F(AtenXlaTensorTest, TestReplicationPad3dBackward) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
std::vector<int64_t> pad{2, 3, 1, 1, 1, 1};
auto testfn = [&](const std::vector<torch::Tensor>& inputs) -> torch::Tensor {
return torch::replication_pad3d(inputs[0], pad);
@@ -1131,6 +1143,8 @@ TEST_F(AtenXlaTensorTest, TestAsStridedMultipleDimMismatch) {
}

TEST_F(AtenXlaTensorTest, TestAvgPool2DBackward) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
int kernel_size = 2;
for (int stride = 1; stride <= 2; ++stride) {
for (int padding = 0; padding <= 1; ++padding) {
@@ -1161,6 +1175,8 @@ TEST_F(AtenXlaTensorTest, TestAvgPool2DBackward) {
}

TEST_F(AtenXlaTensorTest, TestAvgPool3DBackward) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
int kernel_size = 2;
for (int stride = 1; stride <= 2; ++stride) {
for (int padding = 0; padding <= 1; ++padding) {
@@ -1192,6 +1208,8 @@ TEST_F(AtenXlaTensorTest, TestAvgPool3DBackward) {
}

TEST_F(AtenXlaTensorTest, TestAvgPool2DNoBatchBackward) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
int kernel_size = 2;
for (int stride = 1; stride <= 2; ++stride) {
for (int padding = 0; padding <= 1; ++padding) {
@@ -1222,6 +1240,8 @@ TEST_F(AtenXlaTensorTest, TestAvgPool2DNoBatchBackward) {
}

TEST_F(AtenXlaTensorTest, TestAvgPool3DNoBatchBackward) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
int kernel_size = 2;
for (int stride = 1; stride <= 2; ++stride) {
for (int padding = 0; padding <= 1; ++padding) {
@@ -1253,6 +1273,8 @@ TEST_F(AtenXlaTensorTest, TestAvgPool3DNoBatchBackward) {
}

TEST_F(AtenXlaTensorTest, TestAdaptiveAvgPool3DNoBatchBackward) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
for (int64_t output_size : {7, 4}) {
auto testfn =
[&](const std::vector<torch::Tensor>& inputs) -> torch::Tensor {
@@ -1273,6 +1295,8 @@ TEST_F(AtenXlaTensorTest, TestAdaptiveAvgPool3DNoBatchBackward) {
}

TEST_F(AtenXlaTensorTest, TestAdaptiveAvgPool3DBackward) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
for (int64_t output_size : {7, 4}) {
auto testfn =
[&](const std::vector<torch::Tensor>& inputs) -> torch::Tensor {
@@ -1293,6 +1317,8 @@ TEST_F(AtenXlaTensorTest, TestAdaptiveAvgPool3DBackward) {
}

TEST_F(AtenXlaTensorTest, TestAdaptiveAvgPool2DBackward) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
for (int64_t output_size : {7, 8}) {
auto testfn =
[&](const std::vector<torch::Tensor>& inputs) -> torch::Tensor {
@@ -1312,6 +1338,8 @@ TEST_F(AtenXlaTensorTest, TestAdaptiveAvgPool2DBackward) {
}

TEST_F(AtenXlaTensorTest, TestAdaptiveAvgPool2DNoBatchBackward) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
for (int64_t output_size : {7, 8}) {
auto testfn =
[&](const std::vector<torch::Tensor>& inputs) -> torch::Tensor {
@@ -1329,6 +1357,8 @@ TEST_F(AtenXlaTensorTest, TestAdaptiveAvgPool2DNoBatchBackward) {
}

TEST_F(AtenXlaTensorTest, TestConv3DBackward) {
GTEST_SKIP() << "failing due to PyTorch upstream changes. "
<< "See: https://github.com/pytorch/xla/issues/9651.";
int in_channels = 4;
int out_channels = 8;
int kernel_size = 5;
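
Taken together, the C++ changes are mechanical: each backward test that began failing after the PyTorch upstream change is disabled by inserting the same two GTEST_SKIP() lines at the top of its body, with a streamed message pointing at tracking issue #9651. The sketch below is illustrative only (the test and suite names are made up, not taken from this PR); it shows how the skip pattern behaves in GoogleTest: the test is reported as skipped rather than failed, and nothing after the GTEST_SKIP() statement runs.

#include <gtest/gtest.h>

// Illustrative sketch of the skip pattern used throughout this PR.
// GTEST_SKIP() ends the test body early and marks the test as skipped,
// so the assertions below it never execute.
TEST(SkipPatternSketch, BackwardTestTemporarilyDisabled) {
  GTEST_SKIP() << "failing due to PyTorch upstream changes. "
               << "See: https://github.com/pytorch/xla/issues/9651.";
  // Unreachable while the skip is in place; re-enabling a test is just a
  // matter of deleting the two GTEST_SKIP lines above.
  EXPECT_TRUE(false);
}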