From 090ffcafd47916027fa8d73b7c3f98c8bd6c11e1 Mon Sep 17 00:00:00 2001
From: Xiongfei Wei
Date: Wed, 22 Feb 2023 23:08:19 +0000
Subject: [PATCH 1/3] implement has_hint

---
 test/test_dynamic_shapes.py |  1 -
 torch_xla/csrc/tensor.cpp   | 50 +++++++++++++++++++++++++++++++++++++
 torch_xla/csrc/tensor.h     | 23 +++++++++++++++++
 3 files changed, 73 insertions(+), 1 deletion(-)

diff --git a/test/test_dynamic_shapes.py b/test/test_dynamic_shapes.py
index 26f9ee0aa551..a6d03f467760 100644
--- a/test/test_dynamic_shapes.py
+++ b/test/test_dynamic_shapes.py
@@ -12,7 +12,6 @@
 
 class TestDynamicShapes(test_utils.XlaTestCase):
 
-  @unittest.skip("Regression")
   def test_simple_expand(self):
     size1 = 5
     size2 = 2
diff --git a/torch_xla/csrc/tensor.cpp b/torch_xla/csrc/tensor.cpp
index 728f822be19a..69c03330315b 100644
--- a/torch_xla/csrc/tensor.cpp
+++ b/torch_xla/csrc/tensor.cpp
@@ -751,6 +751,52 @@ c10::SymNode XLASymNodeImpl::sym_max(const c10::SymNode& other) {
                    << " has not been implemented.";
 }
 
+ c10::SymNode XLASymNodeImpl::sym_and(const c10::SymNode& other) {
+  XLA_CHECK(false) << "XLASymNodeImpl::" << __FUNCTION__
+                   << " has not been implemented.";
+}
+ c10::SymNode XLASymNodeImpl::sym_not() {
+  XLA_CHECK(false) << "XLASymNodeImpl::" << __FUNCTION__
+                   << " has not been implemented.";
+}
+ // NB: self is ignored here, only the arguments are used
+ c10::SymNode XLASymNodeImpl::is_contiguous(
+     at::ArrayRef<c10::SymNode> sizes,
+     at::ArrayRef<c10::SymNode> strides) {
+  XLA_CHECK(false) << "XLASymNodeImpl::" << __FUNCTION__
+                   << " has not been implemented.";
+}
+ c10::SymNode XLASymNodeImpl::is_channels_last_contiguous_2d(
+     at::ArrayRef<c10::SymNode> sizes,
+     at::ArrayRef<c10::SymNode> strides) {
+  XLA_CHECK(false) << "XLASymNodeImpl::" << __FUNCTION__
+                   << " has not been implemented.";
+}
+ c10::SymNode XLASymNodeImpl::is_channels_last_contiguous_3d(
+     at::ArrayRef<c10::SymNode> sizes,
+     at::ArrayRef<c10::SymNode> strides) {
+  XLA_CHECK(false) << "XLASymNodeImpl::" << __FUNCTION__
+                   << " has not been implemented.";
+}
+ c10::SymNode XLASymNodeImpl::is_channels_last_strides_2d(
+     at::ArrayRef<c10::SymNode> sizes,
+     at::ArrayRef<c10::SymNode> strides) {
+  XLA_CHECK(false) << "XLASymNodeImpl::" << __FUNCTION__
+                   << " has not been implemented.";
+}
+ c10::SymNode XLASymNodeImpl::is_channels_last_strides_3d(
+     at::ArrayRef<c10::SymNode> sizes,
+     at::ArrayRef<c10::SymNode> strides) {
+  XLA_CHECK(false) << "XLASymNodeImpl::" << __FUNCTION__
+                   << " has not been implemented.";
+}
+ c10::SymNode XLASymNodeImpl::is_non_overlapping_and_dense(
+     at::ArrayRef<c10::SymNode> sizes,
+     at::ArrayRef<c10::SymNode> strides) {
+  XLA_CHECK(false) << "XLASymNodeImpl::" << __FUNCTION__
+                   << " has not been implemented.";
+}
+
 c10::SymNode XLASymNodeImpl::clone() {
   TORCH_LAZY_FN_COUNTER("xla::size_");
   return c10::make_intrusive<XLASymNodeImpl>(node());
@@ -796,6 +842,10 @@ bool XLASymNodeImpl::bool_() {
   return dn->getDynamicValue() != 0;
 }
 
+bool XLASymNodeImpl::has_hint() {
+  return true;
+}
+
 std::string XLASymNodeImpl::str() {
   return "<=" + std::to_string(DimCast(node().get())->getStaticValue());
 }
diff --git a/torch_xla/csrc/tensor.h b/torch_xla/csrc/tensor.h
index 8cf86aa05f6c..3776db99e613 100644
--- a/torch_xla/csrc/tensor.h
+++ b/torch_xla/csrc/tensor.h
@@ -51,6 +51,28 @@ class TORCH_API XLASymNodeImpl : public c10::SymNodeImpl {
   c10::SymNode neg() override;
   c10::SymNode sym_min(const c10::SymNode& other) override;
   c10::SymNode sym_max(const c10::SymNode& other) override;
+  c10::SymNode sym_or(const c10::SymNode& other) override;
+  c10::SymNode sym_and(const c10::SymNode& other) override;
+  c10::SymNode sym_not() override;
+  // NB: self is ignored here, only the arguments are used
+  c10::SymNode is_contiguous(
+      at::ArrayRef<c10::SymNode> sizes,
+      at::ArrayRef<c10::SymNode> strides) override;
+  c10::SymNode is_channels_last_contiguous_2d(
+      at::ArrayRef<c10::SymNode> sizes,
+      at::ArrayRef<c10::SymNode> strides) override;
+  c10::SymNode is_channels_last_contiguous_3d(
+      at::ArrayRef<c10::SymNode> sizes,
+      at::ArrayRef<c10::SymNode> strides) override;
+  c10::SymNode is_channels_last_strides_2d(
+      at::ArrayRef<c10::SymNode> sizes,
+      at::ArrayRef<c10::SymNode> strides) override;
+  c10::SymNode is_channels_last_strides_3d(
+      at::ArrayRef<c10::SymNode> sizes,
+      at::ArrayRef<c10::SymNode> strides) override;
+  c10::SymNode is_non_overlapping_and_dense(
+      at::ArrayRef<c10::SymNode> sizes,
+      at::ArrayRef<c10::SymNode> strides) override;
   c10::SymNode clone() override;
   c10::SymNode sym_float() override;
   c10::SymNode wrap_int(int64_t num) override;
@@ -60,6 +82,7 @@ class TORCH_API XLASymNodeImpl : public c10::SymNodeImpl {
   bool guard_bool(const char* file, int64_t line) override;
   int64_t int_() override;
   bool bool_() override;
+  bool has_hint() override;
   std::string str() override;
 
   torch::lazy::NodePtr node() { return node_; }

From ea836b86c83de4d16ab00ba63dead7bfedffc658 Mon Sep 17 00:00:00 2001
From: Xiongfei Wei
Date: Wed, 22 Feb 2023 23:33:14 +0000
Subject: [PATCH 2/3] fix linter

---
 torch_xla/csrc/tensor.cpp | 46 +++++++++++++++++++--------------------
 torch_xla/csrc/tensor.h   |  9 ++++----
 2 files changed, 26 insertions(+), 29 deletions(-)

diff --git a/torch_xla/csrc/tensor.cpp b/torch_xla/csrc/tensor.cpp
index 69c03330315b..6249c67c9607 100644
--- a/torch_xla/csrc/tensor.cpp
+++ b/torch_xla/csrc/tensor.cpp
@@ -751,48 +751,48 @@ c10::SymNode XLASymNodeImpl::sym_max(const c10::SymNode& other) {
                    << " has not been implemented.";
 }
 
- c10::SymNode XLASymNodeImpl::sym_and(const c10::SymNode& other) {
+c10::SymNode XLASymNodeImpl::sym_or(const c10::SymNode& other) {
   XLA_CHECK(false) << "XLASymNodeImpl::" << __FUNCTION__
                    << " has not been implemented.";
 }
- c10::SymNode XLASymNodeImpl::sym_not() {
+
+c10::SymNode XLASymNodeImpl::sym_and(const c10::SymNode& other) {
   XLA_CHECK(false) << "XLASymNodeImpl::" << __FUNCTION__
                    << " has not been implemented.";
 }
- // NB: self is ignored here, only the arguments are used
- c10::SymNode XLASymNodeImpl::is_contiguous(
-     at::ArrayRef<c10::SymNode> sizes,
-     at::ArrayRef<c10::SymNode> strides) {
+
+c10::SymNode XLASymNodeImpl::sym_not() {
   XLA_CHECK(false) << "XLASymNodeImpl::" << __FUNCTION__
                    << " has not been implemented.";
 }
- c10::SymNode XLASymNodeImpl::is_channels_last_contiguous_2d(
-     at::ArrayRef<c10::SymNode> sizes,
-     at::ArrayRef<c10::SymNode> strides) {
+// NB: self is ignored here, only the arguments are used
+c10::SymNode XLASymNodeImpl::is_contiguous(at::ArrayRef<c10::SymNode> sizes,
+                                           at::ArrayRef<c10::SymNode> strides) {
   XLA_CHECK(false) << "XLASymNodeImpl::" << __FUNCTION__
                    << " has not been implemented.";
 }
- c10::SymNode XLASymNodeImpl::is_channels_last_contiguous_3d(
-     at::ArrayRef<c10::SymNode> sizes,
-     at::ArrayRef<c10::SymNode> strides) {
+c10::SymNode XLASymNodeImpl::is_channels_last_contiguous_2d(
+    at::ArrayRef<c10::SymNode> sizes, at::ArrayRef<c10::SymNode> strides) {
   XLA_CHECK(false) << "XLASymNodeImpl::" << __FUNCTION__
                    << " has not been implemented.";
 }
- c10::SymNode XLASymNodeImpl::is_channels_last_strides_2d(
-     at::ArrayRef<c10::SymNode> sizes,
-     at::ArrayRef<c10::SymNode> strides) {
+c10::SymNode XLASymNodeImpl::is_channels_last_contiguous_3d(
+    at::ArrayRef<c10::SymNode> sizes, at::ArrayRef<c10::SymNode> strides) {
   XLA_CHECK(false) << "XLASymNodeImpl::" << __FUNCTION__
                    << " has not been implemented.";
 }
- c10::SymNode XLASymNodeImpl::is_channels_last_strides_3d(
-     at::ArrayRef<c10::SymNode> sizes,
-     at::ArrayRef<c10::SymNode> strides) {
+c10::SymNode XLASymNodeImpl::is_channels_last_strides_2d(
+    at::ArrayRef<c10::SymNode> sizes, at::ArrayRef<c10::SymNode> strides) {
   XLA_CHECK(false) << "XLASymNodeImpl::" << __FUNCTION__
                    << " has not been implemented.";
 }
- c10::SymNode XLASymNodeImpl::is_non_overlapping_and_dense(
-     at::ArrayRef<c10::SymNode> sizes,
-     at::ArrayRef<c10::SymNode> strides) {
+c10::SymNode XLASymNodeImpl::is_channels_last_strides_3d(
+    at::ArrayRef<c10::SymNode> sizes, at::ArrayRef<c10::SymNode> strides) {
+  XLA_CHECK(false) << "XLASymNodeImpl::" << __FUNCTION__
+                   << " has not been implemented.";
+}
+c10::SymNode XLASymNodeImpl::is_non_overlapping_and_dense(
+    at::ArrayRef<c10::SymNode> sizes, at::ArrayRef<c10::SymNode> strides) {
   XLA_CHECK(false) << "XLASymNodeImpl::" << __FUNCTION__
                    << " has not been implemented.";
 }
@@ -842,9 +842,7 @@ bool XLASymNodeImpl::bool_() {
   return dn->getDynamicValue() != 0;
 }
 
-bool XLASymNodeImpl::has_hint() {
-  return true;
-}
+bool XLASymNodeImpl::has_hint() { return true; }
 
 std::string XLASymNodeImpl::str() {
   return "<=" + std::to_string(DimCast(node().get())->getStaticValue());
diff --git a/torch_xla/csrc/tensor.h b/torch_xla/csrc/tensor.h
index 3776db99e613..a3c4accca125 100644
--- a/torch_xla/csrc/tensor.h
+++ b/torch_xla/csrc/tensor.h
@@ -55,18 +55,17 @@ class TORCH_API XLASymNodeImpl : public c10::SymNodeImpl {
   c10::SymNode sym_and(const c10::SymNode& other) override;
   c10::SymNode sym_not() override;
   // NB: self is ignored here, only the arguments are used
-  c10::SymNode is_contiguous(
-      at::ArrayRef<c10::SymNode> sizes,
-      at::ArrayRef<c10::SymNode> strides) override;
+  c10::SymNode is_contiguous(at::ArrayRef<c10::SymNode> sizes,
+                             at::ArrayRef<c10::SymNode> strides) override;
   c10::SymNode is_channels_last_contiguous_2d(
       at::ArrayRef<c10::SymNode> sizes,
-      at::ArrayRef<c10::SymNode> strides) override;
+      at::ArrayRef<c10::SymNode> strides) override;
   c10::SymNode is_channels_last_contiguous_3d(
       at::ArrayRef<c10::SymNode> sizes,
       at::ArrayRef<c10::SymNode> strides) override;
   c10::SymNode is_channels_last_strides_2d(
       at::ArrayRef<c10::SymNode> sizes,
-      at::ArrayRef<c10::SymNode> strides) override;
+      at::ArrayRef<c10::SymNode> strides) override;
   c10::SymNode is_channels_last_strides_3d(
       at::ArrayRef<c10::SymNode> sizes,
       at::ArrayRef<c10::SymNode> strides) override;

From 11559f0622061ddb14a0fe04777e8c4d8ac80951 Mon Sep 17 00:00:00 2001
From: Xiongfei Wei
Date: Wed, 22 Feb 2023 23:38:19 +0000
Subject: [PATCH 3/3] reenable DS tests.

---
 test/test_dynamic_shape_models.py | 2 --
 test/test_dynamic_shapes.py       | 5 -----
 test/test_operations.py           | 1 -
 3 files changed, 8 deletions(-)

diff --git a/test/test_dynamic_shape_models.py b/test/test_dynamic_shape_models.py
index 0a8037f2015f..3895d920d548 100644
--- a/test/test_dynamic_shape_models.py
+++ b/test/test_dynamic_shape_models.py
@@ -45,7 +45,6 @@ def forward(self, x):
 )
 class TestDynamicShapeModels(unittest.TestCase):
 
-  @unittest.skip("Regresssion")
   def test_forward_pass_dynamic_input_correctness(self):
     losses = []
     for _ in range(2):
@@ -67,7 +66,6 @@ def test_forward_pass_dynamic_input_correctness(self):
     np.testing.assert_allclose(losses[0], losses[1], rtol=1e-2, atol=1e-2)
     print('Test passed.')
 
-  @unittest.skip("Regresssion")
   def test_forward_pass_dynamic_input_compile_once(self):
     met.clear_metrics()
     num_compilation_recorded = False
diff --git a/test/test_dynamic_shapes.py b/test/test_dynamic_shapes.py
index a6d03f467760..82bbc88a2ea1 100644
--- a/test/test_dynamic_shapes.py
+++ b/test/test_dynamic_shapes.py
@@ -26,7 +26,6 @@ def test_simple_expand(self):
     t6_cpu = t6.cpu()
     self.assertEqual(t6_cpu.shape[0], 2)
 
-  @unittest.skip("Regression")
   def test_simple_expand_on_2d_tensor(self):
     size1 = 5
     size2 = 2
@@ -61,7 +60,6 @@ def test_simple_expand_on_2d_tensor(self):
     # the python dispatcher.
     self.assertGreater(met.counter_value("xla::size_clone"), 0)
 
-  @unittest.skip("Regression")
   def test_simple_expand_add_dimension(self):
     size1 = 5
     size2 = 2
@@ -87,7 +85,6 @@ def test_wrap(self):
     a3 = a2.shape[0] + 3  # tests wrap
     self.assertIsInstance(a3, torch.SymInt)
 
-  @unittest.skip("Regression")
   def test_sizeAdd(self):
     size1 = 5
     size2 = 2
@@ -108,7 +105,6 @@ def test_sizeAdd(self):
     t4 = t3.expand(dyn_size)
     self.assertEqual(t4.size(0), 3)
 
-  @unittest.skip("Regression")
   def test_sizeSub(self):
     size1 = 5
     size2 = 2
@@ -169,7 +165,6 @@ def test_nonzero_cast(self):
     t2 = torch.nonzero(t1.int()).float()
     xm.mark_step()
 
-  @unittest.skip("Regression")
   def test_expand_symint_correctness(self):
     dev = xm.xla_device()
     size1 = 5
diff --git a/test/test_operations.py b/test/test_operations.py
index 90254911c396..d723ad60568f 100644
--- a/test/test_operations.py
+++ b/test/test_operations.py
@@ -1119,7 +1119,6 @@ def test_scatter_add_bool(self):
     xla_b.scatter_add_(0, xla_index, xla_a)
     self.assertEqual(b, xla_b)
 
-  @unittest.skip("DS Regressions")
   def test_squeeze_nonzero(self):
 
     def test_fn(a):
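
Note (illustrative sketch, not part of the patch series): has_hint() roughly tells the upstream SymInt machinery whether a symbolic size can supply a concrete hint value; XLA's bounded dynamic dimensions always can, which is why the override above simply returns true and why the previously skipped dynamic-shape tests are re-enabled. A minimal stand-alone sketch of the code path those tests exercise, modeled on test_simple_expand and assuming torch_xla is installed and an XLA device is available (the tensor values below are illustrative, not taken verbatim from the tests):

  import torch
  import torch_xla.core.xla_model as xm

  dev = xm.xla_device()
  t1 = torch.zeros([5, 2], device=dev)
  t1[3][0] = 1
  # nonzero() produces a tensor whose first dimension is dynamic,
  # bounded above by the number of elements in t1.
  t2 = torch.nonzero(t1)
  # t2.shape[0] is a torch.SymInt backed by XLASymNodeImpl; consuming it in
  # expand() sends SymInt queries (has_hint(), clone(), etc.) to that node,
  # which is also what bumps counters such as xla::size_clone in the tests.
  t5 = torch.ones(1, device=dev)
  t6 = t5.expand(t2.shape[0])
  print(t6.cpu().shape[0])  # 1, since only one element of t1 is nonzero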