From 2ab367f3de77313c513e359a4d1f8cba61d054b9 Mon Sep 17 00:00:00 2001
From: lucylq
Date: Thu, 14 Nov 2024 11:22:49 -0800
Subject: [PATCH] disable llm module test

---
 extension/llm/modules/test/test_attention.py           | 1 +
 extension/llm/modules/test/test_position_embeddings.py | 1 +
 2 files changed, 2 insertions(+)

diff --git a/extension/llm/modules/test/test_attention.py b/extension/llm/modules/test/test_attention.py
index b792fd9b1ef..bd0c44d8b5f 100644
--- a/extension/llm/modules/test/test_attention.py
+++ b/extension/llm/modules/test/test_attention.py
@@ -146,6 +146,7 @@ def test_attention_export(self):
 
         assert_close(et_res, tt_res)
 
+    @unittest.skip(reason="TODO(T207740932): test is flaky")
     def test_attention_aoti(self):
         # Self attention.
 
diff --git a/extension/llm/modules/test/test_position_embeddings.py b/extension/llm/modules/test/test_position_embeddings.py
index 05f43527bef..039cc798b19 100644
--- a/extension/llm/modules/test/test_position_embeddings.py
+++ b/extension/llm/modules/test/test_position_embeddings.py
@@ -163,6 +163,7 @@ def test_tiled_token_positional_embedding_export(self):
 
         assert_close(y, ref_y)
 
+    @unittest.skip(reason="TODO(T207740932): test is flaky")
     def test_tiled_token_positional_embedding_aoti(self):
         tpe_ep = torch.export.export(
             self.tpe,