From 1d3a52534afbea33596c693f90a9db1b96128257 Mon Sep 17 00:00:00 2001
From: Saqlain
Date: Fri, 26 Jul 2024 11:52:44 +0530
Subject: [PATCH 1/2] Reduced model size for lumina-tests

---
 tests/pipelines/lumina/test_lumina_nextdit.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/tests/pipelines/lumina/test_lumina_nextdit.py b/tests/pipelines/lumina/test_lumina_nextdit.py
index a53758ce2808..24084778bfb2 100644
--- a/tests/pipelines/lumina/test_lumina_nextdit.py
+++ b/tests/pipelines/lumina/test_lumina_nextdit.py
@@ -34,19 +34,19 @@ class LuminaText2ImgPipelinePipelineFastTests(unittest.TestCase, PipelineTesterM
     def get_dummy_components(self):
         torch.manual_seed(0)
         transformer = LuminaNextDiT2DModel(
-            sample_size=16,
+            sample_size=4,
             patch_size=2,
             in_channels=4,
-            hidden_size=24,
+            hidden_size=8,
             num_layers=2,
-            num_attention_heads=3,
+            num_attention_heads=2,
             num_kv_heads=1,
             multiple_of=16,
             ffn_dim_multiplier=None,
             norm_eps=1e-5,
             learn_sigma=True,
             qk_norm=True,
-            cross_attention_dim=32,
+            cross_attention_dim=8,
             scaling_factor=1.0,
         )
         torch.manual_seed(0)
@@ -58,9 +58,9 @@ def get_dummy_components(self):
         torch.manual_seed(0)
         config = GemmaConfig(
             head_dim=4,
-            hidden_size=32,
+            hidden_size=8,
             intermediate_size=37,
-            num_attention_heads=4,
+            num_attention_heads=1,
             num_hidden_layers=2,
             num_key_value_heads=4,
         )

From 249666d7b8732f6c7efcbce65b5dc8a87e27f6da Mon Sep 17 00:00:00 2001
From: Saqlain
Date: Fri, 26 Jul 2024 12:19:58 +0530
Subject: [PATCH 2/2] Handled failing tests

---
 tests/pipelines/lumina/test_lumina_nextdit.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/tests/pipelines/lumina/test_lumina_nextdit.py b/tests/pipelines/lumina/test_lumina_nextdit.py
index 24084778bfb2..d02ff7429046 100644
--- a/tests/pipelines/lumina/test_lumina_nextdit.py
+++ b/tests/pipelines/lumina/test_lumina_nextdit.py
@@ -37,9 +37,9 @@ def get_dummy_components(self):
             sample_size=4,
             patch_size=2,
             in_channels=4,
-            hidden_size=8,
+            hidden_size=4,
             num_layers=2,
-            num_attention_heads=2,
+            num_attention_heads=1,
             num_kv_heads=1,
             multiple_of=16,
             ffn_dim_multiplier=None,
@@ -57,10 +57,10 @@ def get_dummy_components(self):

         torch.manual_seed(0)
         config = GemmaConfig(
-            head_dim=4,
+            head_dim=2,
             hidden_size=8,
             intermediate_size=37,
-            num_attention_heads=1,
+            num_attention_heads=4,
             num_hidden_layers=2,
             num_key_value_heads=4,
         )
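
For reference, and not part of the patch series itself, below is a minimal standalone sketch of the dummy components as they end up after PATCH 2/2, assuming the public diffusers/transformers constructors already used by the test file. The patches do not state why the intermediate configuration failed, but the inline assertions spell out the sizing constraints such tiny configs typically have to satisfy; in particular, num_attention_heads=1 together with num_key_value_heads=4 (the state after PATCH 1/2) breaks the grouped-query-attention divisibility that PATCH 2/2 restores.

```python
# Illustrative sketch only: the dummy components as configured after both
# patches are applied, plus the sizing constraints they have to respect.
import torch
from transformers import GemmaConfig

from diffusers import LuminaNextDiT2DModel

torch.manual_seed(0)
transformer = LuminaNextDiT2DModel(
    sample_size=4,
    patch_size=2,
    in_channels=4,
    hidden_size=4,
    num_layers=2,
    num_attention_heads=1,
    num_kv_heads=1,
    multiple_of=16,
    ffn_dim_multiplier=None,
    norm_eps=1e-5,
    learn_sigma=True,
    qk_norm=True,
    cross_attention_dim=8,
    scaling_factor=1.0,
)

config = GemmaConfig(
    head_dim=2,
    hidden_size=8,
    intermediate_size=37,
    num_attention_heads=4,
    num_hidden_layers=2,
    num_key_value_heads=4,
)

# The DiT hidden size must split evenly across its attention heads.
assert transformer.config.hidden_size % transformer.config.num_attention_heads == 0
# Grouped-query attention: query heads must be divisible by key/value heads
# (num_attention_heads=1 with num_key_value_heads=4, as in PATCH 1/2, breaks this).
assert config.num_attention_heads % config.num_key_value_heads == 0
# The text-encoder width feeds the DiT cross-attention, so the two must match.
assert config.hidden_size == transformer.config.cross_attention_dim
```

Note that both patches move cross_attention_dim and the Gemma hidden_size in lockstep (32 -> 8), since the text-encoder output is what the transformer attends over; head_dim is shrunk instead of the Gemma hidden size in PATCH 2/2, which keeps the model small while leaving that coupling intact.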