From 80fae829ca7f90ce8ac84cc4da279bdacaa129fa Mon Sep 17 00:00:00 2001
From: Aryan
Date: Thu, 13 Feb 2025 21:48:34 +0100
Subject: [PATCH] update

---
 .../models/transformers/cogvideox_transformer_3d.py | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)

diff --git a/src/diffusers/models/transformers/cogvideox_transformer_3d.py b/src/diffusers/models/transformers/cogvideox_transformer_3d.py
index 53ec148209e0..6b4f38dc04a1 100644
--- a/src/diffusers/models/transformers/cogvideox_transformer_3d.py
+++ b/src/diffusers/models/transformers/cogvideox_transformer_3d.py
@@ -503,14 +503,7 @@ def forward(
                 attention_kwargs=attention_kwargs,
             )
 
-        if not self.config.use_rotary_positional_embeddings:
-            # CogVideoX-2B
-            hidden_states = self.norm_final(hidden_states)
-        else:
-            # CogVideoX-5B
-            hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1)
-            hidden_states = self.norm_final(hidden_states)
-            hidden_states = hidden_states[:, text_seq_length:]
+        hidden_states = self.norm_final(hidden_states)
 
         # 4. Final block
         hidden_states = self.norm_out(hidden_states, temb=emb)