Skip to content

Commit

Permalink
[PEFT] Fix scale unscale with LoRA adapters (huggingface#5417)
Browse files Browse the repository at this point in the history
* fix scale unscale v1

* final fixes + CI

* fix slow test

* oops

* fix copies

* oops

* oops

* fix

* style

* fix copies

---------

Co-authored-by: Sayak Paul <spsayakpaul@gmail.com>
  • Loading branch information
younesbelkada and sayakpaul committed Oct 21, 2023
1 parent ca096b4 commit 9dbed6a
Show file tree
Hide file tree
Showing 39 changed files with 58 additions and 50 deletions.
2 changes: 1 addition & 1 deletion models/unet_2d_condition.py
Original file line number Diff line number Diff line change
Expand Up @@ -1153,7 +1153,7 @@ def forward(

if USE_PEFT_BACKEND:
# remove `lora_scale` from each PEFT layer
unscale_lora_layers(self)
unscale_lora_layers(self, lora_scale)

if not return_dict:
return (sample,)
Expand Down
2 changes: 1 addition & 1 deletion pipelines/alt_diffusion/pipeline_alt_diffusion.py
Original file line number Diff line number Diff line change
Expand Up @@ -442,7 +442,7 @@ def encode_prompt(

if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

return prompt_embeds, negative_prompt_embeds

Expand Down
2 changes: 1 addition & 1 deletion pipelines/alt_diffusion/pipeline_alt_diffusion_img2img.py
Original file line number Diff line number Diff line change
Expand Up @@ -441,7 +441,7 @@ def encode_prompt(

if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

return prompt_embeds, negative_prompt_embeds

Expand Down
2 changes: 1 addition & 1 deletion pipelines/controlnet/pipeline_controlnet.py
Original file line number Diff line number Diff line change
Expand Up @@ -424,7 +424,7 @@ def encode_prompt(

if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

return prompt_embeds, negative_prompt_embeds

Expand Down
2 changes: 1 addition & 1 deletion pipelines/controlnet/pipeline_controlnet_img2img.py
Original file line number Diff line number Diff line change
Expand Up @@ -448,7 +448,7 @@ def encode_prompt(

if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

return prompt_embeds, negative_prompt_embeds

Expand Down
2 changes: 1 addition & 1 deletion pipelines/controlnet/pipeline_controlnet_inpaint.py
Original file line number Diff line number Diff line change
Expand Up @@ -575,7 +575,7 @@ def encode_prompt(

if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

return prompt_embeds, negative_prompt_embeds

Expand Down
4 changes: 2 additions & 2 deletions pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py
Original file line number Diff line number Diff line change
Expand Up @@ -476,12 +476,12 @@ def encode_prompt(
if self.text_encoder is not None:
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

if self.text_encoder_2 is not None:
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder_2)
unscale_lora_layers(self.text_encoder_2, lora_scale)

return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds

Expand Down
4 changes: 2 additions & 2 deletions pipelines/controlnet/pipeline_controlnet_sd_xl.py
Original file line number Diff line number Diff line change
Expand Up @@ -444,12 +444,12 @@ def encode_prompt(
if self.text_encoder is not None:
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

if self.text_encoder_2 is not None:
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder_2)
unscale_lora_layers(self.text_encoder_2, lora_scale)

return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds

Expand Down
4 changes: 2 additions & 2 deletions pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py
Original file line number Diff line number Diff line change
Expand Up @@ -488,12 +488,12 @@ def encode_prompt(
if self.text_encoder is not None:
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

if self.text_encoder_2 is not None:
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder_2)
unscale_lora_layers(self.text_encoder_2, lora_scale)

return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds

Expand Down
2 changes: 1 addition & 1 deletion pipelines/stable_diffusion/pipeline_cycle_diffusion.py
Original file line number Diff line number Diff line change
Expand Up @@ -438,7 +438,7 @@ def encode_prompt(

if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

return prompt_embeds, negative_prompt_embeds

Expand Down
2 changes: 1 addition & 1 deletion pipelines/stable_diffusion/pipeline_stable_diffusion.py
Original file line number Diff line number Diff line change
Expand Up @@ -434,7 +434,7 @@ def encode_prompt(

if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

return prompt_embeds, negative_prompt_embeds

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -469,7 +469,7 @@ def encode_prompt(

if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

return prompt_embeds, negative_prompt_embeds

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -343,7 +343,7 @@ def encode_prompt(

if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

return prompt_embeds, negative_prompt_embeds

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -614,7 +614,7 @@ def encode_prompt(

if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

return prompt_embeds, negative_prompt_embeds

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -411,7 +411,7 @@ def encode_prompt(

if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

return prompt_embeds, negative_prompt_embeds

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -436,7 +436,7 @@ def encode_prompt(

if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

return prompt_embeds, negative_prompt_embeds

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -435,7 +435,7 @@ def encode_prompt(

if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

return prompt_embeds, negative_prompt_embeds

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -505,7 +505,7 @@ def encode_prompt(

if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

return prompt_embeds, negative_prompt_embeds

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -427,7 +427,7 @@ def encode_prompt(

if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

return prompt_embeds, negative_prompt_embeds

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -341,7 +341,7 @@ def encode_prompt(

if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

return prompt_embeds, negative_prompt_embeds

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -405,7 +405,7 @@ def encode_prompt(

if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

return prompt_embeds, negative_prompt_embeds

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -374,7 +374,7 @@ def encode_prompt(

if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

return prompt_embeds, negative_prompt_embeds

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -358,7 +358,7 @@ def encode_prompt(

if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

return prompt_embeds, negative_prompt_embeds

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -389,7 +389,7 @@ def encode_prompt(

if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

return prompt_embeds, negative_prompt_embeds

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -579,7 +579,7 @@ def encode_prompt(

if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

return prompt_embeds, negative_prompt_embeds

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -381,7 +381,7 @@ def encode_prompt(

if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

return prompt_embeds, negative_prompt_embeds

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -372,7 +372,7 @@ def encode_prompt(

if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

return prompt_embeds, negative_prompt_embeds

Expand Down
2 changes: 1 addition & 1 deletion pipelines/stable_diffusion/pipeline_stable_unclip.py
Original file line number Diff line number Diff line change
Expand Up @@ -479,7 +479,7 @@ def encode_prompt(

if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

return prompt_embeds, negative_prompt_embeds

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -433,7 +433,7 @@ def encode_prompt(

if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

return prompt_embeds, negative_prompt_embeds

Expand Down
4 changes: 2 additions & 2 deletions pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py
Original file line number Diff line number Diff line change
Expand Up @@ -436,12 +436,12 @@ def encode_prompt(
if self.text_encoder is not None:
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

if self.text_encoder_2 is not None:
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder_2)
unscale_lora_layers(self.text_encoder_2, lora_scale)

return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -440,12 +440,12 @@ def encode_prompt(
if self.text_encoder is not None:
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

if self.text_encoder_2 is not None:
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder_2)
unscale_lora_layers(self.text_encoder_2, lora_scale)

return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -590,12 +590,12 @@ def encode_prompt(
if self.text_encoder is not None:
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

if self.text_encoder_2 is not None:
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder_2)
unscale_lora_layers(self.text_encoder_2, lora_scale)

return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds

Expand Down
2 changes: 1 addition & 1 deletion pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py
Original file line number Diff line number Diff line change
Expand Up @@ -429,7 +429,7 @@ def encode_prompt(

if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

return prompt_embeds, negative_prompt_embeds

Expand Down
4 changes: 2 additions & 2 deletions pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py
Original file line number Diff line number Diff line change
Expand Up @@ -450,12 +450,12 @@ def encode_prompt(
if self.text_encoder is not None:
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

if self.text_encoder_2 is not None:
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder_2)
unscale_lora_layers(self.text_encoder_2, lora_scale)

return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -361,7 +361,7 @@ def encode_prompt(

if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

return prompt_embeds, negative_prompt_embeds

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -423,7 +423,7 @@ def encode_prompt(

if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

return prompt_embeds, negative_prompt_embeds

Expand Down
2 changes: 1 addition & 1 deletion pipelines/unidiffuser/pipeline_unidiffuser.py
Original file line number Diff line number Diff line change
Expand Up @@ -556,7 +556,7 @@ def encode_prompt(

if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder)
unscale_lora_layers(self.text_encoder, lora_scale)

return prompt_embeds, negative_prompt_embeds

Expand Down
2 changes: 1 addition & 1 deletion pipelines/versatile_diffusion/modeling_text_unet.py
Original file line number Diff line number Diff line change
Expand Up @@ -1371,7 +1371,7 @@ def forward(

if USE_PEFT_BACKEND:
# remove `lora_scale` from each PEFT layer
unscale_lora_layers(self)
unscale_lora_layers(self, lora_scale)

if not return_dict:
return (sample,)
Expand Down
Loading

0 comments on commit 9dbed6a

Please sign in to comment.