@@ -88,6 +88,128 @@ AnimateDiff tends to work better with finetuned Stable Diffusion models. If you
</Tip>

## Using Motion LoRAs

Motion LoRAs are a collection of LoRAs that work with the `guoyww/animatediff-motion-adapter-v1-5-2` checkpoint. These LoRAs are responsible for adding specific types of motion to the animations.

```python
import torch
from diffusers import MotionAdapter, AnimateDiffPipeline, DDIMScheduler
from diffusers.utils import export_to_gif

# Load the motion adapter
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
# load SD 1.5 based finetuned model
model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter)
pipe.load_lora_weights("guoyww/animatediff-motion-lora-zoom-out", adapter_name="zoom-out")

scheduler = DDIMScheduler.from_pretrained(
    model_id, subfolder="scheduler", clip_sample=False, timestep_spacing="linspace", steps_offset=1
)
pipe.scheduler = scheduler

# enable memory savings
pipe.enable_vae_slicing()
pipe.enable_model_cpu_offload()

output = pipe(
    prompt=(
        "masterpiece, bestquality, highlydetailed, ultradetailed, sunset, "
        "orange sky, warm lighting, fishing boats, ocean waves seagulls, "
        "rippling water, wharf, silhouette, serene atmosphere, dusk, evening glow, "
        "golden hour, coastal landscape, seaside scenery"
    ),
    negative_prompt="bad quality, worse quality",
    num_frames=16,
    guidance_scale=7.5,
    num_inference_steps=25,
    generator=torch.Generator("cpu").manual_seed(42),
)
frames = output.frames[0]
export_to_gif(frames, "animation.gif")
```
132+
<table>
    <tr>
        <td><center>
        masterpiece, bestquality, sunset.
        <br>
        <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-zoom-out-lora.gif"
            alt="masterpiece, bestquality, sunset"
            style="width: 300px;" />
        </center></td>
    </tr>
</table>
144+
## Using Motion LoRAs with PEFT

You can also leverage the [PEFT](https://github.com/huggingface/peft) backend to combine Motion LoRAs and create more complex animations.

First install PEFT with

```shell
pip install peft
```

Then you can use the following code to combine Motion LoRAs.
156+
```python
import torch
from diffusers import MotionAdapter, AnimateDiffPipeline, DDIMScheduler
from diffusers.utils import export_to_gif

# Load the motion adapter
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
# load SD 1.5 based finetuned model
model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter)

pipe.load_lora_weights("diffusers/animatediff-motion-lora-zoom-out", adapter_name="zoom-out")
pipe.load_lora_weights("diffusers/animatediff-motion-lora-pan-left", adapter_name="pan-left")
pipe.set_adapters(["zoom-out", "pan-left"], adapter_weights=[1.0, 1.0])

scheduler = DDIMScheduler.from_pretrained(
    model_id, subfolder="scheduler", clip_sample=False, timestep_spacing="linspace", steps_offset=1
)
pipe.scheduler = scheduler

# enable memory savings
pipe.enable_vae_slicing()
pipe.enable_model_cpu_offload()

output = pipe(
    prompt=(
        "masterpiece, bestquality, highlydetailed, ultradetailed, sunset, "
        "orange sky, warm lighting, fishing boats, ocean waves seagulls, "
        "rippling water, wharf, silhouette, serene atmosphere, dusk, evening glow, "
        "golden hour, coastal landscape, seaside scenery"
    ),
    negative_prompt="bad quality, worse quality",
    num_frames=16,
    guidance_scale=7.5,
    num_inference_steps=25,
    generator=torch.Generator("cpu").manual_seed(42),
)
frames = output.frames[0]
export_to_gif(frames, "animation.gif")
```
199+
<table>
    <tr>
        <td><center>
        masterpiece, bestquality, sunset.
        <br>
        <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-zoom-out-pan-left-lora.gif"
            alt="masterpiece, bestquality, sunset"
            style="width: 300px;" />
        </center></td>
    </tr>
</table>


## AnimateDiffPipeline

[[autodoc]] AnimateDiffPipeline
	- all