support for more devices
including mps
kabachuha committed Mar 20, 2023
1 parent 340f3ba commit ab1c4e7
Showing 2 changed files with 4 additions and 3 deletions.
scripts/modelscope-text2vid.py: 3 changes (2 additions & 1 deletion)
@@ -17,6 +17,7 @@
 import cv2
 from base64 import b64encode
 import os, subprocess, time
+from modules import devices

 outdir = os.path.join(opts.outdir_img2img_samples, 'text2video-modelscope')
 outdir = os.path.join(os.getcwd(), outdir)
@@ -57,7 +58,7 @@ def process(skip_video_creation, ffmpeg_location, ffmpeg_crf, ffmpeg_preset, fps
 pipe = setup_pipeline()
 print('Starting text2video')

-samples, _ = pipe.infer(prompt, n_prompt, steps, frames, cfg_scale, width, height, eta, cpu_vae, latents)
+samples, _ = pipe.infer(prompt, n_prompt, steps, frames, cfg_scale, width, height, eta, cpu_vae, devices.get_optimal_device(), latents)

 print(f'text2video finished, saving frames to {outdir_current}')

scripts/t2v_pipeline.py: 4 changes (2 additions & 2 deletions)
@@ -150,7 +150,7 @@ def __init__(self, model_dir):
 self.clip_encoder.to("cpu")

 #@torch.compile()
-def infer(self, prompt, n_prompt, steps, frames, scale, width=256, height=256, eta=0.0, cpu_vae='GPU (half precision)', latents=None):
+def infer(self, prompt, n_prompt, steps, frames, scale, width=256, height=256, eta=0.0, cpu_vae='GPU (half precision)', device = torch.device('cpu'), latents=None):
 r"""
 The entry function of text to image synthesis task.
 1. Using diffusion model to generate the video's latent representation.
@@ -164,7 +164,7 @@ def infer(self, prompt, n_prompt, steps, frames, scale, width=256, height=256, e
"""
print(self.sd_model.use_fps_condition)
self.sd_model.use_fps_condition = False
self.device = torch.device('cuda')
self.device = device
self.clip_encoder.to(self.device)
y, zero_y = self.preprocess(prompt, n_prompt)
self.clip_encoder.to("cpu")
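
For context, a minimal sketch of the device-selection step this commit relies on. The call site now passes modules.devices.get_optimal_device() into infer() instead of hard-coding CUDA; that helper is expected to return the best available torch.device (CUDA, Apple's MPS, or CPU). The function below is an illustrative stand-in under that assumption, not the webui's actual implementation, and pick_device is a hypothetical name.

# Illustrative only: approximates what a get_optimal_device()-style helper does.
import torch

def pick_device() -> torch.device:
    # Prefer a CUDA GPU when one is present.
    if torch.cuda.is_available():
        return torch.device("cuda")
    # Fall back to Apple's Metal backend (MPS) on PyTorch builds that ship it.
    mps = getattr(torch.backends, "mps", None)
    if mps is not None and mps.is_available():
        return torch.device("mps")
    # Otherwise run on the CPU.
    return torch.device("cpu")

# With this commit, the pipeline call passes the chosen device rather than assuming CUDA:
# samples, _ = pipe.infer(prompt, n_prompt, steps, frames, cfg_scale,
#                         width, height, eta, cpu_vae, pick_device(), latents)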
