
Commit

Untested SXL
JimothyJohn committed Aug 19, 2023
1 parent 926e86f commit cb27919
Showing 5 changed files with 121 additions and 19 deletions.
6 changes: 4 additions & 2 deletions cog.yaml
@@ -21,8 +21,10 @@ build:

# Download models to cache locations
run:
- "wget -P /root/.cache/torch/hub/checkpoints/ https://download.pytorch.org/models/alexnet-owt-7be5be79.pth && wget -P /root/.cache/torch/hub/checkpoints/ https://huggingface.co/stabilityai/stable-diffusion-2-1/resolve/main/v2-1_768-ema-pruned.ckpt && wget -P /root/.cache/huggingface/hub/models--laion--CLIP-ViT-H-14-laion2B-s32B-b79K/snapshots/94a64189c3535c1cb44acfcccd7b0908c1c8eb23/ https://huggingface.co/laion/CLIP-ViT-H-14-laion2B-s32B-b79K/resolve/main/open_clip_pytorch_model.bin"
- "wget -P /root/.cache/torch/hub/checkpoints/ https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k/resolve/main/open_clip_pytorch_model.bin && wget -P /root/.cache/huggingface/hub/models--laion--CLIP-ViT-H-14-laion2B-s32B-b79K/snapshots/94a64189c3535c1cb44acfcccd7b0908c1c8eb23/ https://huggingface.co/laion/CLIP-ViT-H-14-laion2B-s32B-b79K/resolve/main/open_clip_pytorch_model.bin"
- "wget -P /root/.cache/torch/hub/checkpoints/ https://download.pytorch.org/models/alexnet-owt-7be5be79.pth"
# - "python /src/install.py"
# - "wget -P /root/.cache/torch/hub/checkpoints/ https://download.pytorch.org/models/alexnet-owt-7be5be79.pth && wget -P /root/.cache/torch/hub/checkpoints/ https://huggingface.co/stabilityai/stable-diffusion-2-1/resolve/main/v2-1_768-ema-pruned.ckpt && wget -P /root/.cache/huggingface/hub/models--laion--CLIP-ViT-H-14-laion2B-s32B-b79K/snapshots/94a64189c3535c1cb44acfcccd7b0908c1c8eb23/ https://huggingface.co/laion/CLIP-ViT-H-14-laion2B-s32B-b79K/resolve/main/open_clip_pytorch_model.bin"
# - "wget -P /root/.cache/torch/hub/checkpoints/ https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k/resolve/main/open_clip_pytorch_model.bin && wget -P /root/.cache/huggingface/hub/models--laion--CLIP-ViT-H-14-laion2B-s32B-b79K/snapshots/94a64189c3535c1cb44acfcccd7b0908c1c8eb23/ https://huggingface.co/laion/CLIP-ViT-H-14-laion2B-s32B-b79K/resolve/main/open_clip_pytorch_model.bin"

image: "r8.im/jimothyjohn/latentblending"
# predict.py defines how predictions are run on your model
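The run steps above warm the model caches with wget, while the commented-out "python /src/install.py" line points at a script-based alternative. A minimal sketch of what such an install script could look like, assuming its only job is to populate the Hugging Face cache at build time with the SDXL base weights that predict.py later loads (the file name and exact approach are assumptions, not part of this commit):

# install.py - hypothetical build-time cache warmer; not part of this commit
import torch
from diffusers import StableDiffusionXLPipeline

# Loading the pipeline once at image-build time populates /root/.cache/huggingface,
# so setup() in predict.py can construct it later without downloading at runtime.
StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
)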
13 changes: 6 additions & 7 deletions my_example.py
@@ -20,7 +20,6 @@
import warnings

warnings.filterwarnings("ignore")
import warnings
from latent_blending import LatentBlending
from diffusers_holder import DiffusersHolder
from diffusers import StableDiffusionXLPipeline
@@ -51,18 +50,18 @@
duration_transition = 2 # In seconds

# Spawn latent blending
lb = LatentBlending(dh)
lb.set_prompt1(prompt1)
lb.set_prompt2(prompt2)
lb.set_dimensions(1536, 1024)
self.lb = LatentBlending(dh)
self.lb.set_prompt1(prompt1)
self.lb.set_prompt2(prompt2)
self.lb.set_dimensions(1536, 1024)

# Run latent blending
lb.run_transition(
self.lb.run_transition(
depth_strength=depth_strength,
num_inference_steps=num_inference_steps,
t_compute_max_allowed=t_compute_max_allowed,
fixed_seeds=fixed_seeds,
)

# Save movie
lb.write_movie_transition(fp_movie, duration_transition)
self.lb.write_movie_transition(fp_movie, duration_transition)
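Read end to end, the updated example builds the SDXL pipeline, wraps it in DiffusersHolder, hands it to LatentBlending, sets prompts and dimensions, runs the transition, and writes the movie; the hunk also renames lb to self.lb. As a usage sketch of that flow in a standalone script, using only calls that appear in this commit (prompts, seeds, and the output path are illustrative, and the parameter values are taken from the constants in predict.py):

# Hypothetical standalone version of the flow shown above.
import torch
from diffusers import StableDiffusionXLPipeline
from diffusers_holder import DiffusersHolder
from latent_blending import LatentBlending

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
)
pipe.to("cuda")

dh = DiffusersHolder(pipe)
lb = LatentBlending(dh)
lb.set_prompt1("photo of a beach at sunrise")    # example prompts, not from the diff
lb.set_prompt2("photo of the same beach at night")
lb.set_dimensions(1536, 1024)

lb.run_transition(
    depth_strength=0.55,        # DEPTH_STRENGTH in predict.py
    num_inference_steps=50,     # INFERENCE_STEPS in predict.py
    t_compute_max_allowed=10,   # T_COMPUTE_MAX_ALLOWED in predict.py
    fixed_seeds=[420, 421],     # example seeds
)
lb.write_movie_transition("movie_example.mp4", 2)  # 2-second transition, as above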
39 changes: 29 additions & 10 deletions predict.py
@@ -16,51 +16,69 @@
# https://github.com/replicate/cog/blob/main/docs/python.md

from cog import BasePredictor, Input, Path
from stable_diffusion_holder import StableDiffusionHolder

# from stable_diffusion_holder import StableDiffusionHolder
from diffusers_holder import DiffusersHolder
from diffusers import StableDiffusionXLPipeline
from movie_util import concatenate_movies
import torch
from tqdm import tqdm
from huggingface_hub import hf_hub_download

torch.backends.cudnn.benchmark = False
torch.set_grad_enabled(False)
import warnings

warnings.filterwarnings("ignore")
import warnings
from latent_blending import LatentBlending

T_COMPUTE_MAX_ALLOWED = 15
RESOLUTION = 768
T_COMPUTE_MAX_ALLOWED = 10
WIDTH = 768
HEIGHT = 768
INFERENCE_STEPS = 50
FPS = 60
TITLE = "output.mp4"
CROSSFEED_POWER = 0.8
CROSSFEED_RANGE = 0.8
CROSSFEED_DECAY = 0.5
DEPTH_STRENGTH = 0.55


class Predictor(BasePredictor):
def setup(self) -> None:
# Load checkpoint from pre-downloaded location
# TODO: Evaluate fastest approach to do this
"""
fp_ckpt = "/root/.cache/torch/hub/checkpoints/v2-1_768-ema-pruned.ckpt"
fp_ckpt = hf_hub_download(
repo_id="stabilityai/stable-diffusion-xl-refiner-1.0",
filename="sd_xl_refiner_1.0.safetensors",
)
# Load stable diffusion model from pre-downloaded checkpoint
self.sdh = StableDiffusionHolder(fp_ckpt)
self.dh = StableDiffusionHolder(fp_ckpt)
# Spawn latent blending
self.lb = LatentBlending(self.sdh)
self.lb = LatentBlending(self.dh)
# Dimensions must match model fine-tuning
self.lb.set_width(RESOLUTION)
self.lb.set_height(RESOLUTION)
"""
pretrained_model_name_or_path = "stabilityai/stable-diffusion-xl-base-1.0"
self.pipe = StableDiffusionXLPipeline.from_pretrained(
pretrained_model_name_or_path,
torch_dtype=torch.float16,
variant="fp16",
use_safetensors=True,
)
self.pipe.to("cuda")

self.dh = DiffusersHolder(self.pipe)
# Spawn latent blending
self.lb = LatentBlending(self.dh)
self.lb.set_dimensions(WIDTH, HEIGHT)

def predict(
self,
caption: str = Input(description="What to dream"),
text: str = Input(description="What to dream"),
# caption2: str = Input(description="Image caption to end with"),
transition_time: int = Input(
description="Time to transition to next image", ge=1, le=15, default=2
@@ -90,16 +108,17 @@ def predict(
for i in tqdm(range(transitions)):
# For a multi transition we can save some computation time and recycle the latents
if i == 0:
self.lb.set_prompt1(caption)
self.lb.set_prompt1(text)
recycle_img1 = False
else:
self.lb.swap_forward()
recycle_img1 = True

self.lb.set_prompt2(caption)
self.lb.set_prompt2(text)
fp_movie_part = f"tmp_part_{str(i).zfill(3)}.mp4"
self.lb.run_transition(
recycle_img1=recycle_img1,
depth_strength=DEPTH_STRENGTH,
fixed_seeds=[seed + i, seed + i + 1],
t_compute_max_allowed=T_COMPUTE_MAX_ALLOWED,
num_inference_steps=INFERENCE_STEPS,
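The hunk is cut off inside the transition loop; since predict.py imports concatenate_movies from movie_util and defines TITLE = "output.mp4", the remainder presumably writes each part and stitches them into the final clip. A hedged sketch of that tail end (the concatenate_movies signature and the return value are assumptions, not shown in this diff):

# Hypothetical continuation after the loop, once each tmp_part_*.mp4 has been written.
list_movie_parts = [f"tmp_part_{str(i).zfill(3)}.mp4" for i in range(transitions)]

# Assumed signature: concatenate_movies(output_path, list_of_part_paths)
concatenate_movies(TITLE, list_movie_parts)

return Path(TITLE)  # Cog predictors return the generated file as a cog.Path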
41 changes: 41 additions & 0 deletions utils/Ping.sh
@@ -0,0 +1,41 @@
#!/usr/bin/env bash

set -o errexit
set -o nounset
set -o pipefail
if [[ "${TRACE-0}" == "1" ]]; then
set -o xtrace
fi

help_function() {
echo "Usage: $0 [OPTION]"
echo
echo "This script checks and installs PlatformIO and Docker if not present."
echo
echo "-h Show this help message and exit."
}

install_cog() {
if ! command -v cog &> /dev/null; then
echo "Cog not detected. Installing..."
sudo curl -o /usr/local/bin/cog -L "https://github.com/replicate/cog/releases/latest/download/cog_$(uname -s)_$(uname -m)"
sudo chmod +x /usr/local/bin/cog
else
echo "Cog is already installed."
fi
}

main() {
if [[ "$#" -gt 0 && "$1" == "-h" ]]; then
help_function
exit 0
fi

install_cog
cog build
docker run -d -p 80:5000 r8.im/jimothyjohn/latentblending
}

main "$@"
41 changes: 41 additions & 0 deletions utils/Quickstart.sh
@@ -0,0 +1,41 @@
#!/usr/bin/env bash

set -o errexit
set -o nounset
set -o pipefail
if [[ "${TRACE-0}" == "1" ]]; then
set -o xtrace
fi

help_function() {
echo "Usage: $0 [OPTION]"
echo
echo "This script checks and installs PlatformIO and Docker if not present."
echo
echo "-h Show this help message and exit."
}

install_cog() {
if ! command -v cog &> /dev/null; then
echo "Cog not detected. Installing..."
sudo curl -o /usr/local/bin/cog -L "https://github.com/replicate/cog/releases/latest/download/cog_$(uname -s)_$(uname -m)"
sudo chmod +x /usr/local/bin/cog
else
echo "Cog is already installed."
fi
}

main() {
if [[ "$#" -gt 0 && "$1" == "-h" ]]; then
help_function
exit 0
fi

install_cog
cog build
docker run -d -p 80:5000 --gpus all r8.im/jimothyjohn/latentblending
}

main "$@"
