From d11e43706c1f0abb2f1f5505ef61a10d190f213b Mon Sep 17 00:00:00 2001 From: philschmid Date: Sat, 26 Nov 2022 10:21:58 +0100 Subject: [PATCH 1/4] updated version --- dockerfiles/starlette/pytorch/Dockerfile.cpu | 2 +- dockerfiles/starlette/pytorch/Dockerfile.gpu | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/dockerfiles/starlette/pytorch/Dockerfile.cpu b/dockerfiles/starlette/pytorch/Dockerfile.cpu index 04d0a1db..012f93f7 100644 --- a/dockerfiles/starlette/pytorch/Dockerfile.cpu +++ b/dockerfiles/starlette/pytorch/Dockerfile.cpu @@ -5,7 +5,7 @@ COPY starlette_requirements.txt /tmp/requirements.txt RUN pip install --no-cache-dir -r /tmp/requirements.txt && rm /tmp/requirements.txt # Think about a better solution -> base contaienr has pt 1.13. thats why need below 0.14 -RUN pip install --no-cache-dir sentence_transformers torchvision~="0.14.0" diffusers=="0.8.1" accelerate=="0.14.0" +RUN pip install --no-cache-dir sentence_transformers torchvision~="0.14.0" diffusers=="0.9.0" accelerate=="0.14.0" # copy application COPY src/huggingface_inference_toolkit huggingface_inference_toolkit diff --git a/dockerfiles/starlette/pytorch/Dockerfile.gpu b/dockerfiles/starlette/pytorch/Dockerfile.gpu index 33e02056..74047e72 100644 --- a/dockerfiles/starlette/pytorch/Dockerfile.gpu +++ b/dockerfiles/starlette/pytorch/Dockerfile.gpu @@ -5,7 +5,7 @@ COPY starlette_requirements.txt /tmp/requirements.txt RUN pip install --no-cache-dir -r /tmp/requirements.txt && rm /tmp/requirements.txt # Think about a better solution -> base contaienr has pt 1.13. 
thats why need below 0.14 -RUN pip install --no-cache-dir sentence_transformers torchvision~="0.14.0" diffusers=="0.8.1" accelerate=="0.14.0" +RUN pip install --no-cache-dir sentence_transformers torchvision~="0.14.0" diffusers=="0.9.0" accelerate=="0.14.0" # copy application COPY src/huggingface_inference_toolkit huggingface_inference_toolkit From 4909be060eb198981f6e29a112ae3065c5acf540 Mon Sep 17 00:00:00 2001 From: philschmid Date: Sat, 26 Nov 2022 11:19:58 +0100 Subject: [PATCH 2/4] make DPMSolverMultistepScheduler default --- src/huggingface_inference_toolkit/diffusers_utils.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/huggingface_inference_toolkit/diffusers_utils.py b/src/huggingface_inference_toolkit/diffusers_utils.py index 3db96940..5f10fac0 100644 --- a/src/huggingface_inference_toolkit/diffusers_utils.py +++ b/src/huggingface_inference_toolkit/diffusers_utils.py @@ -13,7 +13,7 @@ def is_diffusers_available(): if is_diffusers_available(): import torch - from diffusers import StableDiffusionPipeline + from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler def check_supported_pipeline(model_dir): @@ -30,7 +30,13 @@ def check_supported_pipeline(model_dir): class DiffusersPipelineImageToText: def __init__(self, model_dir: str, device: str = None): # needs "cuda" for GPU + self.pipeline = StableDiffusionPipeline.from_pretrained(model_dir, torch_dtype=torch.float16) + # try to use DPMSolverMultistepScheduler + try: + self.pipeline.scheduler = DPMSolverMultistepScheduler.from_config(self.pipeline.scheduler.config) + except Exception: + pass self.pipeline.to(device) def __call__(self, prompt, **kwargs): From d5b78f5f8d81d55ad63f056546d9107d77cc2efe Mon Sep 17 00:00:00 2001 From: philschmid Date: Sat, 26 Nov 2022 11:21:35 +0100 Subject: [PATCH 3/4] make style --- src/huggingface_inference_toolkit/diffusers_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/src/huggingface_inference_toolkit/diffusers_utils.py b/src/huggingface_inference_toolkit/diffusers_utils.py index 5f10fac0..c4311a8a 100644 --- a/src/huggingface_inference_toolkit/diffusers_utils.py +++ b/src/huggingface_inference_toolkit/diffusers_utils.py @@ -13,7 +13,7 @@ def is_diffusers_available(): if is_diffusers_available(): import torch - from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler + from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline def check_supported_pipeline(model_dir): From e0fdcdd7097944f177a98dbcaecba3ebb574e1e8 Mon Sep 17 00:00:00 2001 From: philschmid Date: Sun, 27 Nov 2022 09:38:58 +0000 Subject: [PATCH 4/4] added default parameter --- .../diffusers_utils.py | 30 ++++++++++++++----- 1 file changed, 23 insertions(+), 7 deletions(-) diff --git a/src/huggingface_inference_toolkit/diffusers_utils.py b/src/huggingface_inference_toolkit/diffusers_utils.py index c4311a8a..92e7ed06 100644 --- a/src/huggingface_inference_toolkit/diffusers_utils.py +++ b/src/huggingface_inference_toolkit/diffusers_utils.py @@ -39,14 +39,30 @@ def __init__(self, model_dir: str, device: str = None): # needs "cuda" for GPU pass self.pipeline.to(device) - def __call__(self, prompt, **kwargs): + def __call__( + self, + prompt, + num_inference_steps=25, + guidance_scale=7.5, + num_images_per_prompt=1, + height=None, + width=None, + negative_prompt=None, + ): + # TODO: add support for more images (Reason is correct output) + num_images_per_prompt = 1 + + # Call pipeline with parameters + out = self.pipeline( + prompt, + num_inference_steps=num_inference_steps, + guidance_scale=guidance_scale, + num_images_per_prompt=num_images_per_prompt, + negative_prompt=negative_prompt, + height=height, + width=width, + ) - if kwargs: - out = self.pipeline(prompt, **kwargs) - else: - out = self.pipeline(prompt) - - # TODO: return more than 1 image if requested return out.images[0]