setup.py (6 additions, 5 deletions)
@@ -13,8 +13,9 @@
 # libavcodec-extra : libavcodec-extra includes additional codecs for ffmpeg
 
 install_requires = [
-    "transformers[sklearn,sentencepiece,audio,vision]==4.41.1",
-    "orjson",
+    "transformers[sklearn,sentencepiece,audio,vision]==4.44.0",
+    "huggingface_hub[hf_transfer]==0.24.5",
+    "peft==0.12.0",
     # vision
     "Pillow",
     "librosa",
@@ -26,13 +27,13 @@
"starlette",
"uvicorn",
"pandas",
"peft==0.11.1",
"orjson",
]

extras = {}

extras["st"] = ["sentence_transformers==2.7.0"]
extras["diffusers"] = ["diffusers==0.26.3", "accelerate==0.27.2"]
extras["diffusers"] = ["diffusers==0.30.0", "accelerate==0.33.0"]
extras["torch"] = ["torch==2.2.2", "torchvision", "torchaudio"]
extras["test"] = [
"pytest==7.2.1",
@@ -53,7 +54,7 @@
 setup(
     name="huggingface-inference-toolkit",
     version=VERSION,
-    author="HuggingFace",
+    author="Hugging Face",
     description="Hugging Face Inference Toolkit is for serving 🤗 Transformers models in containers.",
     url="",
     package_dir={"": "src"},
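Note on the setup.py changes: the transformers pin moves to 4.44.0, peft to 0.12.0, and huggingface_hub[hf_transfer]==0.24.5 is added. hf_transfer is opt-in: huggingface_hub only uses it when the HF_HUB_ENABLE_HF_TRANSFER environment variable is set before the library is imported. A minimal sketch of exercising the new pin (the repo id is a placeholder, not part of this PR):

    import os

    # The flag is read when huggingface_hub loads, so set it first.
    os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

    from huggingface_hub import snapshot_download

    # Placeholder repo id, for illustration only.
    local_dir = snapshot_download(repo_id="distilbert-base-uncased")
    print(local_dir)
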
src/huggingface_inference_toolkit/diffusers_utils.py (1 addition, 3 deletions)
@@ -28,7 +28,7 @@ def __init__(
         dtype = torch.float32
         if device == "cuda":
             dtype = torch.bfloat16 if is_torch_bf16_gpu_available() else torch.float16
-        device_map = "auto" if device == "cuda" else None
+        device_map = "balanced" if device == "cuda" else None
 
         self.pipeline = AutoPipelineForText2Image.from_pretrained(
             model_dir, torch_dtype=dtype, device_map=device_map, **kwargs
@@ -42,8 +42,6 @@ def __init__(
         except Exception:
             pass
 
-        self.pipeline.to(device)
-
     def __call__(
         self,
         prompt,
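Context for the diffusers_utils.py hunks: diffusers pipelines accept "balanced" as the device-placement strategy (the pipeline-level loader does not take "auto"), and a pipeline loaded with a device_map already has its modules placed on the GPU(s), which is why the trailing self.pipeline.to(device) is dropped. A minimal sketch of the resulting load path, assuming a CUDA host and a placeholder model id:

    import torch
    from diffusers import AutoPipelineForText2Image

    model_dir = "stabilityai/sdxl-turbo"  # placeholder model id

    # Stand-in for the toolkit's bf16 availability check on CUDA hosts.
    dtype = torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16

    # device_map="balanced" places the pipeline's modules across the
    # available GPUs at load time; no pipeline.to("cuda") is needed after.
    pipeline = AutoPipelineForText2Image.from_pretrained(
        model_dir, torch_dtype=dtype, device_map="balanced"
    )
    image = pipeline("a watercolor fox").images[0]
    image.save("fox.png")
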
src/huggingface_inference_toolkit/utils.py (2 additions, 6 deletions)
@@ -8,11 +8,7 @@
 from transformers.file_utils import is_tf_available, is_torch_available
 from transformers.pipelines import Pipeline
 
-from huggingface_inference_toolkit.const import (
-    HF_DEFAULT_PIPELINE_NAME,
-    HF_MODULE_NAME,
-    HF_TRUST_REMOTE_CODE,
-)
+from huggingface_inference_toolkit.const import HF_DEFAULT_PIPELINE_NAME, HF_MODULE_NAME
 from huggingface_inference_toolkit.diffusers_utils import (
     get_diffusers_pipeline,
     is_diffusers_available,
@@ -240,7 +236,7 @@ def get_pipeline(
"zero-shot-image-classification",
}:
kwargs["feature_extractor"] = model_dir
elif task in {"image-to-text"}:
elif task in {"image-to-text", "text-to-image"}:
pass
elif task == "conversational":
task = "text-generation"
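The get_pipeline hunk lets "text-to-image" fall through the same no-op branch as "image-to-text": diffusers pipelines resolve their own tokenizer and processors, so the toolkit should not inject tokenizer or feature_extractor paths for them. A simplified sketch of that dispatch, not the toolkit's full implementation:

    def _pipeline_kwargs(task: str, model_dir: str) -> dict:
        # Simplified version of the branch touched above.
        kwargs = {}
        if task in {
            "image-classification",
            "zero-shot-image-classification",
        }:
            # Vision pipelines load a feature extractor from the model dir.
            kwargs["feature_extractor"] = model_dir
        elif task in {"image-to-text", "text-to-image"}:
            # These pipelines bundle their own processors; inject nothing.
            pass
        return kwargs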