Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 16 additions & 1 deletion backend/python/diffusers/backend.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,19 @@
from diffusers import FluxPipeline, FluxTransformer2DModel, AutoencoderKLWan
from diffusers.pipelines.stable_diffusion import safety_checker
from diffusers.utils import load_image, export_to_video
from compel import Compel, ReturnedEmbeddingsType
# TODO: re-enable compel as a hard dependency once it supports transformers >= 5.
# Tracking upstream: https://github.com/damian0815/compel/pull/129
# and https://github.com/damian0815/compel/issues/128
# Until then compel pins transformers ~= 4.25, which forces the pip resolver into
# multi-hour backtracking storms in CI when DEPS_REFRESH rotates the cache.
# Keep the import optional and gate usage on the COMPEL env var (set COMPEL=1 to opt in).
try:
from compel import Compel, ReturnedEmbeddingsType
COMPEL_AVAILABLE = True
except ImportError:
Compel = None
ReturnedEmbeddingsType = None
COMPEL_AVAILABLE = False
from optimum.quanto import freeze, qfloat8, quantize
from transformers import T5EncoderModel
from safetensors.torch import load_file
Expand All @@ -66,6 +78,9 @@

_ONE_DAY_IN_SECONDS = 60 * 60 * 24
COMPEL = os.environ.get("COMPEL", "0") == "1"
if COMPEL and not COMPEL_AVAILABLE:
print("WARNING: COMPEL is enabled but the compel module is not installed. Install it manually (`pip install compel`) or unset COMPEL. Falling back to standard prompt processing.", file=sys.stderr)
COMPEL = False
SD_EMBED = os.environ.get("SD_EMBED", "0") == "1"
# Warn if SD_EMBED is enabled but the module is not available
if SD_EMBED and not SD_EMBED_AVAILABLE:
Expand Down
9 changes: 7 additions & 2 deletions backend/python/diffusers/requirements-cpu.txt
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,15 @@ opencv-python
transformers
torchvision==0.22.1
accelerate
compel
git+https://github.com/xhinker/sd_embed
peft
sentencepiece
torch==2.7.1
optimum-quanto
ftfy
ftfy
# TODO: re-add compel once it supports transformers >= 5.
# Tracking: https://github.com/damian0815/compel/pull/129
# https://github.com/damian0815/compel/issues/128
# compel currently pins transformers~=4.25, which forced pip into multi-hour
# resolver backtracking storms in CI. backend.py makes the import optional
# (try/except) and disables COMPEL=1 with a warning when the module is
# missing, so dropping it here is safe.
7 changes: 6 additions & 1 deletion backend/python/diffusers/requirements-cublas12.txt
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,15 @@ opencv-python
transformers
torchvision
accelerate
compel
git+https://github.com/xhinker/sd_embed
peft
sentencepiece
torch
ftfy
optimum-quanto
# TODO: re-add compel once it supports transformers >= 5.
# Tracking: https://github.com/damian0815/compel/pull/129
# https://github.com/damian0815/compel/issues/128
# compel currently pins transformers~=4.25, which forced pip into multi-hour
# resolver backtracking storms in CI. backend.py makes the import optional
# (try/except) and disables COMPEL=1 with a warning when the module is
# missing, so dropping it here is safe.
7 changes: 6 additions & 1 deletion backend/python/diffusers/requirements-cublas13.txt
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,15 @@ opencv-python
transformers
torchvision
accelerate
compel
git+https://github.com/xhinker/sd_embed
peft
sentencepiece
torch
ftfy
optimum-quanto
# TODO: re-add compel once it supports transformers >= 5.
# Tracking: https://github.com/damian0815/compel/pull/129
# https://github.com/damian0815/compel/issues/128
# compel currently pins transformers~=4.25, which forced pip into multi-hour
# resolver backtracking storms in CI. backend.py makes the import optional
# (try/except) and disables COMPEL=1 with a warning when the module is
# missing, so dropping it here is safe.
9 changes: 7 additions & 2 deletions backend/python/diffusers/requirements-hipblas.txt
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,13 @@ git+https://github.com/huggingface/diffusers
opencv-python
transformers
accelerate
compel
peft
sentencepiece
optimum-quanto
ftfy
ftfy
# TODO: re-add compel once it supports transformers >= 5.
# Tracking: https://github.com/damian0815/compel/pull/129
# https://github.com/damian0815/compel/issues/128
# compel currently pins transformers~=4.25, which forced pip into multi-hour
# resolver backtracking storms in CI. backend.py makes the import optional
# (try/except) and disables COMPEL=1 with a warning when the module is
# missing, so dropping it here is safe.
9 changes: 7 additions & 2 deletions backend/python/diffusers/requirements-intel.txt
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,14 @@ git+https://github.com/huggingface/diffusers
opencv-python
transformers
accelerate
compel
git+https://github.com/xhinker/sd_embed
peft
sentencepiece
optimum-quanto
ftfy
ftfy
# TODO: re-add compel once it supports transformers >= 5.
# Tracking: https://github.com/damian0815/compel/pull/129
# https://github.com/damian0815/compel/issues/128
# compel currently pins transformers~=4.25, which forced pip into multi-hour
# resolver backtracking storms in CI. backend.py makes the import optional
# (try/except) and disables COMPEL=1 with a warning when the module is
# missing, so dropping it here is safe.
7 changes: 6 additions & 1 deletion backend/python/diffusers/requirements-l4t12.txt
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,15 @@ torch
git+https://github.com/huggingface/diffusers
transformers
accelerate
compel
peft
optimum-quanto
numpy<2
sentencepiece
torchvision
ftfy
# TODO: re-add compel once it supports transformers >= 5.
# Tracking: https://github.com/damian0815/compel/pull/129
# https://github.com/damian0815/compel/issues/128
# compel currently pins transformers~=4.25, which forced pip into multi-hour
# resolver backtracking storms in CI. backend.py makes the import optional
# (try/except) and disables COMPEL=1 with a warning when the module is
# missing, so dropping it here is safe.
7 changes: 6 additions & 1 deletion backend/python/diffusers/requirements-l4t13.txt
Original file line number Diff line number Diff line change
Expand Up @@ -3,11 +3,16 @@ torch
git+https://github.com/huggingface/diffusers
transformers
accelerate
compel
peft
optimum-quanto
numpy<2
sentencepiece
torchvision
ftfy
chardet
# TODO: re-add compel once it supports transformers >= 5.
# Tracking: https://github.com/damian0815/compel/pull/129
# https://github.com/damian0815/compel/issues/128
# compel currently pins transformers~=4.25, which forced pip into multi-hour
# resolver backtracking storms in CI. backend.py makes the import optional
# (try/except) and disables COMPEL=1 with a warning when the module is
# missing, so dropping it here is safe.
9 changes: 7 additions & 2 deletions backend/python/diffusers/requirements-mps.txt
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,13 @@ git+https://github.com/huggingface/diffusers
opencv-python
transformers
accelerate
compel
peft
sentencepiece
optimum-quanto
ftfy
ftfy
# TODO: re-add compel once it supports transformers >= 5.
# Tracking: https://github.com/damian0815/compel/pull/129
# https://github.com/damian0815/compel/issues/128
# compel currently pins transformers~=4.25, which forced pip into multi-hour
# resolver backtracking storms in CI. backend.py makes the import optional
# (try/except) and disables COMPEL=1 with a warning when the module is
# missing, so dropping it here is safe.
Loading