diff --git a/backend/python/diffusers/backend.py b/backend/python/diffusers/backend.py index c9ad3b0bd9c9..00b292292949 100755 --- a/backend/python/diffusers/backend.py +++ b/backend/python/diffusers/backend.py @@ -40,7 +40,19 @@ from diffusers import FluxPipeline, FluxTransformer2DModel, AutoencoderKLWan from diffusers.pipelines.stable_diffusion import safety_checker from diffusers.utils import load_image, export_to_video -from compel import Compel, ReturnedEmbeddingsType +# TODO: re-enable compel as a hard dependency once it supports transformers >= 5. +# Tracking upstream: https://github.com/damian0815/compel/pull/129 +# and https://github.com/damian0815/compel/issues/128 +# Until then compel pins transformers ~= 4.25, which forces the pip resolver into +# multi-hour backtracking storms in CI when DEPS_REFRESH rotates the cache. +# Keep the import optional and gate usage on the COMPEL env var (set COMPEL=1 to opt in). +try: + from compel import Compel, ReturnedEmbeddingsType + COMPEL_AVAILABLE = True +except ImportError: + Compel = None + ReturnedEmbeddingsType = None + COMPEL_AVAILABLE = False from optimum.quanto import freeze, qfloat8, quantize from transformers import T5EncoderModel from safetensors.torch import load_file @@ -66,6 +78,9 @@ _ONE_DAY_IN_SECONDS = 60 * 60 * 24 COMPEL = os.environ.get("COMPEL", "0") == "1" +if COMPEL and not COMPEL_AVAILABLE: + print("WARNING: COMPEL is enabled but the compel module is not installed. Install it manually (`pip install compel`) or unset COMPEL. Falling back to standard prompt processing.", file=sys.stderr) + COMPEL = False SD_EMBED = os.environ.get("SD_EMBED", "0") == "1" # Warn if SD_EMBED is enabled but the module is not available if SD_EMBED and not SD_EMBED_AVAILABLE: diff --git a/backend/python/diffusers/requirements-cpu.txt b/backend/python/diffusers/requirements-cpu.txt index 2b76224d9695..8db419b292a7 100644 --- a/backend/python/diffusers/requirements-cpu.txt +++ b/backend/python/diffusers/requirements-cpu.txt @@ -4,10 +4,15 @@ opencv-python transformers torchvision==0.22.1 accelerate -compel git+https://github.com/xhinker/sd_embed peft sentencepiece torch==2.7.1 optimum-quanto -ftfy \ No newline at end of file +ftfy +# TODO: re-add compel once it supports transformers >= 5. +# Tracking: https://github.com/damian0815/compel/pull/129 +# https://github.com/damian0815/compel/issues/128 +# compel currently pins transformers~=4.25, which forced pip into multi-hour +# resolver backtracking storms in CI. backend.py imports it lazily and gates +# the COMPEL=1 env var on the import succeeding, so dropping it here is safe. \ No newline at end of file diff --git a/backend/python/diffusers/requirements-cublas12.txt b/backend/python/diffusers/requirements-cublas12.txt index 5a1e947f26e7..e3351ae75a60 100644 --- a/backend/python/diffusers/requirements-cublas12.txt +++ b/backend/python/diffusers/requirements-cublas12.txt @@ -4,10 +4,15 @@ opencv-python transformers torchvision accelerate -compel git+https://github.com/xhinker/sd_embed peft sentencepiece torch ftfy optimum-quanto +# TODO: re-add compel once it supports transformers >= 5. +# Tracking: https://github.com/damian0815/compel/pull/129 +# https://github.com/damian0815/compel/issues/128 +# compel currently pins transformers~=4.25, which forced pip into multi-hour +# resolver backtracking storms in CI. backend.py imports it lazily and gates +# the COMPEL=1 env var on the import succeeding, so dropping it here is safe. 
diff --git a/backend/python/diffusers/requirements-cublas13.txt b/backend/python/diffusers/requirements-cublas13.txt index 354c6df070a2..546998ba41bc 100644 --- a/backend/python/diffusers/requirements-cublas13.txt +++ b/backend/python/diffusers/requirements-cublas13.txt @@ -4,10 +4,15 @@ opencv-python transformers torchvision accelerate -compel git+https://github.com/xhinker/sd_embed peft sentencepiece torch ftfy optimum-quanto +# TODO: re-add compel once it supports transformers >= 5. +# Tracking: https://github.com/damian0815/compel/pull/129 +# https://github.com/damian0815/compel/issues/128 +# compel currently pins transformers~=4.25, which forced pip into multi-hour +# resolver backtracking storms in CI. backend.py imports it lazily and gates +# the COMPEL=1 env var on the import succeeding, so dropping it here is safe. diff --git a/backend/python/diffusers/requirements-hipblas.txt b/backend/python/diffusers/requirements-hipblas.txt index 712510827b99..3480d1fd6168 100644 --- a/backend/python/diffusers/requirements-hipblas.txt +++ b/backend/python/diffusers/requirements-hipblas.txt @@ -5,8 +5,13 @@ git+https://github.com/huggingface/diffusers opencv-python transformers accelerate -compel peft sentencepiece optimum-quanto -ftfy \ No newline at end of file +ftfy +# TODO: re-add compel once it supports transformers >= 5. +# Tracking: https://github.com/damian0815/compel/pull/129 +# https://github.com/damian0815/compel/issues/128 +# compel currently pins transformers~=4.25, which forced pip into multi-hour +# resolver backtracking storms in CI. backend.py imports it lazily and gates +# the COMPEL=1 env var on the import succeeding, so dropping it here is safe. 
\ No newline at end of file diff --git a/backend/python/diffusers/requirements-intel.txt b/backend/python/diffusers/requirements-intel.txt index 3fd3cde74466..c78f5ef230fa 100644 --- a/backend/python/diffusers/requirements-intel.txt +++ b/backend/python/diffusers/requirements-intel.txt @@ -7,9 +7,14 @@ git+https://github.com/huggingface/diffusers opencv-python transformers accelerate -compel git+https://github.com/xhinker/sd_embed peft sentencepiece optimum-quanto -ftfy \ No newline at end of file +ftfy +# TODO: re-add compel once it supports transformers >= 5. +# Tracking: https://github.com/damian0815/compel/pull/129 +# https://github.com/damian0815/compel/issues/128 +# compel currently pins transformers~=4.25, which forced pip into multi-hour +# resolver backtracking storms in CI. backend.py imports it lazily and gates +# the COMPEL=1 env var on the import succeeding, so dropping it here is safe. \ No newline at end of file diff --git a/backend/python/diffusers/requirements-l4t12.txt b/backend/python/diffusers/requirements-l4t12.txt index 9f77a9d09014..15857c4b0751 100644 --- a/backend/python/diffusers/requirements-l4t12.txt +++ b/backend/python/diffusers/requirements-l4t12.txt @@ -3,10 +3,15 @@ torch git+https://github.com/huggingface/diffusers transformers accelerate -compel peft optimum-quanto numpy<2 sentencepiece torchvision ftfy +# TODO: re-add compel once it supports transformers >= 5. +# Tracking: https://github.com/damian0815/compel/pull/129 +# https://github.com/damian0815/compel/issues/128 +# compel currently pins transformers~=4.25, which forced pip into multi-hour +# resolver backtracking storms in CI. backend.py imports it lazily and gates +# the COMPEL=1 env var on the import succeeding, so dropping it here is safe. 
diff --git a/backend/python/diffusers/requirements-l4t13.txt b/backend/python/diffusers/requirements-l4t13.txt index 560858e354f4..226033a61123 100644 --- a/backend/python/diffusers/requirements-l4t13.txt +++ b/backend/python/diffusers/requirements-l4t13.txt @@ -3,7 +3,6 @@ torch git+https://github.com/huggingface/diffusers transformers accelerate -compel peft optimum-quanto numpy<2 @@ -11,3 +10,9 @@ sentencepiece torchvision ftfy chardet +# TODO: re-add compel once it supports transformers >= 5. +# Tracking: https://github.com/damian0815/compel/pull/129 +# https://github.com/damian0815/compel/issues/128 +# compel currently pins transformers~=4.25, which forced pip into multi-hour +# resolver backtracking storms in CI. backend.py imports it lazily and gates +# the COMPEL=1 env var on the import succeeding, so dropping it here is safe. diff --git a/backend/python/diffusers/requirements-mps.txt b/backend/python/diffusers/requirements-mps.txt index 8b7c2413bffa..58eb65f02766 100644 --- a/backend/python/diffusers/requirements-mps.txt +++ b/backend/python/diffusers/requirements-mps.txt @@ -4,8 +4,13 @@ git+https://github.com/huggingface/diffusers opencv-python transformers accelerate -compel peft sentencepiece optimum-quanto -ftfy \ No newline at end of file +ftfy +# TODO: re-add compel once it supports transformers >= 5. +# Tracking: https://github.com/damian0815/compel/pull/129 +# https://github.com/damian0815/compel/issues/128 +# compel currently pins transformers~=4.25, which forced pip into multi-hour +# resolver backtracking storms in CI. backend.py imports it lazily and gates +# the COMPEL=1 env var on the import succeeding, so dropping it here is safe. \ No newline at end of file