From 98c12da1b62bb85747b1cd824bfa076b6c604dc2 Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Fri, 1 May 2026 12:42:53 +0000 Subject: [PATCH] fix(diffusers): drop compel from requirements to unblock pip resolver MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit compel 2.3.1 (latest, Nov 2025) declares transformers~=4.25 in its metadata, i.e. >=4.25,<5.0. After transformers 5.0 (2026-01-26) and huggingface-hub 1.0 (2025-10-27) shipped, the weekly DEPS_REFRESH cache rotation in CI started seeing the new majors and pip's resolver went into multi-hour backtracking storms walking every transformers 4.x candidate against every accelerate/hf-hub/tokenizers combination to find a set compel would accept. The 2026-04-29 backend-build for the diffusers backend (darwin-mps + l4t + cublas13-turboquant matrix cells) hit the GitHub Actions 6h job timeout still inside pip install — the build itself never started. compel is the only hard upper bound on transformers in this stack (diffusers, accelerate, peft, optimum-quanto are all flexible), and upstream support for transformers 5 is still in flight: damian0815/compel#129 ("Modernize Compel for Transformers 5") and #128 ("Bump transformers version to >5.0") are both open as of today. backend.py only constructs Compel() when COMPEL=1 is set in the env (default off), so make compel a true optional extra: - Wrap the top-level `from compel import ...` in try/except ImportError, mirroring the existing sd_embed pattern. - Auto-disable COMPEL with a warning when the module isn't installed, instead of crashing on module load. - Drop compel from all eight requirements-*.txt variants so the resolver no longer has to satisfy its transformers cap. - Leave a TODO in backend.py and in each requirements file pointing at the upstream PR/issue, so the dependency can be reinstated once compel supports transformers >= 5. 
Users who rely on weighted-prompt embeddings can opt in with a manual `pip install compel` alongside COMPEL=1; the warning emitted on startup tells them how. Assisted-by: Claude:claude-opus-4-7 [Bash Read Edit WebFetch] Signed-off-by: Ettore Di Giacinto --- backend/python/diffusers/backend.py | 17 ++++++++++++++++- backend/python/diffusers/requirements-cpu.txt | 9 +++++++-- .../python/diffusers/requirements-cublas12.txt | 7 ++++++- .../python/diffusers/requirements-cublas13.txt | 7 ++++++- .../python/diffusers/requirements-hipblas.txt | 9 +++++++-- backend/python/diffusers/requirements-intel.txt | 9 +++++++-- backend/python/diffusers/requirements-l4t12.txt | 7 ++++++- backend/python/diffusers/requirements-l4t13.txt | 7 ++++++- backend/python/diffusers/requirements-mps.txt | 9 +++++++-- 9 files changed, 68 insertions(+), 13 deletions(-) diff --git a/backend/python/diffusers/backend.py b/backend/python/diffusers/backend.py index c9ad3b0bd9c9..00b292292949 100755 --- a/backend/python/diffusers/backend.py +++ b/backend/python/diffusers/backend.py @@ -40,7 +40,19 @@ from diffusers import FluxPipeline, FluxTransformer2DModel, AutoencoderKLWan from diffusers.pipelines.stable_diffusion import safety_checker from diffusers.utils import load_image, export_to_video -from compel import Compel, ReturnedEmbeddingsType +# TODO: re-enable compel as a hard dependency once it supports transformers >= 5. +# Tracking upstream: https://github.com/damian0815/compel/pull/129 +# and https://github.com/damian0815/compel/issues/128 +# Until then compel pins transformers ~= 4.25, which forces the pip resolver into +# multi-hour backtracking storms in CI when DEPS_REFRESH rotates the cache. +# Keep the import optional and gate usage on the COMPEL env var (set COMPEL=1 to opt in). 
+try: + from compel import Compel, ReturnedEmbeddingsType + COMPEL_AVAILABLE = True +except ImportError: + Compel = None + ReturnedEmbeddingsType = None + COMPEL_AVAILABLE = False from optimum.quanto import freeze, qfloat8, quantize from transformers import T5EncoderModel from safetensors.torch import load_file @@ -66,6 +78,9 @@ _ONE_DAY_IN_SECONDS = 60 * 60 * 24 COMPEL = os.environ.get("COMPEL", "0") == "1" +if COMPEL and not COMPEL_AVAILABLE: + print("WARNING: COMPEL is enabled but the compel module is not installed. Install it manually (`pip install compel`) or unset COMPEL. Falling back to standard prompt processing.", file=sys.stderr) + COMPEL = False SD_EMBED = os.environ.get("SD_EMBED", "0") == "1" # Warn if SD_EMBED is enabled but the module is not available if SD_EMBED and not SD_EMBED_AVAILABLE: diff --git a/backend/python/diffusers/requirements-cpu.txt b/backend/python/diffusers/requirements-cpu.txt index 2b76224d9695..8db419b292a7 100644 --- a/backend/python/diffusers/requirements-cpu.txt +++ b/backend/python/diffusers/requirements-cpu.txt @@ -4,10 +4,15 @@ opencv-python transformers torchvision==0.22.1 accelerate -compel git+https://github.com/xhinker/sd_embed peft sentencepiece torch==2.7.1 optimum-quanto -ftfy \ No newline at end of file +ftfy +# TODO: re-add compel once it supports transformers >= 5. +# Tracking: https://github.com/damian0815/compel/pull/129 +# https://github.com/damian0815/compel/issues/128 +# compel currently pins transformers~=4.25, which forced pip into multi-hour +# resolver backtracking storms in CI. backend.py imports it lazily and gates +# the COMPEL=1 env var on the import succeeding, so dropping it here is safe. 
\ No newline at end of file diff --git a/backend/python/diffusers/requirements-cublas12.txt b/backend/python/diffusers/requirements-cublas12.txt index 5a1e947f26e7..e3351ae75a60 100644 --- a/backend/python/diffusers/requirements-cublas12.txt +++ b/backend/python/diffusers/requirements-cublas12.txt @@ -4,10 +4,15 @@ opencv-python transformers torchvision accelerate -compel git+https://github.com/xhinker/sd_embed peft sentencepiece torch ftfy optimum-quanto +# TODO: re-add compel once it supports transformers >= 5. +# Tracking: https://github.com/damian0815/compel/pull/129 +# https://github.com/damian0815/compel/issues/128 +# compel currently pins transformers~=4.25, which forced pip into multi-hour +# resolver backtracking storms in CI. backend.py imports it lazily and gates +# the COMPEL=1 env var on the import succeeding, so dropping it here is safe. diff --git a/backend/python/diffusers/requirements-cublas13.txt b/backend/python/diffusers/requirements-cublas13.txt index 354c6df070a2..546998ba41bc 100644 --- a/backend/python/diffusers/requirements-cublas13.txt +++ b/backend/python/diffusers/requirements-cublas13.txt @@ -4,10 +4,15 @@ opencv-python transformers torchvision accelerate -compel git+https://github.com/xhinker/sd_embed peft sentencepiece torch ftfy optimum-quanto +# TODO: re-add compel once it supports transformers >= 5. +# Tracking: https://github.com/damian0815/compel/pull/129 +# https://github.com/damian0815/compel/issues/128 +# compel currently pins transformers~=4.25, which forced pip into multi-hour +# resolver backtracking storms in CI. backend.py imports it lazily and gates +# the COMPEL=1 env var on the import succeeding, so dropping it here is safe. 
diff --git a/backend/python/diffusers/requirements-hipblas.txt b/backend/python/diffusers/requirements-hipblas.txt index 712510827b99..3480d1fd6168 100644 --- a/backend/python/diffusers/requirements-hipblas.txt +++ b/backend/python/diffusers/requirements-hipblas.txt @@ -5,8 +5,13 @@ git+https://github.com/huggingface/diffusers opencv-python transformers accelerate -compel peft sentencepiece optimum-quanto -ftfy \ No newline at end of file +ftfy +# TODO: re-add compel once it supports transformers >= 5. +# Tracking: https://github.com/damian0815/compel/pull/129 +# https://github.com/damian0815/compel/issues/128 +# compel currently pins transformers~=4.25, which forced pip into multi-hour +# resolver backtracking storms in CI. backend.py imports it lazily and gates +# the COMPEL=1 env var on the import succeeding, so dropping it here is safe. \ No newline at end of file diff --git a/backend/python/diffusers/requirements-intel.txt b/backend/python/diffusers/requirements-intel.txt index 3fd3cde74466..c78f5ef230fa 100644 --- a/backend/python/diffusers/requirements-intel.txt +++ b/backend/python/diffusers/requirements-intel.txt @@ -7,9 +7,14 @@ git+https://github.com/huggingface/diffusers opencv-python transformers accelerate -compel git+https://github.com/xhinker/sd_embed peft sentencepiece optimum-quanto -ftfy \ No newline at end of file +ftfy +# TODO: re-add compel once it supports transformers >= 5. +# Tracking: https://github.com/damian0815/compel/pull/129 +# https://github.com/damian0815/compel/issues/128 +# compel currently pins transformers~=4.25, which forced pip into multi-hour +# resolver backtracking storms in CI. backend.py imports it lazily and gates +# the COMPEL=1 env var on the import succeeding, so dropping it here is safe. 
\ No newline at end of file diff --git a/backend/python/diffusers/requirements-l4t12.txt b/backend/python/diffusers/requirements-l4t12.txt index 9f77a9d09014..15857c4b0751 100644 --- a/backend/python/diffusers/requirements-l4t12.txt +++ b/backend/python/diffusers/requirements-l4t12.txt @@ -3,10 +3,15 @@ torch git+https://github.com/huggingface/diffusers transformers accelerate -compel peft optimum-quanto numpy<2 sentencepiece torchvision ftfy +# TODO: re-add compel once it supports transformers >= 5. +# Tracking: https://github.com/damian0815/compel/pull/129 +# https://github.com/damian0815/compel/issues/128 +# compel currently pins transformers~=4.25, which forced pip into multi-hour +# resolver backtracking storms in CI. backend.py imports it lazily and gates +# the COMPEL=1 env var on the import succeeding, so dropping it here is safe. diff --git a/backend/python/diffusers/requirements-l4t13.txt b/backend/python/diffusers/requirements-l4t13.txt index 560858e354f4..226033a61123 100644 --- a/backend/python/diffusers/requirements-l4t13.txt +++ b/backend/python/diffusers/requirements-l4t13.txt @@ -3,7 +3,6 @@ torch git+https://github.com/huggingface/diffusers transformers accelerate -compel peft optimum-quanto numpy<2 @@ -11,3 +10,9 @@ sentencepiece torchvision ftfy chardet +# TODO: re-add compel once it supports transformers >= 5. +# Tracking: https://github.com/damian0815/compel/pull/129 +# https://github.com/damian0815/compel/issues/128 +# compel currently pins transformers~=4.25, which forced pip into multi-hour +# resolver backtracking storms in CI. backend.py imports it lazily and gates +# the COMPEL=1 env var on the import succeeding, so dropping it here is safe. 
diff --git a/backend/python/diffusers/requirements-mps.txt b/backend/python/diffusers/requirements-mps.txt index 8b7c2413bffa..58eb65f02766 100644 --- a/backend/python/diffusers/requirements-mps.txt +++ b/backend/python/diffusers/requirements-mps.txt @@ -4,8 +4,13 @@ git+https://github.com/huggingface/diffusers opencv-python transformers accelerate -compel peft sentencepiece optimum-quanto -ftfy \ No newline at end of file +ftfy +# TODO: re-add compel once it supports transformers >= 5. +# Tracking: https://github.com/damian0815/compel/pull/129 +# https://github.com/damian0815/compel/issues/128 +# compel currently pins transformers~=4.25, which forced pip into multi-hour +# resolver backtracking storms in CI. backend.py imports it lazily and gates +# the COMPEL=1 env var on the import succeeding, so dropping it here is safe. \ No newline at end of file