diff --git a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py
index 5fd3669911f..8118e28abbd 100644
--- a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py
+++ b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py
@@ -20,29 +20,22 @@ import re
 from contextlib import nullcontext
 from io import BytesIO
-from typing import Optional, Union
 from pathlib import Path
+from typing import Optional, Union
 
 import requests
 import torch
-from transformers import (
-    AutoFeatureExtractor,
-    BertTokenizerFast,
-    CLIPImageProcessor,
-    CLIPTextConfig,
-    CLIPTextModel,
-    CLIPTextModelWithProjection,
-    CLIPTokenizer,
-    CLIPVisionConfig,
-    CLIPVisionModelWithProjection,
-)
-
 from diffusers.models import (
     AutoencoderKL,
     ControlNetModel,
     PriorTransformer,
     UNet2DConditionModel,
 )
+from diffusers.pipelines.latent_diffusion.pipeline_latent_diffusion import LDMBertConfig, LDMBertModel
+from diffusers.pipelines.paint_by_example import PaintByExampleImageEncoder
+from diffusers.pipelines.pipeline_utils import DiffusionPipeline
+from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
+from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
 from diffusers.schedulers import (
     DDIMScheduler,
     DDPMScheduler,
@@ -54,18 +47,23 @@
     PNDMScheduler,
     UnCLIPScheduler,
 )
-from diffusers.utils import is_accelerate_available, is_omegaconf_available, is_safetensors_available
+from diffusers.utils import is_accelerate_available, is_omegaconf_available
 from diffusers.utils.import_utils import BACKENDS_MAPPING
-from diffusers.pipelines.latent_diffusion.pipeline_latent_diffusion import LDMBertConfig, LDMBertModel
-from diffusers.pipelines.paint_by_example import PaintByExampleImageEncoder
-from diffusers.pipelines.pipeline_utils import DiffusionPipeline
-from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
-from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
+from picklescan.scanner import scan_file_path
+from transformers import (
+    AutoFeatureExtractor,
+    BertTokenizerFast,
+    CLIPImageProcessor,
+    CLIPTextConfig,
+    CLIPTextModel,
+    CLIPTextModelWithProjection,
+    CLIPTokenizer,
+    CLIPVisionConfig,
+    CLIPVisionModelWithProjection,
+)
 
-from invokeai.backend.util.logging import InvokeAILogger
 from invokeai.app.services.config import InvokeAIAppConfig
-
-from picklescan.scanner import scan_file_path
+from invokeai.backend.util.logging import InvokeAILogger
 
 from .models import BaseModelType, ModelVariantType
 
 try:
@@ -1221,9 +1219,6 @@ def download_from_original_stable_diffusion_ckpt(
         raise ValueError(BACKENDS_MAPPING["omegaconf"][1])
 
     if from_safetensors:
-        if not is_safetensors_available():
-            raise ValueError(BACKENDS_MAPPING["safetensors"][1])
-
         from safetensors.torch import load_file as safe_load
 
         checkpoint = safe_load(checkpoint_path, device="cpu")
@@ -1662,9 +1657,6 @@ def download_controlnet_from_original_ckpt(
     from omegaconf import OmegaConf
 
     if from_safetensors:
-        if not is_safetensors_available():
-            raise ValueError(BACKENDS_MAPPING["safetensors"][1])
-
         from safetensors import safe_open
 
         checkpoint = {}
@@ -1741,7 +1733,7 @@ def convert_ckpt_to_diffusers(
 
     pipe.save_pretrained(
         dump_path,
-        safe_serialization=use_safetensors and is_safetensors_available(),
+        safe_serialization=use_safetensors,
     )
 
 
@@ -1757,7 +1749,4 @@ def convert_controlnet_to_diffusers(
     """
     pipe = download_controlnet_from_original_ckpt(checkpoint_path, **kwargs)
 
-    pipe.save_pretrained(
-        dump_path,
-        safe_serialization=is_safetensors_available(),
-    )
+    pipe.save_pretrained(dump_path, safe_serialization=True)
diff --git a/invokeai/backend/model_management/models/vae.py b/invokeai/backend/model_management/models/vae.py
index cf7622a9aae..f5dc11b27b1 100644
--- a/invokeai/backend/model_management/models/vae.py
+++ b/invokeai/backend/model_management/models/vae.py
@@ -5,7 +5,6 @@
 
 import safetensors
 import torch
-from diffusers.utils import is_safetensors_available
 from omegaconf import OmegaConf
 
 from invokeai.app.services.config import InvokeAIAppConfig
@@ -175,5 +174,5 @@ def _convert_vae_ckpt_and_cache(
         vae_config=config,
         image_size=image_size,
     )
-    vae_model.save_pretrained(output_path, safe_serialization=is_safetensors_available())
+    vae_model.save_pretrained(output_path, safe_serialization=True)
     return output_path
diff --git a/pyproject.toml b/pyproject.toml
index 980cf498b77..02e53f066ad 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -40,7 +40,7 @@ dependencies = [
     "controlnet-aux>=0.0.6",
     "timm==0.6.13",  # needed to override timm latest in controlnet_aux, see https://github.com/isl-org/ZoeDepth/issues/26
     "datasets",
-    "diffusers[torch]~=0.19.3",
+    "diffusers[torch]~=0.20.0",
     "dnspython~=2.4.0",
     "dynamicprompts",
     "easing-functions",
@@ -49,7 +49,7 @@
     "fastapi==0.88.0",
     "fastapi-events==0.8.0",
     "fastapi-socketio==0.0.10",
-    "huggingface-hub>=0.11.1",
+    "huggingface-hub~=0.16.4",
     "invisible-watermark~=0.2.0",  # needed to install SDXL base and refiner using their repo_ids
     "matplotlib",  # needed for plotting of Penner easing functions
     "mediapipe",  # needed for "mediapipeface" controlnet model