55 changes: 22 additions & 33 deletions invokeai/backend/model_management/convert_ckpt_to_diffusers.py
@@ -20,29 +20,22 @@
 import re
 from contextlib import nullcontext
 from io import BytesIO
-from typing import Optional, Union
 from pathlib import Path
+from typing import Optional, Union
 
 import requests
 import torch
-from transformers import (
-    AutoFeatureExtractor,
-    BertTokenizerFast,
-    CLIPImageProcessor,
-    CLIPTextConfig,
-    CLIPTextModel,
-    CLIPTextModelWithProjection,
-    CLIPTokenizer,
-    CLIPVisionConfig,
-    CLIPVisionModelWithProjection,
-)
-
 from diffusers.models import (
     AutoencoderKL,
     ControlNetModel,
     PriorTransformer,
     UNet2DConditionModel,
 )
+from diffusers.pipelines.latent_diffusion.pipeline_latent_diffusion import LDMBertConfig, LDMBertModel
+from diffusers.pipelines.paint_by_example import PaintByExampleImageEncoder
+from diffusers.pipelines.pipeline_utils import DiffusionPipeline
+from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
+from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
 from diffusers.schedulers import (
     DDIMScheduler,
     DDPMScheduler,
@@ -54,18 +47,23 @@
     PNDMScheduler,
     UnCLIPScheduler,
 )
-from diffusers.utils import is_accelerate_available, is_omegaconf_available, is_safetensors_available
+from diffusers.utils import is_accelerate_available, is_omegaconf_available
 from diffusers.utils.import_utils import BACKENDS_MAPPING
-from diffusers.pipelines.latent_diffusion.pipeline_latent_diffusion import LDMBertConfig, LDMBertModel
-from diffusers.pipelines.paint_by_example import PaintByExampleImageEncoder
-from diffusers.pipelines.pipeline_utils import DiffusionPipeline
-from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
-from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
+from picklescan.scanner import scan_file_path
+from transformers import (
+    AutoFeatureExtractor,
+    BertTokenizerFast,
+    CLIPImageProcessor,
+    CLIPTextConfig,
+    CLIPTextModel,
+    CLIPTextModelWithProjection,
+    CLIPTokenizer,
+    CLIPVisionConfig,
+    CLIPVisionModelWithProjection,
+)
 
-from invokeai.backend.util.logging import InvokeAILogger
 from invokeai.app.services.config import InvokeAIAppConfig
-
-from picklescan.scanner import scan_file_path
+from invokeai.backend.util.logging import InvokeAILogger
 from .models import BaseModelType, ModelVariantType
 
 try:
@@ -1221,9 +1219,6 @@ def download_from_original_stable_diffusion_ckpt(
         raise ValueError(BACKENDS_MAPPING["omegaconf"][1])
 
     if from_safetensors:
-        if not is_safetensors_available():
-            raise ValueError(BACKENDS_MAPPING["safetensors"][1])
-
         from safetensors.torch import load_file as safe_load
 
         checkpoint = safe_load(checkpoint_path, device="cpu")
@@ -1662,9 +1657,6 @@ def download_controlnet_from_original_ckpt(
     from omegaconf import OmegaConf
 
     if from_safetensors:
-        if not is_safetensors_available():
-            raise ValueError(BACKENDS_MAPPING["safetensors"][1])
-
         from safetensors import safe_open
 
         checkpoint = {}
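Note: both hunks above drop the same guard. The diffusers[torch]~=0.20.0 pin further down pulls in safetensors as an install-time requirement, which is presumably why the availability check is no longer needed. A minimal sketch (not part of the PR) of the assumption the converter now relies on:

import importlib.util

# With diffusers ~=0.20.0 installed, safetensors should always be importable,
# so the converter can import it unconditionally.
if importlib.util.find_spec("safetensors") is None:
    raise RuntimeError("safetensors is missing; the diffusers[torch] requirement should have pulled it in")

from safetensors.torch import load_file as safe_load  # same import the function body now uses directly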
@@ -1741,7 +1733,7 @@ def convert_ckpt_to_diffusers(
 
     pipe.save_pretrained(
         dump_path,
-        safe_serialization=use_safetensors and is_safetensors_available(),
+        safe_serialization=use_safetensors,
     )
 
 
@@ -1757,7 +1749,4 @@ def convert_controlnet_to_diffusers(
     """
     pipe = download_controlnet_from_original_ckpt(checkpoint_path, **kwargs)
 
-    pipe.save_pretrained(
-        dump_path,
-        safe_serialization=is_safetensors_available(),
-    )
+    pipe.save_pretrained(dump_path, safe_serialization=True)
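Note: the save_pretrained() calls in this file (and in models/vae.py below) now pass safe_serialization explicitly instead of gating it on is_safetensors_available(). A rough sketch (hypothetical paths, not from the PR) of what the flag controls in diffusers:

from diffusers import AutoencoderKL

vae = AutoencoderKL()  # tiny default config, used here only for illustration
vae.save_pretrained("/tmp/vae-safetensors", safe_serialization=True)  # writes diffusion_pytorch_model.safetensors
vae.save_pretrained("/tmp/vae-pickle", safe_serialization=False)  # writes pickle-based diffusion_pytorch_model.bin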
3 changes: 1 addition & 2 deletions invokeai/backend/model_management/models/vae.py
@@ -5,7 +5,6 @@
 
 import safetensors
 import torch
-from diffusers.utils import is_safetensors_available
 from omegaconf import OmegaConf
 
 from invokeai.app.services.config import InvokeAIAppConfig
@@ -175,5 +174,5 @@ def _convert_vae_ckpt_and_cache(
         vae_config=config,
         image_size=image_size,
     )
-    vae_model.save_pretrained(output_path, safe_serialization=is_safetensors_available())
+    vae_model.save_pretrained(output_path, safe_serialization=True)
     return output_path
4 changes: 2 additions & 2 deletions pyproject.toml
@@ -40,7 +40,7 @@ dependencies = [
     "controlnet-aux>=0.0.6",
     "timm==0.6.13", # needed to override timm latest in controlnet_aux, see https://github.com/isl-org/ZoeDepth/issues/26
     "datasets",
-    "diffusers[torch]~=0.19.3",
+    "diffusers[torch]~=0.20.0",
     "dnspython~=2.4.0",
     "dynamicprompts",
     "easing-functions",
@@ -49,7 +49,7 @@
     "fastapi==0.88.0",
     "fastapi-events==0.8.0",
     "fastapi-socketio==0.0.10",
-    "huggingface-hub>=0.11.1",
+    "huggingface-hub~=0.16.4",
     "invisible-watermark~=0.2.0", # needed to install SDXL base and refiner using their repo_ids
     "matplotlib", # needed for plotting of Penner easing functions
     "mediapipe", # needed for "mediapipeface" controlnet model