From 13d08aab57b7fb13ebbc18b8a2c2d6fb0a7774ea Mon Sep 17 00:00:00 2001
From: Dhruv Nair
Date: Thu, 14 Dec 2023 07:55:16 +0000
Subject: [PATCH 01/17] deprecate pipe

---
 src/diffusers/pipelines/__init__.py | 100 ++++++------
 .../pipelines/deprecated/__init__.py | 152 ++++++++++++++++++
 .../alt_diffusion/__init__.py | 6 +-
 .../alt_diffusion/modeling_roberta_series.py | 0
 .../alt_diffusion/pipeline_alt_diffusion.py | 20 +-
 .../pipeline_alt_diffusion_img2img.py | 20 +-
 .../alt_diffusion/pipeline_output.py | 2 +-
 .../audio_diffusion/__init__.py | 2 +-
 .../{ => deprecated}/audio_diffusion/mel.py | 4 +-
 .../pipeline_audio_diffusion.py | 8 +-
 .../latent_diffusion_uncond/__init__.py | 2 +-
 .../pipeline_latent_diffusion_uncond.py | 8 +-
 .../{ => deprecated}/pndm/__init__.py | 2 +-
 .../{ => deprecated}/pndm/pipeline_pndm.py | 8 +-
 .../{ => deprecated}/repaint/__init__.py | 2 +-
 .../repaint/pipeline_repaint.py | 10 +-
 .../{ => deprecated}/score_sde_ve/__init__.py | 2 +-
 .../score_sde_ve/pipeline_score_sde_ve.py | 8 +-
 .../spectrogram_diffusion/__init__.py | 12 +-
 .../continuous_encoder.py | 4 +-
 .../spectrogram_diffusion/midi_utils.py | 2 +-
 .../spectrogram_diffusion/notes_encoder.py | 4 +-
 .../pipeline_spectrogram_diffusion.py | 12 +-
 .../stable_diffusion_variants/__init__.py | 55 +++++++
 .../pipeline_cycle_diffusion.py | 22 +--
 ...ne_onnx_stable_diffusion_inpaint_legacy.py | 12 +-
 ...ipeline_stable_diffusion_inpaint_legacy.py | 22 +--
 ...pipeline_stable_diffusion_model_editing.py | 22 +--
 .../pipeline_stable_diffusion_paradigms.py | 20 +-
 .../pipeline_stable_diffusion_pix2pix_zero.py | 24 +--
 .../stochastic_karras_ve/__init__.py | 2 +-
 .../pipeline_stochastic_karras_ve.py | 8 +-
 .../versatile_diffusion/__init__.py | 6 +-
 .../versatile_diffusion/modeling_text_unet.py | 20 +-
 .../pipeline_versatile_diffusion.py | 8 +-
 ...ipeline_versatile_diffusion_dual_guided.py | 12 +-
 ...ine_versatile_diffusion_image_variation.py | 12 +-
 ...eline_versatile_diffusion_text_to_image.py | 12 +-
 .../{ => deprecated}/vq_diffusion/__init__.py | 6 +-
 .../vq_diffusion/pipeline_vq_diffusion.py | 10 +-
 .../pipelines/stable_diffusion/__init__.py | 19 +-
 41 files changed, 433 insertions(+), 249 deletions(-)
 create mode 100644 src/diffusers/pipelines/deprecated/__init__.py
 rename src/diffusers/pipelines/{ => deprecated}/alt_diffusion/__init__.py (91%)
 rename src/diffusers/pipelines/{ => deprecated}/alt_diffusion/modeling_roberta_series.py (100%)
 rename src/diffusers/pipelines/{ => deprecated}/alt_diffusion/pipeline_alt_diffusion.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/alt_diffusion/pipeline_alt_diffusion_img2img.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/alt_diffusion/pipeline_output.py (97%)
 rename src/diffusers/pipelines/{ => deprecated}/audio_diffusion/__init__.py (88%)
 rename src/diffusers/pipelines/{ => deprecated}/audio_diffusion/mel.py (97%)
 rename src/diffusers/pipelines/{ => deprecated}/audio_diffusion/pipeline_audio_diffusion.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/latent_diffusion_uncond/__init__.py (87%)
 rename src/diffusers/pipelines/{ => deprecated}/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py (96%)
 rename src/diffusers/pipelines/{ => deprecated}/pndm/__init__.py (86%)
 rename src/diffusers/pipelines/{ => deprecated}/pndm/pipeline_pndm.py (95%)
 rename src/diffusers/pipelines/{ => deprecated}/repaint/__init__.py (86%)
 rename src/diffusers/pipelines/{ => deprecated}/repaint/pipeline_repaint.py (97%)
 rename src/diffusers/pipelines/{ =>
deprecated}/score_sde_ve/__init__.py (87%) rename src/diffusers/pipelines/{ => deprecated}/score_sde_ve/pipeline_score_sde_ve.py (95%) rename src/diffusers/pipelines/{ => deprecated}/spectrogram_diffusion/__init__.py (85%) rename src/diffusers/pipelines/{ => deprecated}/spectrogram_diffusion/continuous_encoder.py (96%) rename src/diffusers/pipelines/{ => deprecated}/spectrogram_diffusion/midi_utils.py (99%) rename src/diffusers/pipelines/{ => deprecated}/spectrogram_diffusion/notes_encoder.py (96%) rename src/diffusers/pipelines/{ => deprecated}/spectrogram_diffusion/pipeline_spectrogram_diffusion.py (97%) create mode 100644 src/diffusers/pipelines/deprecated/stable_diffusion_variants/__init__.py rename src/diffusers/pipelines/{stable_diffusion => deprecated/stable_diffusion_variants}/pipeline_cycle_diffusion.py (98%) rename src/diffusers/pipelines/{stable_diffusion => deprecated/stable_diffusion_variants}/pipeline_onnx_stable_diffusion_inpaint_legacy.py (98%) rename src/diffusers/pipelines/{stable_diffusion => deprecated/stable_diffusion_variants}/pipeline_stable_diffusion_inpaint_legacy.py (98%) rename src/diffusers/pipelines/{stable_diffusion => deprecated/stable_diffusion_variants}/pipeline_stable_diffusion_model_editing.py (98%) rename src/diffusers/pipelines/{stable_diffusion => deprecated/stable_diffusion_variants}/pipeline_stable_diffusion_paradigms.py (98%) rename src/diffusers/pipelines/{stable_diffusion => deprecated/stable_diffusion_variants}/pipeline_stable_diffusion_pix2pix_zero.py (98%) rename src/diffusers/pipelines/{ => deprecated}/stochastic_karras_ve/__init__.py (87%) rename src/diffusers/pipelines/{ => deprecated}/stochastic_karras_ve/pipeline_stochastic_karras_ve.py (96%) rename src/diffusers/pipelines/{ => deprecated}/versatile_diffusion/__init__.py (94%) rename src/diffusers/pipelines/{ => deprecated}/versatile_diffusion/modeling_text_unet.py (99%) rename src/diffusers/pipelines/{ => deprecated}/versatile_diffusion/pipeline_versatile_diffusion.py (99%) rename src/diffusers/pipelines/{ => deprecated}/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py (98%) rename src/diffusers/pipelines/{ => deprecated}/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py (98%) rename src/diffusers/pipelines/{ => deprecated}/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py (98%) rename src/diffusers/pipelines/{ => deprecated}/vq_diffusion/__init__.py (90%) rename src/diffusers/pipelines/{ => deprecated}/vq_diffusion/pipeline_vq_diffusion.py (98%) diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py index 5e7b2e285f73..64e7d2531c00 100644 --- a/src/diffusers/pipelines/__init__.py +++ b/src/diffusers/pipelines/__init__.py @@ -23,6 +23,7 @@ "latent_diffusion": [], "stable_diffusion": [], "stable_diffusion_xl": [], + "deprecated": [], } try: @@ -44,16 +45,18 @@ _import_structure["ddpm"] = ["DDPMPipeline"] _import_structure["dit"] = ["DiTPipeline"] _import_structure["latent_diffusion"].extend(["LDMSuperResolutionPipeline"]) - _import_structure["latent_diffusion_uncond"] = ["LDMPipeline"] _import_structure["pipeline_utils"] = [ "AudioPipelineOutput", "DiffusionPipeline", "ImagePipelineOutput", ] - _import_structure["pndm"] = ["PNDMPipeline"] - _import_structure["repaint"] = ["RePaintPipeline"] - _import_structure["score_sde_ve"] = ["ScoreSdeVePipeline"] - _import_structure["stochastic_karras_ve"] = ["KarrasVePipeline"] + _import_structure["deprecated"].extend([ + "PNDMPipeline", + "LDMPipeline", + 
"RePaintPipeline", + "ScoreSdeVePipeline", + "KarrasVePipeline", + ]) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() @@ -62,7 +65,7 @@ _dummy_objects.update(get_objects_from_module(dummy_torch_and_librosa_objects)) else: - _import_structure["audio_diffusion"] = ["AudioDiffusionPipeline", "Mel"] + _import_structure["deprecated"].extend(["AudioDiffusionPipeline", "Mel"]) try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() @@ -71,10 +74,21 @@ _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: - _import_structure["alt_diffusion"] = [ - "AltDiffusionImg2ImgPipeline", + _import_structure["deprecated"].extend([ + "VQDiffusionPipeline", "AltDiffusionPipeline", - ] + "AltDiffusionImg2ImgPipeline", + "CycleDiffusionPipeline", + "StableDiffusionInpaintPipelineLegacy", + "StableDiffusionPix2PixZeroPipeline", + "StableDiffusionParadigmsPipeline", + "StableDiffusionModelEditingPipeline", + "SpectrogramDiffusionPipeline", + "VersatileDiffusionDualGuidedPipeline", + "VersatileDiffusionImageVariationPipeline", + "VersatileDiffusionPipeline", + "VersatileDiffusionTextToImagePipeline", + ]) _import_structure["animatediff"] = ["AnimateDiffPipeline"] _import_structure["audioldm"] = ["AudioLDMPipeline"] _import_structure["audioldm2"] = [ @@ -146,7 +160,6 @@ _import_structure["stable_diffusion"].extend( [ "CLIPImageProjection", - "CycleDiffusionPipeline", "StableDiffusionAttendAndExcitePipeline", "StableDiffusionDepth2ImgPipeline", "StableDiffusionDiffEditPipeline", @@ -156,15 +169,11 @@ "StableDiffusionImageVariationPipeline", "StableDiffusionImg2ImgPipeline", "StableDiffusionInpaintPipeline", - "StableDiffusionInpaintPipelineLegacy", "StableDiffusionInstructPix2PixPipeline", "StableDiffusionLatentUpscalePipeline", "StableDiffusionLDM3DPipeline", - "StableDiffusionModelEditingPipeline", "StableDiffusionPanoramaPipeline", - "StableDiffusionParadigmsPipeline", "StableDiffusionPipeline", - "StableDiffusionPix2PixZeroPipeline", "StableDiffusionSAGPipeline", "StableDiffusionUpscalePipeline", "StableUnCLIPImg2ImgPipeline", @@ -198,13 +207,6 @@ "UniDiffuserPipeline", "UniDiffuserTextDecoder", ] - _import_structure["versatile_diffusion"] = [ - "VersatileDiffusionDualGuidedPipeline", - "VersatileDiffusionImageVariationPipeline", - "VersatileDiffusionPipeline", - "VersatileDiffusionTextToImagePipeline", - ] - _import_structure["vq_diffusion"] = ["VQDiffusionPipeline"] _import_structure["wuerstchen"] = [ "WuerstchenCombinedPipeline", "WuerstchenDecoderPipeline", @@ -231,7 +233,6 @@ [ "OnnxStableDiffusionImg2ImgPipeline", "OnnxStableDiffusionInpaintPipeline", - "OnnxStableDiffusionInpaintPipelineLegacy", "OnnxStableDiffusionPipeline", "OnnxStableDiffusionUpscalePipeline", "StableDiffusionOnnxPipeline", @@ -279,18 +280,6 @@ "FlaxStableDiffusionXLPipeline", ] ) -try: - if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - from ..utils import dummy_transformers_and_torch_and_note_seq_objects # noqa F403 - - _dummy_objects.update(get_objects_from_module(dummy_transformers_and_torch_and_note_seq_objects)) -else: - _import_structure["spectrogram_diffusion"] = [ - "MidiProcessor", - "SpectrogramDiffusionPipeline", - ] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: @@ -309,18 +298,19 @@ from .dance_diffusion import DanceDiffusionPipeline from .ddim import 
DDIMPipeline from .ddpm import DDPMPipeline + from .deprecated import ( + KarrasVePipeline, + PNDMPipeline, + RePaintPipeline, + ScoreSdeVePipeline, + ) from .dit import DiTPipeline from .latent_diffusion import LDMSuperResolutionPipeline - from .latent_diffusion_uncond import LDMPipeline from .pipeline_utils import ( AudioPipelineOutput, DiffusionPipeline, ImagePipelineOutput, ) - from .pndm import PNDMPipeline - from .repaint import RePaintPipeline - from .score_sde_ve import ScoreSdeVePipeline - from .stochastic_karras_ve import KarrasVePipeline try: if not (is_torch_available() and is_librosa_available()): @@ -328,7 +318,7 @@ except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_librosa_objects import * else: - from .audio_diffusion import AudioDiffusionPipeline, Mel + from .deprecated import AudioDiffusionPipeline, Mel try: if not (is_torch_available() and is_transformers_available()): @@ -336,7 +326,6 @@ except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_transformers_objects import * else: - from .alt_diffusion import AltDiffusionImg2ImgPipeline, AltDiffusionPipeline from .animatediff import AnimateDiffPipeline from .audioldm import AudioLDMPipeline from .audioldm2 import ( @@ -366,6 +355,20 @@ IFPipeline, IFSuperResolutionPipeline, ) + from .deprecated import ( + AltDiffusionImg2ImgPipeline, + AltDiffusionPipeline, + CycleDiffusionPipeline, + StableDiffusionInpaintPipelineLegacy, + StableDiffusionModelEditingPipeline, + StableDiffusionParadigmsPipeline, + StableDiffusionPix2PixZeroPipeline, + VersatileDiffusionDualGuidedPipeline, + VersatileDiffusionImageVariationPipeline, + VersatileDiffusionPipeline, + VersatileDiffusionTextToImagePipeline, + VQDiffusionPipeline, + ) from .kandinsky import ( KandinskyCombinedPipeline, KandinskyImg2ImgCombinedPipeline, @@ -403,7 +406,6 @@ from .shap_e import ShapEImg2ImgPipeline, ShapEPipeline from .stable_diffusion import ( CLIPImageProjection, - CycleDiffusionPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionDepth2ImgPipeline, StableDiffusionDiffEditPipeline, @@ -412,15 +414,11 @@ StableDiffusionImageVariationPipeline, StableDiffusionImg2ImgPipeline, StableDiffusionInpaintPipeline, - StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPix2PixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDM3DPipeline, - StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, - StableDiffusionParadigmsPipeline, StableDiffusionPipeline, - StableDiffusionPix2PixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImg2ImgPipeline, @@ -451,13 +449,6 @@ UniDiffuserPipeline, UniDiffuserTextDecoder, ) - from .versatile_diffusion import ( - VersatileDiffusionDualGuidedPipeline, - VersatileDiffusionImageVariationPipeline, - VersatileDiffusionPipeline, - VersatileDiffusionTextToImagePipeline, - ) - from .vq_diffusion import VQDiffusionPipeline from .wuerstchen import ( WuerstchenCombinedPipeline, WuerstchenDecoderPipeline, @@ -482,7 +473,6 @@ from .stable_diffusion import ( OnnxStableDiffusionImg2ImgPipeline, OnnxStableDiffusionInpaintPipeline, - OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, StableDiffusionOnnxPipeline, @@ -527,7 +517,7 @@ from ..utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: - from .spectrogram_diffusion import ( + from .deprecated import ( MidiProcessor, SpectrogramDiffusionPipeline, ) diff --git 
a/src/diffusers/pipelines/deprecated/__init__.py b/src/diffusers/pipelines/deprecated/__init__.py new file mode 100644 index 000000000000..672c3d55db02 --- /dev/null +++ b/src/diffusers/pipelines/deprecated/__init__.py @@ -0,0 +1,152 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_librosa_available, + is_note_seq_available, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_pt_objects + + _dummy_objects.update(get_objects_from_module(dummy_pt_objects)) +else: + _import_structure["repaint"] = ["RePaintPipeline"] + _import_structure["latent_diffusion_uncond"] = ["LDMPipeline"] + _import_structure["score_sde_ve"] = ["ScoreSdeVePipeline"] + _import_structure["stochastic_karras_ve"] = ["KarrasVePipeline"] + _import_structure["pndm"] = ["PNDMPipeline"] + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["alt_diffusion"] = [ + "AltDiffusionImg2ImgPipeline", + "AltDiffusionPipeline", + "AltDiffusionPipelineOutput", + ] + _import_structure["versatile_diffusion"] = [ + "VersatileDiffusionDualGuidedPipeline", + "VersatileDiffusionImageVariationPipeline", + "VersatileDiffusionPipeline", + "VersatileDiffusionTextToImagePipeline", + ] + _import_structure["vq_diffusion"] = ["VQDiffusionPipeline"] + _import_structure["stable_diffusion_variants"] = [ + "CycleDiffusionPipeline", + "StableDiffusionInpaintPipelineLegacy", + "StableDiffusionPix2PixZeroPipeline", + "StableDiffusionParadigmsPipeline", + "StableDiffusionModelEditingPipeline", + ] + +try: + if not (is_torch_available() and is_librosa_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_librosa_objects # noqa F403 + _dummy_objects.update(get_objects_from_module(dummy_torch_and_librosa_objects)) + +else: + _import_structure["audio_diffusion"] = ["AudioDiffusionPipeline", "Mel"] + +try: + if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_transformers_and_torch_and_note_seq_objects # noqa F403 + _dummy_objects.update(get_objects_from_module(dummy_transformers_and_torch_and_note_seq_objects)) + +else: + _import_structure["spectrogram_diffusion"] = ["SpectrogramDiffusionPipeline", "MidiProcessor"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_pt_objects import * + + else: + from .latent_diffusion_uncond import LDMPipeline + from .pndm import PNDMPipeline + from .repaint import RePaintPipeline + from .score_sde_ve import ScoreSdeVePipeline + from .stochastic_karras_ve import KarrasVePipeline + + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from 
...utils.dummy_torch_and_transformers_objects import * + + else: + from .alt_diffusion import AltDiffusionImg2ImgPipeline, AltDiffusionPipeline, AltDiffusionPipelineOutput + from .audio_diffusion import AudioDiffusionPipeline, Mel + from .spectrogram_diffusion import SpectrogramDiffusionPipeline + from .stable_diffusion_variants import ( + CycleDiffusionPipeline, + StableDiffusionInpaintPipelineLegacy, + StableDiffusionModelEditingPipeline, + StableDiffusionParadigmsPipeline, + StableDiffusionPix2PixZero, + ) + from .stochastic_karras_ve import KarrasVePipeline + from .versatile_diffusion import ( + VersatileDiffusionDualGuidedPipeline, + VersatileDiffusionImageVariationPipeline, + VersatileDiffusionPipeline, + VersatileDiffusionTextToImagePipeline, + ) + from .vq_diffusion import VQDiffusionPipeline + + try: + if not (is_torch_available() and is_librosa_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_librosa_objects import * + else: + from .audio_diffusion import AudioDiffusionPipeline, Mel + + try: + if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 + else: + from .spectrogram_diffusion import ( + MidiProcessor, + SpectrogramDiffusionPipeline, + ) + + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/src/diffusers/pipelines/alt_diffusion/__init__.py b/src/diffusers/pipelines/deprecated/alt_diffusion/__init__.py similarity index 91% rename from src/diffusers/pipelines/alt_diffusion/__init__.py rename to src/diffusers/pipelines/deprecated/alt_diffusion/__init__.py index 05c86f0a160e..71fa15b3feff 100644 --- a/src/diffusers/pipelines/alt_diffusion/__init__.py +++ b/src/diffusers/pipelines/deprecated/alt_diffusion/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -17,7 +17,7 @@ if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects + from ....utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -32,7 +32,7 @@ if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * + from ....utils.dummy_torch_and_transformers_objects import * else: from .modeling_roberta_series import RobertaSeriesModelWithTransformation diff --git a/src/diffusers/pipelines/alt_diffusion/modeling_roberta_series.py b/src/diffusers/pipelines/deprecated/alt_diffusion/modeling_roberta_series.py similarity index 100% rename from src/diffusers/pipelines/alt_diffusion/modeling_roberta_series.py rename to src/diffusers/pipelines/deprecated/alt_diffusion/modeling_roberta_series.py diff --git a/src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion.py b/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py similarity 
index 98% rename from src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion.py rename to src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py index 2121e9b81509..a0a670cd0e6f 100644 --- a/src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion.py +++ b/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py @@ -19,13 +19,13 @@ from packaging import version from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection, XLMRobertaTokenizer -from ...configuration_utils import FrozenDict -from ...image_processor import PipelineImageInput, VaeImageProcessor -from ...loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( +from ....configuration_utils import FrozenDict +from ....image_processor import PipelineImageInput, VaeImageProcessor +from ....loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( USE_PEFT_BACKEND, deprecate, logging, @@ -33,9 +33,9 @@ scale_lora_layers, unscale_lora_layers, ) -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DiffusionPipeline -from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline +from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker from .modeling_roberta_series import RobertaSeriesModelWithTransformation from .pipeline_output import AltDiffusionPipelineOutput diff --git a/src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion_img2img.py b/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py similarity index 98% rename from src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion_img2img.py rename to src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py index 401e6aef82b1..4963e504d2a5 100644 --- a/src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion_img2img.py +++ b/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py @@ -21,13 +21,13 @@ from packaging import version from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection, XLMRobertaTokenizer -from ...configuration_utils import FrozenDict -from ...image_processor import PipelineImageInput, VaeImageProcessor -from ...loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( +from ....configuration_utils import FrozenDict +from ....image_processor import PipelineImageInput, VaeImageProcessor +from ....loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import 
( PIL_INTERPOLATION, USE_PEFT_BACKEND, deprecate, @@ -36,9 +36,9 @@ scale_lora_layers, unscale_lora_layers, ) -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DiffusionPipeline -from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline +from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker from .modeling_roberta_series import RobertaSeriesModelWithTransformation from .pipeline_output import AltDiffusionPipelineOutput diff --git a/src/diffusers/pipelines/alt_diffusion/pipeline_output.py b/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_output.py similarity index 97% rename from src/diffusers/pipelines/alt_diffusion/pipeline_output.py rename to src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_output.py index 997e187af6c1..dd174ae3c21f 100644 --- a/src/diffusers/pipelines/alt_diffusion/pipeline_output.py +++ b/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_output.py @@ -4,7 +4,7 @@ import numpy as np import PIL.Image -from ...utils import ( +from ....utils import ( BaseOutput, ) diff --git a/src/diffusers/pipelines/audio_diffusion/__init__.py b/src/diffusers/pipelines/deprecated/audio_diffusion/__init__.py similarity index 88% rename from src/diffusers/pipelines/audio_diffusion/__init__.py rename to src/diffusers/pipelines/deprecated/audio_diffusion/__init__.py index d41c166a1ccb..3127951863a7 100644 --- a/src/diffusers/pipelines/audio_diffusion/__init__.py +++ b/src/diffusers/pipelines/deprecated/audio_diffusion/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import DIFFUSERS_SLOW_IMPORT, _LazyModule +from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule _import_structure = { diff --git a/src/diffusers/pipelines/audio_diffusion/mel.py b/src/diffusers/pipelines/deprecated/audio_diffusion/mel.py similarity index 97% rename from src/diffusers/pipelines/audio_diffusion/mel.py rename to src/diffusers/pipelines/deprecated/audio_diffusion/mel.py index 38a11cdaab7d..0e33825787bb 100644 --- a/src/diffusers/pipelines/audio_diffusion/mel.py +++ b/src/diffusers/pipelines/deprecated/audio_diffusion/mel.py @@ -15,8 +15,8 @@ import numpy as np # noqa: E402 -from ...configuration_utils import ConfigMixin, register_to_config -from ...schedulers.scheduling_utils import SchedulerMixin +from ....configuration_utils import ConfigMixin, register_to_config +from ....schedulers.scheduling_utils import SchedulerMixin try: diff --git a/src/diffusers/pipelines/audio_diffusion/pipeline_audio_diffusion.py b/src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py similarity index 98% rename from src/diffusers/pipelines/audio_diffusion/pipeline_audio_diffusion.py rename to src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py index 6c4ae88b228d..731d22f3def8 100644 --- a/src/diffusers/pipelines/audio_diffusion/pipeline_audio_diffusion.py +++ b/src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py @@ -20,10 +20,10 @@ import torch from PIL import Image -from ...models import AutoencoderKL, UNet2DConditionModel -from ...schedulers import DDIMScheduler, DDPMScheduler -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput +from ....models import AutoencoderKL, UNet2DConditionModel +from ....schedulers import DDIMScheduler, DDPMScheduler +from 
....utils.torch_utils import randn_tensor +from ...pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel diff --git a/src/diffusers/pipelines/latent_diffusion_uncond/__init__.py b/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/__init__.py similarity index 87% rename from src/diffusers/pipelines/latent_diffusion_uncond/__init__.py rename to src/diffusers/pipelines/deprecated/latent_diffusion_uncond/__init__.py index 8bb291f1b4fd..214f5bbca969 100644 --- a/src/diffusers/pipelines/latent_diffusion_uncond/__init__.py +++ b/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import DIFFUSERS_SLOW_IMPORT, _LazyModule +from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule _import_structure = {"pipeline_latent_diffusion_uncond": ["LDMPipeline"]} diff --git a/src/diffusers/pipelines/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py b/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py similarity index 96% rename from src/diffusers/pipelines/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py rename to src/diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py index ffcc8129d19f..4e14d1708ccf 100644 --- a/src/diffusers/pipelines/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py +++ b/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py @@ -17,10 +17,10 @@ import torch -from ...models import UNet2DModel, VQModel -from ...schedulers import DDIMScheduler -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from ....models import UNet2DModel, VQModel +from ....schedulers import DDIMScheduler +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput class LDMPipeline(DiffusionPipeline): diff --git a/src/diffusers/pipelines/pndm/__init__.py b/src/diffusers/pipelines/deprecated/pndm/__init__.py similarity index 86% rename from src/diffusers/pipelines/pndm/__init__.py rename to src/diffusers/pipelines/deprecated/pndm/__init__.py index d904abe76800..5e3bdba74079 100644 --- a/src/diffusers/pipelines/pndm/__init__.py +++ b/src/diffusers/pipelines/deprecated/pndm/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import DIFFUSERS_SLOW_IMPORT, _LazyModule +from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule _import_structure = {"pipeline_pndm": ["PNDMPipeline"]} diff --git a/src/diffusers/pipelines/pndm/pipeline_pndm.py b/src/diffusers/pipelines/deprecated/pndm/pipeline_pndm.py similarity index 95% rename from src/diffusers/pipelines/pndm/pipeline_pndm.py rename to src/diffusers/pipelines/deprecated/pndm/pipeline_pndm.py index 78690997223a..c988e8292987 100644 --- a/src/diffusers/pipelines/pndm/pipeline_pndm.py +++ b/src/diffusers/pipelines/deprecated/pndm/pipeline_pndm.py @@ -17,10 +17,10 @@ import torch -from ...models import UNet2DModel -from ...schedulers import PNDMScheduler -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from ....models import UNet2DModel +from ....schedulers import PNDMScheduler +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput class PNDMPipeline(DiffusionPipeline): diff --git 
a/src/diffusers/pipelines/repaint/__init__.py b/src/diffusers/pipelines/deprecated/repaint/__init__.py similarity index 86% rename from src/diffusers/pipelines/repaint/__init__.py rename to src/diffusers/pipelines/deprecated/repaint/__init__.py index b1b42f7a115e..2c6b04af52d4 100644 --- a/src/diffusers/pipelines/repaint/__init__.py +++ b/src/diffusers/pipelines/deprecated/repaint/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import DIFFUSERS_SLOW_IMPORT, _LazyModule +from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule _import_structure = {"pipeline_repaint": ["RePaintPipeline"]} diff --git a/src/diffusers/pipelines/repaint/pipeline_repaint.py b/src/diffusers/pipelines/deprecated/repaint/pipeline_repaint.py similarity index 97% rename from src/diffusers/pipelines/repaint/pipeline_repaint.py rename to src/diffusers/pipelines/deprecated/repaint/pipeline_repaint.py index 1bbd3d1d03d4..eeea28d4d06f 100644 --- a/src/diffusers/pipelines/repaint/pipeline_repaint.py +++ b/src/diffusers/pipelines/deprecated/repaint/pipeline_repaint.py @@ -19,11 +19,11 @@ import PIL.Image import torch -from ...models import UNet2DModel -from ...schedulers import RePaintScheduler -from ...utils import PIL_INTERPOLATION, deprecate, logging -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from ....models import UNet2DModel +from ....schedulers import RePaintScheduler +from ....utils import PIL_INTERPOLATION, deprecate, logging +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name diff --git a/src/diffusers/pipelines/score_sde_ve/__init__.py b/src/diffusers/pipelines/deprecated/score_sde_ve/__init__.py similarity index 87% rename from src/diffusers/pipelines/score_sde_ve/__init__.py rename to src/diffusers/pipelines/deprecated/score_sde_ve/__init__.py index 0001394ded5c..87c167c3dbd2 100644 --- a/src/diffusers/pipelines/score_sde_ve/__init__.py +++ b/src/diffusers/pipelines/deprecated/score_sde_ve/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import DIFFUSERS_SLOW_IMPORT, _LazyModule +from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule _import_structure = {"pipeline_score_sde_ve": ["ScoreSdeVePipeline"]} diff --git a/src/diffusers/pipelines/score_sde_ve/pipeline_score_sde_ve.py b/src/diffusers/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py similarity index 95% rename from src/diffusers/pipelines/score_sde_ve/pipeline_score_sde_ve.py rename to src/diffusers/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py index 11d1af710355..b9b3eb08f845 100644 --- a/src/diffusers/pipelines/score_sde_ve/pipeline_score_sde_ve.py +++ b/src/diffusers/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py @@ -16,10 +16,10 @@ import torch -from ...models import UNet2DModel -from ...schedulers import ScoreSdeVeScheduler -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from ....models import UNet2DModel +from ....schedulers import ScoreSdeVeScheduler +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput class ScoreSdeVePipeline(DiffusionPipeline): diff --git a/src/diffusers/pipelines/spectrogram_diffusion/__init__.py b/src/diffusers/pipelines/deprecated/spectrogram_diffusion/__init__.py similarity index 85% rename from 
src/diffusers/pipelines/spectrogram_diffusion/__init__.py rename to src/diffusers/pipelines/deprecated/spectrogram_diffusion/__init__.py index 2444191368d4..150954baa0eb 100644 --- a/src/diffusers/pipelines/spectrogram_diffusion/__init__.py +++ b/src/diffusers/pipelines/deprecated/spectrogram_diffusion/__init__.py @@ -1,7 +1,7 @@ # flake8: noqa from typing import TYPE_CHECKING -from ...utils import DIFFUSERS_SLOW_IMPORT -from ...utils import ( +from ....utils import ( + DIFFUSERS_SLOW_IMPORT, _LazyModule, is_note_seq_available, OptionalDependencyNotAvailable, @@ -17,7 +17,7 @@ if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects # noqa F403 + from ....utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -32,7 +32,7 @@ if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_transformers_and_torch_and_note_seq_objects + from ....utils import dummy_transformers_and_torch_and_note_seq_objects _dummy_objects.update(get_objects_from_module(dummy_transformers_and_torch_and_note_seq_objects)) else: @@ -45,7 +45,7 @@ raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * + from ....utils.dummy_torch_and_transformers_objects import * else: from .pipeline_spectrogram_diffusion import SpectrogramDiffusionPipeline from .pipeline_spectrogram_diffusion import SpectrogramContEncoder @@ -56,7 +56,7 @@ if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * + from ....utils.dummy_transformers_and_torch_and_note_seq_objects import * else: from .midi_utils import MidiProcessor diff --git a/src/diffusers/pipelines/spectrogram_diffusion/continuous_encoder.py b/src/diffusers/pipelines/deprecated/spectrogram_diffusion/continuous_encoder.py similarity index 96% rename from src/diffusers/pipelines/spectrogram_diffusion/continuous_encoder.py rename to src/diffusers/pipelines/deprecated/spectrogram_diffusion/continuous_encoder.py index 556136d4023d..4d4582924144 100644 --- a/src/diffusers/pipelines/spectrogram_diffusion/continuous_encoder.py +++ b/src/diffusers/pipelines/deprecated/spectrogram_diffusion/continuous_encoder.py @@ -22,8 +22,8 @@ T5LayerNorm, ) -from ...configuration_utils import ConfigMixin, register_to_config -from ...models import ModelMixin +from ....configuration_utils import ConfigMixin, register_to_config +from ....models import ModelMixin class SpectrogramContEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin): diff --git a/src/diffusers/pipelines/spectrogram_diffusion/midi_utils.py b/src/diffusers/pipelines/deprecated/spectrogram_diffusion/midi_utils.py similarity index 99% rename from src/diffusers/pipelines/spectrogram_diffusion/midi_utils.py rename to src/diffusers/pipelines/deprecated/spectrogram_diffusion/midi_utils.py index 08d0878db588..a91233edfe30 100644 --- a/src/diffusers/pipelines/spectrogram_diffusion/midi_utils.py +++ b/src/diffusers/pipelines/deprecated/spectrogram_diffusion/midi_utils.py @@ -22,7 +22,7 @@ import torch import 
torch.nn.functional as F -from ...utils import is_note_seq_available +from ....utils import is_note_seq_available from .pipeline_spectrogram_diffusion import TARGET_FEATURE_LENGTH diff --git a/src/diffusers/pipelines/spectrogram_diffusion/notes_encoder.py b/src/diffusers/pipelines/deprecated/spectrogram_diffusion/notes_encoder.py similarity index 96% rename from src/diffusers/pipelines/spectrogram_diffusion/notes_encoder.py rename to src/diffusers/pipelines/deprecated/spectrogram_diffusion/notes_encoder.py index 94eaa176f3e5..f2a1ca24f5ff 100644 --- a/src/diffusers/pipelines/spectrogram_diffusion/notes_encoder.py +++ b/src/diffusers/pipelines/deprecated/spectrogram_diffusion/notes_encoder.py @@ -18,8 +18,8 @@ from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm -from ...configuration_utils import ConfigMixin, register_to_config -from ...models import ModelMixin +from ....configuration_utils import ConfigMixin, register_to_config +from ....models import ModelMixin class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin): diff --git a/src/diffusers/pipelines/spectrogram_diffusion/pipeline_spectrogram_diffusion.py b/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py similarity index 97% rename from src/diffusers/pipelines/spectrogram_diffusion/pipeline_spectrogram_diffusion.py rename to src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py index 88725af452c2..b803d921a388 100644 --- a/src/diffusers/pipelines/spectrogram_diffusion/pipeline_spectrogram_diffusion.py +++ b/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py @@ -19,16 +19,16 @@ import numpy as np import torch -from ...models import T5FilmDecoder -from ...schedulers import DDPMScheduler -from ...utils import is_onnx_available, logging -from ...utils.torch_utils import randn_tensor +from ....models import T5FilmDecoder +from ....schedulers import DDPMScheduler +from ....utils import is_onnx_available, logging +from ....utils.torch_utils import randn_tensor if is_onnx_available(): - from ..onnx_utils import OnnxRuntimeModel + from ...onnx_utils import OnnxRuntimeModel -from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline +from ...pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continuous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/__init__.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/__init__.py new file mode 100644 index 000000000000..36cf1a33ce6a --- /dev/null +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/__init__.py @@ -0,0 +1,55 @@ +from typing import TYPE_CHECKING + +from ....utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ....utils import dummy_torch_and_transformers_objects + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_cycle_diffusion"] = ["CycleDiffusionPipeline"] + 
_import_structure["pipeline_stable_diffusion_inpaint_legacy"] = ["StableDiffusionInpaintPipelineLegacy"] + _import_structure["pipeline_stable_diffusion_model_editing"] = ["StableDiffusionModelEditingPipeline"] + + _import_structure["pipeline_stable_diffusion_paradigms"] = ["StableDiffusionParadigmsPipeline"] + _import_structure["pipeline_stable_diffusion_pix2pix_zero"] = ["StableDiffusionPix2PixZeroPipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ....utils.dummy_torch_and_transformers_objects import * + + else: + from .pipeline_cycle_diffusion import CycleDiffusionPipeline + from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy + from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline + from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline + from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_cycle_diffusion.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py similarity index 98% rename from src/diffusers/pipelines/stable_diffusion/pipeline_cycle_diffusion.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py index e5c2c78720d5..7bb709bae6ac 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_cycle_diffusion.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py @@ -21,17 +21,17 @@ from packaging import version from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer -from ...configuration_utils import FrozenDict -from ...image_processor import PipelineImageInput, VaeImageProcessor -from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, UNet2DConditionModel -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import DDIMScheduler -from ...utils import PIL_INTERPOLATION, USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DiffusionPipeline -from .pipeline_output import StableDiffusionPipelineOutput -from .safety_checker import StableDiffusionSafetyChecker +from ....configuration_utils import FrozenDict +from ....image_processor import PipelineImageInput, VaeImageProcessor +from ....loaders import LoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, UNet2DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import DDIMScheduler +from ....utils import PIL_INTERPOLATION, USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline +from ...stable_diffusion.pipeline_output import StableDiffusionPipelineOutput +from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) # pylint: disable=invalid-name diff --git 
a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint_legacy.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py similarity index 98% rename from src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint_legacy.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py index 40abc477e7c0..0aa5e68bfcb4 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint_legacy.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py @@ -6,12 +6,12 @@ import torch from transformers import CLIPImageProcessor, CLIPTokenizer -from ...configuration_utils import FrozenDict -from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler -from ...utils import deprecate, logging -from ..onnx_utils import ORT_TO_NP_TYPE, OnnxRuntimeModel -from ..pipeline_utils import DiffusionPipeline -from . import StableDiffusionPipelineOutput +from ....configuration_utils import FrozenDict +from ....schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler +from ....utils import deprecate, logging +from ...onnx_utils import ORT_TO_NP_TYPE, OnnxRuntimeModel +from ...pipeline_utils import DiffusionPipeline +from ...stable_diffusion.pipeline_output import StableDiffusionPipelineOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py similarity index 98% rename from src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py index 15e6f60569a3..4daa1c07f0c6 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py @@ -21,17 +21,17 @@ from packaging import version from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer -from ...configuration_utils import FrozenDict -from ...image_processor import VaeImageProcessor -from ...loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, UNet2DConditionModel -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import PIL_INTERPOLATION, USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DiffusionPipeline -from . 
import StableDiffusionPipelineOutput -from .safety_checker import StableDiffusionSafetyChecker +from ....configuration_utils import FrozenDict +from ....image_processor import VaeImageProcessor +from ....loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, UNet2DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import PIL_INTERPOLATION, USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline +from ...stable_diffusion import StableDiffusionPipelineOutput +from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_model_editing.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py similarity index 98% rename from src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_model_editing.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py index c6364891e445..b5ec477d34d6 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_model_editing.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py @@ -18,17 +18,17 @@ import torch from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer -from ...image_processor import VaeImageProcessor -from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, UNet2DConditionModel -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import PNDMScheduler -from ...schedulers.scheduling_utils import SchedulerMixin -from ...utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DiffusionPipeline -from . 
import StableDiffusionPipelineOutput -from .safety_checker import StableDiffusionSafetyChecker +from ....image_processor import VaeImageProcessor +from ....loaders import LoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, UNet2DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import PNDMScheduler +from ....schedulers.scheduling_utils import SchedulerMixin +from ....utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline +from ...stable_diffusion.pipeline_output import StableDiffusionPipelineOutput +from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) # pylint: disable=invalid-name diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_paradigms.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py similarity index 98% rename from src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_paradigms.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py index f0368b4ca305..3c9d744c6dfa 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_paradigms.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py @@ -18,12 +18,12 @@ import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer -from ...image_processor import VaeImageProcessor -from ...loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, UNet2DConditionModel -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( +from ....image_processor import VaeImageProcessor +from ....loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, UNet2DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( USE_PEFT_BACKEND, deprecate, logging, @@ -31,10 +31,10 @@ scale_lora_layers, unscale_lora_layers, ) -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DiffusionPipeline -from . 
import StableDiffusionPipelineOutput -from .safety_checker import StableDiffusionSafetyChecker +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline +from ...stable_diffusion.pipeline_output import StableDiffusionPipelineOutput +from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) # pylint: disable=invalid-name diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_pix2pix_zero.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py similarity index 98% rename from src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_pix2pix_zero.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py index df9849ead723..f8d0603a10c3 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_pix2pix_zero.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py @@ -28,14 +28,14 @@ CLIPTokenizer, ) -from ...image_processor import PipelineImageInput, VaeImageProcessor -from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, UNet2DConditionModel -from ...models.attention_processor import Attention -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import DDIMScheduler, DDPMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler -from ...schedulers.scheduling_ddim_inverse import DDIMInverseScheduler -from ...utils import ( +from ....image_processor import PipelineImageInput, VaeImageProcessor +from ....loaders import LoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, UNet2DConditionModel +from ....models.attention_processor import Attention +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import DDIMScheduler, DDPMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler +from ....schedulers.scheduling_ddim_inverse import DDIMInverseScheduler +from ....utils import ( PIL_INTERPOLATION, USE_PEFT_BACKEND, BaseOutput, @@ -45,10 +45,10 @@ scale_lora_layers, unscale_lora_layers, ) -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DiffusionPipeline -from . 
import StableDiffusionPipelineOutput -from .safety_checker import StableDiffusionSafetyChecker +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline +from ...stable_diffusion.pipeline_output import StableDiffusionPipelineOutput +from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) # pylint: disable=invalid-name diff --git a/src/diffusers/pipelines/stochastic_karras_ve/__init__.py b/src/diffusers/pipelines/deprecated/stochastic_karras_ve/__init__.py similarity index 87% rename from src/diffusers/pipelines/stochastic_karras_ve/__init__.py rename to src/diffusers/pipelines/deprecated/stochastic_karras_ve/__init__.py index 539e920e6dec..15c9a8c27f98 100644 --- a/src/diffusers/pipelines/stochastic_karras_ve/__init__.py +++ b/src/diffusers/pipelines/deprecated/stochastic_karras_ve/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import DIFFUSERS_SLOW_IMPORT, _LazyModule +from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule _import_structure = {"pipeline_stochastic_karras_ve": ["KarrasVePipeline"]} diff --git a/src/diffusers/pipelines/stochastic_karras_ve/pipeline_stochastic_karras_ve.py b/src/diffusers/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py similarity index 96% rename from src/diffusers/pipelines/stochastic_karras_ve/pipeline_stochastic_karras_ve.py rename to src/diffusers/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py index d850f5a73351..55ca6186626d 100644 --- a/src/diffusers/pipelines/stochastic_karras_ve/pipeline_stochastic_karras_ve.py +++ b/src/diffusers/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py @@ -16,10 +16,10 @@ import torch -from ...models import UNet2DModel -from ...schedulers import KarrasVeScheduler -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from ....models import UNet2DModel +from ....schedulers import KarrasVeScheduler +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput class KarrasVePipeline(DiffusionPipeline): diff --git a/src/diffusers/pipelines/versatile_diffusion/__init__.py b/src/diffusers/pipelines/deprecated/versatile_diffusion/__init__.py similarity index 94% rename from src/diffusers/pipelines/versatile_diffusion/__init__.py rename to src/diffusers/pipelines/deprecated/versatile_diffusion/__init__.py index 6eafd5125e32..8ea6ef6e2f65 100644 --- a/src/diffusers/pipelines/versatile_diffusion/__init__.py +++ b/src/diffusers/pipelines/deprecated/versatile_diffusion/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -17,7 +17,7 @@ if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import ( + from ....utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, @@ -45,7 +45,7 @@ if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from 
...utils.dummy_torch_and_transformers_objects import ( + from ....utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, diff --git a/src/diffusers/pipelines/versatile_diffusion/modeling_text_unet.py b/src/diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py similarity index 99% rename from src/diffusers/pipelines/versatile_diffusion/modeling_text_unet.py rename to src/diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py index 8ac63636df86..112aa42323f9 100644 --- a/src/diffusers/pipelines/versatile_diffusion/modeling_text_unet.py +++ b/src/diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py @@ -7,10 +7,10 @@ from diffusers.utils import deprecate -from ...configuration_utils import ConfigMixin, register_to_config -from ...models import ModelMixin -from ...models.activations import get_activation -from ...models.attention_processor import ( +from ....configuration_utils import ConfigMixin, register_to_config +from ....models import ModelMixin +from ....models.activations import get_activation +from ....models.attention_processor import ( ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, Attention, @@ -19,8 +19,8 @@ AttnAddedKVProcessor2_0, AttnProcessor, ) -from ...models.dual_transformer_2d import DualTransformer2DModel -from ...models.embeddings import ( +from ....models.dual_transformer_2d import DualTransformer2DModel +from ....models.embeddings import ( GaussianFourierProjection, ImageHintTimeEmbedding, ImageProjection, @@ -31,10 +31,10 @@ TimestepEmbedding, Timesteps, ) -from ...models.transformer_2d import Transformer2DModel -from ...models.unet_2d_condition import UNet2DConditionOutput -from ...utils import USE_PEFT_BACKEND, is_torch_version, logging, scale_lora_layers, unscale_lora_layers -from ...utils.torch_utils import apply_freeu +from ....models.transformer_2d import Transformer2DModel +from ....models.unet_2d_condition import UNet2DConditionOutput +from ....utils import USE_PEFT_BACKEND, is_torch_version, logging, scale_lora_layers, unscale_lora_layers +from ....utils.torch_utils import apply_freeu logger = logging.get_logger(__name__) # pylint: disable=invalid-name diff --git a/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion.py b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py similarity index 99% rename from src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion.py rename to src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py index 68c720ab2ad0..4455d20df213 100644 --- a/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion.py +++ b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py @@ -5,10 +5,10 @@ import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModel -from ...models import AutoencoderKL, UNet2DConditionModel -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import logging -from ..pipeline_utils import DiffusionPipeline +from ....models import AutoencoderKL, UNet2DConditionModel +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import logging +from ...pipeline_utils import DiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import 
VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline diff --git a/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py similarity index 98% rename from src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py rename to src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py index 8f8bf260ca56..168e6a44a5c9 100644 --- a/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py +++ b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py @@ -26,12 +26,12 @@ CLIPVisionModelWithProjection, ) -from ...image_processor import VaeImageProcessor -from ...models import AutoencoderKL, DualTransformer2DModel, Transformer2DModel, UNet2DConditionModel -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import deprecate, logging -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from ....image_processor import VaeImageProcessor +from ....models import AutoencoderKL, DualTransformer2DModel, Transformer2DModel, UNet2DConditionModel +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import deprecate, logging +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput from .modeling_text_unet import UNetFlatConditionModel diff --git a/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py similarity index 98% rename from src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py rename to src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py index bcad6f93ef96..a2111283a6dd 100644 --- a/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py +++ b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py @@ -21,12 +21,12 @@ import torch.utils.checkpoint from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection -from ...image_processor import VaeImageProcessor -from ...models import AutoencoderKL, UNet2DConditionModel -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import deprecate, logging -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from ....image_processor import VaeImageProcessor +from ....models import AutoencoderKL, UNet2DConditionModel +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import deprecate, logging +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name diff --git a/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py similarity index 98% rename from src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py rename to 
src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py index d8f947e64af7..de6ab3891214 100644 --- a/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py +++ b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py @@ -19,12 +19,12 @@ import torch.utils.checkpoint from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer -from ...image_processor import VaeImageProcessor -from ...models import AutoencoderKL, Transformer2DModel, UNet2DConditionModel -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import deprecate, logging -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from ....image_processor import VaeImageProcessor +from ....models import AutoencoderKL, Transformer2DModel, UNet2DConditionModel +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import deprecate, logging +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput from .modeling_text_unet import UNetFlatConditionModel diff --git a/src/diffusers/pipelines/vq_diffusion/__init__.py b/src/diffusers/pipelines/deprecated/vq_diffusion/__init__.py similarity index 90% rename from src/diffusers/pipelines/vq_diffusion/__init__.py rename to src/diffusers/pipelines/deprecated/vq_diffusion/__init__.py index c2a22e76ae45..070903377c71 100644 --- a/src/diffusers/pipelines/vq_diffusion/__init__.py +++ b/src/diffusers/pipelines/deprecated/vq_diffusion/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -16,7 +16,7 @@ if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import ( + from ....utils.dummy_torch_and_transformers_objects import ( LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline, ) @@ -36,7 +36,7 @@ if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import ( + from ....utils.dummy_torch_and_transformers_objects import ( LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline, ) diff --git a/src/diffusers/pipelines/vq_diffusion/pipeline_vq_diffusion.py b/src/diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py similarity index 98% rename from src/diffusers/pipelines/vq_diffusion/pipeline_vq_diffusion.py rename to src/diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py index 1abe50a9b6b6..d7c2945b463c 100644 --- a/src/diffusers/pipelines/vq_diffusion/pipeline_vq_diffusion.py +++ b/src/diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py @@ -17,11 +17,11 @@ import torch from transformers import CLIPTextModel, CLIPTokenizer -from ...configuration_utils import ConfigMixin, register_to_config -from ...models import ModelMixin, Transformer2DModel, VQModel -from ...schedulers import VQDiffusionScheduler -from ...utils import logging -from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from ....configuration_utils import ConfigMixin, register_to_config +from ....models import ModelMixin, Transformer2DModel, VQModel +from ....schedulers 
import VQDiffusionScheduler +from ....utils import logging +from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name diff --git a/src/diffusers/pipelines/stable_diffusion/__init__.py b/src/diffusers/pipelines/stable_diffusion/__init__.py index d81831082e2f..bb4ea55a24a1 100644 --- a/src/diffusers/pipelines/stable_diffusion/__init__.py +++ b/src/diffusers/pipelines/stable_diffusion/__init__.py @@ -133,8 +133,10 @@ from ...utils.dummy_torch_and_transformers_objects import * else: + from ..deprecated.stable_diffusion_variants.pipeline_stable_diffusion_inpaint_legacy import ( + StableDiffusionInpaintPipelineLegacy, + ) from .clip_image_project_model import CLIPImageProjection - from .pipeline_cycle_diffusion import CycleDiffusionPipeline from .pipeline_stable_diffusion import ( StableDiffusionPipeline, StableDiffusionPipelineOutput, @@ -149,9 +151,6 @@ ) from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline - from .pipeline_stable_diffusion_inpaint_legacy import ( - StableDiffusionInpaintPipelineLegacy, - ) from .pipeline_stable_diffusion_instruct_pix2pix import ( StableDiffusionInstructPix2PixPipeline, ) @@ -159,13 +158,7 @@ StableDiffusionLatentUpscalePipeline, ) from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline - from .pipeline_stable_diffusion_model_editing import ( - StableDiffusionModelEditingPipeline, - ) from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline - from .pipeline_stable_diffusion_paradigms import ( - StableDiffusionParadigmsPipeline, - ) from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from .pipeline_stable_unclip import StableUnCLIPPipeline @@ -199,9 +192,6 @@ StableDiffusionDepth2ImgPipeline, ) from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline - from .pipeline_stable_diffusion_pix2pix_zero import ( - StableDiffusionPix2PixZeroPipeline, - ) try: if not ( @@ -234,9 +224,6 @@ from .pipeline_onnx_stable_diffusion_inpaint import ( OnnxStableDiffusionInpaintPipeline, ) - from .pipeline_onnx_stable_diffusion_inpaint_legacy import ( - OnnxStableDiffusionInpaintPipelineLegacy, - ) from .pipeline_onnx_stable_diffusion_upscale import ( OnnxStableDiffusionUpscalePipeline, ) From f11217c02dbc095bd9cd4a08e50cb9b0a50fe277 Mon Sep 17 00:00:00 2001 From: Dhruv Nair Date: Thu, 14 Dec 2023 07:58:15 +0000 Subject: [PATCH 02/17] make style --- src/diffusers/pipelines/__init__.py | 50 ++++++++++--------- .../pipelines/deprecated/__init__.py | 9 ++-- 2 files changed, 32 insertions(+), 27 deletions(-) diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py index 64e7d2531c00..304ddec9245c 100644 --- a/src/diffusers/pipelines/__init__.py +++ b/src/diffusers/pipelines/__init__.py @@ -20,10 +20,10 @@ _import_structure = { "controlnet": [], "controlnet_xs": [], + "deprecated": [], "latent_diffusion": [], "stable_diffusion": [], "stable_diffusion_xl": [], - "deprecated": [], } try: @@ -50,13 +50,15 @@ "DiffusionPipeline", "ImagePipelineOutput", ] - _import_structure["deprecated"].extend([ - "PNDMPipeline", - "LDMPipeline", - "RePaintPipeline", - "ScoreSdeVePipeline", - "KarrasVePipeline", - ]) + _import_structure["deprecated"].extend( + [ + "PNDMPipeline", + "LDMPipeline", + "RePaintPipeline", + 
"ScoreSdeVePipeline", + "KarrasVePipeline", + ] + ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() @@ -74,21 +76,23 @@ _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: - _import_structure["deprecated"].extend([ - "VQDiffusionPipeline", - "AltDiffusionPipeline", - "AltDiffusionImg2ImgPipeline", - "CycleDiffusionPipeline", - "StableDiffusionInpaintPipelineLegacy", - "StableDiffusionPix2PixZeroPipeline", - "StableDiffusionParadigmsPipeline", - "StableDiffusionModelEditingPipeline", - "SpectrogramDiffusionPipeline", - "VersatileDiffusionDualGuidedPipeline", - "VersatileDiffusionImageVariationPipeline", - "VersatileDiffusionPipeline", - "VersatileDiffusionTextToImagePipeline", - ]) + _import_structure["deprecated"].extend( + [ + "VQDiffusionPipeline", + "AltDiffusionPipeline", + "AltDiffusionImg2ImgPipeline", + "CycleDiffusionPipeline", + "StableDiffusionInpaintPipelineLegacy", + "StableDiffusionPix2PixZeroPipeline", + "StableDiffusionParadigmsPipeline", + "StableDiffusionModelEditingPipeline", + "SpectrogramDiffusionPipeline", + "VersatileDiffusionDualGuidedPipeline", + "VersatileDiffusionImageVariationPipeline", + "VersatileDiffusionPipeline", + "VersatileDiffusionTextToImagePipeline", + ] + ) _import_structure["animatediff"] = ["AnimateDiffPipeline"] _import_structure["audioldm"] = ["AudioLDMPipeline"] _import_structure["audioldm2"] = [ diff --git a/src/diffusers/pipelines/deprecated/__init__.py b/src/diffusers/pipelines/deprecated/__init__.py index 672c3d55db02..2a86421dee8c 100644 --- a/src/diffusers/pipelines/deprecated/__init__.py +++ b/src/diffusers/pipelines/deprecated/__init__.py @@ -23,11 +23,11 @@ _dummy_objects.update(get_objects_from_module(dummy_pt_objects)) else: - _import_structure["repaint"] = ["RePaintPipeline"] _import_structure["latent_diffusion_uncond"] = ["LDMPipeline"] + _import_structure["pndm"] = ["PNDMPipeline"] + _import_structure["repaint"] = ["RePaintPipeline"] _import_structure["score_sde_ve"] = ["ScoreSdeVePipeline"] _import_structure["stochastic_karras_ve"] = ["KarrasVePipeline"] - _import_structure["pndm"] = ["PNDMPipeline"] try: if not (is_transformers_available() and is_torch_available()): @@ -62,6 +62,7 @@ raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_librosa_objects # noqa F403 + _dummy_objects.update(get_objects_from_module(dummy_torch_and_librosa_objects)) else: @@ -72,14 +73,14 @@ raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_transformers_and_torch_and_note_seq_objects # noqa F403 + _dummy_objects.update(get_objects_from_module(dummy_transformers_and_torch_and_note_seq_objects)) else: - _import_structure["spectrogram_diffusion"] = ["SpectrogramDiffusionPipeline", "MidiProcessor"] + _import_structure["spectrogram_diffusion"] = ["MidiProcessor", "SpectrogramDiffusionPipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: - try: if not is_torch_available(): raise OptionalDependencyNotAvailable() From 8edd9f27f98030472c84cc60f4ddb2cbdd034917 Mon Sep 17 00:00:00 2001 From: Dhruv Nair Date: Thu, 14 Dec 2023 08:05:02 +0000 Subject: [PATCH 03/17] update --- src/diffusers/pipelines/__init__.py | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py index 304ddec9245c..38c93d84b525 100644 --- a/src/diffusers/pipelines/__init__.py +++ 
b/src/diffusers/pipelines/__init__.py @@ -68,6 +68,22 @@ _dummy_objects.update(get_objects_from_module(dummy_torch_and_librosa_objects)) else: _import_structure["deprecated"].extend(["AudioDiffusionPipeline", "Mel"]) + +try: + if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_transformers_and_torch_and_note_seq_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_transformers_and_torch_and_note_seq_objects)) +else: + _import_structure["deprecated"].extend( + [ + "MidiProcessor", + "SpectrogramDiffusionPipeline", + ] + ) + try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() @@ -86,7 +102,6 @@ "StableDiffusionPix2PixZeroPipeline", "StableDiffusionParadigmsPipeline", "StableDiffusionModelEditingPipeline", - "SpectrogramDiffusionPipeline", "VersatileDiffusionDualGuidedPipeline", "VersatileDiffusionImageVariationPipeline", "VersatileDiffusionPipeline", From 237bf591f28ce3f3e79b64e3c1da94bf4230a86a Mon Sep 17 00:00:00 2001 From: Dhruv Nair Date: Thu, 14 Dec 2023 08:33:07 +0000 Subject: [PATCH 04/17] add deprecation message --- .../deprecated/alt_diffusion/pipeline_alt_diffusion.py | 2 +- .../deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py | 1 + .../deprecated/audio_diffusion/pipeline_audio_diffusion.py | 3 ++- .../pipeline_latent_diffusion_uncond.py | 2 ++ src/diffusers/pipelines/deprecated/pndm/pipeline_pndm.py | 2 ++ src/diffusers/pipelines/deprecated/repaint/pipeline_repaint.py | 1 + .../pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py | 2 ++ .../spectrogram_diffusion/pipeline_spectrogram_diffusion.py | 3 ++- .../stable_diffusion_variants/pipeline_cycle_diffusion.py | 1 + .../pipeline_onnx_stable_diffusion_inpaint_legacy.py | 1 + .../pipeline_stable_diffusion_inpaint_legacy.py | 1 + .../pipeline_stable_diffusion_model_editing.py | 1 + .../pipeline_stable_diffusion_paradigms.py | 1 + .../pipeline_stable_diffusion_pix2pix_zero.py | 1 + .../stochastic_karras_ve/pipeline_stochastic_karras_ve.py | 2 ++ .../versatile_diffusion/pipeline_versatile_diffusion.py | 3 ++- .../pipeline_versatile_diffusion_dual_guided.py | 1 + .../pipeline_versatile_diffusion_image_variation.py | 1 + .../pipeline_versatile_diffusion_text_to_image.py | 1 + .../pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py | 3 ++- 20 files changed, 28 insertions(+), 5 deletions(-) diff --git a/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py b/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py index a0a670cd0e6f..17621f251d8c 100644 --- a/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py +++ b/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py @@ -117,7 +117,6 @@ def retrieve_timesteps( timesteps = scheduler.timesteps return timesteps, num_inference_steps - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline with Stable->Alt, CLIPTextModel->RobertaSeriesModelWithTransformation, CLIPTokenizer->XLMRobertaTokenizer, AltDiffusionSafetyChecker->StableDiffusionSafetyChecker class AltDiffusionPipeline( DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin @@ -154,6 +153,7 @@ class AltDiffusionPipeline( feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract 
features from generated images; used as inputs to the `safety_checker`. """ + deprecate("AltDiffusionPipeline", "1.0.0", "") model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] diff --git a/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py b/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py index 4963e504d2a5..ea5e71e4424f 100644 --- a/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py +++ b/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py @@ -194,6 +194,7 @@ class AltDiffusionImg2ImgPipeline( feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. """ + deprecate("AltDiffusionImg2ImgPipeline", "1.0.0", "") model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] diff --git a/src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py b/src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py index 731d22f3def8..e77c8f25c248 100644 --- a/src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py +++ b/src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py @@ -22,6 +22,7 @@ from ....models import AutoencoderKL, UNet2DConditionModel from ....schedulers import DDIMScheduler, DDPMScheduler +from ....utils import deprecate from ....utils.torch_utils import randn_tensor from ...pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel @@ -45,7 +46,7 @@ class AudioDiffusionPipeline(DiffusionPipeline): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`] or [`DDPMScheduler`]. """ - + deprecate("AudioDiffusionPipeline", "1.0.0", "") _optional_components = ["vqvae"] def __init__( diff --git a/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py b/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py index 4e14d1708ccf..9f4719941f48 100644 --- a/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py +++ b/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py @@ -19,6 +19,7 @@ from ....models import UNet2DModel, VQModel from ....schedulers import DDIMScheduler +from ....utils import deprecate from ....utils.torch_utils import randn_tensor from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput @@ -38,6 +39,7 @@ class LDMPipeline(DiffusionPipeline): scheduler ([`SchedulerMixin`]): [`DDIMScheduler`] is used in combination with `unet` to denoise the encoded image latents. 
""" + deprecate("LDMPipeline", "1.0.0", "") def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler): super().__init__() diff --git a/src/diffusers/pipelines/deprecated/pndm/pipeline_pndm.py b/src/diffusers/pipelines/deprecated/pndm/pipeline_pndm.py index c988e8292987..2f8e149de8b2 100644 --- a/src/diffusers/pipelines/deprecated/pndm/pipeline_pndm.py +++ b/src/diffusers/pipelines/deprecated/pndm/pipeline_pndm.py @@ -19,6 +19,7 @@ from ....models import UNet2DModel from ....schedulers import PNDMScheduler +from ....utils import deprecate from ....utils.torch_utils import randn_tensor from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput @@ -36,6 +37,7 @@ class PNDMPipeline(DiffusionPipeline): scheduler ([`PNDMScheduler`]): A `PNDMScheduler` to be used in combination with `unet` to denoise the encoded image. """ + deprecate("PNDMPipeline", "1.0.0", "") unet: UNet2DModel scheduler: PNDMScheduler diff --git a/src/diffusers/pipelines/deprecated/repaint/pipeline_repaint.py b/src/diffusers/pipelines/deprecated/repaint/pipeline_repaint.py index eeea28d4d06f..810088c94c1a 100644 --- a/src/diffusers/pipelines/deprecated/repaint/pipeline_repaint.py +++ b/src/diffusers/pipelines/deprecated/repaint/pipeline_repaint.py @@ -86,6 +86,7 @@ class RePaintPipeline(DiffusionPipeline): scheduler ([`RePaintScheduler`]): A `RePaintScheduler` to be used in combination with `unet` to denoise the encoded image. """ + deprecate("RePaintPipeline", "1.0.0", "") unet: UNet2DModel scheduler: RePaintScheduler diff --git a/src/diffusers/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py b/src/diffusers/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py index b9b3eb08f845..5d60e434dd0f 100644 --- a/src/diffusers/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py +++ b/src/diffusers/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py @@ -18,6 +18,7 @@ from ....models import UNet2DModel from ....schedulers import ScoreSdeVeScheduler +from ....utils import deprecate from ....utils.torch_utils import randn_tensor from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput @@ -35,6 +36,7 @@ class ScoreSdeVePipeline(DiffusionPipeline): scheduler ([`ScoreSdeVeScheduler`]): A `ScoreSdeVeScheduler` to be used in combination with `unet` to denoise the encoded image. """ + deprecate("ScoreSdeVePipeline", "1.0.0", "") unet: UNet2DModel scheduler: ScoreSdeVeScheduler diff --git a/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py b/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py index b803d921a388..78f53212bcdc 100644 --- a/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py +++ b/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py @@ -21,7 +21,7 @@ from ....models import T5FilmDecoder from ....schedulers import DDPMScheduler -from ....utils import is_onnx_available, logging +from ....utils import deprecate, is_onnx_available, logging from ....utils.torch_utils import randn_tensor @@ -54,6 +54,7 @@ class SpectrogramDiffusionPipeline(DiffusionPipeline): A scheduler to be used in combination with `decoder` to denoise the encoded audio latents. 
melgan ([`OnnxRuntimeModel`]): """ + deprecate("SpectrogramDiffusionPipeline", "1.0.0", "") _optional_components = ["melgan"] diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py index 7bb709bae6ac..b883bbe08430 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py @@ -162,6 +162,7 @@ class CycleDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lor feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. """ + deprecate("CycleDiffusionPipeline", "1.0.0", "") model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = ["safety_checker", "feature_extractor"] diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py index 0aa5e68bfcb4..8192807ee80a 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py @@ -66,6 +66,7 @@ class OnnxStableDiffusionInpaintPipelineLegacy(DiffusionPipeline): feature_extractor ([`CLIPImageProcessor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. """ + deprecate("OnnxStableDiffusionInpaintPipelineLegacy", "1.0.0", "") _optional_components = ["safety_checker", "feature_extractor"] _is_onnx = True diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py index 4daa1c07f0c6..e31d6b7ce0ce 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py @@ -115,6 +115,7 @@ class StableDiffusionInpaintPipelineLegacy( feature_extractor ([`CLIPImageProcessor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. """ + deprecate("StableDiffusionInpaintPipelineLegacy", "1.0.0", "") model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = ["feature_extractor"] diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py index b5ec477d34d6..50e90b8b211a 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py @@ -66,6 +66,7 @@ class StableDiffusionModelEditingPipeline(DiffusionPipeline, TextualInversionLoa with_augs ([`list`]): Textual augmentations to apply while editing the text-to-image model. Set to `[]` for no augmentations. 
""" + deprecate("StableDiffusionModelEditingPipeline", "1.0.0", "") model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = ["safety_checker", "feature_extractor"] diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py index 3c9d744c6dfa..d5448215f3e8 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py @@ -96,6 +96,7 @@ class StableDiffusionParadigmsPipeline( feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. """ + deprecate("StableDiffusionParadigmsPipeline", "1.0.0", "") model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = ["safety_checker", "feature_extractor"] diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py index f8d0603a10c3..0f656ab2f984 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py @@ -310,6 +310,7 @@ class StableDiffusionPix2PixZeroPipeline(DiffusionPipeline): Whether the pipeline requires a safety checker. We recommend setting it to True if you're using the pipeline publicly. """ + deprecate("StableDiffusionPix2PixZeroPipeline", "1.0.0", "") model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = [ diff --git a/src/diffusers/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py b/src/diffusers/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py index 55ca6186626d..d6c31aaf0f34 100644 --- a/src/diffusers/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py +++ b/src/diffusers/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py @@ -18,6 +18,7 @@ from ....models import UNet2DModel from ....schedulers import KarrasVeScheduler +from ....utils import deprecate from ....utils.torch_utils import randn_tensor from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput @@ -32,6 +33,7 @@ class KarrasVePipeline(DiffusionPipeline): scheduler ([`KarrasVeScheduler`]): A scheduler to be used in combination with `unet` to denoise the encoded image. 
""" + deprecate("KarrasVePipeline", "1.0.0", "") # add type hints for linting unet: UNet2DModel diff --git a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py index 4455d20df213..a7c274f995ca 100644 --- a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py +++ b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py @@ -7,7 +7,7 @@ from ....models import AutoencoderKL, UNet2DConditionModel from ....schedulers import KarrasDiffusionSchedulers -from ....utils import logging +from ....utils import deprecate, logging from ...pipeline_utils import DiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline @@ -43,6 +43,7 @@ class VersatileDiffusionPipeline(DiffusionPipeline): feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. """ + deprecate("VersatileDiffusionPipeline", "1.0.0", "") tokenizer: CLIPTokenizer image_feature_extractor: CLIPImageProcessor diff --git a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py index 168e6a44a5c9..ca9d7065732a 100644 --- a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py +++ b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py @@ -58,6 +58,7 @@ class VersatileDiffusionDualGuidedPipeline(DiffusionPipeline): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. """ + deprecate("VersatileDiffusionDualGuidedPipeline", "1.0.0", "") model_cpu_offload_seq = "bert->unet->vqvae" diff --git a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py index a2111283a6dd..c4d8951a1500 100644 --- a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py +++ b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py @@ -52,6 +52,7 @@ class VersatileDiffusionImageVariationPipeline(DiffusionPipeline): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. 
""" + deprecate("VersatileDiffusionImageVariationPipeline", "1.0.0", "") model_cpu_offload_seq = "bert->unet->vqvae" diff --git a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py index de6ab3891214..a9712f7a1d2d 100644 --- a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py +++ b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py @@ -51,6 +51,7 @@ class VersatileDiffusionTextToImagePipeline(DiffusionPipeline): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. """ + deprecate("VersatileDiffusionTextToImagePipeline", "1.0.0", "") model_cpu_offload_seq = "bert->unet->vqvae" diff --git a/src/diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py b/src/diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py index d7c2945b463c..f6650b0cce8a 100644 --- a/src/diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py +++ b/src/diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py @@ -20,7 +20,7 @@ from ....configuration_utils import ConfigMixin, register_to_config from ....models import ModelMixin, Transformer2DModel, VQModel from ....schedulers import VQDiffusionScheduler -from ....utils import logging +from ....utils import deprecate, logging from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput @@ -69,6 +69,7 @@ class VQDiffusionPipeline(DiffusionPipeline): scheduler ([`VQDiffusionScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. 
""" + deprecate("VQDiffusionPipeline", "1.0.0", "") vqvae: VQModel text_encoder: CLIPTextModel From 4a92f3412a074d302f57df69da204d817cf32ebc Mon Sep 17 00:00:00 2001 From: Dhruv Nair Date: Thu, 14 Dec 2023 08:46:32 +0000 Subject: [PATCH 05/17] format --- .../deprecated/alt_diffusion/pipeline_alt_diffusion.py | 3 ++- .../deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py | 2 +- .../deprecated/audio_diffusion/pipeline_audio_diffusion.py | 1 + .../pipeline_latent_diffusion_uncond.py | 1 + src/diffusers/pipelines/deprecated/pndm/pipeline_pndm.py | 1 + src/diffusers/pipelines/deprecated/repaint/pipeline_repaint.py | 1 + .../pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py | 1 + .../spectrogram_diffusion/pipeline_spectrogram_diffusion.py | 1 + .../stable_diffusion_variants/pipeline_cycle_diffusion.py | 1 + .../pipeline_onnx_stable_diffusion_inpaint_legacy.py | 1 + .../pipeline_stable_diffusion_inpaint_legacy.py | 1 + .../pipeline_stable_diffusion_model_editing.py | 1 + .../pipeline_stable_diffusion_paradigms.py | 1 + .../pipeline_stable_diffusion_pix2pix_zero.py | 1 + .../stochastic_karras_ve/pipeline_stochastic_karras_ve.py | 1 + .../versatile_diffusion/pipeline_versatile_diffusion.py | 1 + .../pipeline_versatile_diffusion_dual_guided.py | 1 + .../pipeline_versatile_diffusion_image_variation.py | 1 + .../pipeline_versatile_diffusion_text_to_image.py | 1 + .../pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py | 1 + 20 files changed, 21 insertions(+), 2 deletions(-) diff --git a/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py b/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py index 17621f251d8c..304b87b2c1da 100644 --- a/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py +++ b/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py @@ -117,7 +117,7 @@ def retrieve_timesteps( timesteps = scheduler.timesteps return timesteps, num_inference_steps -# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline with Stable->Alt, CLIPTextModel->RobertaSeriesModelWithTransformation, CLIPTokenizer->XLMRobertaTokenizer, AltDiffusionSafetyChecker->StableDiffusionSafetyChecker + class AltDiffusionPipeline( DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin ): @@ -153,6 +153,7 @@ class AltDiffusionPipeline( feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. 
""" + deprecate("AltDiffusionPipeline", "1.0.0", "") model_cpu_offload_seq = "text_encoder->unet->vae" diff --git a/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py b/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py index ea5e71e4424f..263f72c86177 100644 --- a/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py +++ b/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py @@ -158,7 +158,6 @@ def retrieve_timesteps( return timesteps, num_inference_steps -# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline with Stable->Alt, CLIPTextModel->RobertaSeriesModelWithTransformation, CLIPTokenizer->XLMRobertaTokenizer, AltDiffusionSafetyChecker->StableDiffusionSafetyChecker class AltDiffusionImg2ImgPipeline( DiffusionPipeline, TextualInversionLoaderMixin, IPAdapterMixin, LoraLoaderMixin, FromSingleFileMixin ): @@ -194,6 +193,7 @@ class AltDiffusionImg2ImgPipeline( feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. """ + deprecate("AltDiffusionImg2ImgPipeline", "1.0.0", "") model_cpu_offload_seq = "text_encoder->unet->vae" diff --git a/src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py b/src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py index e77c8f25c248..7bb288ea893c 100644 --- a/src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py +++ b/src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py @@ -46,6 +46,7 @@ class AudioDiffusionPipeline(DiffusionPipeline): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`] or [`DDPMScheduler`]. """ + deprecate("AudioDiffusionPipeline", "1.0.0", "") _optional_components = ["vqvae"] diff --git a/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py b/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py index 9f4719941f48..7abab3d43e58 100644 --- a/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py +++ b/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py @@ -39,6 +39,7 @@ class LDMPipeline(DiffusionPipeline): scheduler ([`SchedulerMixin`]): [`DDIMScheduler`] is used in combination with `unet` to denoise the encoded image latents. """ + deprecate("LDMPipeline", "1.0.0", "") def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler): diff --git a/src/diffusers/pipelines/deprecated/pndm/pipeline_pndm.py b/src/diffusers/pipelines/deprecated/pndm/pipeline_pndm.py index 2f8e149de8b2..1b103d6c476a 100644 --- a/src/diffusers/pipelines/deprecated/pndm/pipeline_pndm.py +++ b/src/diffusers/pipelines/deprecated/pndm/pipeline_pndm.py @@ -37,6 +37,7 @@ class PNDMPipeline(DiffusionPipeline): scheduler ([`PNDMScheduler`]): A `PNDMScheduler` to be used in combination with `unet` to denoise the encoded image. 
""" + deprecate("PNDMPipeline", "1.0.0", "") unet: UNet2DModel diff --git a/src/diffusers/pipelines/deprecated/repaint/pipeline_repaint.py b/src/diffusers/pipelines/deprecated/repaint/pipeline_repaint.py index 810088c94c1a..b87641a9c7a4 100644 --- a/src/diffusers/pipelines/deprecated/repaint/pipeline_repaint.py +++ b/src/diffusers/pipelines/deprecated/repaint/pipeline_repaint.py @@ -86,6 +86,7 @@ class RePaintPipeline(DiffusionPipeline): scheduler ([`RePaintScheduler`]): A `RePaintScheduler` to be used in combination with `unet` to denoise the encoded image. """ + deprecate("RePaintPipeline", "1.0.0", "") unet: UNet2DModel diff --git a/src/diffusers/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py b/src/diffusers/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py index 5d60e434dd0f..419b379b7b82 100644 --- a/src/diffusers/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py +++ b/src/diffusers/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py @@ -36,6 +36,7 @@ class ScoreSdeVePipeline(DiffusionPipeline): scheduler ([`ScoreSdeVeScheduler`]): A `ScoreSdeVeScheduler` to be used in combination with `unet` to denoise the encoded image. """ + deprecate("ScoreSdeVePipeline", "1.0.0", "") unet: UNet2DModel diff --git a/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py b/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py index 78f53212bcdc..46f272503bd2 100644 --- a/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py +++ b/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py @@ -54,6 +54,7 @@ class SpectrogramDiffusionPipeline(DiffusionPipeline): A scheduler to be used in combination with `decoder` to denoise the encoded audio latents. melgan ([`OnnxRuntimeModel`]): """ + deprecate("SpectrogramDiffusionPipeline", "1.0.0", "") _optional_components = ["melgan"] diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py index b883bbe08430..ebe8a6a59c7b 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py @@ -162,6 +162,7 @@ class CycleDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lor feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. """ + deprecate("CycleDiffusionPipeline", "1.0.0", "") model_cpu_offload_seq = "text_encoder->unet->vae" diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py index 8192807ee80a..d7ed6b228e98 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py @@ -66,6 +66,7 @@ class OnnxStableDiffusionInpaintPipelineLegacy(DiffusionPipeline): feature_extractor ([`CLIPImageProcessor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. 
""" + deprecate("OnnxStableDiffusionInpaintPipelineLegacy", "1.0.0", "") _optional_components = ["safety_checker", "feature_extractor"] diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py index e31d6b7ce0ce..a9f6883e0b1a 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py @@ -115,6 +115,7 @@ class StableDiffusionInpaintPipelineLegacy( feature_extractor ([`CLIPImageProcessor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. """ + deprecate("StableDiffusionInpaintPipelineLegacy", "1.0.0", "") model_cpu_offload_seq = "text_encoder->unet->vae" diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py index 50e90b8b211a..d80dd87c52c4 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py @@ -66,6 +66,7 @@ class StableDiffusionModelEditingPipeline(DiffusionPipeline, TextualInversionLoa with_augs ([`list`]): Textual augmentations to apply while editing the text-to-image model. Set to `[]` for no augmentations. """ + deprecate("StableDiffusionModelEditingPipeline", "1.0.0", "") model_cpu_offload_seq = "text_encoder->unet->vae" diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py index d5448215f3e8..edc8a5efcca3 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py @@ -96,6 +96,7 @@ class StableDiffusionParadigmsPipeline( feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. """ + deprecate("StableDiffusionParadigmsPipeline", "1.0.0", "") model_cpu_offload_seq = "text_encoder->unet->vae" diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py index 0f656ab2f984..b6191890c0c7 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py @@ -310,6 +310,7 @@ class StableDiffusionPix2PixZeroPipeline(DiffusionPipeline): Whether the pipeline requires a safety checker. We recommend setting it to True if you're using the pipeline publicly. 
""" + deprecate("StableDiffusionPix2PixZeroPipeline", "1.0.0", "") model_cpu_offload_seq = "text_encoder->unet->vae" diff --git a/src/diffusers/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py b/src/diffusers/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py index d6c31aaf0f34..d59a2742fbd0 100644 --- a/src/diffusers/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py +++ b/src/diffusers/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py @@ -33,6 +33,7 @@ class KarrasVePipeline(DiffusionPipeline): scheduler ([`KarrasVeScheduler`]): A scheduler to be used in combination with `unet` to denoise the encoded image. """ + deprecate("KarrasVePipeline", "1.0.0", "") # add type hints for linting diff --git a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py index a7c274f995ca..727a607022a5 100644 --- a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py +++ b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py @@ -43,6 +43,7 @@ class VersatileDiffusionPipeline(DiffusionPipeline): feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. """ + deprecate("VersatileDiffusionPipeline", "1.0.0", "") tokenizer: CLIPTokenizer diff --git a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py index ca9d7065732a..c9dea29af0ec 100644 --- a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py +++ b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py @@ -58,6 +58,7 @@ class VersatileDiffusionDualGuidedPipeline(DiffusionPipeline): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. """ + deprecate("VersatileDiffusionDualGuidedPipeline", "1.0.0", "") model_cpu_offload_seq = "bert->unet->vqvae" diff --git a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py index c4d8951a1500..2cbd74a3a7a4 100644 --- a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py +++ b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py @@ -52,6 +52,7 @@ class VersatileDiffusionImageVariationPipeline(DiffusionPipeline): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. 
""" + deprecate("VersatileDiffusionImageVariationPipeline", "1.0.0", "") model_cpu_offload_seq = "bert->unet->vqvae" diff --git a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py index a9712f7a1d2d..8e1a0617f1b5 100644 --- a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py +++ b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py @@ -51,6 +51,7 @@ class VersatileDiffusionTextToImagePipeline(DiffusionPipeline): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. """ + deprecate("VersatileDiffusionTextToImagePipeline", "1.0.0", "") model_cpu_offload_seq = "bert->unet->vqvae" diff --git a/src/diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py b/src/diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py index f6650b0cce8a..212b6796b39a 100644 --- a/src/diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py +++ b/src/diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py @@ -69,6 +69,7 @@ class VQDiffusionPipeline(DiffusionPipeline): scheduler ([`VQDiffusionScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. """ + deprecate("VQDiffusionPipeline", "1.0.0", "") vqvae: VQModel From f13665e8f4d1fdff3de360a9778c2c0198a5a192 Mon Sep 17 00:00:00 2001 From: Dhruv Nair Date: Thu, 14 Dec 2023 08:52:26 +0000 Subject: [PATCH 06/17] remove tests for deprecated pipelines --- tests/pipelines/altdiffusion/__init__.py | 0 .../altdiffusion/test_alt_diffusion.py | 260 --------------- .../test_alt_diffusion_img2img.py | 309 ------------------ tests/pipelines/audio_diffusion/__init__.py | 0 .../audio_diffusion/test_audio_diffusion.py | 203 ------------ .../test_latent_diffusion_uncond.py | 116 ------- tests/pipelines/repaint/__init__.py | 0 tests/pipelines/repaint/test_repaint.py | 169 ---------- tests/pipelines/score_sde_ve/__init__.py | 0 .../score_sde_ve/test_score_sde_ve.py | 91 ------ .../spectrogram_diffusion/__init__.py | 0 .../test_spectrogram_diffusion.py | 246 -------------- .../pipelines/versatile_diffusion/__init__.py | 0 .../test_versatile_diffusion_dual_guided.py | 107 ------ ...est_versatile_diffusion_image_variation.py | 57 ---- .../test_versatile_diffusion_mega.py | 129 -------- .../test_versatile_diffusion_text_to_image.py | 87 ----- tests/pipelines/vq_diffusion/__init__.py | 0 .../vq_diffusion/test_vq_diffusion.py | 227 ------------- 19 files changed, 2001 deletions(-) delete mode 100644 tests/pipelines/altdiffusion/__init__.py delete mode 100644 tests/pipelines/altdiffusion/test_alt_diffusion.py delete mode 100644 tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py delete mode 100644 tests/pipelines/audio_diffusion/__init__.py delete mode 100644 tests/pipelines/audio_diffusion/test_audio_diffusion.py delete mode 100644 tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py delete mode 100644 tests/pipelines/repaint/__init__.py delete mode 100644 tests/pipelines/repaint/test_repaint.py delete mode 100644 tests/pipelines/score_sde_ve/__init__.py delete mode 100644 tests/pipelines/score_sde_ve/test_score_sde_ve.py delete mode 100644 tests/pipelines/spectrogram_diffusion/__init__.py delete mode 100644 
tests/pipelines/spectrogram_diffusion/test_spectrogram_diffusion.py delete mode 100644 tests/pipelines/versatile_diffusion/__init__.py delete mode 100644 tests/pipelines/versatile_diffusion/test_versatile_diffusion_dual_guided.py delete mode 100644 tests/pipelines/versatile_diffusion/test_versatile_diffusion_image_variation.py delete mode 100644 tests/pipelines/versatile_diffusion/test_versatile_diffusion_mega.py delete mode 100644 tests/pipelines/versatile_diffusion/test_versatile_diffusion_text_to_image.py delete mode 100644 tests/pipelines/vq_diffusion/__init__.py delete mode 100644 tests/pipelines/vq_diffusion/test_vq_diffusion.py diff --git a/tests/pipelines/altdiffusion/__init__.py b/tests/pipelines/altdiffusion/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/tests/pipelines/altdiffusion/test_alt_diffusion.py b/tests/pipelines/altdiffusion/test_alt_diffusion.py deleted file mode 100644 index b4a2847bb84d..000000000000 --- a/tests/pipelines/altdiffusion/test_alt_diffusion.py +++ /dev/null @@ -1,260 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import gc -import unittest - -import numpy as np -import torch -from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer - -from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel -from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( - RobertaSeriesConfig, - RobertaSeriesModelWithTransformation, -) -from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device - -from ..pipeline_params import ( - TEXT_TO_IMAGE_BATCH_PARAMS, - TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, - TEXT_TO_IMAGE_IMAGE_PARAMS, - TEXT_TO_IMAGE_PARAMS, -) -from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin - - -enable_full_determinism() - - -class AltDiffusionPipelineFastTests( - PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase -): - pipeline_class = AltDiffusionPipeline - params = TEXT_TO_IMAGE_PARAMS - batch_params = TEXT_TO_IMAGE_BATCH_PARAMS - image_params = TEXT_TO_IMAGE_IMAGE_PARAMS - image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS - callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS - - def get_dummy_components(self): - torch.manual_seed(0) - unet = UNet2DConditionModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=4, - out_channels=4, - down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), - up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), - cross_attention_dim=32, - ) - scheduler = DDIMScheduler( - beta_start=0.00085, - beta_end=0.012, - beta_schedule="scaled_linear", - clip_sample=False, - set_alpha_to_one=False, - ) - torch.manual_seed(0) - vae = AutoencoderKL( - block_out_channels=[32, 64], - in_channels=3, - out_channels=3, - down_block_types=["DownEncoderBlock2D", 
"DownEncoderBlock2D"], - up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], - latent_channels=4, - ) - - # TODO: address the non-deterministic text encoder (fails for save-load tests) - # torch.manual_seed(0) - # text_encoder_config = RobertaSeriesConfig( - # hidden_size=32, - # project_dim=32, - # intermediate_size=37, - # layer_norm_eps=1e-05, - # num_attention_heads=4, - # num_hidden_layers=5, - # vocab_size=5002, - # ) - # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config) - - torch.manual_seed(0) - text_encoder_config = CLIPTextConfig( - bos_token_id=0, - eos_token_id=2, - hidden_size=32, - projection_dim=32, - intermediate_size=37, - layer_norm_eps=1e-05, - num_attention_heads=4, - num_hidden_layers=5, - pad_token_id=1, - vocab_size=5002, - ) - text_encoder = CLIPTextModel(text_encoder_config) - - tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta") - tokenizer.model_max_length = 77 - - components = { - "unet": unet, - "scheduler": scheduler, - "vae": vae, - "text_encoder": text_encoder, - "tokenizer": tokenizer, - "safety_checker": None, - "feature_extractor": None, - "image_encoder": None, - } - return components - - def get_dummy_inputs(self, device, seed=0): - if str(device).startswith("mps"): - generator = torch.manual_seed(seed) - else: - generator = torch.Generator(device=device).manual_seed(seed) - inputs = { - "prompt": "A painting of a squirrel eating a burger", - "generator": generator, - "num_inference_steps": 2, - "guidance_scale": 6.0, - "output_type": "numpy", - } - return inputs - - def test_attention_slicing_forward_pass(self): - super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) - - def test_inference_batch_single_identical(self): - super().test_inference_batch_single_identical(expected_max_diff=3e-3) - - def test_alt_diffusion_ddim(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - - components = self.get_dummy_components() - torch.manual_seed(0) - text_encoder_config = RobertaSeriesConfig( - hidden_size=32, - project_dim=32, - intermediate_size=37, - layer_norm_eps=1e-05, - num_attention_heads=4, - num_hidden_layers=5, - vocab_size=5002, - ) - # TODO: remove after fixing the non-deterministic text encoder - text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config) - components["text_encoder"] = text_encoder - - alt_pipe = AltDiffusionPipeline(**components) - alt_pipe = alt_pipe.to(device) - alt_pipe.set_progress_bar_config(disable=None) - - inputs = self.get_dummy_inputs(device) - inputs["prompt"] = "A photo of an astronaut" - output = alt_pipe(**inputs) - image = output.images - image_slice = image[0, -3:, -3:, -1] - - assert image.shape == (1, 64, 64, 3) - expected_slice = np.array( - [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] - ) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - - def test_alt_diffusion_pndm(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - - components = self.get_dummy_components() - components["scheduler"] = PNDMScheduler(skip_prk_steps=True) - torch.manual_seed(0) - text_encoder_config = RobertaSeriesConfig( - hidden_size=32, - project_dim=32, - intermediate_size=37, - layer_norm_eps=1e-05, - num_attention_heads=4, - num_hidden_layers=5, - vocab_size=5002, - ) - # TODO: remove after fixing the non-deterministic text encoder - text_encoder = 
RobertaSeriesModelWithTransformation(text_encoder_config) - components["text_encoder"] = text_encoder - alt_pipe = AltDiffusionPipeline(**components) - alt_pipe = alt_pipe.to(device) - alt_pipe.set_progress_bar_config(disable=None) - - inputs = self.get_dummy_inputs(device) - output = alt_pipe(**inputs) - image = output.images - image_slice = image[0, -3:, -3:, -1] - - assert image.shape == (1, 64, 64, 3) - expected_slice = np.array( - [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] - ) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - - -@nightly -@require_torch_gpu -class AltDiffusionPipelineIntegrationTests(unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - def test_alt_diffusion(self): - # make sure here that pndm scheduler skips prk - alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None) - alt_pipe = alt_pipe.to(torch_device) - alt_pipe.set_progress_bar_config(disable=None) - - prompt = "A painting of a squirrel eating a burger" - generator = torch.manual_seed(0) - output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np") - - image = output.images - - image_slice = image[0, -3:, -3:, -1] - - assert image.shape == (1, 512, 512, 3) - expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - - def test_alt_diffusion_fast_ddim(self): - scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler") - - alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None) - alt_pipe = alt_pipe.to(torch_device) - alt_pipe.set_progress_bar_config(disable=None) - - prompt = "A painting of a squirrel eating a burger" - generator = torch.manual_seed(0) - - output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy") - image = output.images - - image_slice = image[0, -3:, -3:, -1] - - assert image.shape == (1, 512, 512, 3) - expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 diff --git a/tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py b/tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py deleted file mode 100644 index 3fd1a90172ca..000000000000 --- a/tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py +++ /dev/null @@ -1,309 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import gc -import random -import unittest - -import numpy as np -import torch -from transformers import XLMRobertaTokenizer - -from diffusers import ( - AltDiffusionImg2ImgPipeline, - AutoencoderKL, - PNDMScheduler, - UNet2DConditionModel, -) -from diffusers.image_processor import VaeImageProcessor -from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( - RobertaSeriesConfig, - RobertaSeriesModelWithTransformation, -) -from diffusers.utils import load_image -from diffusers.utils.testing_utils import ( - enable_full_determinism, - floats_tensor, - load_numpy, - nightly, - require_torch_gpu, - torch_device, -) - - -enable_full_determinism() - - -class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - @property - def dummy_image(self): - batch_size = 1 - num_channels = 3 - sizes = (32, 32) - - image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) - return image - - @property - def dummy_cond_unet(self): - torch.manual_seed(0) - model = UNet2DConditionModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=4, - out_channels=4, - down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), - up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), - cross_attention_dim=32, - ) - return model - - @property - def dummy_vae(self): - torch.manual_seed(0) - model = AutoencoderKL( - block_out_channels=[32, 64], - in_channels=3, - out_channels=3, - down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], - up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], - latent_channels=4, - ) - return model - - @property - def dummy_text_encoder(self): - torch.manual_seed(0) - config = RobertaSeriesConfig( - hidden_size=32, - project_dim=32, - intermediate_size=37, - layer_norm_eps=1e-05, - num_attention_heads=4, - num_hidden_layers=5, - pad_token_id=1, - vocab_size=5006, - ) - return RobertaSeriesModelWithTransformation(config) - - @property - def dummy_extractor(self): - def extract(*args, **kwargs): - class Out: - def __init__(self): - self.pixel_values = torch.ones([0]) - - def to(self, device): - self.pixel_values.to(device) - return self - - return Out() - - return extract - - def test_stable_diffusion_img2img_default_case(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta") - tokenizer.model_max_length = 77 - - init_image = self.dummy_image.to(device) - init_image = init_image / 2 + 0.5 - - # make sure here that pndm scheduler skips prk - alt_pipe = AltDiffusionImg2ImgPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - image_encoder=None, - ) - alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=True) - alt_pipe = alt_pipe.to(device) - alt_pipe.set_progress_bar_config(disable=None) - - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output = alt_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - image=init_image, - ) - - image = 
output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = alt_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - image=init_image, - return_dict=False, - )[0] - - image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] - - assert image.shape == (1, 32, 32, 3) - expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3 - - @unittest.skipIf(torch_device != "cuda", "This test requires a GPU") - def test_stable_diffusion_img2img_fp16(self): - """Test that stable diffusion img2img works with fp16""" - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta") - tokenizer.model_max_length = 77 - - init_image = self.dummy_image.to(torch_device) - - # put models in fp16 - unet = unet.half() - vae = vae.half() - bert = bert.half() - - # make sure here that pndm scheduler skips prk - alt_pipe = AltDiffusionImg2ImgPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - image_encoder=None, - ) - alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False) - alt_pipe = alt_pipe.to(torch_device) - alt_pipe.set_progress_bar_config(disable=None) - - prompt = "A painting of a squirrel eating a burger" - generator = torch.manual_seed(0) - image = alt_pipe( - [prompt], - generator=generator, - num_inference_steps=2, - output_type="np", - image=init_image, - ).images - - assert image.shape == (1, 32, 32, 3) - - @unittest.skipIf(torch_device != "cuda", "This test requires a GPU") - def test_stable_diffusion_img2img_pipeline_multiple_of_8(self): - init_image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" - "/img2img/sketch-mountains-input.jpg" - ) - # resize to resolution that is divisible by 8 but not 16 or 32 - init_image = init_image.resize((760, 504)) - - model_id = "BAAI/AltDiffusion" - pipe = AltDiffusionImg2ImgPipeline.from_pretrained( - model_id, - safety_checker=None, - ) - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - pipe.enable_attention_slicing() - - prompt = "A fantasy landscape, trending on artstation" - - generator = torch.manual_seed(0) - output = pipe( - prompt=prompt, - image=init_image, - strength=0.75, - guidance_scale=7.5, - generator=generator, - output_type="np", - ) - image = output.images[0] - - image_slice = image[255:258, 383:386, -1] - - assert image.shape == (504, 760, 3) - expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - - -@nightly -@require_torch_gpu -class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - def test_stable_diffusion_img2img_pipeline_default(self): - init_image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" - 
"/img2img/sketch-mountains-input.jpg" - ) - init_image = init_image.resize((768, 512)) - expected_image = load_numpy( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" - ) - - model_id = "BAAI/AltDiffusion" - pipe = AltDiffusionImg2ImgPipeline.from_pretrained( - model_id, - safety_checker=None, - ) - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - pipe.enable_attention_slicing() - - prompt = "A fantasy landscape, trending on artstation" - - generator = torch.manual_seed(0) - output = pipe( - prompt=prompt, - image=init_image, - strength=0.75, - guidance_scale=7.5, - generator=generator, - output_type="np", - ) - image = output.images[0] - - assert image.shape == (512, 768, 3) - # img2img is flaky across GPUs even in fp32, so using MAE here - assert np.abs(expected_image - image).max() < 1e-2 diff --git a/tests/pipelines/audio_diffusion/__init__.py b/tests/pipelines/audio_diffusion/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/tests/pipelines/audio_diffusion/test_audio_diffusion.py b/tests/pipelines/audio_diffusion/test_audio_diffusion.py deleted file mode 100644 index 2cf3e4a95609..000000000000 --- a/tests/pipelines/audio_diffusion/test_audio_diffusion.py +++ /dev/null @@ -1,203 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import gc -import unittest - -import numpy as np -import torch - -from diffusers import ( - AudioDiffusionPipeline, - AutoencoderKL, - DDIMScheduler, - DDPMScheduler, - DiffusionPipeline, - Mel, - UNet2DConditionModel, - UNet2DModel, -) -from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device - - -enable_full_determinism() - - -class PipelineFastTests(unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - @property - def dummy_unet(self): - torch.manual_seed(0) - model = UNet2DModel( - sample_size=(32, 64), - in_channels=1, - out_channels=1, - layers_per_block=2, - block_out_channels=(128, 128), - down_block_types=("AttnDownBlock2D", "DownBlock2D"), - up_block_types=("UpBlock2D", "AttnUpBlock2D"), - ) - return model - - @property - def dummy_unet_condition(self): - torch.manual_seed(0) - model = UNet2DConditionModel( - sample_size=(64, 32), - in_channels=1, - out_channels=1, - layers_per_block=2, - block_out_channels=(128, 128), - down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), - up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), - cross_attention_dim=10, - ) - return model - - @property - def dummy_vqvae_and_unet(self): - torch.manual_seed(0) - vqvae = AutoencoderKL( - sample_size=(128, 64), - in_channels=1, - out_channels=1, - latent_channels=1, - layers_per_block=2, - block_out_channels=(128, 128), - down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), - up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"), - ) - unet = UNet2DModel( - sample_size=(64, 32), - in_channels=1, - out_channels=1, - layers_per_block=2, - block_out_channels=(128, 128), - down_block_types=("AttnDownBlock2D", "DownBlock2D"), - up_block_types=("UpBlock2D", "AttnUpBlock2D"), - ) - return vqvae, unet - - @nightly - def test_audio_diffusion(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - mel = Mel( - x_res=self.dummy_unet.config.sample_size[1], - y_res=self.dummy_unet.config.sample_size[0], - ) - - scheduler = DDPMScheduler() - pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler) - pipe = pipe.to(device) - pipe.set_progress_bar_config(disable=None) - - generator = torch.Generator(device=device).manual_seed(42) - output = pipe(generator=generator, steps=4) - audio = output.audios[0] - image = output.images[0] - - generator = torch.Generator(device=device).manual_seed(42) - output = pipe(generator=generator, steps=4, return_dict=False) - image_from_tuple = output[0][0] - - assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length) - assert ( - image.height == self.dummy_unet.config.sample_size[0] - and image.width == self.dummy_unet.config.sample_size[1] - ) - image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10] - image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10] - expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127]) - - assert np.abs(image_slice.flatten() - expected_slice).max() == 0 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0 - - mel = Mel( - x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1], - y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0], - ) - - scheduler = DDIMScheduler() - dummy_vqvae_and_unet = self.dummy_vqvae_and_unet - pipe = AudioDiffusionPipeline( - vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], 
mel=mel, scheduler=scheduler - ) - pipe = pipe.to(device) - pipe.set_progress_bar_config(disable=None) - - np.random.seed(0) - raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,)) - generator = torch.Generator(device=device).manual_seed(42) - output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10) - image = output.images[0] - - assert ( - image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0] - and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1] - ) - image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10] - expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121]) - - assert np.abs(image_slice.flatten() - expected_slice).max() == 0 - - dummy_unet_condition = self.dummy_unet_condition - pipe = AudioDiffusionPipeline( - vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler - ) - pipe = pipe.to(device) - pipe.set_progress_bar_config(disable=None) - - np.random.seed(0) - encoding = torch.rand((1, 1, 10)) - output = pipe(generator=generator, encoding=encoding) - image = output.images[0] - image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10] - expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111]) - - assert np.abs(image_slice.flatten() - expected_slice).max() == 0 - - -@nightly -@require_torch_gpu -class PipelineIntegrationTests(unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - def test_audio_diffusion(self): - device = torch_device - - pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256") - pipe = pipe.to(device) - pipe.set_progress_bar_config(disable=None) - - generator = torch.Generator(device=device).manual_seed(42) - output = pipe(generator=generator) - audio = output.audios[0] - image = output.images[0] - - assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length) - assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1] - image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10] - expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26]) - - assert np.abs(image_slice.flatten() - expected_slice).max() == 0 diff --git a/tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py b/tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py deleted file mode 100644 index 4d284a494fba..000000000000 --- a/tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py +++ /dev/null @@ -1,116 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - -import numpy as np -import torch -from transformers import CLIPTextConfig, CLIPTextModel - -from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel -from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch, torch_device - - -enable_full_determinism() - - -class LDMPipelineFastTests(unittest.TestCase): - @property - def dummy_uncond_unet(self): - torch.manual_seed(0) - model = UNet2DModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=3, - out_channels=3, - down_block_types=("DownBlock2D", "AttnDownBlock2D"), - up_block_types=("AttnUpBlock2D", "UpBlock2D"), - ) - return model - - @property - def dummy_vq_model(self): - torch.manual_seed(0) - model = VQModel( - block_out_channels=[32, 64], - in_channels=3, - out_channels=3, - down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], - up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], - latent_channels=3, - ) - return model - - @property - def dummy_text_encoder(self): - torch.manual_seed(0) - config = CLIPTextConfig( - bos_token_id=0, - eos_token_id=2, - hidden_size=32, - intermediate_size=37, - layer_norm_eps=1e-05, - num_attention_heads=4, - num_hidden_layers=5, - pad_token_id=1, - vocab_size=1000, - ) - return CLIPTextModel(config) - - def test_inference_uncond(self): - unet = self.dummy_uncond_unet - scheduler = DDIMScheduler() - vae = self.dummy_vq_model - - ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler) - ldm.to(torch_device) - ldm.set_progress_bar_config(disable=None) - - generator = torch.manual_seed(0) - image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images - - generator = torch.manual_seed(0) - image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0] - - image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] - - assert image.shape == (1, 64, 64, 3) - expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172]) - tolerance = 1e-2 if torch_device != "mps" else 3e-2 - - assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance - - -@nightly -@require_torch -class LDMPipelineIntegrationTests(unittest.TestCase): - def test_inference_uncond(self): - ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256") - ldm.to(torch_device) - ldm.set_progress_bar_config(disable=None) - - generator = torch.manual_seed(0) - image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images - - image_slice = image[0, -3:, -3:, -1] - - assert image.shape == (1, 256, 256, 3) - expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447]) - tolerance = 1e-2 if torch_device != "mps" else 3e-2 - - assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance diff --git a/tests/pipelines/repaint/__init__.py b/tests/pipelines/repaint/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/tests/pipelines/repaint/test_repaint.py b/tests/pipelines/repaint/test_repaint.py deleted file mode 100644 index 607827854bf7..000000000000 --- a/tests/pipelines/repaint/test_repaint.py +++ /dev/null @@ -1,169 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import gc -import unittest - -import numpy as np -import torch - -from diffusers import RePaintPipeline, RePaintScheduler, UNet2DModel -from diffusers.utils.testing_utils import ( - enable_full_determinism, - load_image, - load_numpy, - nightly, - require_torch_gpu, - skip_mps, - torch_device, -) - -from ..pipeline_params import IMAGE_INPAINTING_BATCH_PARAMS, IMAGE_INPAINTING_PARAMS -from ..test_pipelines_common import PipelineTesterMixin - - -enable_full_determinism() - - -class RepaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - pipeline_class = RePaintPipeline - params = IMAGE_INPAINTING_PARAMS - {"width", "height", "guidance_scale"} - required_optional_params = PipelineTesterMixin.required_optional_params - { - "latents", - "num_images_per_prompt", - "callback", - "callback_steps", - } - batch_params = IMAGE_INPAINTING_BATCH_PARAMS - - def get_dummy_components(self): - torch.manual_seed(0) - torch.manual_seed(0) - unet = UNet2DModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=3, - out_channels=3, - down_block_types=("DownBlock2D", "AttnDownBlock2D"), - up_block_types=("AttnUpBlock2D", "UpBlock2D"), - ) - scheduler = RePaintScheduler() - components = {"unet": unet, "scheduler": scheduler} - return components - - def get_dummy_inputs(self, device, seed=0): - if str(device).startswith("mps"): - generator = torch.manual_seed(seed) - else: - generator = torch.Generator(device=device).manual_seed(seed) - image = np.random.RandomState(seed).standard_normal((1, 3, 32, 32)) - image = torch.from_numpy(image).to(device=device, dtype=torch.float32) - mask = (image > 0).to(device=device, dtype=torch.float32) - inputs = { - "image": image, - "mask_image": mask, - "generator": generator, - "num_inference_steps": 5, - "eta": 0.0, - "jump_length": 2, - "jump_n_sample": 2, - "output_type": "numpy", - } - return inputs - - def test_repaint(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_dummy_components() - sd_pipe = RePaintPipeline(**components) - sd_pipe = sd_pipe.to(device) - sd_pipe.set_progress_bar_config(disable=None) - - inputs = self.get_dummy_inputs(device) - image = sd_pipe(**inputs).images - image_slice = image[0, -3:, -3:, -1] - - assert image.shape == (1, 32, 32, 3) - expected_slice = np.array([1.0000, 0.5426, 0.5497, 0.2200, 1.0000, 1.0000, 0.5623, 1.0000, 0.6274]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 - - @skip_mps - def test_save_load_local(self): - return super().test_save_load_local() - - # RePaint can hardly be made deterministic since the scheduler is currently always - # nondeterministic - @unittest.skip("non-deterministic pipeline") - def test_inference_batch_single_identical(self): - return super().test_inference_batch_single_identical() - - @skip_mps - def test_dict_tuple_outputs_equivalent(self): - return super().test_dict_tuple_outputs_equivalent() - - @skip_mps - def 
test_save_load_optional_components(self): - return super().test_save_load_optional_components() - - @skip_mps - def test_attention_slicing_forward_pass(self): - return super().test_attention_slicing_forward_pass() - - -@nightly -@require_torch_gpu -class RepaintPipelineNightlyTests(unittest.TestCase): - def tearDown(self): - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - def test_celebahq(self): - original_image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/" - "repaint/celeba_hq_256.png" - ) - mask_image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/repaint/mask_256.png" - ) - expected_image = load_numpy( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/" - "repaint/celeba_hq_256_result.npy" - ) - - model_id = "google/ddpm-ema-celebahq-256" - unet = UNet2DModel.from_pretrained(model_id) - scheduler = RePaintScheduler.from_pretrained(model_id) - - repaint = RePaintPipeline(unet=unet, scheduler=scheduler).to(torch_device) - repaint.set_progress_bar_config(disable=None) - repaint.enable_attention_slicing() - - generator = torch.manual_seed(0) - output = repaint( - original_image, - mask_image, - num_inference_steps=250, - eta=0.0, - jump_length=10, - jump_n_sample=10, - generator=generator, - output_type="np", - ) - image = output.images[0] - - assert image.shape == (256, 256, 3) - assert np.abs(expected_image - image).mean() < 1e-2 diff --git a/tests/pipelines/score_sde_ve/__init__.py b/tests/pipelines/score_sde_ve/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/tests/pipelines/score_sde_ve/test_score_sde_ve.py b/tests/pipelines/score_sde_ve/test_score_sde_ve.py deleted file mode 100644 index fd8c77b6e41f..000000000000 --- a/tests/pipelines/score_sde_ve/test_score_sde_ve.py +++ /dev/null @@ -1,91 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - -import numpy as np -import torch - -from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel -from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch, torch_device - - -enable_full_determinism() - - -class ScoreSdeVeipelineFastTests(unittest.TestCase): - @property - def dummy_uncond_unet(self): - torch.manual_seed(0) - model = UNet2DModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=3, - out_channels=3, - down_block_types=("DownBlock2D", "AttnDownBlock2D"), - up_block_types=("AttnUpBlock2D", "UpBlock2D"), - ) - return model - - def test_inference(self): - unet = self.dummy_uncond_unet - scheduler = ScoreSdeVeScheduler() - - sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler) - sde_ve.to(torch_device) - sde_ve.set_progress_bar_config(disable=None) - - generator = torch.manual_seed(0) - image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images - - generator = torch.manual_seed(0) - image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[ - 0 - ] - - image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] - - assert image.shape == (1, 32, 32, 3) - expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - - -@nightly -@require_torch -class ScoreSdeVePipelineIntegrationTests(unittest.TestCase): - def test_inference(self): - model_id = "google/ncsnpp-church-256" - model = UNet2DModel.from_pretrained(model_id) - - scheduler = ScoreSdeVeScheduler.from_pretrained(model_id) - - sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler) - sde_ve.to(torch_device) - sde_ve.set_progress_bar_config(disable=None) - - generator = torch.manual_seed(0) - image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images - - image_slice = image[0, -3:, -3:, -1] - - assert image.shape == (1, 256, 256, 3) - - expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 diff --git a/tests/pipelines/spectrogram_diffusion/__init__.py b/tests/pipelines/spectrogram_diffusion/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/tests/pipelines/spectrogram_diffusion/test_spectrogram_diffusion.py b/tests/pipelines/spectrogram_diffusion/test_spectrogram_diffusion.py deleted file mode 100644 index 1d00c7e963bb..000000000000 --- a/tests/pipelines/spectrogram_diffusion/test_spectrogram_diffusion.py +++ /dev/null @@ -1,246 +0,0 @@ -# coding=utf-8 -# Copyright 2022 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import gc -import unittest - -import numpy as np -import torch - -from diffusers import DDPMScheduler, MidiProcessor, SpectrogramDiffusionPipeline -from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder -from diffusers.utils.testing_utils import ( - enable_full_determinism, - nightly, - require_note_seq, - require_onnxruntime, - require_torch_gpu, - skip_mps, - torch_device, -) - -from ..pipeline_params import TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS, TOKENS_TO_AUDIO_GENERATION_PARAMS -from ..test_pipelines_common import PipelineTesterMixin - - -enable_full_determinism() - - -MIDI_FILE = "./tests/fixtures/elise_format0.mid" - - -# The note-seq package throws an error on import because the default installed version of Ipython -# is not compatible with python 3.8 which we run in the CI. -# https://github.com/huggingface/diffusers/actions/runs/4830121056/jobs/8605954838#step:7:98 -@unittest.skip("The note-seq package currently throws an error on import") -class SpectrogramDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - pipeline_class = SpectrogramDiffusionPipeline - required_optional_params = PipelineTesterMixin.required_optional_params - { - "callback", - "latents", - "callback_steps", - "output_type", - "num_images_per_prompt", - } - test_attention_slicing = False - - batch_params = TOKENS_TO_AUDIO_GENERATION_PARAMS - params = TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS - - def get_dummy_components(self): - torch.manual_seed(0) - notes_encoder = SpectrogramNotesEncoder( - max_length=2048, - vocab_size=1536, - d_model=768, - dropout_rate=0.1, - num_layers=1, - num_heads=1, - d_kv=4, - d_ff=2048, - feed_forward_proj="gated-gelu", - ) - - continuous_encoder = SpectrogramContEncoder( - input_dims=128, - targets_context_length=256, - d_model=768, - dropout_rate=0.1, - num_layers=1, - num_heads=1, - d_kv=4, - d_ff=2048, - feed_forward_proj="gated-gelu", - ) - - decoder = T5FilmDecoder( - input_dims=128, - targets_length=256, - max_decoder_noise_time=20000.0, - d_model=768, - num_layers=1, - num_heads=1, - d_kv=4, - d_ff=2048, - dropout_rate=0.1, - ) - - scheduler = DDPMScheduler() - - components = { - "notes_encoder": notes_encoder.eval(), - "continuous_encoder": continuous_encoder.eval(), - "decoder": decoder.eval(), - "scheduler": scheduler, - "melgan": None, - } - return components - - def get_dummy_inputs(self, device, seed=0): - if str(device).startswith("mps"): - generator = torch.manual_seed(seed) - else: - generator = torch.Generator(device=device).manual_seed(seed) - inputs = { - "input_tokens": [ - [1134, 90, 1135, 1133, 1080, 112, 1132, 1080, 1133, 1079, 133, 1132, 1079, 1133, 1] + [0] * 2033 - ], - "generator": generator, - "num_inference_steps": 4, - "output_type": "mel", - } - return inputs - - def test_spectrogram_diffusion(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_dummy_components() - pipe = SpectrogramDiffusionPipeline(**components) - pipe = pipe.to(device) - pipe.set_progress_bar_config(disable=None) - - inputs = self.get_dummy_inputs(device) - output = pipe(**inputs) - mel = output.audios - - mel_slice = mel[0, -3:, -3:] - - assert mel_slice.shape == (3, 3) - expected_slice = np.array( - [-11.512925, -4.788215, -0.46172905, -2.051715, -10.539147, -10.970963, -9.091634, 4.0, 4.0] - ) - assert np.abs(mel_slice.flatten() - expected_slice).max() < 1e-2 - - @skip_mps - def test_save_load_local(self): - return 
super().test_save_load_local() - - @skip_mps - def test_dict_tuple_outputs_equivalent(self): - return super().test_dict_tuple_outputs_equivalent() - - @skip_mps - def test_save_load_optional_components(self): - return super().test_save_load_optional_components() - - @skip_mps - def test_attention_slicing_forward_pass(self): - return super().test_attention_slicing_forward_pass() - - def test_inference_batch_single_identical(self): - pass - - def test_inference_batch_consistent(self): - pass - - @skip_mps - def test_progress_bar(self): - return super().test_progress_bar() - - -@nightly -@require_torch_gpu -@require_onnxruntime -@require_note_seq -class PipelineIntegrationTests(unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - def test_callback(self): - # TODO - test that pipeline can decode tokens in a callback - # so that music can be played live - device = torch_device - - pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion") - melgan = pipe.melgan - pipe.melgan = None - - pipe = pipe.to(device) - pipe.set_progress_bar_config(disable=None) - - def callback(step, mel_output): - # decode mel to audio - audio = melgan(input_features=mel_output.astype(np.float32))[0] - assert len(audio[0]) == 81920 * (step + 1) - # simulate that audio is played - return audio - - processor = MidiProcessor() - input_tokens = processor(MIDI_FILE) - - input_tokens = input_tokens[:3] - generator = torch.manual_seed(0) - pipe(input_tokens, num_inference_steps=5, generator=generator, callback=callback, output_type="mel") - - def test_spectrogram_fast(self): - device = torch_device - - pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion") - pipe = pipe.to(device) - pipe.set_progress_bar_config(disable=None) - processor = MidiProcessor() - - input_tokens = processor(MIDI_FILE) - # just run two denoising loops - input_tokens = input_tokens[:2] - - generator = torch.manual_seed(0) - output = pipe(input_tokens, num_inference_steps=2, generator=generator) - - audio = output.audios[0] - - assert abs(np.abs(audio).sum() - 3612.841) < 1e-1 - - def test_spectrogram(self): - device = torch_device - - pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion") - pipe = pipe.to(device) - pipe.set_progress_bar_config(disable=None) - - processor = MidiProcessor() - - input_tokens = processor(MIDI_FILE) - - # just run 4 denoising loops - input_tokens = input_tokens[:4] - - generator = torch.manual_seed(0) - output = pipe(input_tokens, num_inference_steps=100, generator=generator) - - audio = output.audios[0] - assert abs(np.abs(audio).sum() - 9389.1111) < 5e-2 diff --git a/tests/pipelines/versatile_diffusion/__init__.py b/tests/pipelines/versatile_diffusion/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_dual_guided.py b/tests/pipelines/versatile_diffusion/test_versatile_diffusion_dual_guided.py deleted file mode 100644 index bb8584192ff0..000000000000 --- a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_dual_guided.py +++ /dev/null @@ -1,107 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import gc -import tempfile -import unittest - -import numpy as np -import torch - -from diffusers import VersatileDiffusionDualGuidedPipeline -from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device - - -torch.backends.cuda.matmul.allow_tf32 = False - - -@nightly -@require_torch_gpu -class VersatileDiffusionDualGuidedPipelineIntegrationTests(unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - def test_remove_unused_weights_save_load(self): - pipe = VersatileDiffusionDualGuidedPipeline.from_pretrained("shi-labs/versatile-diffusion") - # remove text_unet - pipe.remove_unused_weights() - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - - second_prompt = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" - ) - - generator = torch.manual_seed(0) - image = pipe( - prompt="first prompt", - image=second_prompt, - text_to_image_strength=0.75, - generator=generator, - guidance_scale=7.5, - num_inference_steps=2, - output_type="numpy", - ).images - - with tempfile.TemporaryDirectory() as tmpdirname: - pipe.save_pretrained(tmpdirname) - pipe = VersatileDiffusionDualGuidedPipeline.from_pretrained(tmpdirname) - - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - - generator = generator.manual_seed(0) - new_image = pipe( - prompt="first prompt", - image=second_prompt, - text_to_image_strength=0.75, - generator=generator, - guidance_scale=7.5, - num_inference_steps=2, - output_type="numpy", - ).images - - assert np.abs(image - new_image).max() < 1e-5, "Models don't have the same forward pass" - - def test_inference_dual_guided(self): - pipe = VersatileDiffusionDualGuidedPipeline.from_pretrained("shi-labs/versatile-diffusion") - pipe.remove_unused_weights() - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - - first_prompt = "cyberpunk 2077" - second_prompt = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" - ) - generator = torch.manual_seed(0) - image = pipe( - prompt=first_prompt, - image=second_prompt, - text_to_image_strength=0.75, - generator=generator, - guidance_scale=7.5, - num_inference_steps=50, - output_type="numpy", - ).images - - image_slice = image[0, 253:256, 253:256, -1] - - assert image.shape == (1, 512, 512, 3) - expected_slice = np.array([0.0787, 0.0849, 0.0826, 0.0812, 0.0807, 0.0795, 0.0818, 0.0798, 0.0779]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 diff --git a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_image_variation.py b/tests/pipelines/versatile_diffusion/test_versatile_diffusion_image_variation.py deleted file mode 100644 index 1f312a0b71ce..000000000000 --- a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_image_variation.py +++ /dev/null @@ -1,57 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import numpy as np -import torch - -from diffusers import VersatileDiffusionImageVariationPipeline -from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device - - -torch.backends.cuda.matmul.allow_tf32 = False - - -class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase): - pass - - -@nightly -@require_torch_gpu -class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase): - def test_inference_image_variations(self): - pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion") - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - - image_prompt = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" - ) - generator = torch.manual_seed(0) - image = pipe( - image=image_prompt, - generator=generator, - guidance_scale=7.5, - num_inference_steps=50, - output_type="numpy", - ).images - - image_slice = image[0, 253:256, 253:256, -1] - - assert image.shape == (1, 512, 512, 3) - expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 diff --git a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_mega.py b/tests/pipelines/versatile_diffusion/test_versatile_diffusion_mega.py deleted file mode 100644 index 585f4f023bc7..000000000000 --- a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_mega.py +++ /dev/null @@ -1,129 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import gc -import tempfile -import unittest - -import numpy as np -import torch - -from diffusers import VersatileDiffusionPipeline -from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device - - -torch.backends.cuda.matmul.allow_tf32 = False - - -class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase): - pass - - -@nightly -@require_torch_gpu -class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - def test_from_save_pretrained(self): - pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16) - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - - prompt_image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" - ) - - generator = torch.manual_seed(0) - image = pipe.dual_guided( - prompt="first prompt", - image=prompt_image, - text_to_image_strength=0.75, - generator=generator, - guidance_scale=7.5, - num_inference_steps=2, - output_type="numpy", - ).images - - with tempfile.TemporaryDirectory() as tmpdirname: - pipe.save_pretrained(tmpdirname) - pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16) - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - - generator = generator.manual_seed(0) - new_image = pipe.dual_guided( - prompt="first prompt", - image=prompt_image, - text_to_image_strength=0.75, - generator=generator, - guidance_scale=7.5, - num_inference_steps=2, - output_type="numpy", - ).images - - assert np.abs(image - new_image).max() < 1e-5, "Models don't have the same forward pass" - - def test_inference_dual_guided_then_text_to_image(self): - pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16) - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - - prompt = "cyberpunk 2077" - init_image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" - ) - generator = torch.manual_seed(0) - image = pipe.dual_guided( - prompt=prompt, - image=init_image, - text_to_image_strength=0.75, - generator=generator, - guidance_scale=7.5, - num_inference_steps=50, - output_type="numpy", - ).images - - image_slice = image[0, 253:256, 253:256, -1] - - assert image.shape == (1, 512, 512, 3) - expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 - - prompt = "A painting of a squirrel eating a burger " - generator = torch.manual_seed(0) - image = pipe.text_to_image( - prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy" - ).images - - image_slice = image[0, 253:256, 253:256, -1] - - assert image.shape == (1, 512, 512, 3) - expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 - - image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images - - image_slice = image[0, 253:256, 253:256, -1] - - assert image.shape == (1, 512, 512, 3) - expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456]) - - assert np.abs(image_slice.flatten() - 
expected_slice).max() < 1e-1 diff --git a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_text_to_image.py b/tests/pipelines/versatile_diffusion/test_versatile_diffusion_text_to_image.py deleted file mode 100644 index e17770778418..000000000000 --- a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_text_to_image.py +++ /dev/null @@ -1,87 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import gc -import tempfile -import unittest - -import numpy as np -import torch - -from diffusers import VersatileDiffusionTextToImagePipeline -from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device - - -torch.backends.cuda.matmul.allow_tf32 = False - - -class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase): - pass - - -@nightly -@require_torch_gpu -class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - def test_remove_unused_weights_save_load(self): - pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion") - # remove text_unet - pipe.remove_unused_weights() - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - - prompt = "A painting of a squirrel eating a burger " - generator = torch.manual_seed(0) - image = pipe( - prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy" - ).images - - with tempfile.TemporaryDirectory() as tmpdirname: - pipe.save_pretrained(tmpdirname) - pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname) - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - - generator = generator.manual_seed(0) - new_image = pipe( - prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy" - ).images - - assert np.abs(image - new_image).max() < 1e-5, "Models don't have the same forward pass" - - def test_inference_text2img(self): - pipe = VersatileDiffusionTextToImagePipeline.from_pretrained( - "shi-labs/versatile-diffusion", torch_dtype=torch.float16 - ) - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - - prompt = "A painting of a squirrel eating a burger " - generator = torch.manual_seed(0) - image = pipe( - prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy" - ).images - - image_slice = image[0, 253:256, 253:256, -1] - - assert image.shape == (1, 512, 512, 3) - expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 diff --git a/tests/pipelines/vq_diffusion/__init__.py b/tests/pipelines/vq_diffusion/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/tests/pipelines/vq_diffusion/test_vq_diffusion.py 
b/tests/pipelines/vq_diffusion/test_vq_diffusion.py deleted file mode 100644 index 88e9f19df709..000000000000 --- a/tests/pipelines/vq_diffusion/test_vq_diffusion.py +++ /dev/null @@ -1,227 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import gc -import unittest - -import numpy as np -import torch -from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer - -from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel -from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings -from diffusers.utils.testing_utils import load_numpy, nightly, require_torch_gpu, torch_device - - -torch.backends.cuda.matmul.allow_tf32 = False - - -class VQDiffusionPipelineFastTests(unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - @property - def num_embed(self): - return 12 - - @property - def num_embeds_ada_norm(self): - return 12 - - @property - def text_embedder_hidden_size(self): - return 32 - - @property - def dummy_vqvae(self): - torch.manual_seed(0) - model = VQModel( - block_out_channels=[32, 64], - in_channels=3, - out_channels=3, - down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], - up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], - latent_channels=3, - num_vq_embeddings=self.num_embed, - vq_embed_dim=3, - ) - return model - - @property - def dummy_tokenizer(self): - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - return tokenizer - - @property - def dummy_text_encoder(self): - torch.manual_seed(0) - config = CLIPTextConfig( - bos_token_id=0, - eos_token_id=2, - hidden_size=self.text_embedder_hidden_size, - intermediate_size=37, - layer_norm_eps=1e-05, - num_attention_heads=4, - num_hidden_layers=5, - pad_token_id=1, - vocab_size=1000, - ) - return CLIPTextModel(config) - - @property - def dummy_transformer(self): - torch.manual_seed(0) - - height = 12 - width = 12 - - model_kwargs = { - "attention_bias": True, - "cross_attention_dim": 32, - "attention_head_dim": height * width, - "num_attention_heads": 1, - "num_vector_embeds": self.num_embed, - "num_embeds_ada_norm": self.num_embeds_ada_norm, - "norm_num_groups": 32, - "sample_size": width, - "activation_fn": "geglu-approximate", - } - - model = Transformer2DModel(**model_kwargs) - return model - - def test_vq_diffusion(self): - device = "cpu" - - vqvae = self.dummy_vqvae - text_encoder = self.dummy_text_encoder - tokenizer = self.dummy_tokenizer - transformer = self.dummy_transformer - scheduler = VQDiffusionScheduler(self.num_embed) - learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False) - - pipe = VQDiffusionPipeline( - vqvae=vqvae, - text_encoder=text_encoder, - tokenizer=tokenizer, - transformer=transformer, - scheduler=scheduler, - 
learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, - ) - pipe = pipe.to(device) - pipe.set_progress_bar_config(disable=None) - - prompt = "teddy bear playing in the pool" - - generator = torch.Generator(device=device).manual_seed(0) - output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np") - image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = pipe( - [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2 - )[0] - - image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] - - assert image.shape == (1, 24, 24, 3) - - expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - - def test_vq_diffusion_classifier_free_sampling(self): - device = "cpu" - - vqvae = self.dummy_vqvae - text_encoder = self.dummy_text_encoder - tokenizer = self.dummy_tokenizer - transformer = self.dummy_transformer - scheduler = VQDiffusionScheduler(self.num_embed) - learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings( - learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length - ) - - pipe = VQDiffusionPipeline( - vqvae=vqvae, - text_encoder=text_encoder, - tokenizer=tokenizer, - transformer=transformer, - scheduler=scheduler, - learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, - ) - pipe = pipe.to(device) - pipe.set_progress_bar_config(disable=None) - - prompt = "teddy bear playing in the pool" - - generator = torch.Generator(device=device).manual_seed(0) - output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np") - image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = pipe( - [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2 - )[0] - - image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] - - assert image.shape == (1, 24, 24, 3) - - expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - - -@nightly -@require_torch_gpu -class VQDiffusionPipelineIntegrationTests(unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - def test_vq_diffusion_classifier_free_sampling(self): - expected_image = load_numpy( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" - "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy" - ) - - pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq") - pipeline = pipeline.to(torch_device) - pipeline.set_progress_bar_config(disable=None) - - # requires GPU generator for gumbel softmax - # don't use GPU generator in tests though - generator = torch.Generator(device=torch_device).manual_seed(0) - output = pipeline( - "teddy bear playing in the pool", - num_images_per_prompt=1, - generator=generator, - output_type="np", - ) - - image = output.images[0] - - assert image.shape == (256, 256, 3) - assert 
np.abs(expected_image - image).max() < 2.0 From 23fe7ecaf2b30093b39d902f24deac3c5d9c364e Mon Sep 17 00:00:00 2001 From: Dhruv Nair Date: Mon, 18 Dec 2023 07:11:44 +0000 Subject: [PATCH 07/17] remove deprecation message --- src/diffusers/pipelines/deprecated/README.md | 3 +++ .../deprecated/alt_diffusion/pipeline_alt_diffusion.py | 2 -- .../deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py | 2 -- .../deprecated/audio_diffusion/pipeline_audio_diffusion.py | 1 - .../pipeline_latent_diffusion_uncond.py | 1 - src/diffusers/pipelines/deprecated/pndm/pipeline_pndm.py | 1 - src/diffusers/pipelines/deprecated/repaint/pipeline_repaint.py | 1 - .../pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py | 1 - .../spectrogram_diffusion/pipeline_spectrogram_diffusion.py | 1 - .../stable_diffusion_variants/pipeline_cycle_diffusion.py | 1 - .../pipeline_onnx_stable_diffusion_inpaint_legacy.py | 1 - .../pipeline_stable_diffusion_inpaint_legacy.py | 1 - .../pipeline_stable_diffusion_model_editing.py | 1 - .../pipeline_stable_diffusion_paradigms.py | 1 - .../pipeline_stable_diffusion_pix2pix_zero.py | 1 - .../stochastic_karras_ve/pipeline_stochastic_karras_ve.py | 1 - .../versatile_diffusion/pipeline_versatile_diffusion.py | 1 - .../pipeline_versatile_diffusion_dual_guided.py | 1 - .../pipeline_versatile_diffusion_image_variation.py | 1 - .../pipeline_versatile_diffusion_text_to_image.py | 1 - .../pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py | 1 - 21 files changed, 3 insertions(+), 22 deletions(-) create mode 100644 src/diffusers/pipelines/deprecated/README.md diff --git a/src/diffusers/pipelines/deprecated/README.md b/src/diffusers/pipelines/deprecated/README.md new file mode 100644 index 000000000000..1e21dbbbd96c --- /dev/null +++ b/src/diffusers/pipelines/deprecated/README.md @@ -0,0 +1,3 @@ +# Deprecated Pipelines + +This folder contains pipelines that have very low usage as measured by model downloads, issues and PRs. While you can still use the pipelines just as before, we will stop testing the pipelines and will not accept any changes to existing files. \ No newline at end of file diff --git a/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py b/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py index 304b87b2c1da..b9dd9400725b 100644 --- a/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py +++ b/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py @@ -154,8 +154,6 @@ class AltDiffusionPipeline( A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. """ - deprecate("AltDiffusionPipeline", "1.0.0", "") - model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] _exclude_from_cpu_offload = ["safety_checker"] diff --git a/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py b/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py index 263f72c86177..f6a40e1354c9 100644 --- a/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py +++ b/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py @@ -194,8 +194,6 @@ class AltDiffusionImg2ImgPipeline( A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. 
""" - deprecate("AltDiffusionImg2ImgPipeline", "1.0.0", "") - model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] _exclude_from_cpu_offload = ["safety_checker"] diff --git a/src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py b/src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py index 7bb288ea893c..75e1ff2a184d 100644 --- a/src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py +++ b/src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py @@ -47,7 +47,6 @@ class AudioDiffusionPipeline(DiffusionPipeline): [`DDIMScheduler`] or [`DDPMScheduler`]. """ - deprecate("AudioDiffusionPipeline", "1.0.0", "") _optional_components = ["vqvae"] def __init__( diff --git a/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py b/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py index 7abab3d43e58..347371f06dbe 100644 --- a/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py +++ b/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py @@ -40,7 +40,6 @@ class LDMPipeline(DiffusionPipeline): [`DDIMScheduler`] is used in combination with `unet` to denoise the encoded image latents. """ - deprecate("LDMPipeline", "1.0.0", "") def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler): super().__init__() diff --git a/src/diffusers/pipelines/deprecated/pndm/pipeline_pndm.py b/src/diffusers/pipelines/deprecated/pndm/pipeline_pndm.py index 1b103d6c476a..ef0dfb9137ca 100644 --- a/src/diffusers/pipelines/deprecated/pndm/pipeline_pndm.py +++ b/src/diffusers/pipelines/deprecated/pndm/pipeline_pndm.py @@ -38,7 +38,6 @@ class PNDMPipeline(DiffusionPipeline): A `PNDMScheduler` to be used in combination with `unet` to denoise the encoded image. """ - deprecate("PNDMPipeline", "1.0.0", "") unet: UNet2DModel scheduler: PNDMScheduler diff --git a/src/diffusers/pipelines/deprecated/repaint/pipeline_repaint.py b/src/diffusers/pipelines/deprecated/repaint/pipeline_repaint.py index b87641a9c7a4..c7c96d684d36 100644 --- a/src/diffusers/pipelines/deprecated/repaint/pipeline_repaint.py +++ b/src/diffusers/pipelines/deprecated/repaint/pipeline_repaint.py @@ -87,7 +87,6 @@ class RePaintPipeline(DiffusionPipeline): A `RePaintScheduler` to be used in combination with `unet` to denoise the encoded image. """ - deprecate("RePaintPipeline", "1.0.0", "") unet: UNet2DModel scheduler: RePaintScheduler diff --git a/src/diffusers/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py b/src/diffusers/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py index 419b379b7b82..206d967ef599 100644 --- a/src/diffusers/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py +++ b/src/diffusers/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py @@ -37,7 +37,6 @@ class ScoreSdeVePipeline(DiffusionPipeline): A `ScoreSdeVeScheduler` to be used in combination with `unet` to denoise the encoded image. 
""" - deprecate("ScoreSdeVePipeline", "1.0.0", "") unet: UNet2DModel scheduler: ScoreSdeVeScheduler diff --git a/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py b/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py index 46f272503bd2..0943a2359c0f 100644 --- a/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py +++ b/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py @@ -55,7 +55,6 @@ class SpectrogramDiffusionPipeline(DiffusionPipeline): melgan ([`OnnxRuntimeModel`]): """ - deprecate("SpectrogramDiffusionPipeline", "1.0.0", "") _optional_components = ["melgan"] diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py index ebe8a6a59c7b..856258602052 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py @@ -163,7 +163,6 @@ class CycleDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lor A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. """ - deprecate("CycleDiffusionPipeline", "1.0.0", "") model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = ["safety_checker", "feature_extractor"] diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py index d7ed6b228e98..81f7270794dc 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py @@ -67,7 +67,6 @@ class OnnxStableDiffusionInpaintPipelineLegacy(DiffusionPipeline): Model that extracts features from generated images to be used as inputs for the `safety_checker`. """ - deprecate("OnnxStableDiffusionInpaintPipelineLegacy", "1.0.0", "") _optional_components = ["safety_checker", "feature_extractor"] _is_onnx = True diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py index a9f6883e0b1a..a8b2e87cb4a6 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py @@ -116,7 +116,6 @@ class StableDiffusionInpaintPipelineLegacy( Model that extracts features from generated images to be used as inputs for the `safety_checker`. 
""" - deprecate("StableDiffusionInpaintPipelineLegacy", "1.0.0", "") model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = ["feature_extractor"] diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py index d80dd87c52c4..28c03229a4c0 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py @@ -67,7 +67,6 @@ class StableDiffusionModelEditingPipeline(DiffusionPipeline, TextualInversionLoa Textual augmentations to apply while editing the text-to-image model. Set to `[]` for no augmentations. """ - deprecate("StableDiffusionModelEditingPipeline", "1.0.0", "") model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = ["safety_checker", "feature_extractor"] diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py index edc8a5efcca3..0c6aa51f4b8e 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py @@ -97,7 +97,6 @@ class StableDiffusionParadigmsPipeline( A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. """ - deprecate("StableDiffusionParadigmsPipeline", "1.0.0", "") model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = ["safety_checker", "feature_extractor"] diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py index b6191890c0c7..bbefdca165eb 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py @@ -311,7 +311,6 @@ class StableDiffusionPix2PixZeroPipeline(DiffusionPipeline): pipeline publicly. """ - deprecate("StableDiffusionPix2PixZeroPipeline", "1.0.0", "") model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = [ diff --git a/src/diffusers/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py b/src/diffusers/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py index d59a2742fbd0..42d5645a8e81 100644 --- a/src/diffusers/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py +++ b/src/diffusers/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py @@ -34,7 +34,6 @@ class KarrasVePipeline(DiffusionPipeline): A scheduler to be used in combination with `unet` to denoise the encoded image. 
""" - deprecate("KarrasVePipeline", "1.0.0", "") # add type hints for linting unet: UNet2DModel diff --git a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py index 727a607022a5..44157621e839 100644 --- a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py +++ b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py @@ -44,7 +44,6 @@ class VersatileDiffusionPipeline(DiffusionPipeline): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. """ - deprecate("VersatileDiffusionPipeline", "1.0.0", "") tokenizer: CLIPTokenizer image_feature_extractor: CLIPImageProcessor diff --git a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py index c9dea29af0ec..3f69ef36e376 100644 --- a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py +++ b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py @@ -59,7 +59,6 @@ class VersatileDiffusionDualGuidedPipeline(DiffusionPipeline): [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. """ - deprecate("VersatileDiffusionDualGuidedPipeline", "1.0.0", "") model_cpu_offload_seq = "bert->unet->vqvae" diff --git a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py index 2cbd74a3a7a4..c7adfc2ee12f 100644 --- a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py +++ b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py @@ -53,7 +53,6 @@ class VersatileDiffusionImageVariationPipeline(DiffusionPipeline): [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. """ - deprecate("VersatileDiffusionImageVariationPipeline", "1.0.0", "") model_cpu_offload_seq = "bert->unet->vqvae" diff --git a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py index 8e1a0617f1b5..1ec05bc8372f 100644 --- a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py +++ b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py @@ -52,7 +52,6 @@ class VersatileDiffusionTextToImagePipeline(DiffusionPipeline): [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. """ - deprecate("VersatileDiffusionTextToImagePipeline", "1.0.0", "") model_cpu_offload_seq = "bert->unet->vqvae" diff --git a/src/diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py b/src/diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py index 212b6796b39a..6f1654124e92 100644 --- a/src/diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py +++ b/src/diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py @@ -70,7 +70,6 @@ class VQDiffusionPipeline(DiffusionPipeline): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. 
""" - deprecate("VQDiffusionPipeline", "1.0.0", "") vqvae: VQModel text_encoder: CLIPTextModel From da12b1c4b1ec31f4ea13ba167f6e96d3d4d415ee Mon Sep 17 00:00:00 2001 From: Dhruv Nair Date: Mon, 18 Dec 2023 07:13:31 +0000 Subject: [PATCH 08/17] make style --- .../deprecated/audio_diffusion/pipeline_audio_diffusion.py | 1 - .../pipeline_latent_diffusion_uncond.py | 2 -- src/diffusers/pipelines/deprecated/pndm/pipeline_pndm.py | 2 -- src/diffusers/pipelines/deprecated/repaint/pipeline_repaint.py | 1 - .../pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py | 2 -- .../spectrogram_diffusion/pipeline_spectrogram_diffusion.py | 3 +-- .../stable_diffusion_variants/pipeline_cycle_diffusion.py | 1 - .../pipeline_onnx_stable_diffusion_inpaint_legacy.py | 1 - .../pipeline_stable_diffusion_inpaint_legacy.py | 1 - .../pipeline_stable_diffusion_model_editing.py | 1 - .../pipeline_stable_diffusion_paradigms.py | 1 - .../pipeline_stable_diffusion_pix2pix_zero.py | 1 - .../stochastic_karras_ve/pipeline_stochastic_karras_ve.py | 2 -- .../versatile_diffusion/pipeline_versatile_diffusion.py | 3 +-- .../pipeline_versatile_diffusion_dual_guided.py | 1 - .../pipeline_versatile_diffusion_image_variation.py | 1 - .../pipeline_versatile_diffusion_text_to_image.py | 1 - .../pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py | 3 +-- 18 files changed, 3 insertions(+), 25 deletions(-) diff --git a/src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py b/src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py index 75e1ff2a184d..731d22f3def8 100644 --- a/src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py +++ b/src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py @@ -22,7 +22,6 @@ from ....models import AutoencoderKL, UNet2DConditionModel from ....schedulers import DDIMScheduler, DDPMScheduler -from ....utils import deprecate from ....utils.torch_utils import randn_tensor from ...pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel diff --git a/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py b/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py index 347371f06dbe..4e14d1708ccf 100644 --- a/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py +++ b/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py @@ -19,7 +19,6 @@ from ....models import UNet2DModel, VQModel from ....schedulers import DDIMScheduler -from ....utils import deprecate from ....utils.torch_utils import randn_tensor from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput @@ -40,7 +39,6 @@ class LDMPipeline(DiffusionPipeline): [`DDIMScheduler`] is used in combination with `unet` to denoise the encoded image latents. 
""" - def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler): super().__init__() self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler) diff --git a/src/diffusers/pipelines/deprecated/pndm/pipeline_pndm.py b/src/diffusers/pipelines/deprecated/pndm/pipeline_pndm.py index ef0dfb9137ca..c988e8292987 100644 --- a/src/diffusers/pipelines/deprecated/pndm/pipeline_pndm.py +++ b/src/diffusers/pipelines/deprecated/pndm/pipeline_pndm.py @@ -19,7 +19,6 @@ from ....models import UNet2DModel from ....schedulers import PNDMScheduler -from ....utils import deprecate from ....utils.torch_utils import randn_tensor from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput @@ -38,7 +37,6 @@ class PNDMPipeline(DiffusionPipeline): A `PNDMScheduler` to be used in combination with `unet` to denoise the encoded image. """ - unet: UNet2DModel scheduler: PNDMScheduler diff --git a/src/diffusers/pipelines/deprecated/repaint/pipeline_repaint.py b/src/diffusers/pipelines/deprecated/repaint/pipeline_repaint.py index c7c96d684d36..eeea28d4d06f 100644 --- a/src/diffusers/pipelines/deprecated/repaint/pipeline_repaint.py +++ b/src/diffusers/pipelines/deprecated/repaint/pipeline_repaint.py @@ -87,7 +87,6 @@ class RePaintPipeline(DiffusionPipeline): A `RePaintScheduler` to be used in combination with `unet` to denoise the encoded image. """ - unet: UNet2DModel scheduler: RePaintScheduler model_cpu_offload_seq = "unet" diff --git a/src/diffusers/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py b/src/diffusers/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py index 206d967ef599..b9b3eb08f845 100644 --- a/src/diffusers/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py +++ b/src/diffusers/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py @@ -18,7 +18,6 @@ from ....models import UNet2DModel from ....schedulers import ScoreSdeVeScheduler -from ....utils import deprecate from ....utils.torch_utils import randn_tensor from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput @@ -37,7 +36,6 @@ class ScoreSdeVePipeline(DiffusionPipeline): A `ScoreSdeVeScheduler` to be used in combination with `unet` to denoise the encoded image. 
""" - unet: UNet2DModel scheduler: ScoreSdeVeScheduler diff --git a/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py b/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py index 0943a2359c0f..b803d921a388 100644 --- a/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py +++ b/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py @@ -21,7 +21,7 @@ from ....models import T5FilmDecoder from ....schedulers import DDPMScheduler -from ....utils import deprecate, is_onnx_available, logging +from ....utils import is_onnx_available, logging from ....utils.torch_utils import randn_tensor @@ -55,7 +55,6 @@ class SpectrogramDiffusionPipeline(DiffusionPipeline): melgan ([`OnnxRuntimeModel`]): """ - _optional_components = ["melgan"] def __init__( diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py index 856258602052..7bb709bae6ac 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py @@ -163,7 +163,6 @@ class CycleDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Lor A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. """ - model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = ["safety_checker", "feature_extractor"] diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py index 81f7270794dc..0aa5e68bfcb4 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py @@ -67,7 +67,6 @@ class OnnxStableDiffusionInpaintPipelineLegacy(DiffusionPipeline): Model that extracts features from generated images to be used as inputs for the `safety_checker`. """ - _optional_components = ["safety_checker", "feature_extractor"] _is_onnx = True diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py index a8b2e87cb4a6..4daa1c07f0c6 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py @@ -116,7 +116,6 @@ class StableDiffusionInpaintPipelineLegacy( Model that extracts features from generated images to be used as inputs for the `safety_checker`. 
""" - model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = ["feature_extractor"] _exclude_from_cpu_offload = ["safety_checker"] diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py index 28c03229a4c0..b5ec477d34d6 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py @@ -67,7 +67,6 @@ class StableDiffusionModelEditingPipeline(DiffusionPipeline, TextualInversionLoa Textual augmentations to apply while editing the text-to-image model. Set to `[]` for no augmentations. """ - model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = ["safety_checker", "feature_extractor"] _exclude_from_cpu_offload = ["safety_checker"] diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py index 0c6aa51f4b8e..3c9d744c6dfa 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py @@ -97,7 +97,6 @@ class StableDiffusionParadigmsPipeline( A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. """ - model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = ["safety_checker", "feature_extractor"] _exclude_from_cpu_offload = ["safety_checker"] diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py index bbefdca165eb..f8d0603a10c3 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py @@ -311,7 +311,6 @@ class StableDiffusionPix2PixZeroPipeline(DiffusionPipeline): pipeline publicly. """ - model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = [ "safety_checker", diff --git a/src/diffusers/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py b/src/diffusers/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py index 42d5645a8e81..55ca6186626d 100644 --- a/src/diffusers/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py +++ b/src/diffusers/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py @@ -18,7 +18,6 @@ from ....models import UNet2DModel from ....schedulers import KarrasVeScheduler -from ....utils import deprecate from ....utils.torch_utils import randn_tensor from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput @@ -34,7 +33,6 @@ class KarrasVePipeline(DiffusionPipeline): A scheduler to be used in combination with `unet` to denoise the encoded image. 
""" - # add type hints for linting unet: UNet2DModel scheduler: KarrasVeScheduler diff --git a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py index 44157621e839..4455d20df213 100644 --- a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py +++ b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py @@ -7,7 +7,7 @@ from ....models import AutoencoderKL, UNet2DConditionModel from ....schedulers import KarrasDiffusionSchedulers -from ....utils import deprecate, logging +from ....utils import logging from ...pipeline_utils import DiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline @@ -44,7 +44,6 @@ class VersatileDiffusionPipeline(DiffusionPipeline): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. """ - tokenizer: CLIPTokenizer image_feature_extractor: CLIPImageProcessor text_encoder: CLIPTextModel diff --git a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py index 3f69ef36e376..168e6a44a5c9 100644 --- a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py +++ b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py @@ -59,7 +59,6 @@ class VersatileDiffusionDualGuidedPipeline(DiffusionPipeline): [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. """ - model_cpu_offload_seq = "bert->unet->vqvae" tokenizer: CLIPTokenizer diff --git a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py index c7adfc2ee12f..a2111283a6dd 100644 --- a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py +++ b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py @@ -53,7 +53,6 @@ class VersatileDiffusionImageVariationPipeline(DiffusionPipeline): [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. """ - model_cpu_offload_seq = "bert->unet->vqvae" image_feature_extractor: CLIPImageProcessor diff --git a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py index 1ec05bc8372f..de6ab3891214 100644 --- a/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py +++ b/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py @@ -52,7 +52,6 @@ class VersatileDiffusionTextToImagePipeline(DiffusionPipeline): [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. 
""" - model_cpu_offload_seq = "bert->unet->vqvae" tokenizer: CLIPTokenizer diff --git a/src/diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py b/src/diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py index 6f1654124e92..d7c2945b463c 100644 --- a/src/diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py +++ b/src/diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py @@ -20,7 +20,7 @@ from ....configuration_utils import ConfigMixin, register_to_config from ....models import ModelMixin, Transformer2DModel, VQModel from ....schedulers import VQDiffusionScheduler -from ....utils import deprecate, logging +from ....utils import logging from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput @@ -70,7 +70,6 @@ class VQDiffusionPipeline(DiffusionPipeline): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. """ - vqvae: VQModel text_encoder: CLIPTextModel tokenizer: CLIPTokenizer From a4a1404366503ee6d6f2deed2c04809aa61b0302 Mon Sep 17 00:00:00 2001 From: Dhruv Nair Date: Mon, 18 Dec 2023 07:16:35 +0000 Subject: [PATCH 09/17] fix copies --- .../stable_diffusion/pipeline_stable_diffusion_diffedit.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_diffedit.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_diffedit.py index 3d48c811cdf1..81d936be62b4 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_diffedit.py +++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_diffedit.py @@ -788,7 +788,6 @@ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype latents = latents * self.scheduler.init_noise_sigma return latents - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_pix2pix_zero.StableDiffusionPix2PixZeroPipeline.prepare_image_latents def prepare_image_latents(self, image, batch_size, dtype, device, generator=None): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( From e66b520fd0b5b81378e9a86d567e32a01de3c979 Mon Sep 17 00:00:00 2001 From: Dhruv Nair Date: Mon, 18 Dec 2023 07:38:07 +0000 Subject: [PATCH 10/17] clean up --- src/diffusers/pipelines/stable_diffusion/__init__.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/diffusers/pipelines/stable_diffusion/__init__.py b/src/diffusers/pipelines/stable_diffusion/__init__.py index bb4ea55a24a1..dbd79ec1f367 100644 --- a/src/diffusers/pipelines/stable_diffusion/__init__.py +++ b/src/diffusers/pipelines/stable_diffusion/__init__.py @@ -133,9 +133,6 @@ from ...utils.dummy_torch_and_transformers_objects import * else: - from ..deprecated.stable_diffusion_variants.pipeline_stable_diffusion_inpaint_legacy import ( - StableDiffusionInpaintPipelineLegacy, - ) from .clip_image_project_model import CLIPImageProjection from .pipeline_stable_diffusion import ( StableDiffusionPipeline, From c6876488428eabc909e9c8f6b92f3f3dce05215c Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Mon, 18 Dec 2023 11:29:09 +0100 Subject: [PATCH 11/17] clean --- docs/source/en/api/pipelines/alt_diffusion.md | 47 --- .../en/api/pipelines/audio_diffusion.md | 35 --- docs/source/en/api/pipelines/audioldm.md | 50 --- .../en/api/pipelines/cycle_diffusion.md | 33 -- .../api/pipelines/latent_diffusion_uncond.md | 35 --- docs/source/en/api/pipelines/model_editing.md | 35 --- docs/source/en/api/pipelines/musicldm.md | 52 ---- 
docs/source/en/api/pipelines/paradigms.md | 51 ---- docs/source/en/api/pipelines/pix2pix_zero.md | 289 ------------------ docs/source/en/api/pipelines/pndm.md | 35 --- docs/source/en/api/pipelines/repaint.md | 37 --- docs/source/en/api/pipelines/score_sde_ve.md | 35 --- .../en/api/pipelines/spectrogram_diffusion.md | 37 --- .../en/api/pipelines/stochastic_karras_ve.md | 33 -- .../en/api/pipelines/versatile_diffusion.md | 54 ---- docs/source/en/api/pipelines/vq_diffusion.md | 35 --- 16 files changed, 893 deletions(-) delete mode 100644 docs/source/en/api/pipelines/alt_diffusion.md delete mode 100644 docs/source/en/api/pipelines/audio_diffusion.md delete mode 100644 docs/source/en/api/pipelines/audioldm.md delete mode 100644 docs/source/en/api/pipelines/cycle_diffusion.md delete mode 100644 docs/source/en/api/pipelines/latent_diffusion_uncond.md delete mode 100644 docs/source/en/api/pipelines/model_editing.md delete mode 100644 docs/source/en/api/pipelines/musicldm.md delete mode 100644 docs/source/en/api/pipelines/paradigms.md delete mode 100644 docs/source/en/api/pipelines/pix2pix_zero.md delete mode 100644 docs/source/en/api/pipelines/pndm.md delete mode 100644 docs/source/en/api/pipelines/repaint.md delete mode 100644 docs/source/en/api/pipelines/score_sde_ve.md delete mode 100644 docs/source/en/api/pipelines/spectrogram_diffusion.md delete mode 100644 docs/source/en/api/pipelines/stochastic_karras_ve.md delete mode 100644 docs/source/en/api/pipelines/versatile_diffusion.md delete mode 100644 docs/source/en/api/pipelines/vq_diffusion.md diff --git a/docs/source/en/api/pipelines/alt_diffusion.md b/docs/source/en/api/pipelines/alt_diffusion.md deleted file mode 100644 index d0326affbb63..000000000000 --- a/docs/source/en/api/pipelines/alt_diffusion.md +++ /dev/null @@ -1,47 +0,0 @@ - - -# AltDiffusion - -AltDiffusion was proposed in [AltCLIP: Altering the Language Encoder in CLIP for Extended Language Capabilities](https://huggingface.co/papers/2211.06679) by Zhongzhi Chen, Guang Liu, Bo-Wen Zhang, Fulong Ye, Qinghong Yang, Ledell Wu. - -The abstract from the paper is: - -*In this work, we present a conceptually simple and effective method to train a strong bilingual/multilingual multimodal representation model. Starting from the pre-trained multimodal representation model CLIP released by OpenAI, we altered its text encoder with a pre-trained multilingual text encoder XLM-R, and aligned both languages and image representations by a two-stage training schema consisting of teacher learning and contrastive learning. We validate our method through evaluations of a wide range of tasks. We set new state-of-the-art performances on a bunch of tasks including ImageNet-CN, Flicker30k-CN, COCO-CN and XTD. Further, we obtain very close performances with CLIP on almost all tasks, suggesting that one can simply alter the text encoder in CLIP for extended capabilities such as multilingual understanding. Our models and code are available at [this https URL](https://github.com/FlagAI-Open/FlagAI).* - -## Tips - -`AltDiffusion` is conceptually the same as [Stable Diffusion](./stable_diffusion/overview). - - - -Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. 
- - - -## AltDiffusionPipeline - -[[autodoc]] AltDiffusionPipeline - - all - - __call__ - -## AltDiffusionImg2ImgPipeline - -[[autodoc]] AltDiffusionImg2ImgPipeline - - all - - __call__ - -## AltDiffusionPipelineOutput - -[[autodoc]] pipelines.alt_diffusion.AltDiffusionPipelineOutput - - all - - __call__ diff --git a/docs/source/en/api/pipelines/audio_diffusion.md b/docs/source/en/api/pipelines/audio_diffusion.md deleted file mode 100644 index 3d140fe202a6..000000000000 --- a/docs/source/en/api/pipelines/audio_diffusion.md +++ /dev/null @@ -1,35 +0,0 @@ - - -# Audio Diffusion - -[Audio Diffusion](https://github.com/teticio/audio-diffusion) is by Robert Dargavel Smith, and it leverages the recent advances in image generation from diffusion models by converting audio samples to and from Mel spectrogram images. - - - -Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. - - - -## AudioDiffusionPipeline -[[autodoc]] AudioDiffusionPipeline - - all - - __call__ - -## AudioPipelineOutput -[[autodoc]] pipelines.AudioPipelineOutput - -## ImagePipelineOutput -[[autodoc]] pipelines.ImagePipelineOutput - -## Mel -[[autodoc]] Mel diff --git a/docs/source/en/api/pipelines/audioldm.md b/docs/source/en/api/pipelines/audioldm.md deleted file mode 100644 index 43fb0f1a3bf4..000000000000 --- a/docs/source/en/api/pipelines/audioldm.md +++ /dev/null @@ -1,50 +0,0 @@ - - -# AudioLDM - -AudioLDM was proposed in [AudioLDM: Text-to-Audio Generation with Latent Diffusion Models](https://huggingface.co/papers/2301.12503) by Haohe Liu et al. Inspired by [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/overview), AudioLDM -is a text-to-audio _latent diffusion model (LDM)_ that learns continuous audio representations from [CLAP](https://huggingface.co/docs/transformers/main/model_doc/clap) -latents. AudioLDM takes a text prompt as input and predicts the corresponding audio. It can generate text-conditional -sound effects, human speech and music. - -The abstract from the paper is: - -*Text-to-audio (TTA) system has recently gained attention for its ability to synthesize general audio based on text descriptions. However, previous studies in TTA have limited generation quality with high computational costs. In this study, we propose AudioLDM, a TTA system that is built on a latent space to learn the continuous audio representations from contrastive language-audio pretraining (CLAP) latents. The pretrained CLAP models enable us to train LDMs with audio embedding while providing text embedding as a condition during sampling. By learning the latent representations of audio signals and their compositions without modeling the cross-modal relationship, AudioLDM is advantageous in both generation quality and computational efficiency. Trained on AudioCaps with a single GPU, AudioLDM achieves state-of-the-art TTA performance measured by both objective and subjective metrics (e.g., frechet distance). Moreover, AudioLDM is the first TTA system that enables various text-guided audio manipulations (e.g., style transfer) in a zero-shot fashion. 
Our implementation and demos are available at [this https URL](https://audioldm.github.io/).* - -The original codebase can be found at [haoheliu/AudioLDM](https://github.com/haoheliu/AudioLDM). - -## Tips - -When constructing a prompt, keep in mind: - -* Descriptive prompt inputs work best; you can use adjectives to describe the sound (for example, "high quality" or "clear") and make the prompt context specific (for example, "water stream in a forest" instead of "stream"). -* It's best to use general terms like "cat" or "dog" instead of specific names or abstract objects the model may not be familiar with. - -During inference: - -* The _quality_ of the predicted audio sample can be controlled by the `num_inference_steps` argument; higher steps give higher quality audio at the expense of slower inference. -* The _length_ of the predicted audio sample can be controlled by varying the `audio_length_in_s` argument. - - - -Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. - - - -## AudioLDMPipeline -[[autodoc]] AudioLDMPipeline - - all - - __call__ - -## AudioPipelineOutput -[[autodoc]] pipelines.AudioPipelineOutput diff --git a/docs/source/en/api/pipelines/cycle_diffusion.md b/docs/source/en/api/pipelines/cycle_diffusion.md deleted file mode 100644 index 13ada0594a6a..000000000000 --- a/docs/source/en/api/pipelines/cycle_diffusion.md +++ /dev/null @@ -1,33 +0,0 @@ - - -# Cycle Diffusion - -Cycle Diffusion is a text guided image-to-image generation model proposed in [Unifying Diffusion Models' Latent Space, with Applications to CycleDiffusion and Guidance](https://huggingface.co/papers/2210.05559) by Chen Henry Wu, Fernando De la Torre. - -The abstract from the paper is: - -*Diffusion models have achieved unprecedented performance in generative modeling. The commonly-adopted formulation of the latent code of diffusion models is a sequence of gradually denoised samples, as opposed to the simpler (e.g., Gaussian) latent space of GANs, VAEs, and normalizing flows. This paper provides an alternative, Gaussian formulation of the latent space of various diffusion models, as well as an invertible DPM-Encoder that maps images into the latent space. While our formulation is purely based on the definition of diffusion models, we demonstrate several intriguing consequences. (1) Empirically, we observe that a common latent space emerges from two diffusion models trained independently on related domains. In light of this finding, we propose CycleDiffusion, which uses DPM-Encoder for unpaired image-to-image translation. Furthermore, applying CycleDiffusion to text-to-image diffusion models, we show that large-scale text-to-image diffusion models can be used as zero-shot image-to-image editors. (2) One can guide pre-trained diffusion models and GANs by controlling the latent codes in a unified, plug-and-play formulation based on energy-based models. Using the CLIP model and a face recognition model as guidance, we demonstrate that diffusion models have better coverage of low-density sub-populations and individuals than GANs. 
The code is publicly available at [this https URL](https://github.com/ChenWu98/cycle-diffusion).* - - - -Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. - - - -## CycleDiffusionPipeline -[[autodoc]] CycleDiffusionPipeline - - all - - __call__ - -## StableDiffusionPiplineOutput -[[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput diff --git a/docs/source/en/api/pipelines/latent_diffusion_uncond.md b/docs/source/en/api/pipelines/latent_diffusion_uncond.md deleted file mode 100644 index 54835c2115b9..000000000000 --- a/docs/source/en/api/pipelines/latent_diffusion_uncond.md +++ /dev/null @@ -1,35 +0,0 @@ - - -# Unconditional Latent Diffusion - -Unconditional Latent Diffusion was proposed in [High-Resolution Image Synthesis with Latent Diffusion Models](https://huggingface.co/papers/2112.10752) by Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, Björn Ommer. - -The abstract from the paper is: - -*By decomposing the image formation process into a sequential application of denoising autoencoders, diffusion models (DMs) achieve state-of-the-art synthesis results on image data and beyond. Additionally, their formulation allows for a guiding mechanism to control the image generation process without retraining. However, since these models typically operate directly in pixel space, optimization of powerful DMs often consumes hundreds of GPU days and inference is expensive due to sequential evaluations. To enable DM training on limited computational resources while retaining their quality and flexibility, we apply them in the latent space of powerful pretrained autoencoders. In contrast to previous work, training diffusion models on such a representation allows for the first time to reach a near-optimal point between complexity reduction and detail preservation, greatly boosting visual fidelity. By introducing cross-attention layers into the model architecture, we turn diffusion models into powerful and flexible generators for general conditioning inputs such as text or bounding boxes and high-resolution synthesis becomes possible in a convolutional manner. Our latent diffusion models (LDMs) achieve a new state of the art for image inpainting and highly competitive performance on various tasks, including unconditional image generation, semantic scene synthesis, and super-resolution, while significantly reducing computational requirements compared to pixel-based DMs.* - -The original codebase can be found at [CompVis/latent-diffusion](https://github.com/CompVis/latent-diffusion). - - - -Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. 
- - - -## LDMPipeline -[[autodoc]] LDMPipeline - - all - - __call__ - -## ImagePipelineOutput -[[autodoc]] pipelines.ImagePipelineOutput diff --git a/docs/source/en/api/pipelines/model_editing.md b/docs/source/en/api/pipelines/model_editing.md deleted file mode 100644 index 2d94a50e4355..000000000000 --- a/docs/source/en/api/pipelines/model_editing.md +++ /dev/null @@ -1,35 +0,0 @@ - - -# Text-to-image model editing - -[Editing Implicit Assumptions in Text-to-Image Diffusion Models](https://huggingface.co/papers/2303.08084) is by Hadas Orgad, Bahjat Kawar, and Yonatan Belinkov. This pipeline enables editing diffusion model weights, such that its assumptions of a given concept are changed. The resulting change is expected to take effect in all prompt generations related to the edited concept. - -The abstract from the paper is: - -*Text-to-image diffusion models often make implicit assumptions about the world when generating images. While some assumptions are useful (e.g., the sky is blue), they can also be outdated, incorrect, or reflective of social biases present in the training data. Thus, there is a need to control these assumptions without requiring explicit user input or costly re-training. In this work, we aim to edit a given implicit assumption in a pre-trained diffusion model. Our Text-to-Image Model Editing method, TIME for short, receives a pair of inputs: a "source" under-specified prompt for which the model makes an implicit assumption (e.g., "a pack of roses"), and a "destination" prompt that describes the same setting, but with a specified desired attribute (e.g., "a pack of blue roses"). TIME then updates the model's cross-attention layers, as these layers assign visual meaning to textual tokens. We edit the projection matrices in these layers such that the source prompt is projected close to the destination prompt. Our method is highly efficient, as it modifies a mere 2.2% of the model's parameters in under one second. To evaluate model editing approaches, we introduce TIMED (TIME Dataset), containing 147 source and destination prompt pairs from various domains. Our experiments (using Stable Diffusion) show that TIME is successful in model editing, generalizes well for related prompts unseen during editing, and imposes minimal effect on unrelated generations.* - -You can find additional information about model editing on the [project page](https://time-diffusion.github.io/), [original codebase](https://github.com/bahjat-kawar/time-diffusion), and try it out in a [demo](https://huggingface.co/spaces/bahjat-kawar/time-diffusion). - - - -Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. 
- - - -## StableDiffusionModelEditingPipeline -[[autodoc]] StableDiffusionModelEditingPipeline - - __call__ - - all - -## StableDiffusionPipelineOutput -[[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput diff --git a/docs/source/en/api/pipelines/musicldm.md b/docs/source/en/api/pipelines/musicldm.md deleted file mode 100644 index 896f707c76d7..000000000000 --- a/docs/source/en/api/pipelines/musicldm.md +++ /dev/null @@ -1,52 +0,0 @@ - - -# MusicLDM - -MusicLDM was proposed in [MusicLDM: Enhancing Novelty in Text-to-Music Generation Using Beat-Synchronous Mixup Strategies](https://huggingface.co/papers/2308.01546) by Ke Chen, Yusong Wu, Haohe Liu, Marianna Nezhurina, Taylor Berg-Kirkpatrick, Shlomo Dubnov. -MusicLDM takes a text prompt as input and predicts the corresponding music sample. - -Inspired by [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/overview) and [AudioLDM](https://huggingface.co/docs/diffusers/api/pipelines/audioldm), -MusicLDM is a text-to-music _latent diffusion model (LDM)_ that learns continuous audio representations from [CLAP](https://huggingface.co/docs/transformers/main/model_doc/clap) -latents. - -MusicLDM is trained on a corpus of 466 hours of music data. Beat-synchronous data augmentation strategies are applied to the music samples, both in the time domain and in the latent space. Using beat-synchronous data augmentation strategies encourages the model to interpolate between the training samples, but stay within the domain of the training data. The result is generated music that is more diverse while staying faithful to the corresponding style. - -The abstract of the paper is the following: - -*Diffusion models have shown promising results in cross-modal generation tasks, including text-to-image and text-to-audio generation. However, generating music, as a special type of audio, presents unique challenges due to limited availability of music data and sensitive issues related to copyright and plagiarism. In this paper, to tackle these challenges, we first construct a state-of-the-art text-to-music model, MusicLDM, that adapts Stable Diffusion and AudioLDM architectures to the music domain. We achieve this by retraining the contrastive language-audio pretraining model (CLAP) and the Hifi-GAN vocoder, as components of MusicLDM, on a collection of music data samples. Then, to address the limitations of training data and to avoid plagiarism, we leverage a beat tracking model and propose two different mixup strategies for data augmentation: beat-synchronous audio mixup and beat-synchronous latent mixup, which recombine training audio directly or via a latent embeddings space, respectively. Such mixup strategies encourage the model to interpolate between musical training samples and generate new music within the convex hull of the training data, making the generated music more diverse while still staying faithful to the corresponding style. In addition to popular evaluation metrics, we design several new evaluation metrics based on CLAP score to demonstrate that our proposed MusicLDM and beat-synchronous mixup strategies improve both the quality and novelty of generated music, as well as the correspondence between input text and generated music.* - -This pipeline was contributed by [sanchit-gandhi](https://huggingface.co/sanchit-gandhi). 
- -## Tips - -When constructing a prompt, keep in mind: - -* Descriptive prompt inputs work best; use adjectives to describe the sound (for example, "high quality" or "clear") and make the prompt context specific where possible (e.g. "melodic techno with a fast beat and synths" works better than "techno"). -* Using a *negative prompt* can significantly improve the quality of the generated audio. Try using a negative prompt of "low quality, average quality". - -During inference: - -* The _quality_ of the generated audio sample can be controlled by the `num_inference_steps` argument; higher steps give higher quality audio at the expense of slower inference. -* Multiple waveforms can be generated in one go: set `num_waveforms_per_prompt` to a value greater than 1 to enable. Automatic scoring will be performed between the generated waveforms and prompt text, and the audios ranked from best to worst accordingly. -* The _length_ of the generated audio sample can be controlled by varying the `audio_length_in_s` argument. - - - -Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. - - - -## MusicLDMPipeline -[[autodoc]] MusicLDMPipeline - - all - - __call__ diff --git a/docs/source/en/api/pipelines/paradigms.md b/docs/source/en/api/pipelines/paradigms.md deleted file mode 100644 index ca2fedc796df..000000000000 --- a/docs/source/en/api/pipelines/paradigms.md +++ /dev/null @@ -1,51 +0,0 @@ - - -# Parallel Sampling of Diffusion Models - -[Parallel Sampling of Diffusion Models](https://huggingface.co/papers/2305.16317) is by Andy Shih, Suneel Belkhale, Stefano Ermon, Dorsa Sadigh, Nima Anari. - -The abstract from the paper is: - -*Diffusion models are powerful generative models but suffer from slow sampling, often taking 1000 sequential denoising steps for one sample. As a result, considerable efforts have been directed toward reducing the number of denoising steps, but these methods hurt sample quality. Instead of reducing the number of denoising steps (trading quality for speed), in this paper we explore an orthogonal approach: can we run the denoising steps in parallel (trading compute for speed)? In spite of the sequential nature of the denoising steps, we show that surprisingly it is possible to parallelize sampling via Picard iterations, by guessing the solution of future denoising steps and iteratively refining until convergence. With this insight, we present ParaDiGMS, a novel method to accelerate the sampling of pretrained diffusion models by denoising multiple steps in parallel. ParaDiGMS is the first diffusion sampling method that enables trading compute for speed and is even compatible with existing fast sampling techniques such as DDIM and DPMSolver. Using ParaDiGMS, we improve sampling speed by 2-4x across a range of robotics and image generation models, giving state-of-the-art sampling speeds of 0.2s on 100-step DiffusionPolicy and 14.6s on 1000-step StableDiffusion-v2 with no measurable degradation of task reward, FID score, or CLIP score.* - -The original codebase can be found at [AndyShih12/paradigms](https://github.com/AndyShih12/paradigms), and the pipeline was contributed by [AndyShih12](https://github.com/AndyShih12). 
❤️ - -## Tips - -This pipeline improves sampling speed by running denoising steps in parallel, at the cost of increased total FLOPs. -Therefore, it is better to call this pipeline when running on multiple GPUs. Otherwise, without enough GPU bandwidth -sampling may be even slower than sequential sampling. - -The two parameters to play with are `parallel` (batch size) and `tolerance`. -- If it fits in memory, for a 1000-step DDPM you can aim for a batch size of around 100 (for example, 8 GPUs and `batch_per_device=12` to get `parallel=96`). A higher batch size may not fit in memory, and lower batch size gives less parallelism. -- For tolerance, using a higher tolerance may get better speedups but can risk sample quality degradation. If there is quality degradation with the default tolerance, then use a lower tolerance like `0.001`. - -For a 1000-step DDPM on 8 A100 GPUs, you can expect around a 3x speedup from [`StableDiffusionParadigmsPipeline`] compared to the [`StableDiffusionPipeline`] -by setting `parallel=80` and `tolerance=0.1`. - -🤗 Diffusers offers [distributed inference support](../../training/distributed_inference) for generating multiple prompts -in parallel on multiple GPUs. But [`StableDiffusionParadigmsPipeline`] is designed for speeding up sampling of a single prompt by using multiple GPUs. - - - -Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. - - - -## StableDiffusionParadigmsPipeline -[[autodoc]] StableDiffusionParadigmsPipeline - - __call__ - - all - -## StableDiffusionPipelineOutput -[[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput diff --git a/docs/source/en/api/pipelines/pix2pix_zero.md b/docs/source/en/api/pipelines/pix2pix_zero.md deleted file mode 100644 index 6d7b9fb31471..000000000000 --- a/docs/source/en/api/pipelines/pix2pix_zero.md +++ /dev/null @@ -1,289 +0,0 @@ - - -# Pix2Pix Zero - -[Zero-shot Image-to-Image Translation](https://huggingface.co/papers/2302.03027) is by Gaurav Parmar, Krishna Kumar Singh, Richard Zhang, Yijun Li, Jingwan Lu, and Jun-Yan Zhu. - -The abstract from the paper is: - -*Large-scale text-to-image generative models have shown their remarkable ability to synthesize diverse and high-quality images. However, it is still challenging to directly apply these models for editing real images for two reasons. First, it is hard for users to come up with a perfect text prompt that accurately describes every visual detail in the input image. Second, while existing models can introduce desirable changes in certain regions, they often dramatically alter the input content and introduce unexpected changes in unwanted regions. In this work, we propose pix2pix-zero, an image-to-image translation method that can preserve the content of the original image without manual prompting. We first automatically discover editing directions that reflect desired edits in the text embedding space. To preserve the general content structure after editing, we further propose cross-attention guidance, which aims to retain the cross-attention maps of the input image throughout the diffusion process. In addition, our method does not need additional training for these edits and can directly use the existing pre-trained text-to-image diffusion model. 
We conduct extensive experiments and show that our method outperforms existing and concurrent works for both real and synthetic image editing.* - -You can find additional information about Pix2Pix Zero on the [project page](https://pix2pixzero.github.io/), [original codebase](https://github.com/pix2pixzero/pix2pix-zero), and try it out in a [demo](https://huggingface.co/spaces/pix2pix-zero-library/pix2pix-zero-demo). - -## Tips - -* The pipeline can be conditioned on real input images. Check out the code examples below to know more. -* The pipeline exposes two arguments namely `source_embeds` and `target_embeds` -that let you control the direction of the semantic edits in the final image to be generated. Let's say, -you wanted to translate from "cat" to "dog". In this case, the edit direction will be "cat -> dog". To reflect -this in the pipeline, you simply have to set the embeddings related to the phrases including "cat" to -`source_embeds` and "dog" to `target_embeds`. Refer to the code example below for more details. -* When you're using this pipeline from a prompt, specify the _source_ concept in the prompt. Taking -the above example, a valid input prompt would be: "a high resolution painting of a **cat** in the style of van gogh". -* If you wanted to reverse the direction in the example above, i.e., "dog -> cat", then it's recommended to: - * Swap the `source_embeds` and `target_embeds`. - * Change the input prompt to include "dog". -* To learn more about how the source and target embeddings are generated, refer to the [original paper](https://arxiv.org/abs/2302.03027). Below, we also provide some directions on how to generate the embeddings. -* Note that the quality of the outputs generated with this pipeline is dependent on how good the `source_embeds` and `target_embeds` are. Please, refer to [this discussion](#generating-source-and-target-embeddings) for some suggestions on the topic. 
- -## Available Pipelines: - -| Pipeline | Tasks | Demo -|---|---|:---:| -| [StableDiffusionPix2PixZeroPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_pix2pix_zero.py) | *Text-Based Image Editing* | [🤗 Space](https://huggingface.co/spaces/pix2pix-zero-library/pix2pix-zero-demo) | - - - -## Usage example - -### Based on an image generated with the input prompt - -```python -import requests -import torch - -from diffusers import DDIMScheduler, StableDiffusionPix2PixZeroPipeline - - -def download(embedding_url, local_filepath): - r = requests.get(embedding_url) - with open(local_filepath, "wb") as f: - f.write(r.content) - - -model_ckpt = "CompVis/stable-diffusion-v1-4" -pipeline = StableDiffusionPix2PixZeroPipeline.from_pretrained( - model_ckpt, conditions_input_image=False, torch_dtype=torch.float16 -) -pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config) -pipeline.to("cuda") - -prompt = "a high resolution painting of a cat in the style of van gogh" -src_embs_url = "https://github.com/pix2pixzero/pix2pix-zero/raw/main/assets/embeddings_sd_1.4/cat.pt" -target_embs_url = "https://github.com/pix2pixzero/pix2pix-zero/raw/main/assets/embeddings_sd_1.4/dog.pt" - -for url in [src_embs_url, target_embs_url]: - download(url, url.split("/")[-1]) - -src_embeds = torch.load(src_embs_url.split("/")[-1]) -target_embeds = torch.load(target_embs_url.split("/")[-1]) - -image = pipeline( - prompt, - source_embeds=src_embeds, - target_embeds=target_embeds, - num_inference_steps=50, - cross_attention_guidance_amount=0.15, -).images[0] -image -``` - -### Based on an input image - -When the pipeline is conditioned on an input image, we first obtain an inverted -noise from it using a `DDIMInverseScheduler` with the help of a generated caption. Then the inverted noise is used to start the generation process. 
- -First, let's load our pipeline: - -```py -import torch -from transformers import BlipForConditionalGeneration, BlipProcessor -from diffusers import DDIMScheduler, DDIMInverseScheduler, StableDiffusionPix2PixZeroPipeline - -captioner_id = "Salesforce/blip-image-captioning-base" -processor = BlipProcessor.from_pretrained(captioner_id) -model = BlipForConditionalGeneration.from_pretrained(captioner_id, torch_dtype=torch.float16, low_cpu_mem_usage=True) - -sd_model_ckpt = "CompVis/stable-diffusion-v1-4" -pipeline = StableDiffusionPix2PixZeroPipeline.from_pretrained( - sd_model_ckpt, - caption_generator=model, - caption_processor=processor, - torch_dtype=torch.float16, - safety_checker=None, -) -pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config) -pipeline.inverse_scheduler = DDIMInverseScheduler.from_config(pipeline.scheduler.config) -pipeline.enable_model_cpu_offload() -``` - -Then, we load an input image for conditioning and obtain a suitable caption for it: - -```py -from diffusers.utils import load_image - -img_url = "https://github.com/pix2pixzero/pix2pix-zero/raw/main/assets/test_images/cats/cat_6.png" -raw_image = load_image(img_url).resize((512, 512)) -caption = pipeline.generate_caption(raw_image) -caption -``` - -Then we employ the generated caption and the input image to get the inverted noise: - -```py -generator = torch.manual_seed(0) -inv_latents = pipeline.invert(caption, image=raw_image, generator=generator).latents -``` - -Now, generate the image with edit directions: - -```py -# See the "Generating source and target embeddings" section below to -# automate the generation of these captions with a pre-trained model like Flan-T5 as explained below. -source_prompts = ["a cat sitting on the street", "a cat playing in the field", "a face of a cat"] -target_prompts = ["a dog sitting on the street", "a dog playing in the field", "a face of a dog"] - -source_embeds = pipeline.get_embeds(source_prompts, batch_size=2) -target_embeds = pipeline.get_embeds(target_prompts, batch_size=2) - - -image = pipeline( - caption, - source_embeds=source_embeds, - target_embeds=target_embeds, - num_inference_steps=50, - cross_attention_guidance_amount=0.15, - generator=generator, - latents=inv_latents, - negative_prompt=caption, -).images[0] -image -``` - -## Generating source and target embeddings - -The authors originally used the [GPT-3 API](https://openai.com/api/) to generate the source and target captions for discovering -edit directions. However, we can also leverage open source and public models for the same purpose. -Below, we provide an end-to-end example with the [Flan-T5](https://huggingface.co/docs/transformers/model_doc/flan-t5) model -for generating captions and [CLIP](https://huggingface.co/docs/transformers/model_doc/clip) for -computing embeddings on the generated captions. - -**1. Load the generation model**: - -```py -import torch -from transformers import AutoTokenizer, T5ForConditionalGeneration - -tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-xl") -model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-xl", device_map="auto", torch_dtype=torch.float16) -``` - -**2. Construct a starting prompt**: - -```py -source_concept = "cat" -target_concept = "dog" - -source_text = f"Provide a caption for images containing a {source_concept}. The captions should be in English and should be no longer than 150 characters." - -target_text = f"Provide a caption for images containing a {target_concept}. The captions should be in English and should be no longer than 150 characters."
" -"The captions should be in English and should be no longer than 150 characters." -``` - -Here, we're interested in the "cat -> dog" direction. - -**3. Generate captions**: - -We can use a utility like so for this purpose. - -```py -def generate_captions(input_prompt): - input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids.to("cuda") - - outputs = model.generate( - input_ids, temperature=0.8, num_return_sequences=16, do_sample=True, max_new_tokens=128, top_k=10 - ) - return tokenizer.batch_decode(outputs, skip_special_tokens=True) -``` - -And then we just call it to generate our captions: - -```py -source_captions = generate_captions(source_text) -target_captions = generate_captions(target_concept) -print(source_captions, target_captions, sep='\n') -``` - -We encourage you to play around with the different parameters supported by the -`generate()` method ([documentation](https://huggingface.co/docs/transformers/main/en/main_classes/text_generation#transformers.generation_tf_utils.TFGenerationMixin.generate)) for the generation quality you are looking for. - -**4. Load the embedding model**: - -Here, we need to use the same text encoder model used by the subsequent Stable Diffusion model. - -```py -from diffusers import StableDiffusionPix2PixZeroPipeline - -pipeline = StableDiffusionPix2PixZeroPipeline.from_pretrained( - "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16 -) -pipeline = pipeline.to("cuda") -tokenizer = pipeline.tokenizer -text_encoder = pipeline.text_encoder -``` - -**5. Compute embeddings**: - -```py -import torch - -def embed_captions(sentences, tokenizer, text_encoder, device="cuda"): - with torch.no_grad(): - embeddings = [] - for sent in sentences: - text_inputs = tokenizer( - sent, - padding="max_length", - max_length=tokenizer.model_max_length, - truncation=True, - return_tensors="pt", - ) - text_input_ids = text_inputs.input_ids - prompt_embeds = text_encoder(text_input_ids.to(device), attention_mask=None)[0] - embeddings.append(prompt_embeds) - return torch.concatenate(embeddings, dim=0).mean(dim=0).unsqueeze(0) - -source_embeddings = embed_captions(source_captions, tokenizer, text_encoder) -target_embeddings = embed_captions(target_captions, tokenizer, text_encoder) -``` - -And you're done! [Here](https://colab.research.google.com/drive/1tz2C1EdfZYAPlzXXbTnf-5PRBiR8_R1F?usp=sharing) is a Colab Notebook that you can use to interact with the entire process. - -Now, you can use these embeddings directly while calling the pipeline: - -```py -from diffusers import DDIMScheduler - -pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config) - -image = pipeline( - prompt, - source_embeds=source_embeddings, - target_embeds=target_embeddings, - num_inference_steps=50, - cross_attention_guidance_amount=0.15, -).images[0] -image -``` - - - -Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. 
- - - -## StableDiffusionPix2PixZeroPipeline -[[autodoc]] StableDiffusionPix2PixZeroPipeline - - __call__ - - all diff --git a/docs/source/en/api/pipelines/pndm.md b/docs/source/en/api/pipelines/pndm.md deleted file mode 100644 index 162e7934dc22..000000000000 --- a/docs/source/en/api/pipelines/pndm.md +++ /dev/null @@ -1,35 +0,0 @@ - - -# PNDM - -[Pseudo Numerical Methods for Diffusion Models on Manifolds](https://huggingface.co/papers/2202.09778) (PNDM) is by Luping Liu, Yi Ren, Zhijie Lin and Zhou Zhao. - -The abstract from the paper is: - -*Denoising Diffusion Probabilistic Models (DDPMs) can generate high-quality samples such as image and audio samples. However, DDPMs require hundreds to thousands of iterations to produce final samples. Several prior works have successfully accelerated DDPMs through adjusting the variance schedule (e.g., Improved Denoising Diffusion Probabilistic Models) or the denoising equation (e.g., Denoising Diffusion Implicit Models (DDIMs)). However, these acceleration methods cannot maintain the quality of samples and even introduce new noise at a high speedup rate, which limit their practicability. To accelerate the inference process while keeping the sample quality, we provide a fresh perspective that DDPMs should be treated as solving differential equations on manifolds. Under such a perspective, we propose pseudo numerical methods for diffusion models (PNDMs). Specifically, we figure out how to solve differential equations on manifolds and show that DDIMs are simple cases of pseudo numerical methods. We change several classical numerical methods to corresponding pseudo numerical methods and find that the pseudo linear multi-step method is the best in most situations. According to our experiments, by directly using pre-trained models on Cifar10, CelebA and LSUN, PNDMs can generate higher quality synthetic images with only 50 steps compared with 1000-step DDIMs (20x speedup), significantly outperform DDIMs with 250 steps (by around 0.4 in FID) and have good generalization on different variance schedules.* - -The original codebase can be found at [luping-liu/PNDM](https://github.com/luping-liu/PNDM). - - - -Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. - - - -## PNDMPipeline -[[autodoc]] PNDMPipeline - - all - - __call__ - -## ImagePipelineOutput -[[autodoc]] pipelines.ImagePipelineOutput diff --git a/docs/source/en/api/pipelines/repaint.md b/docs/source/en/api/pipelines/repaint.md deleted file mode 100644 index 1be69a3f9a46..000000000000 --- a/docs/source/en/api/pipelines/repaint.md +++ /dev/null @@ -1,37 +0,0 @@ - - -# RePaint - -[RePaint: Inpainting using Denoising Diffusion Probabilistic Models](https://huggingface.co/papers/2201.09865) is by Andreas Lugmayr, Martin Danelljan, Andres Romero, Fisher Yu, Radu Timofte, Luc Van Gool. - -The abstract from the paper is: - -*Free-form inpainting is the task of adding new content to an image in the regions specified by an arbitrary binary mask. Most existing approaches train for a certain distribution of masks, which limits their generalization capabilities to unseen mask types. 
Furthermore, training with pixel-wise and perceptual losses often leads to simple textural extensions towards the missing areas instead of semantically meaningful generation. In this work, we propose RePaint: A Denoising Diffusion Probabilistic Model (DDPM) based inpainting approach that is applicable to even extreme masks. We employ a pretrained unconditional DDPM as the generative prior. To condition the generation process, we only alter the reverse diffusion iterations by sampling the unmasked regions using the given image information. Since this technique does not modify or condition the original DDPM network itself, the model produces high-quality and diverse output images for any inpainting form. We validate our method for both faces and general-purpose image inpainting using standard and extreme masks. -RePaint outperforms state-of-the-art Autoregressive, and GAN approaches for at least five out of six mask distributions.* - -The original codebase can be found at [andreas128/RePaint](https://github.com/andreas128/RePaint). - - - -Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. - - - - -## RePaintPipeline -[[autodoc]] RePaintPipeline - - all - - __call__ - -## ImagePipelineOutput -[[autodoc]] pipelines.ImagePipelineOutput diff --git a/docs/source/en/api/pipelines/score_sde_ve.md b/docs/source/en/api/pipelines/score_sde_ve.md deleted file mode 100644 index cc9c8574f92d..000000000000 --- a/docs/source/en/api/pipelines/score_sde_ve.md +++ /dev/null @@ -1,35 +0,0 @@ - - -# Score SDE VE - -[Score-Based Generative Modeling through Stochastic Differential Equations](https://huggingface.co/papers/2011.13456) (Score SDE) is by Yang Song, Jascha Sohl-Dickstein, Diederik P. Kingma, Abhishek Kumar, Stefano Ermon and Ben Poole. This pipeline implements the variance expanding (VE) variant of the stochastic differential equation method. - -The abstract from the paper is: - -*Creating noise from data is easy; creating data from noise is generative modeling. We present a stochastic differential equation (SDE) that smoothly transforms a complex data distribution to a known prior distribution by slowly injecting noise, and a corresponding reverse-time SDE that transforms the prior distribution back into the data distribution by slowly removing the noise. Crucially, the reverse-time SDE depends only on the time-dependent gradient field (\aka, score) of the perturbed data distribution. By leveraging advances in score-based generative modeling, we can accurately estimate these scores with neural networks, and use numerical SDE solvers to generate samples. We show that this framework encapsulates previous approaches in score-based generative modeling and diffusion probabilistic modeling, allowing for new sampling procedures and new modeling capabilities. In particular, we introduce a predictor-corrector framework to correct errors in the evolution of the discretized reverse-time SDE. We also derive an equivalent neural ODE that samples from the same distribution as the SDE, but additionally enables exact likelihood computation, and improved sampling efficiency. 
In addition, we provide a new way to solve inverse problems with score-based models, as demonstrated with experiments on class-conditional generation, image inpainting, and colorization. Combined with multiple architectural improvements, we achieve record-breaking performance for unconditional image generation on CIFAR-10 with an Inception score of 9.89 and FID of 2.20, a competitive likelihood of 2.99 bits/dim, and demonstrate high fidelity generation of 1024 x 1024 images for the first time from a score-based generative model.* - -The original codebase can be found at [yang-song/score_sde_pytorch](https://github.com/yang-song/score_sde_pytorch). - - - -Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. - - - -## ScoreSdeVePipeline -[[autodoc]] ScoreSdeVePipeline - - all - - __call__ - -## ImagePipelineOutput -[[autodoc]] pipelines.ImagePipelineOutput diff --git a/docs/source/en/api/pipelines/spectrogram_diffusion.md b/docs/source/en/api/pipelines/spectrogram_diffusion.md deleted file mode 100644 index cc9ff3e45646..000000000000 --- a/docs/source/en/api/pipelines/spectrogram_diffusion.md +++ /dev/null @@ -1,37 +0,0 @@ - - -# Spectrogram Diffusion - -[Spectrogram Diffusion](https://huggingface.co/papers/2206.05408) is by Curtis Hawthorne, Ian Simon, Adam Roberts, Neil Zeghidour, Josh Gardner, Ethan Manilow, and Jesse Engel. - -*An ideal music synthesizer should be both interactive and expressive, generating high-fidelity audio in realtime for arbitrary combinations of instruments and notes. Recent neural synthesizers have exhibited a tradeoff between domain-specific models that offer detailed control of only specific instruments, or raw waveform models that can train on any music but with minimal control and slow generation. In this work, we focus on a middle ground of neural synthesizers that can generate audio from MIDI sequences with arbitrary combinations of instruments in realtime. This enables training on a wide range of transcription datasets with a single model, which in turn offers note-level control of composition and instrumentation across a wide range of instruments. We use a simple two-stage process: MIDI to spectrograms with an encoder-decoder Transformer, then spectrograms to audio with a generative adversarial network (GAN) spectrogram inverter. We compare training the decoder as an autoregressive model and as a Denoising Diffusion Probabilistic Model (DDPM) and find that the DDPM approach is superior both qualitatively and as measured by audio reconstruction and Fréchet distance metrics. Given the interactivity and generality of this approach, we find this to be a promising first step towards interactive and expressive neural synthesis for arbitrary combinations of instruments and notes.* - -The original codebase can be found at [magenta/music-spectrogram-diffusion](https://github.com/magenta/music-spectrogram-diffusion). - -![img](https://storage.googleapis.com/music-synthesis-with-spectrogram-diffusion/architecture.png) - -As depicted above the model takes as input a MIDI file and tokenizes it into a sequence of 5 second intervals. 
Each tokenized interval then together with positional encodings is passed through the Note Encoder and its representation is concatenated with the previous window's generated spectrogram representation obtained via the Context Encoder. For the initial 5 second window this is set to zero. The resulting context is then used as conditioning to sample the denoised Spectrogram from the MIDI window and we concatenate this spectrogram to the final output as well as use it for the context of the next MIDI window. The process repeats till we have gone over all the MIDI inputs. Finally a MelGAN decoder converts the potentially long spectrogram to audio which is the final result of this pipeline. - - - -Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. - - - -## SpectrogramDiffusionPipeline -[[autodoc]] SpectrogramDiffusionPipeline - - all - - __call__ - -## AudioPipelineOutput -[[autodoc]] pipelines.AudioPipelineOutput diff --git a/docs/source/en/api/pipelines/stochastic_karras_ve.md b/docs/source/en/api/pipelines/stochastic_karras_ve.md deleted file mode 100644 index 0e3f1a5b8333..000000000000 --- a/docs/source/en/api/pipelines/stochastic_karras_ve.md +++ /dev/null @@ -1,33 +0,0 @@ - - -# Stochastic Karras VE - -[Elucidating the Design Space of Diffusion-Based Generative Models](https://huggingface.co/papers/2206.00364) is by Tero Karras, Miika Aittala, Timo Aila and Samuli Laine. This pipeline implements the stochastic sampling tailored to variance expanding (VE) models. - -The abstract from the paper: - -*We argue that the theory and practice of diffusion-based generative models are currently unnecessarily convoluted and seek to remedy the situation by presenting a design space that clearly separates the concrete design choices. This lets us identify several changes to both the sampling and training processes, as well as preconditioning of the score networks. Together, our improvements yield new state-of-the-art FID of 1.79 for CIFAR-10 in a class-conditional setting and 1.97 in an unconditional setting, with much faster sampling (35 network evaluations per image) than prior designs. To further demonstrate their modular nature, we show that our design changes dramatically improve both the efficiency and quality obtainable with pre-trained score networks from previous work, including improving the FID of a previously trained ImageNet-64 model from 2.07 to near-SOTA 1.55, and after re-training with our proposed improvements to a new SOTA of 1.36.* - - - -Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. 
- - - -## KarrasVePipeline -[[autodoc]] KarrasVePipeline - - all - - __call__ - -## ImagePipelineOutput -[[autodoc]] pipelines.ImagePipelineOutput diff --git a/docs/source/en/api/pipelines/versatile_diffusion.md b/docs/source/en/api/pipelines/versatile_diffusion.md deleted file mode 100644 index 953f4822486a..000000000000 --- a/docs/source/en/api/pipelines/versatile_diffusion.md +++ /dev/null @@ -1,54 +0,0 @@ - - -# Versatile Diffusion - -Versatile Diffusion was proposed in [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://huggingface.co/papers/2211.08332) by Xingqian Xu, Zhangyang Wang, Eric Zhang, Kai Wang, Humphrey Shi. - -The abstract from the paper is: - -*Recent advances in diffusion models have set an impressive milestone in many generation tasks, and trending works such as DALL-E2, Imagen, and Stable Diffusion have attracted great interest. Despite the rapid landscape changes, recent new approaches focus on extensions and performance rather than capacity, thus requiring separate models for separate tasks. In this work, we expand the existing single-flow diffusion pipeline into a multi-task multimodal network, dubbed Versatile Diffusion (VD), that handles multiple flows of text-to-image, image-to-text, and variations in one unified model. The pipeline design of VD instantiates a unified multi-flow diffusion framework, consisting of sharable and swappable layer modules that enable the crossmodal generality beyond images and text. Through extensive experiments, we demonstrate that VD successfully achieves the following: a) VD outperforms the baseline approaches and handles all its base tasks with competitive quality; b) VD enables novel extensions such as disentanglement of style and semantics, dual- and multi-context blending, etc.; c) The success of our multi-flow multimodal framework over images and text may inspire further diffusion-based universal AI research.* - -## Tips - -You can load the more memory intensive "all-in-one" [`VersatileDiffusionPipeline`] that supports all the tasks or use the individual pipelines which are more memory efficient. - -| **Pipeline** | **Supported tasks** | -|------------------------------------------------------|-----------------------------------| -| [`VersatileDiffusionPipeline`] | all of the below | -| [`VersatileDiffusionTextToImagePipeline`] | text-to-image | -| [`VersatileDiffusionImageVariationPipeline`] | image variation | -| [`VersatileDiffusionDualGuidedPipeline`] | image-text dual guided generation | - - - -Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. 
- - - -## VersatileDiffusionPipeline -[[autodoc]] VersatileDiffusionPipeline - -## VersatileDiffusionTextToImagePipeline -[[autodoc]] VersatileDiffusionTextToImagePipeline - - all - - __call__ - -## VersatileDiffusionImageVariationPipeline -[[autodoc]] VersatileDiffusionImageVariationPipeline - - all - - __call__ - -## VersatileDiffusionDualGuidedPipeline -[[autodoc]] VersatileDiffusionDualGuidedPipeline - - all - - __call__ diff --git a/docs/source/en/api/pipelines/vq_diffusion.md b/docs/source/en/api/pipelines/vq_diffusion.md deleted file mode 100644 index f2b0db716123..000000000000 --- a/docs/source/en/api/pipelines/vq_diffusion.md +++ /dev/null @@ -1,35 +0,0 @@ - - -# VQ Diffusion - -[Vector Quantized Diffusion Model for Text-to-Image Synthesis](https://huggingface.co/papers/2111.14822) is by Shuyang Gu, Dong Chen, Jianmin Bao, Fang Wen, Bo Zhang, Dongdong Chen, Lu Yuan, Baining Guo. - -The abstract from the paper is: - -*We present the vector quantized diffusion (VQ-Diffusion) model for text-to-image generation. This method is based on a vector quantized variational autoencoder (VQ-VAE) whose latent space is modeled by a conditional variant of the recently developed Denoising Diffusion Probabilistic Model (DDPM). We find that this latent-space method is well-suited for text-to-image generation tasks because it not only eliminates the unidirectional bias with existing methods but also allows us to incorporate a mask-and-replace diffusion strategy to avoid the accumulation of errors, which is a serious problem with existing methods. Our experiments show that the VQ-Diffusion produces significantly better text-to-image generation results when compared with conventional autoregressive (AR) models with similar numbers of parameters. Compared with previous GAN-based text-to-image methods, our VQ-Diffusion can handle more complex scenes and improve the synthesized image quality by a large margin. Finally, we show that the image generation computation in our method can be made highly efficient by reparameterization. With traditional AR methods, the text-to-image generation time increases linearly with the output image resolution and hence is quite time consuming even for normal size images. The VQ-Diffusion allows us to achieve a better trade-off between quality and speed. Our experiments indicate that the VQ-Diffusion model with the reparameterization is fifteen times faster than traditional AR methods while achieving a better image quality.* - -The original codebase can be found at [microsoft/VQ-Diffusion](https://github.com/microsoft/VQ-Diffusion). - - - -Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. 
- - - -## VQDiffusionPipeline -[[autodoc]] VQDiffusionPipeline - - all - - __call__ - -## ImagePipelineOutput -[[autodoc]] pipelines.ImagePipelineOutput From 95b8a96630bfc1d9df423dd7e0c8363ed5c9506e Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Mon, 18 Dec 2023 11:31:18 +0100 Subject: [PATCH 12/17] clean --- docs/source/en/api/pipelines/audioldm.md | 50 ++++++++++++++++++++++++ 1 file changed, 50 insertions(+) create mode 100644 docs/source/en/api/pipelines/audioldm.md diff --git a/docs/source/en/api/pipelines/audioldm.md b/docs/source/en/api/pipelines/audioldm.md new file mode 100644 index 000000000000..43fb0f1a3bf4 --- /dev/null +++ b/docs/source/en/api/pipelines/audioldm.md @@ -0,0 +1,50 @@ + + +# AudioLDM + +AudioLDM was proposed in [AudioLDM: Text-to-Audio Generation with Latent Diffusion Models](https://huggingface.co/papers/2301.12503) by Haohe Liu et al. Inspired by [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/overview), AudioLDM +is a text-to-audio _latent diffusion model (LDM)_ that learns continuous audio representations from [CLAP](https://huggingface.co/docs/transformers/main/model_doc/clap) +latents. AudioLDM takes a text prompt as input and predicts the corresponding audio. It can generate text-conditional +sound effects, human speech and music. + +The abstract from the paper is: + +*Text-to-audio (TTA) system has recently gained attention for its ability to synthesize general audio based on text descriptions. However, previous studies in TTA have limited generation quality with high computational costs. In this study, we propose AudioLDM, a TTA system that is built on a latent space to learn the continuous audio representations from contrastive language-audio pretraining (CLAP) latents. The pretrained CLAP models enable us to train LDMs with audio embedding while providing text embedding as a condition during sampling. By learning the latent representations of audio signals and their compositions without modeling the cross-modal relationship, AudioLDM is advantageous in both generation quality and computational efficiency. Trained on AudioCaps with a single GPU, AudioLDM achieves state-of-the-art TTA performance measured by both objective and subjective metrics (e.g., frechet distance). Moreover, AudioLDM is the first TTA system that enables various text-guided audio manipulations (e.g., style transfer) in a zero-shot fashion. Our implementation and demos are available at [this https URL](https://audioldm.github.io/).* + +The original codebase can be found at [haoheliu/AudioLDM](https://github.com/haoheliu/AudioLDM). + +## Tips + +When constructing a prompt, keep in mind: + +* Descriptive prompt inputs work best; you can use adjectives to describe the sound (for example, "high quality" or "clear") and make the prompt context specific (for example, "water stream in a forest" instead of "stream"). +* It's best to use general terms like "cat" or "dog" instead of specific names or abstract objects the model may not be familiar with. + +During inference: + +* The _quality_ of the predicted audio sample can be controlled by the `num_inference_steps` argument; higher steps give higher quality audio at the expense of slower inference. +* The _length_ of the predicted audio sample can be controlled by varying the `audio_length_in_s` argument. 
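+
+A minimal sketch of how these arguments come together (the `cvssp/audioldm-s-full-v2` checkpoint, the prompt, and the step/length settings below are illustrative assumptions; substitute any other AudioLDM checkpoint or values you prefer):
+
+```py
+import torch
+from scipy.io import wavfile
+from diffusers import AudioLDMPipeline
+
+# Checkpoint name is an assumption for illustration; swap in your own AudioLDM weights.
+pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm-s-full-v2", torch_dtype=torch.float16)
+pipe = pipe.to("cuda")
+
+prompt = "Techno music with a strong, upbeat tempo and high melodic riffs"
+# More denoising steps trade speed for quality; audio_length_in_s sets the clip duration.
+audio = pipe(prompt, num_inference_steps=10, audio_length_in_s=5.0).audios[0]
+
+# AudioLDM generates waveforms at a 16 kHz sampling rate.
+wavfile.write("techno.wav", rate=16000, data=audio)
+```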
+ + + +Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. + + + +## AudioLDMPipeline +[[autodoc]] AudioLDMPipeline + - all + - __call__ + +## AudioPipelineOutput +[[autodoc]] pipelines.AudioPipelineOutput From 321e37adab940465161ff7b459511014b65055bd Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Mon, 18 Dec 2023 11:33:26 +0100 Subject: [PATCH 13/17] clean --- docs/source/en/api/pipelines/musicldm.md | 52 ++++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 docs/source/en/api/pipelines/musicldm.md diff --git a/docs/source/en/api/pipelines/musicldm.md b/docs/source/en/api/pipelines/musicldm.md new file mode 100644 index 000000000000..896f707c76d7 --- /dev/null +++ b/docs/source/en/api/pipelines/musicldm.md @@ -0,0 +1,52 @@ + + +# MusicLDM + +MusicLDM was proposed in [MusicLDM: Enhancing Novelty in Text-to-Music Generation Using Beat-Synchronous Mixup Strategies](https://huggingface.co/papers/2308.01546) by Ke Chen, Yusong Wu, Haohe Liu, Marianna Nezhurina, Taylor Berg-Kirkpatrick, Shlomo Dubnov. +MusicLDM takes a text prompt as input and predicts the corresponding music sample. + +Inspired by [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/overview) and [AudioLDM](https://huggingface.co/docs/diffusers/api/pipelines/audioldm), +MusicLDM is a text-to-music _latent diffusion model (LDM)_ that learns continuous audio representations from [CLAP](https://huggingface.co/docs/transformers/main/model_doc/clap) +latents. + +MusicLDM is trained on a corpus of 466 hours of music data. Beat-synchronous data augmentation strategies are applied to the music samples, both in the time domain and in the latent space. Using beat-synchronous data augmentation strategies encourages the model to interpolate between the training samples, but stay within the domain of the training data. The result is generated music that is more diverse while staying faithful to the corresponding style. + +The abstract of the paper is the following: + +*Diffusion models have shown promising results in cross-modal generation tasks, including text-to-image and text-to-audio generation. However, generating music, as a special type of audio, presents unique challenges due to limited availability of music data and sensitive issues related to copyright and plagiarism. In this paper, to tackle these challenges, we first construct a state-of-the-art text-to-music model, MusicLDM, that adapts Stable Diffusion and AudioLDM architectures to the music domain. We achieve this by retraining the contrastive language-audio pretraining model (CLAP) and the Hifi-GAN vocoder, as components of MusicLDM, on a collection of music data samples. Then, to address the limitations of training data and to avoid plagiarism, we leverage a beat tracking model and propose two different mixup strategies for data augmentation: beat-synchronous audio mixup and beat-synchronous latent mixup, which recombine training audio directly or via a latent embeddings space, respectively. 
Such mixup strategies encourage the model to interpolate between musical training samples and generate new music within the convex hull of the training data, making the generated music more diverse while still staying faithful to the corresponding style. In addition to popular evaluation metrics, we design several new evaluation metrics based on CLAP score to demonstrate that our proposed MusicLDM and beat-synchronous mixup strategies improve both the quality and novelty of generated music, as well as the correspondence between input text and generated music.* + +This pipeline was contributed by [sanchit-gandhi](https://huggingface.co/sanchit-gandhi). + +## Tips + +When constructing a prompt, keep in mind: + +* Descriptive prompt inputs work best; use adjectives to describe the sound (for example, "high quality" or "clear") and make the prompt context specific where possible (e.g. "melodic techno with a fast beat and synths" works better than "techno"). +* Using a *negative prompt* can significantly improve the quality of the generated audio. Try using a negative prompt of "low quality, average quality". + +During inference: + +* The _quality_ of the generated audio sample can be controlled by the `num_inference_steps` argument; higher steps give higher quality audio at the expense of slower inference. +* Multiple waveforms can be generated in one go: set `num_waveforms_per_prompt` to a value greater than 1 to enable. Automatic scoring will be performed between the generated waveforms and prompt text, and the audios ranked from best to worst accordingly. +* The _length_ of the generated audio sample can be controlled by varying the `audio_length_in_s` argument. + + + +Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. 
+ + + +## MusicLDMPipeline +[[autodoc]] MusicLDMPipeline + - all + - __call__ From 6bfdf131252356d2b01a4b4a8b064dc884d3970a Mon Sep 17 00:00:00 2001 From: Dhruv Nair Date: Mon, 18 Dec 2023 12:19:45 +0000 Subject: [PATCH 14/17] clean up --- src/diffusers/pipelines/deprecated/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/diffusers/pipelines/deprecated/__init__.py b/src/diffusers/pipelines/deprecated/__init__.py index 2a86421dee8c..9936323170ad 100644 --- a/src/diffusers/pipelines/deprecated/__init__.py +++ b/src/diffusers/pipelines/deprecated/__init__.py @@ -109,7 +109,7 @@ StableDiffusionInpaintPipelineLegacy, StableDiffusionModelEditingPipeline, StableDiffusionParadigmsPipeline, - StableDiffusionPix2PixZero, + StableDiffusionPix2PixZeroPipeline, ) from .stochastic_karras_ve import KarrasVePipeline from .versatile_diffusion import ( From 3892ce945358e9fcfc92931280a1fbffc83820fb Mon Sep 17 00:00:00 2001 From: Dhruv Nair Date: Mon, 18 Dec 2023 13:04:29 +0000 Subject: [PATCH 15/17] clean up --- src/diffusers/pipelines/__init__.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py index 38c93d84b525..e7d34b623711 100644 --- a/src/diffusers/pipelines/__init__.py +++ b/src/diffusers/pipelines/__init__.py @@ -317,12 +317,7 @@ from .dance_diffusion import DanceDiffusionPipeline from .ddim import DDIMPipeline from .ddpm import DDPMPipeline - from .deprecated import ( - KarrasVePipeline, - PNDMPipeline, - RePaintPipeline, - ScoreSdeVePipeline, - ) + from .deprecated import KarrasVePipeline, LDMPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline from .dit import DiTPipeline from .latent_diffusion import LDMSuperResolutionPipeline from .pipeline_utils import ( From fef19188eea3bcc59a131ae77470d0d6750665bb Mon Sep 17 00:00:00 2001 From: Dhruv Nair Date: Mon, 18 Dec 2023 13:15:36 +0000 Subject: [PATCH 16/17] clean up toctree --- docs/source/en/_toctree.yml | 26 -------------------------- 1 file changed, 26 deletions(-) diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index eab726d0b616..79b726c55489 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -244,14 +244,10 @@ - sections: - local: api/pipelines/overview title: Overview - - local: api/pipelines/alt_diffusion - title: AltDiffusion - local: api/pipelines/animatediff title: AnimateDiff - local: api/pipelines/attend_and_excite title: Attend-and-Excite - - local: api/pipelines/audio_diffusion - title: Audio Diffusion - local: api/pipelines/audioldm title: AudioLDM - local: api/pipelines/audioldm2 @@ -270,8 +266,6 @@ title: ControlNet-XS - local: api/pipelines/controlnetxs_sdxl title: ControlNet-XS with Stable Diffusion XL - - local: api/pipelines/cycle_diffusion - title: Cycle Diffusion - local: api/pipelines/dance_diffusion title: Dance Diffusion - local: api/pipelines/ddim @@ -302,16 +296,8 @@ title: MusicLDM - local: api/pipelines/paint_by_example title: Paint by Example - - local: api/pipelines/paradigms - title: Parallel Sampling of Diffusion Models - - local: api/pipelines/pix2pix_zero - title: Pix2Pix Zero - local: api/pipelines/pixart title: PixArt-α - - local: api/pipelines/pndm - title: PNDM - - local: api/pipelines/repaint - title: RePaint - local: api/pipelines/score_sde_ve title: Score SDE VE - local: api/pipelines/self_attention_guidance @@ -320,8 +306,6 @@ title: Semantic Guidance - local: api/pipelines/shap_e title: Shap-E - - local: 
api/pipelines/spectrogram_diffusion - title: Spectrogram Diffusion - sections: - local: api/pipelines/stable_diffusion/overview title: Overview @@ -356,26 +340,16 @@ title: Stable Diffusion - local: api/pipelines/stable_unclip title: Stable unCLIP - - local: api/pipelines/stochastic_karras_ve - title: Stochastic Karras VE - - local: api/pipelines/model_editing - title: Text-to-image model editing - local: api/pipelines/text_to_video title: Text-to-video - local: api/pipelines/text_to_video_zero title: Text2Video-Zero - local: api/pipelines/unclip title: unCLIP - - local: api/pipelines/latent_diffusion_uncond - title: Unconditional Latent Diffusion - local: api/pipelines/unidiffuser title: UniDiffuser - local: api/pipelines/value_guided_sampling title: Value-guided sampling - - local: api/pipelines/versatile_diffusion - title: Versatile Diffusion - - local: api/pipelines/vq_diffusion - title: VQ Diffusion - local: api/pipelines/wuerstchen title: Wuerstchen title: Pipelines From c86b766d6b9584ca1afa7e35f2017848b81a85f5 Mon Sep 17 00:00:00 2001 From: Dhruv Nair Date: Mon, 18 Dec 2023 13:21:22 +0000 Subject: [PATCH 17/17] clean up --- docs/source/en/_toctree.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 79b726c55489..62588bf4abb8 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -298,8 +298,6 @@ title: Paint by Example - local: api/pipelines/pixart title: PixArt-α - - local: api/pipelines/score_sde_ve - title: Score SDE VE - local: api/pipelines/self_attention_guidance title: Self-Attention Guidance - local: api/pipelines/semantic_stable_diffusion