
Commit 6e05c21 (parent: 3be4b01)

remove the deprecated `upcast_vae` helper and the duplicated guidance/property definitions from the SDXL img2img, inpaint, and instruct-pix2pix pipelines

File tree: 3 files changed, 0 additions (+), 167 deletions (-)

src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py

Lines changed: 0 additions & 79 deletions

@@ -464,85 +464,6 @@ def _get_add_time_ids(

         return add_time_ids, add_neg_time_ids

-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
-    def upcast_vae(self):
-        deprecate(
-            "upcast_vae",
-            "1.0.0",
-            "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`. For more details, please refer to: https://github.com/huggingface/diffusers/pull/12619#issue-3606633695.",
-        )
-        self.vae.to(dtype=torch.float32)
-
-    # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
-    def get_guidance_scale_embedding(
-        self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32
-    ) -> torch.Tensor:
-        """
-        See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
-
-        Args:
-            w (`torch.Tensor`):
-                Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings.
-            embedding_dim (`int`, *optional*, defaults to 512):
-                Dimension of the embeddings to generate.
-            dtype (`torch.dtype`, *optional*, defaults to `torch.float32`):
-                Data type of the generated embeddings.
-
-        Returns:
-            `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`.
-        """
-        assert len(w.shape) == 1
-        w = w * 1000.0
-
-        half_dim = embedding_dim // 2
-        emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
-        emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
-        emb = w.to(dtype)[:, None] * emb[None, :]
-        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
-        if embedding_dim % 2 == 1:  # zero pad
-            emb = torch.nn.functional.pad(emb, (0, 1))
-        assert emb.shape == (w.shape[0], embedding_dim)
-        return emb
-
-    @property
-    def guidance_scale(self):
-        return self._guidance_scale
-
-    @property
-    def guidance_rescale(self):
-        return self._guidance_rescale
-
-    @property
-    def clip_skip(self):
-        return self._clip_skip
-
-    # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
-    # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
-    # corresponds to doing no classifier free guidance.
-    @property
-    def do_classifier_free_guidance(self):
-        return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
-
-    @property
-    def cross_attention_kwargs(self):
-        return self._cross_attention_kwargs
-
-    @property
-    def denoising_end(self):
-        return self._denoising_end
-
-    @property
-    def denoising_start(self):
-        return self._denoising_start
-
-    @property
-    def num_timesteps(self):
-        return self._num_timesteps
-
-    @property
-    def interrupt(self):
-        return self._interrupt
-
     @torch.no_grad()
     @replace_example_docstring(EXAMPLE_DOC_STRING)
     def __call__(
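Migration note: callers that used the removed `upcast_vae()` (typically to decode latents in float32 when the fp16 SDXL VAE overflows) can follow the replacement named in the deprecation message above. A minimal sketch, assuming the public diffusers SDXL img2img API; the checkpoint id is only illustrative:

import torch
from diffusers import StableDiffusionXLImg2ImgPipeline

# Illustrative checkpoint id; any SDXL img2img checkpoint is loaded the same way.
pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
)

# Previously: pipe.upcast_vae()  (deprecated, removed by this commit)
# Now: upcast the VAE module directly before decoding.
pipe.vae.to(torch.float32)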

src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py

Lines changed: 0 additions & 79 deletions

@@ -552,85 +552,6 @@ def _get_add_time_ids(

         return add_time_ids, add_neg_time_ids

-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
-    def upcast_vae(self):
-        deprecate(
-            "upcast_vae",
-            "1.0.0",
-            "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`. For more details, please refer to: https://github.com/huggingface/diffusers/pull/12619#issue-3606633695.",
-        )
-        self.vae.to(dtype=torch.float32)
-
-    # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
-    def get_guidance_scale_embedding(
-        self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32
-    ) -> torch.Tensor:
-        """
-        See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
-
-        Args:
-            w (`torch.Tensor`):
-                Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings.
-            embedding_dim (`int`, *optional*, defaults to 512):
-                Dimension of the embeddings to generate.
-            dtype (`torch.dtype`, *optional*, defaults to `torch.float32`):
-                Data type of the generated embeddings.
-
-        Returns:
-            `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`.
-        """
-        assert len(w.shape) == 1
-        w = w * 1000.0
-
-        half_dim = embedding_dim // 2
-        emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
-        emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
-        emb = w.to(dtype)[:, None] * emb[None, :]
-        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
-        if embedding_dim % 2 == 1:  # zero pad
-            emb = torch.nn.functional.pad(emb, (0, 1))
-        assert emb.shape == (w.shape[0], embedding_dim)
-        return emb
-
-    @property
-    def guidance_scale(self):
-        return self._guidance_scale
-
-    @property
-    def guidance_rescale(self):
-        return self._guidance_rescale
-
-    @property
-    def clip_skip(self):
-        return self._clip_skip
-
-    # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
-    # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
-    # corresponds to doing no classifier free guidance.
-    @property
-    def do_classifier_free_guidance(self):
-        return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
-
-    @property
-    def cross_attention_kwargs(self):
-        return self._cross_attention_kwargs
-
-    @property
-    def denoising_end(self):
-        return self._denoising_end
-
-    @property
-    def denoising_start(self):
-        return self._denoising_start
-
-    @property
-    def num_timesteps(self):
-        return self._num_timesteps
-
-    @property
-    def interrupt(self):
-        return self._interrupt
-
     @torch.no_grad()
     @replace_example_docstring(EXAMPLE_DOC_STRING)
     def __call__(
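The other helper dropped from both files, `get_guidance_scale_embedding`, computes a sinusoidal embedding of the guidance scale for guidance-distilled UNets that expose `time_cond_proj_dim`; per its `# Copied from` marker it remains available on LatentConsistencyModelPipeline. A self-contained sketch of the same computation, for reference only: the free-function name is mine, and the usage comment reflects the usual diffusers pattern rather than code in this diff.

import torch

def guidance_scale_embedding(w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32) -> torch.Tensor:
    # w: 1-D tensor of guidance scales; returns sinusoidal features of shape (len(w), embedding_dim).
    assert len(w.shape) == 1
    w = w * 1000.0
    half_dim = embedding_dim // 2
    freq = torch.exp(torch.arange(half_dim, dtype=dtype) * -(torch.log(torch.tensor(10000.0)) / (half_dim - 1)))
    emb = w.to(dtype)[:, None] * freq[None, :]
    emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
    if embedding_dim % 2 == 1:  # zero-pad odd embedding widths
        emb = torch.nn.functional.pad(emb, (0, 1))
    return emb

# Typical consumption: passed as timestep_cond to the UNet when
# unet.config.time_cond_proj_dim is not None (e.g. LCM-style models).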

src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py

Lines changed: 0 additions & 9 deletions

@@ -327,15 +327,6 @@ def _get_add_time_ids(
         add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
         return add_time_ids

-    # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.upcast_vae
-    def upcast_vae(self):
-        deprecate(
-            "upcast_vae",
-            "1.0.0",
-            "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`. For more details, please refer to: https://github.com/huggingface/diffusers/pull/12619#issue-3606633695.",
-        )
-        self.vae.to(dtype=torch.float32)
-
     @torch.no_grad()
     @replace_example_docstring(EXAMPLE_DOC_STRING)
     def __call__(
