44 changes: 41 additions & 3 deletions src/diffusers/schedulers/scheduling_consistency_models.py
@@ -121,7 +121,7 @@ def set_begin_index(self, begin_index: int = 0):
Sets the begin index for the scheduler. This function should be run from the pipeline before inference.

Args:
begin_index (`int`):
begin_index (`int`, defaults to `0`):
The begin index for the scheduler.
"""
self._begin_index = begin_index
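As a usage note: pipelines that start denoising partway through the schedule (image-to-image style) are the intended callers of this setter. A hypothetical helper sketching that call pattern; the helper name and `strength` semantics are illustrative, not from this PR:

```python
def prepare_scheduler_for_img2img(scheduler, num_inference_steps: int, strength: float) -> None:
    # With strength=0.7 and 50 steps, denoising starts 15 steps into the
    # schedule rather than from pure noise.
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    scheduler.set_begin_index(t_start * scheduler.order)
```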
@@ -287,7 +287,23 @@ def get_scalings_for_boundary_condition(self, sigma):
return c_skip, c_out
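For context on the surrounding method: the two scalings returned here implement the consistency-models boundary condition, which forces the parameterized model to become the identity at `sigma_min`. A standalone sketch, assuming `sigma_min` and `sigma_data` are read from the scheduler config (this function body is not part of the diff):

```python
import torch

def get_scalings_for_boundary_condition(sigma: torch.Tensor, sigma_min: float, sigma_data: float):
    # c_skip -> 1 and c_out -> 0 as sigma -> sigma_min, so the model output
    # reduces to the input sample at the schedule boundary.
    c_skip = sigma_data**2 / ((sigma - sigma_min) ** 2 + sigma_data**2)
    c_out = (sigma - sigma_min) * sigma_data / (sigma**2 + sigma_data**2) ** 0.5
    return c_skip, c_out
```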

# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep
def index_for_timestep(self, timestep, schedule_timesteps=None):
def index_for_timestep(
self, timestep: Union[float, torch.Tensor], schedule_timesteps: Optional[torch.Tensor] = None
) -> int:
"""
Find the index of a given timestep in the timestep schedule.

Args:
timestep (`float` or `torch.Tensor`):
The timestep value to find in the schedule.
schedule_timesteps (`torch.Tensor`, *optional*):
The timestep schedule to search in. If `None`, uses `self.timesteps`.

Returns:
`int`:
The index of the timestep in the schedule. For the very first step, returns the second index if
multiple matches exist to avoid skipping a sigma when starting mid-schedule (e.g., for image-to-image).
"""
if schedule_timesteps is None:
schedule_timesteps = self.timesteps

@@ -302,7 +318,14 @@ def index_for_timestep(self, timestep, schedule_timesteps=None):
return indices[pos].item()
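The "second index" rule described in the new docstring is easier to see as a standalone sketch of the lookup (simplified from the method above; `schedule_timesteps` is the scheduler's 1-D timestep tensor):

```python
import torch

def index_for_timestep(timestep, schedule_timesteps: torch.Tensor) -> int:
    # A timestep can occur more than once when the schedule is truncated for a
    # mid-schedule start (e.g. image-to-image).
    indices = (schedule_timesteps == timestep).nonzero()
    # Prefer the second match so the very first `step` call does not skip a sigma.
    pos = 1 if len(indices) > 1 else 0
    return indices[pos].item()
```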

# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index
def _init_step_index(self, timestep):
def _init_step_index(self, timestep: Union[float, torch.Tensor]) -> None:
"""
Initialize the step index for the scheduler based on the given timestep.

Args:
timestep (`float` or `torch.Tensor`):
The current timestep to initialize the step index from.
"""
if self.begin_index is None:
if isinstance(timestep, torch.Tensor):
timestep = timestep.to(self.timesteps.device)
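The collapsed remainder of this method simply dispatches on `begin_index`; roughly, per the Euler discrete scheduler it is copied from:

```python
def _init_step_index(self, timestep) -> None:
    # Locate the timestep in the schedule unless the pipeline already
    # supplied an explicit begin index via set_begin_index().
    if self.begin_index is None:
        if isinstance(timestep, torch.Tensor):
            timestep = timestep.to(self.timesteps.device)
        self._step_index = self.index_for_timestep(timestep)
    else:
        self._step_index = self._begin_index
```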
@@ -410,6 +433,21 @@ def add_noise(
noise: torch.Tensor,
timesteps: torch.Tensor,
) -> torch.Tensor:
"""
Add noise to the original samples according to the noise schedule at the specified timesteps.

Args:
original_samples (`torch.Tensor`):
The original samples to which noise will be added.
noise (`torch.Tensor`):
The noise tensor to add to the original samples.
timesteps (`torch.Tensor`):
The timesteps at which to add noise, determining the noise level from the schedule.

Returns:
`torch.Tensor`:
The noisy samples, with noise scaled according to the timestep schedule.
"""
# Make sure sigmas and timesteps have the same device and dtype as original_samples
sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
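The collapsed body that follows boils down to a per-sample sigma lookup and one affine update. A simplified sketch that ignores the device/dtype handling shown above; `step_indices` stands in for the indices derived from `index_for_timestep` or `begin_index`:

```python
import torch

def add_noise_core(original_samples: torch.Tensor, noise: torch.Tensor,
                   sigmas: torch.Tensor, step_indices: list) -> torch.Tensor:
    # One sigma per sample, broadcast over the trailing sample dimensions.
    sigma = sigmas[step_indices].flatten()
    while len(sigma.shape) < len(original_samples.shape):
        sigma = sigma.unsqueeze(-1)
    return original_samples + noise * sigma
```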
@@ -137,7 +137,7 @@ def set_begin_index(self, begin_index: int = 0):
Sets the begin index for the scheduler. This function should be run from the pipeline before inference.

Args:
begin_index (`int`):
begin_index (`int`, defaults to `0`):
The begin index for the scheduler.
"""
self._begin_index = begin_index
@@ -266,6 +266,19 @@ def _compute_exponential_sigmas(self, ramp, sigma_min=None, sigma_max=None) -> t

# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t
def _sigma_to_t(self, sigma, log_sigmas):
"""
Convert sigma values to corresponding timestep values through interpolation.

Args:
sigma (`np.ndarray`):
The sigma value(s) to convert to timestep(s).
log_sigmas (`np.ndarray`):
The logarithm of the sigma schedule used for interpolation.

Returns:
`np.ndarray`:
The interpolated timestep value(s) corresponding to the input sigma(s).
"""
# get log sigma
log_sigma = np.log(np.maximum(sigma, 1e-10))
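For reference, the interpolation the new docstring describes works entirely in log-sigma space: find the two schedule entries that bracket the query, compute a linear weight between them, and map that weight to a fractional timestep index. A standalone sketch of the whole function:

```python
import numpy as np

def sigma_to_t(sigma: np.ndarray, log_sigmas: np.ndarray) -> np.ndarray:
    log_sigma = np.log(np.maximum(sigma, 1e-10))
    # Signed distance from every schedule entry to each query value.
    dists = log_sigma - log_sigmas[:, np.newaxis]
    # Index of the last schedule entry >= the query (sigmas decrease),
    # clipped so high_idx stays inside the schedule.
    low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
    high_idx = low_idx + 1
    low, high = log_sigmas[low_idx], log_sigmas[high_idx]
    # Interpolation weight in log space, mapped to a fractional index.
    w = np.clip((low - log_sigma) / (low - high), 0, 1)
    return ((1 - w) * low_idx + w * high_idx).reshape(sigma.shape)
```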

@@ -537,6 +550,21 @@ def add_noise(
noise: torch.Tensor,
timesteps: torch.Tensor,
) -> torch.Tensor:
"""
Add noise to the original samples according to the noise schedule at the specified timesteps.

Args:
original_samples (`torch.Tensor`):
The original samples to which noise will be added.
noise (`torch.Tensor`):
The noise tensor to add to the original samples.
timesteps (`torch.Tensor`):
The timesteps at which to add noise, determining the noise level from the schedule.

Returns:
`torch.Tensor`:
The noisy samples, with noise scaled according to the timestep schedule.
"""
# Make sure sigmas and timesteps have the same device and dtype as original_samples
sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
5 changes: 3 additions & 2 deletions src/diffusers/schedulers/scheduling_ddim.py
@@ -99,10 +99,11 @@ def rescale_zero_terminal_snr(betas: torch.Tensor) -> torch.Tensor:

Args:
betas (`torch.Tensor`):
the betas that the scheduler is being initialized with.
The betas that the scheduler is being initialized with.

Returns:
`torch.Tensor`: rescaled betas with zero terminal SNR
`torch.Tensor`:
Rescaled betas with zero terminal SNR.
"""
# Convert betas to alphas_bar_sqrt
alphas = 1.0 - betas
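The full algorithm (from "Common Diffusion Noise Schedules and Sample Steps Are Flawed", arXiv:2305.08891) shifts and rescales sqrt(alpha_bar) so its terminal value is exactly zero, then converts back to per-step betas. A sketch of the complete function for reference:

```python
import torch

def rescale_zero_terminal_snr(betas: torch.Tensor) -> torch.Tensor:
    alphas_bar_sqrt = torch.cumprod(1.0 - betas, dim=0).sqrt()

    # Shift so the last value becomes 0, then rescale so the first is unchanged.
    first, last = alphas_bar_sqrt[0].clone(), alphas_bar_sqrt[-1].clone()
    alphas_bar_sqrt -= last
    alphas_bar_sqrt *= first / (first - last)

    # Convert sqrt(alpha_bar) back to betas.
    alphas_bar = alphas_bar_sqrt**2
    alphas = torch.cat([alphas_bar[0:1], alphas_bar[1:] / alphas_bar[:-1]])
    return 1 - alphas
```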
5 changes: 3 additions & 2 deletions src/diffusers/schedulers/scheduling_ddim_inverse.py
@@ -98,10 +98,11 @@ def rescale_zero_terminal_snr(betas):

Args:
betas (`torch.Tensor`):
the betas that the scheduler is being initialized with.
The betas that the scheduler is being initialized with.

Returns:
`torch.Tensor`: rescaled betas with zero terminal SNR
`torch.Tensor`:
Rescaled betas with zero terminal SNR.
"""
# Convert betas to alphas_bar_sqrt
alphas = 1.0 - betas
5 changes: 3 additions & 2 deletions src/diffusers/schedulers/scheduling_ddim_parallel.py
@@ -100,10 +100,11 @@ def rescale_zero_terminal_snr(betas):

Args:
betas (`torch.Tensor`):
the betas that the scheduler is being initialized with.
The betas that the scheduler is being initialized with.

Returns:
`torch.Tensor`: rescaled betas with zero terminal SNR
`torch.Tensor`:
Rescaled betas with zero terminal SNR.
"""
# Convert betas to alphas_bar_sqrt
alphas = 1.0 - betas
5 changes: 3 additions & 2 deletions src/diffusers/schedulers/scheduling_ddpm.py
@@ -97,10 +97,11 @@ def rescale_zero_terminal_snr(betas: torch.Tensor) -> torch.Tensor:

Args:
betas (`torch.Tensor`):
the betas that the scheduler is being initialized with.
The betas that the scheduler is being initialized with.

Returns:
`torch.Tensor`: rescaled betas with zero terminal SNR
`torch.Tensor`:
Rescaled betas with zero terminal SNR.
"""
# Convert betas to alphas_bar_sqrt
alphas = 1.0 - betas
5 changes: 3 additions & 2 deletions src/diffusers/schedulers/scheduling_ddpm_parallel.py
@@ -99,10 +99,11 @@ def rescale_zero_terminal_snr(betas):

Args:
betas (`torch.Tensor`):
the betas that the scheduler is being initialized with.
The betas that the scheduler is being initialized with.

Returns:
`torch.Tensor`: rescaled betas with zero terminal SNR
`torch.Tensor`:
Rescaled betas with zero terminal SNR.
"""
# Convert betas to alphas_bar_sqrt
alphas = 1.0 - betas
63 changes: 59 additions & 4 deletions src/diffusers/schedulers/scheduling_deis_multistep.py
@@ -230,7 +230,7 @@ def set_begin_index(self, begin_index: int = 0):
Sets the begin index for the scheduler. This function should be run from the pipeline before inference.

Args:
begin_index (`int`):
begin_index (`int`, defaults to `0`):
The begin index for the scheduler.
"""
self._begin_index = begin_index
@@ -364,6 +364,19 @@ def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor:

# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t
def _sigma_to_t(self, sigma, log_sigmas):
"""
Convert sigma values to corresponding timestep values through interpolation.

Args:
sigma (`np.ndarray`):
The sigma value(s) to convert to timestep(s).
log_sigmas (`np.ndarray`):
The logarithm of the sigma schedule used for interpolation.

Returns:
`np.ndarray`:
The interpolated timestep value(s) corresponding to the input sigma(s).
"""
# get log sigma
log_sigma = np.log(np.maximum(sigma, 1e-10))

@@ -399,7 +412,20 @@ def _sigma_to_alpha_sigma_t(self, sigma):

# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras
def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor:
"""Constructs the noise schedule of Karras et al. (2022)."""
"""
Construct the noise schedule as proposed in [Elucidating the Design Space of Diffusion-Based Generative
Models](https://huggingface.co/papers/2206.00364).

Args:
in_sigmas (`torch.Tensor`):
The input sigma values to be converted.
num_inference_steps (`int`):
The number of inference steps to generate the noise schedule for.

Returns:
`torch.Tensor`:
The converted sigma values following the Karras noise schedule.
"""

# Hack to make sure that other schedulers which copy this function don't break
# TODO: Add this logic to the other schedulers
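For reference, the Karras et al. (2022) schedule interpolates between `sigma_max` and `sigma_min` in sigma^(1/rho) space, which concentrates steps near the low-noise end. A minimal standalone sketch with rho = 7.0 as in the paper (the collapsed method body additionally honors `sigma_min`/`sigma_max` overrides from the scheduler config, per the hack noted above):

```python
import numpy as np

def karras_sigmas(sigma_min: float, sigma_max: float, num_inference_steps: int, rho: float = 7.0) -> np.ndarray:
    # Linear ramp in sigma^(1/rho) space, raised back to the rho-th power.
    ramp = np.linspace(0, 1, num_inference_steps)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
```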
@@ -425,7 +451,19 @@ def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor:

# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_exponential
def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps: int) -> torch.Tensor:
"""Constructs an exponential noise schedule."""
"""
Construct an exponential noise schedule.

Args:
in_sigmas (`torch.Tensor`):
The input sigma values to be converted.
num_inference_steps (`int`):
The number of inference steps to generate the noise schedule for.

Returns:
`torch.Tensor`:
The converted sigma values following an exponential schedule.
"""

# Hack to make sure that other schedulers which copy this function don't break
# TODO: Add this logic to the other schedulers
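The exponential schedule is simpler: sigmas are evenly spaced in log space, i.e. geometrically spaced in sigma. A minimal sketch under the same assumptions as above:

```python
import numpy as np

def exponential_sigmas(sigma_min: float, sigma_max: float, num_inference_steps: int) -> np.ndarray:
    # Even spacing in log-sigma from sigma_max down to sigma_min.
    return np.exp(np.linspace(np.log(sigma_max), np.log(sigma_min), num_inference_steps))
```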
@@ -449,7 +487,24 @@ def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps: int) -> torch.Tensor:
def _convert_to_beta(
self, in_sigmas: torch.Tensor, num_inference_steps: int, alpha: float = 0.6, beta: float = 0.6
) -> torch.Tensor:
"""From "Beta Sampling is All You Need" [arXiv:2407.12173] (Lee et. al, 2024)"""
"""
Construct a beta noise schedule as proposed in [Beta Sampling is All You
Need](https://huggingface.co/papers/2407.12173).

Args:
in_sigmas (`torch.Tensor`):
The input sigma values to be converted.
num_inference_steps (`int`):
The number of inference steps to generate the noise schedule for.
alpha (`float`, *optional*, defaults to `0.6`):
The alpha parameter for the beta distribution.
beta (`float`, *optional*, defaults to `0.6`):
The beta parameter for the beta distribution.

Returns:
`torch.Tensor`:
The converted sigma values following a beta distribution schedule.
"""

# Hack to make sure that other schedulers which copy this function don't break
# TODO: Add this logic to the other schedulers
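And the beta schedule places sigmas at quantiles of a Beta(alpha, beta) distribution, scaled into [`sigma_min`, `sigma_max`]. A minimal sketch of the collapsed body:

```python
import numpy as np
import scipy.stats

def beta_sigmas(sigma_min: float, sigma_max: float, num_inference_steps: int,
                alpha: float = 0.6, beta: float = 0.6) -> np.ndarray:
    # Descending quantiles through the inverse CDF (ppf) of Beta(alpha, beta),
    # scaled from [0, 1] into [sigma_min, sigma_max].
    ppfs = scipy.stats.beta.ppf(1 - np.linspace(0, 1, num_inference_steps), alpha, beta)
    return sigma_min + ppfs * (sigma_max - sigma_min)
```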