src/diffusers/training_utils.py: 32 changes (20 additions, 12 deletions)
@@ -36,8 +36,9 @@

 def set_seed(seed: int):
     """
-    Args:
     Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch`.
+
+    Args:
         seed (`int`): The seed to set.
     """
     random.seed(seed)
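
For reference, a minimal sketch of the documented behavior; the seed value is arbitrary and the import path is simply the module this diff touches:

```python
from diffusers.training_utils import set_seed

# Seed Python's `random`, NumPy, and PyTorch (CPU and CUDA) in one call
# so a training run can be reproduced.
set_seed(42)
```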
@@ -89,8 +90,7 @@ def resolve_interpolation_mode(interpolation_type: str):
             in torchvision.
 
     Returns:
-        `torchvision.transforms.InterpolationMode`: an `InterpolationMode` enum used by torchvision's `resize`
-            transform.
+        `torchvision.transforms.InterpolationMode`: an `InterpolationMode` enum used by torchvision's `resize` transform.
     """
     if not is_torchvision_available():
         raise ImportError(
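
A hedged usage sketch for the function documented in this hunk; resolving a string to the torchvision enum and handing it to `transforms.Resize` is the intended pattern, though the target size used here is illustrative:

```python
from torchvision import transforms

from diffusers.training_utils import resolve_interpolation_mode

# Map a string such as "bilinear", "bicubic", or "lanczos" to the
# `InterpolationMode` enum that torchvision's resize transform expects.
interpolation = resolve_interpolation_mode("bilinear")
resize = transforms.Resize(512, interpolation=interpolation)
```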
@@ -225,7 +225,8 @@ def _set_state_dict_into_text_encoder(
 def compute_density_for_timestep_sampling(
     weighting_scheme: str, batch_size: int, logit_mean: float = None, logit_std: float = None, mode_scale: float = None
 ):
-    """Compute the density for sampling the timesteps when doing SD3 training.
+    """
+    Compute the density for sampling the timesteps when doing SD3 training.
 
     Courtesy: This was contributed by Rafie Walker in https://github.com/huggingface/diffusers/pull/8528.
 
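As a sketch of a typical call site (the batch size, hyperparameters, and timestep count below are assumptions, not taken from this diff), SD3-style training draws a density `u` in [0, 1) and maps it to discrete timestep indices:

```python
import torch

from diffusers.training_utils import compute_density_for_timestep_sampling

batch_size = 4
num_train_timesteps = 1000  # assumed scheduler config value

# Sample u under the chosen weighting scheme, then convert to integer
# training timesteps.
u = compute_density_for_timestep_sampling(
    weighting_scheme="logit_normal",
    batch_size=batch_size,
    logit_mean=0.0,
    logit_std=1.0,
)
indices = (u * num_train_timesteps).long()
```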
@@ -244,7 +245,8 @@ def compute_density_for_timestep_sampling(


 def compute_loss_weighting_for_sd3(weighting_scheme: str, sigmas=None):
-    """Computes loss weighting scheme for SD3 training.
+    """
+    Computes loss weighting scheme for SD3 training.
 
     Courtesy: This was contributed by Rafie Walker in https://github.com/huggingface/diffusers/pull/8528.
 
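A companion sketch for the loss-weighting helper; the `sigmas`, prediction, and target tensors below are placeholders standing in for what a real training loop produces:

```python
import torch

from diffusers.training_utils import compute_loss_weighting_for_sd3

sigmas = torch.rand(4, 1, 1, 1) + 0.1    # placeholder per-sample noise levels
model_pred = torch.randn(4, 16, 64, 64)  # placeholder model prediction
target = torch.randn(4, 16, 64, 64)      # placeholder regression target

# Weight the per-pixel squared error according to the chosen scheme.
weighting = compute_loss_weighting_for_sd3(weighting_scheme="sigma_sqrt", sigmas=sigmas)
loss = (weighting * (model_pred - target) ** 2).mean()
```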
@@ -261,7 +263,9 @@ def compute_loss_weighting_for_sd3(weighting_scheme: str, sigmas=None):


 def free_memory():
-    """Runs garbage collection. Then clears the cache of the available accelerator."""
+    """
+    Runs garbage collection. Then clears the cache of the available accelerator.
+    """
     gc.collect()
 
     if torch.cuda.is_available():
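
A small sketch of where `free_memory` is typically called; the large tensor is just a stand-in for a pipeline or model being discarded:

```python
import torch

from diffusers.training_utils import free_memory

tensor = torch.zeros(4096, 4096)  # stand-in for a large object (e.g. a pipeline)
del tensor
free_memory()  # gc.collect() plus the available accelerator's cache clear
```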
@@ -494,7 +498,8 @@ def pin_memory(self) -> None:
         self.shadow_params = [p.pin_memory() for p in self.shadow_params]
 
     def to(self, device=None, dtype=None, non_blocking=False) -> None:
-        r"""Move internal buffers of the ExponentialMovingAverage to `device`.
+        r"""
+        Move internal buffers of the ExponentialMovingAverage to `device`.
 
         Args:
             device: like `device` argument to `torch.Tensor.to`
@@ -528,23 +533,25 @@ def state_dict(self) -> dict:

     def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
         r"""
+        Saves the current parameters for restoring later.
+
         Args:
-        Save the current parameters for restoring later.
-        parameters: Iterable of `torch.nn.Parameter`; the parameters to be
-            temporarily stored.
+            parameters: Iterable of `torch.nn.Parameter`. The parameters to be temporarily stored.
         """
         self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]
 
     def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
         r"""
-        Args:
         Restore the parameters stored with the `store` method. Useful to validate the model with EMA parameters without:
         affecting the original optimization process. Store the parameters before the `copy_to()` method. After
         validation (or model saving), use this to restore the former parameters.
+
+        Args:
             parameters: Iterable of `torch.nn.Parameter`; the parameters to be
                 updated with the stored parameters. If `None`, the parameters with which this
                 `ExponentialMovingAverage` was initialized will be used.
         """
+
         if self.temp_stored_params is None:
             raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`")
         if self.foreach:
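
The `store`/`restore` pair in this hunk is designed to bracket `copy_to` during evaluation, as its docstring describes. A sketch of that workflow, with a toy model standing in for the real network:

```python
import torch

from diffusers.training_utils import EMAModel

model = torch.nn.Linear(8, 8)  # toy stand-in for the trained model
ema = EMAModel(model.parameters())

# ... a training loop would call ema.step(model.parameters()) each step ...

ema.store(model.parameters())    # stash the live weights
ema.copy_to(model.parameters())  # swap in the EMA weights
# ... run validation or save a checkpoint here ...
ema.restore(model.parameters())  # put the live weights back
```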
@@ -560,9 +567,10 @@ def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:

     def load_state_dict(self, state_dict: dict) -> None:
         r"""
-        Args:
         Loads the ExponentialMovingAverage state. This method is used by accelerate during checkpointing to save the
         ema state dict.
+
+        Args:
             state_dict (dict): EMA state. Should be an object returned
                 from a call to :meth:`state_dict`.
         """