@@ -1327,7 +1327,7 @@ def join_uneven_inputs(self, joinables, even_batches=None):
13271327
13281328 <Tip warning={true}>
13291329
1330- Overidding `even_batches` will not affect iterable-style data loaders.
1330+ Overriding `even_batches` will not affect iterable-style data loaders.
13311331
13321332 </Tip>
13331333
@@ -1363,7 +1363,7 @@ def join_uneven_inputs(self, joinables, even_batches=None):
13631363
13641364 if iterable_dl_seen :
13651365 warnings .warn (
1366- "Overridding even_batches is only supported for map-style datasets, yet some dataloaders given were iterable"
1366+ "Overriding even_batches is only supported for map-style datasets, yet some dataloaders given were iterable"
13671367 )
13681368 else :
13691369 even_batches = self .even_batches
@@ -1542,7 +1542,7 @@ def prepare(self, *args, device_placement=None):
15421542 and self .state .use_ipex
15431543 ):
15441544 logger .warning (
1545- "You are using lower version of PyTorch(< 2.7.0) with ipex acceleration on Intel CPU or XPU, Intel has upstreamed most of the optimizations into stock PyTorch from 2.7.0, we enourage you to install the latest stock PyTorch and enjoy the out-of-experience on Intel CPU/XPU."
1545+ "You are using lower version of PyTorch(< 2.7.0) with ipex acceleration on Intel CPU or XPU, Intel has upstreamed most of the optimizations into stock PyTorch from 2.7.0, we encourage you to install the latest stock PyTorch and enjoy the out-of-box experience on Intel CPU/XPU."
15461546 )
15471547 args = self ._prepare_ipex (* args )
15481548 if self .parallelism_config and self .parallelism_config .tp_enabled :
@@ -1672,7 +1672,7 @@ def _prepare_fsdp2(self, *args):
16721672 else :
16731673 model = torch .compile (model , ** self .state .dynamo_plugin .to_kwargs ())
16741674
1675- # Get old params and canonicalize - we cannonicalize to have the mapping easy
1675+ # Get old params and canonicalize - we canonicalize to have the mapping easy
16761676 old_named_params = fsdp2_canonicalize_names (self ._get_named_parameters (* tuple (result ), drop_refs = True ))
16771677
16781678 # Swap the optimizer parameters with empty, so `fully_shard` after will not allocate too much memory
@@ -2888,7 +2888,7 @@ def clip_grad_norm_(self, parameters, max_norm, norm_type=2):
28882888 while isinstance (opt , AcceleratedOptimizer ):
28892889 opt = opt .optimizer
28902890 gradients = xm ._fetch_gradients (opt )
2891- # Use xm.all_reduce to perform an in-place all-reduce. Recusrsive all-reduce each tensor
2891+ # Use xm.all_reduce to perform an in-place all-reduce. Recursively all-reducing each tensor
28922892 # one by one in self.reduce is non-inplace.
28932893 xm .all_reduce ("sum" , gradients , scale = 1.0 / self .num_processes )
28942894 # Set is_xla_gradients_synced to True to avoid all-reduce twice in the AcceleratedOptimizer step.
@@ -3047,7 +3047,7 @@ def reduce(self, tensor, reduction="sum", scale=1.0):
30473047 reduction (`str`, *optional*, defaults to "sum"):
30483048 A reduction type, can be one of 'sum', 'mean', or 'none'. If 'none', will not perform any operation.
30493049 scale (`float`, *optional*, defaults to 1.0):
3050- A default scaling value to be applied after the reduce, only valied on XLA.
3050+ A default scaling value to be applied after the reduce, only valid on XLA.
30513051
30523052 Returns:
30533053 `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`:
@@ -3339,7 +3339,7 @@ def save_model(
33393339
33403340 Arguments:
33413341 model: (`torch.nn.Module`):
3342- Model to be saved. The model can be wrapped or unwraped .
3342+ Model to be saved. The model can be wrapped or unwrapped .
33433343 save_directory (`str` or `os.PathLike`):
33443344 Directory to which to save. Will be created if it doesn't exist.
33453345 max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
@@ -3450,7 +3450,7 @@ def register_save_state_pre_hook(self, hook: Callable[..., None]) -> hooks.Remov
34503450
34513451 `hook(models: list[torch.nn.Module], weights: list[dict[str, torch.Tensor]], input_dir: str) -> None`
34523452
3453- The `models` argument are the models as saved in the accelerator state under `accelerator._models`, `weigths `
3453+ The `models` argument are the models as saved in the accelerator state under `accelerator._models`, `weights`
34543454 argument are the state dicts of the `models`, and the `input_dir` argument is the `input_dir` argument passed
34553455 to [`Accelerator.load_state`].
34563456
0 commit comments