
Commit 11bf2cf
Commit message: up
Parent: 19921e9

377 files changed: +1068 additions, -1066 deletions


src/diffusers/hooks/context_parallel.py
Lines changed: 2 additions & 2 deletions

@@ -283,8 +283,8 @@ def _find_submodule_by_name(model: torch.nn.Module, name: str) -> torch.nn.Modul
         return model
     first_atom, remaining_name = name.split(".", 1) if "." in name else (name, "")
     if first_atom == "*":
-        if not isinstance(model, torch.nn.ModuleList):
-            raise ValueError("Wildcard '*' can only be used with ModuleList")
+        if not isinstance(model, torch.nn.Modulelist):
+            raise ValueError("Wildcard '*' can only be used with Modulelist")
         submodules = []
         for submodule in model:
             subsubmodules = _find_submodule_by_name(submodule, remaining_name)
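For context, here is a minimal standalone sketch of the wildcard-lookup pattern this hunk touches. It is not the diffusers implementation: the non-wildcard branch and the toy model are assumptions, and it uses PyTorch's actual `torch.nn.ModuleList` class.

import torch


def find_submodules(model: torch.nn.Module, name: str):
    # An empty name resolves to the module itself.
    if name == "":
        return model
    first_atom, remaining_name = name.split(".", 1) if "." in name else (name, "")
    if first_atom == "*":
        # The wildcard fans out over every entry of a ModuleList and recurses.
        if not isinstance(model, torch.nn.ModuleList):
            raise ValueError("Wildcard '*' can only be used with ModuleList")
        return [find_submodules(child, remaining_name) for child in model]
    # Otherwise descend one attribute and recurse on the rest of the dotted name.
    return find_submodules(getattr(model, first_atom), remaining_name)


class Block(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(4, 4)


class Toy(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.blocks = torch.nn.ModuleList([Block() for _ in range(3)])


print(find_submodules(Toy(), "blocks.*.linear"))  # three Linear layers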

src/diffusers/hooks/first_block_cache.py
Lines changed: 1 addition & 1 deletion

@@ -227,7 +227,7 @@ def apply_first_block_cache(module: torch.nn.Module, config: FirstBlockCacheConf
     remaining_blocks = []
 
     for name, submodule in module.named_children():
-        if name not in _ALL_TRANSFORMER_BLOCK_IDENTIFIERS or not isinstance(submodule, torch.nn.ModuleList):
+        if name not in _ALL_TRANSFORMER_BLOCK_IDENTIFIERS or not isinstance(submodule, torch.nn.Modulelist):
             continue
         for index, block in enumerate(submodule):
             remaining_blocks.append((f"{name}.{index}", block))
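The hunk above iterates direct children and collects indexed transformer blocks. A minimal sketch of that collection pattern, with a hypothetical identifier set standing in for `_ALL_TRANSFORMER_BLOCK_IDENTIFIERS`:

import torch

# Hypothetical stand-in for diffusers' _ALL_TRANSFORMER_BLOCK_IDENTIFIERS.
BLOCK_IDENTIFIERS = {"blocks", "transformer_blocks", "single_transformer_blocks"}


def collect_blocks(module: torch.nn.Module):
    remaining_blocks = []
    for name, submodule in module.named_children():
        # Only direct children whose name is a known identifier and which are ModuleLists count.
        if name not in BLOCK_IDENTIFIERS or not isinstance(submodule, torch.nn.ModuleList):
            continue
        for index, block in enumerate(submodule):
            remaining_blocks.append((f"{name}.{index}", block))
    return remaining_blocks


class Toy(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.transformer_blocks = torch.nn.ModuleList([torch.nn.Linear(4, 4) for _ in range(2)])


print(collect_blocks(Toy()))  # [("transformer_blocks.0", ...), ("transformer_blocks.1", ...)]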

src/diffusers/hooks/group_offloading.py
Lines changed: 4 additions & 4 deletions

@@ -471,7 +471,7 @@ def apply_group_offloading(
     memory, but can be slower due to the excessive number of device synchronizations.
 
     Group offloading is a middle ground between the two methods. It works by offloading groups of internal layers,
-    (either `torch.nn.ModuleList` or `torch.nn.Sequential`). This method uses lower memory than module-level
+    (either `torch.nn.Modulelist` or `torch.nn.Sequential`). This method uses lower memory than module-level
     offloading. It is also faster than leaf-level/sequential offloading, as the number of device synchronizations is
     reduced.
 
@@ -576,7 +576,7 @@ def _apply_group_offloading(module: torch.nn.Module, config: GroupOffloadingConf
 
 def _apply_group_offloading_block_level(module: torch.nn.Module, config: GroupOffloadingConfig) -> None:
     r"""
-    This function applies offloading to groups of torch.nn.ModuleList or torch.nn.Sequential blocks. In comparison to
+    This function applies offloading to groups of torch.nn.Modulelist or torch.nn.Sequential blocks. In comparison to
     the "leaf_level" offloading, which is more fine-grained, this offloading is done at the top-level blocks.
     """
 
@@ -586,12 +586,12 @@ def _apply_group_offloading_block_level(module: torch.nn.Module, config: GroupOf
         )
         config.num_blocks_per_group = 1
 
-    # Create module groups for ModuleList and Sequential blocks
+    # Create module groups for Modulelist and Sequential blocks
     modules_with_group_offloading = set()
     unmatched_modules = []
    matched_module_groups = []
     for name, submodule in module.named_children():
-        if not isinstance(submodule, (torch.nn.ModuleList, torch.nn.Sequential)):
+        if not isinstance(submodule, (torch.nn.Modulelist, torch.nn.Sequential)):
             unmatched_modules.append((name, submodule))
             modules_with_group_offloading.add(name)
             continue
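As a rough illustration of the block-level grouping the docstring above describes (not the diffusers implementation; the group size and the handling of unmatched modules are simplified assumptions):

import torch


def group_block_children(module: torch.nn.Module, num_blocks_per_group: int = 2):
    """Split ModuleList/Sequential children into fixed-size groups; everything else is left unmatched."""
    matched_module_groups, unmatched_modules = [], []
    for name, submodule in module.named_children():
        if not isinstance(submodule, (torch.nn.ModuleList, torch.nn.Sequential)):
            unmatched_modules.append((name, submodule))
            continue
        blocks = list(submodule)
        for start in range(0, len(blocks), num_blocks_per_group):
            # Each group would be moved on/off the device together in block-level offloading.
            matched_module_groups.append(blocks[start : start + num_blocks_per_group])
    return matched_module_groups, unmatched_modules


class Toy(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.proj_in = torch.nn.Linear(4, 4)
        self.blocks = torch.nn.ModuleList([torch.nn.Linear(4, 4) for _ in range(5)])


groups, unmatched = group_block_children(Toy())
print(len(groups), [name for name, _ in unmatched])  # 3 ['proj_in']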

src/diffusers/hooks/layer_skip.py
Lines changed: 2 additions & 2 deletions

@@ -218,10 +218,10 @@ def _apply_layer_skip_hook(module: torch.nn.Module, config: LayerSkipConfig, nam
         )
 
     transformer_blocks = _get_submodule_from_fqn(module, config.fqn)
-    if transformer_blocks is None or not isinstance(transformer_blocks, torch.nn.ModuleList):
+    if transformer_blocks is None or not isinstance(transformer_blocks, torch.nn.Modulelist):
         raise ValueError(
             f"Could not find {config.fqn} in the provided module, or configured `fqn` (fully qualified name) does not identify "
-            f"a `torch.nn.ModuleList`. Please provide a valid `fqn` that identifies a stack of transformer blocks."
+            f"a `torch.nn.Modulelist`. Please provide a valid `fqn` that identifies a stack of transformer blocks."
         )
     if len(config.indices) == 0:
         raise ValueError("Layer index list is empty. Please provide a non-empty list of layer indices to skip.")

src/diffusers/hooks/utils.py
Lines changed: 1 addition & 1 deletion

@@ -21,7 +21,7 @@ def _get_identifiable_transformer_blocks_in_module(module: torch.nn.Module):
     module_list_with_transformer_blocks = []
     for name, submodule in module.named_modules():
         name_endswith_identifier = any(name.endswith(identifier) for identifier in _ALL_TRANSFORMER_BLOCK_IDENTIFIERS)
-        is_modulelist = isinstance(submodule, torch.nn.ModuleList)
+        is_modulelist = isinstance(submodule, torch.nn.Modulelist)
         if name_endswith_identifier and is_modulelist:
             module_list_with_transformer_blocks.append((name, submodule))
     return module_list_with_transformer_blocks
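The same identifier matching, sketched over `named_modules()` so nested block lists are matched by name suffix (the identifier tuple is again a hypothetical stand-in):

import torch

BLOCK_IDENTIFIERS = ("blocks", "transformer_blocks")  # hypothetical stand-in


def find_block_lists(module: torch.nn.Module):
    found = []
    for name, submodule in module.named_modules():
        # named_modules() walks the whole tree, so nested ModuleLists are found as well.
        if name.endswith(BLOCK_IDENTIFIERS) and isinstance(submodule, torch.nn.ModuleList):
            found.append((name, submodule))
    return found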

src/diffusers/image_processor.py
Lines changed: 1 addition & 1 deletion

@@ -63,7 +63,7 @@ def is_valid_image_imagelist(images):
     - A list of valid images.
 
     Args:
-        images (`Union[np.ndarray, torch.Tensor, PIL.Image.Image, List]`):
+        images (`Union[np.ndarray, torch.Tensor, PIL.Image.Image, list]`):
             The image(s) to check. Can be a batch of images (4D tensor/array), a single image, or a list of valid
             images.
 
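A hedged sketch of the kind of check this docstring describes: accept a 4D batch, a single image, or a list of single images. The exact accepted shapes are assumptions, not a copy of the diffusers source.

import numpy as np
import PIL.Image
import torch


def is_valid_image(image) -> bool:
    # A single image: a PIL image, or a 2D/3D array or tensor.
    return isinstance(image, PIL.Image.Image) or (
        isinstance(image, (np.ndarray, torch.Tensor)) and image.ndim in (2, 3)
    )


def is_valid_image_imagelist(images) -> bool:
    # A 4D array/tensor is treated as a batch of images.
    if isinstance(images, (np.ndarray, torch.Tensor)) and images.ndim == 4:
        return True
    if is_valid_image(images):
        return True
    if isinstance(images, list):
        return all(is_valid_image(image) for image in images)
    return False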

src/diffusers/loaders/ip_adapter.py
Lines changed: 1 addition & 1 deletion

@@ -830,7 +830,7 @@ def LinearStrengthModel(start, finish, size):
         # Single value for all layers of all IP-Adapters
         if isinstance(scale, scale_type):
             scale = [scale for _ in range(num_ip_adapters)]
-        # List of per-layer scales for a single IP-Adapter
+        # list of per-layer scales for a single IP-Adapter
         elif _is_valid_type(scale, list[scale_type]) and num_ip_adapters == 1:
             scale = [scale]
         # Invalid scale type
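The comment edited in this hunk sits in the logic that normalizes the `scale` argument. A simplified, hypothetical sketch of that normalization (it ignores the richer per-block configurations the real loader also accepts):

def normalize_ip_adapter_scale(scale, num_ip_adapters: int):
    # Single value for all layers of all IP-Adapters.
    if isinstance(scale, (int, float)):
        return [scale for _ in range(num_ip_adapters)]
    # List of per-layer scales for a single IP-Adapter.
    if isinstance(scale, list) and num_ip_adapters == 1:
        return [scale]
    # One entry per IP-Adapter.
    if isinstance(scale, list) and len(scale) == num_ip_adapters:
        return scale
    raise ValueError(f"Cannot interpret `scale` {scale!r} for {num_ip_adapters} IP-Adapter(s).")


print(normalize_ip_adapter_scale(0.6, num_ip_adapters=2))         # [0.6, 0.6]
print(normalize_ip_adapter_scale([0.4, 0.8], num_ip_adapters=1))  # [[0.4, 0.8]]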

src/diffusers/loaders/lora_base.py
Lines changed: 3 additions & 3 deletions

@@ -547,7 +547,7 @@ def fuse_lora(
         > [!WARNING] > This is an experimental API.
 
         Args:
-            components: (`list[str]`): List of LoRA-injectable components to fuse the LoRAs into.
+            components: (`list[str]`): list of LoRA-injectable components to fuse the LoRAs into.
             lora_scale (`float`, defaults to 1.0):
                 Controls how much to influence the outputs with the LoRA parameters.
             safe_fusing (`bool`, defaults to `False`):
@@ -627,7 +627,7 @@ def unfuse_lora(self, components: list[str] = [], **kwargs):
         > [!WARNING] > This is an experimental API.
 
         Args:
-            components (`list[str]`): List of LoRA-injectable components to unfuse LoRA from.
+            components (`list[str]`): list of LoRA-injectable components to unfuse LoRA from.
             unfuse_unet (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters.
             unfuse_text_encoder (`bool`, defaults to `True`):
                 Whether to unfuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the
@@ -956,7 +956,7 @@ def set_lora_device(self, adapter_names: list[str], device: torch.device | str |
 
         Args:
            adapter_names (`list[str]`):
-                List of adapters to send device to.
+                list of adapters to send device to.
             device (`Union[torch.device, str, int]`):
                 Device to send the adapters to. Can be either a torch device, a str or an integer.
         """

src/diffusers/loaders/lora_pipeline.py
Lines changed: 5 additions & 5 deletions

@@ -543,7 +543,7 @@ def fuse_lora(
         > [!WARNING] > This is an experimental API.
 
         Args:
-            components: (`list[str]`): List of LoRA-injectable components to fuse the LoRAs into.
+            components: (`list[str]`): list of LoRA-injectable components to fuse the LoRAs into.
             lora_scale (`float`, defaults to 1.0):
                 Controls how much to influence the outputs with the LoRA parameters.
             safe_fusing (`bool`, defaults to `False`):
@@ -580,7 +580,7 @@ def unfuse_lora(self, components: list[str] = ["unet", "text_encoder"], **kwargs
         > [!WARNING] > This is an experimental API.
 
         Args:
-            components (`list[str]`): List of LoRA-injectable components to unfuse LoRA from.
+            components (`list[str]`): list of LoRA-injectable components to unfuse LoRA from.
             unfuse_unet (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters.
             unfuse_text_encoder (`bool`, defaults to `True`):
                 Whether to unfuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the
@@ -1992,7 +1992,7 @@ def unfuse_lora(self, components: list[str] = ["transformer", "text_encoder"], *
         > [!WARNING] > This is an experimental API.
 
         Args:
-            components (`list[str]`): List of LoRA-injectable components to unfuse LoRA from.
+            components (`list[str]`): list of LoRA-injectable components to unfuse LoRA from.
         """
         transformer = getattr(self, self.transformer_name) if not hasattr(self, "transformer") else self.transformer
         if hasattr(transformer, "_transformer_norm_layers") and transformer._transformer_norm_layers:
@@ -3889,7 +3889,7 @@ def fuse_lora(
         Fuses the LoRA parameters into the original parameters of the corresponding blocks.
 
         Args:
-            components: (`list[str]`): List of LoRA-injectable components to fuse the LoRAs into.
+            components: (`list[str]`): list of LoRA-injectable components to fuse the LoRAs into.
             lora_scale (`float`, defaults to 1.0):
                 Controls how much to influence the outputs with the LoRA parameters.
             safe_fusing (`bool`, defaults to `False`):
@@ -3919,7 +3919,7 @@ def unfuse_lora(self, components: list[str] = ["transformer"], **kwargs):
         Reverses the effect of [`pipe.fuse_lora()`].
 
         Args:
-            components (`list[str]`): List of LoRA-injectable components to unfuse LoRA from.
+            components (`list[str]`): list of LoRA-injectable components to unfuse LoRA from.
         """
         super().unfuse_lora(components=components, **kwargs)
 

src/diffusers/loaders/textual_inversion.py
Lines changed: 3 additions & 3 deletions

@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from typing import List, Optional
+from typing import Optional, list
 
 import safetensors
 import torch
@@ -127,14 +127,14 @@ def maybe_convert_prompt(self, prompt: str | list[str], tokenizer: "PreTrainedTo
         Returns:
             `str` or list of `str`: The converted prompt
         """
-        if not isinstance(prompt, List):
+        if not isinstance(prompt, list):
             prompts = [prompt]
         else:
             prompts = prompt
 
         prompts = [self._maybe_convert_prompt(p, tokenizer) for p in prompts]
 
-        if not isinstance(prompt, List):
+        if not isinstance(prompt, list):
             return prompts[0]
 
         return prompts
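The second hunk shows the usual normalize-then-restore pattern for a `str | list[str]` argument. A minimal standalone sketch of that pattern, with a no-op placeholder standing in for the per-prompt conversion:

def convert_prompts(prompt, convert_one=lambda p: p):
    # Wrap a single prompt in a list so the body only deals with lists.
    if not isinstance(prompt, list):
        prompts = [prompt]
    else:
        prompts = prompt

    prompts = [convert_one(p) for p in prompts]

    # Return the same shape the caller passed in.
    if not isinstance(prompt, list):
        return prompts[0]
    return prompts


print(convert_prompts("a cat"))             # "a cat"
print(convert_prompts(["a cat", "a dog"]))  # ["a cat", "a dog"]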
