From 25033dafd3a78e9047cf7543fa2b0f5aa4ad1956 Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Wed, 24 Apr 2024 12:35:46 +0530 Subject: [PATCH 01/18] add; utility to check if attn_procs,norms,acts are properly documented. --- docs/source/en/api/attnprocessor.md | 25 ++++---- docs/source/en/api/normalization.md | 16 +++++ src/diffusers/models/normalization.py | 10 +++ utils/check_support_list.py | 90 +++++++++++++++++++++++++++ 4 files changed, 130 insertions(+), 11 deletions(-) create mode 100644 utils/check_support_list.py diff --git a/docs/source/en/api/attnprocessor.md b/docs/source/en/api/attnprocessor.md index ab89d4d260f0..c91272595666 100644 --- a/docs/source/en/api/attnprocessor.md +++ b/docs/source/en/api/attnprocessor.md @@ -20,12 +20,21 @@ An attention processor is a class for applying different types of attention mech ## AttnProcessor2_0 [[autodoc]] models.attention_processor.AttnProcessor2_0 +## FusedAttnProcessor2_0 +[[autodoc]] models.attention_processor.FusedAttnProcessor2_0 + +## XFormersAttnProcessor +[[autodoc]] models.attention_processor.XFormersAttnProcessor + ## AttnAddedKVProcessor [[autodoc]] models.attention_processor.AttnAddedKVProcessor ## AttnAddedKVProcessor2_0 [[autodoc]] models.attention_processor.AttnAddedKVProcessor2_0 +## XFormersAttnAddedKVProcessor +[[autodoc]] models.attention_processor.XFormersAttnAddedKVProcessor + ## CrossFrameAttnProcessor [[autodoc]] pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor @@ -38,20 +47,14 @@ An attention processor is a class for applying different types of attention mech ## CustomDiffusionXFormersAttnProcessor [[autodoc]] models.attention_processor.CustomDiffusionXFormersAttnProcessor -## FusedAttnProcessor2_0 -[[autodoc]] models.attention_processor.FusedAttnProcessor2_0 - -## LoRAAttnAddedKVProcessor -[[autodoc]] models.attention_processor.LoRAAttnAddedKVProcessor - -## LoRAXFormersAttnProcessor -[[autodoc]] models.attention_processor.LoRAXFormersAttnProcessor - ## SlicedAttnProcessor [[autodoc]] models.attention_processor.SlicedAttnProcessor ## SlicedAttnAddedKVProcessor [[autodoc]] models.attention_processor.SlicedAttnAddedKVProcessor -## XFormersAttnProcessor -[[autodoc]] models.attention_processor.XFormersAttnProcessor +## IPAdapterAttnProcessor +[[autodoc]] models.attention_processor.IPAdapterAttnProcessor + +## IPAdapterAttnProcessor2_0 +[[autodoc]] models.attention_processor.IPAdapterAttnProcessor2_0 diff --git a/docs/source/en/api/normalization.md b/docs/source/en/api/normalization.md index ef4b694a4d85..5ad8f0072953 100644 --- a/docs/source/en/api/normalization.md +++ b/docs/source/en/api/normalization.md @@ -29,3 +29,19 @@ Customized normalization layers for supporting various models in 🤗 Diffusers. ## AdaGroupNorm [[autodoc]] models.normalization.AdaGroupNorm + +## AdaLayerNormContinuous + +[[autodoc]] models.normalization.AdaLayerNormContinuous + +## LayerNorm + +[[autodoc]] models.normalization.LayerNorm + +## RMSNorm + +[[autodoc]] models.normalization.RMSNorm + +## GlobalResponseNorm + +[[autodoc]] models.normalization.GlobalResponseNorm diff --git a/src/diffusers/models/normalization.py b/src/diffusers/models/normalization.py index 036a66890e67..f01c5b262a7c 100644 --- a/src/diffusers/models/normalization.py +++ b/src/diffusers/models/normalization.py @@ -151,6 +151,10 @@ def forward(self, x: torch.Tensor, emb: torch.Tensor) -> torch.Tensor: class AdaLayerNormContinuous(nn.Module): + r""" + Adaptive normalization layer with a norm layer (layer_norm or rms_norm). 
+ """ + def __init__( self, embedding_dim: int, @@ -188,6 +192,8 @@ def forward(self, x: torch.Tensor, conditioning_embedding: torch.Tensor) -> torc # Has optional bias parameter compared to torch layer norm # TODO: replace with torch layernorm once min required torch version >= 2.1 class LayerNorm(nn.Module): + r"""LayerNorm with the bias parameter.""" + def __init__(self, dim, eps: float = 1e-5, elementwise_affine: bool = True, bias: bool = True): super().__init__() @@ -210,6 +216,8 @@ def forward(self, input): class RMSNorm(nn.Module): + r"""RMS Norm as introduced in https://arxiv.org/abs/1910.07467 by Zhang et al.""" + def __init__(self, dim, eps: float, elementwise_affine: bool = True): super().__init__() @@ -242,6 +250,8 @@ def forward(self, hidden_states): class GlobalResponseNorm(nn.Module): + r"""Global response normalization as introduced in ConvNeXt-v2 (https://arxiv.org/abs/2301.00808).""" + # Taken from https://github.com/facebookresearch/ConvNeXt-V2/blob/3608f67cc1dae164790c5d0aead7bf2d73d9719b/models/utils.py#L105 def __init__(self, dim): super().__init__() diff --git a/utils/check_support_list.py b/utils/check_support_list.py new file mode 100644 index 000000000000..8a4880b31dea --- /dev/null +++ b/utils/check_support_list.py @@ -0,0 +1,90 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Utility that checks that modules like attention processors are listed in the documentation file. + +```bash +python utils/check_support_list.py +``` + +It has no auto-fix mode. +""" +import os +import re + + +# All paths are set with the intent you should run this script from the root of the repo with the command +# python utils/check_doctest_list.py +REPO_PATH = "." + + +def check_attention_processor_classes(): + print(os.path.join(REPO_PATH, "docs/source/en/api/attnprocessor.md")) + with open(os.path.join(REPO_PATH, "docs/source/en/api/attnprocessor.md"), "r") as f: + doctext = f.read() + matches = re.findall(r"\[\[autodoc\]\]\s([^\n]+)", doctext) + documented_attention_processors = [match.split(".")[-1] for match in matches] + + with open(os.path.join(REPO_PATH, "src/diffusers/models/attention_processor.py"), "r") as f: + doctext = f.read() + processor_classes = re.findall(r"class\s+(\w+Processor(?:\d*_?\d*))[(:]", doctext) + processor_classes = [proc for proc in processor_classes if "LoRA" not in proc and proc != "Attention"] + + for processor in processor_classes: + if processor not in documented_attention_processors: + raise ValueError( + f"{processor} should be in listed in the attention processor documentation but is not. Please update the documentation." 
+ ) + + +def check_activations(): + with open(os.path.join(REPO_PATH, "docs/source/en/api/activations.md"), "r") as f: + doctext = f.read() + matches = re.findall(r"\[\[autodoc\]\]\s([^\n]+)", doctext) + documented_activations = [match.split(".")[-1] for match in matches] + + with open(os.path.join(REPO_PATH, "src/diffusers/models/activations.py"), "r") as f: + doctext = f.read() + activation_classes = re.findall(r"class\s+(\w+)\s*\(.*?nn\.Module.*?\):", doctext) + + for activation in activation_classes: + if activation not in documented_activations: + raise ValueError( + f"{activation} should be in listed in the activations documentation but is not. Please update the documentation." + ) + + +def check_normalizations(): + print(os.path.join(REPO_PATH, "docs/source/en/api/normalization.md")) + with open(os.path.join(REPO_PATH, "docs/source/en/api/normalization.md"), "r") as f: + doctext = f.read() + matches = re.findall(r"\[\[autodoc\]\]\s([^\n]+)", doctext) + documented_normalizations = [match.split(".")[-1] for match in matches] + + with open(os.path.join(REPO_PATH, "src/diffusers/models/normalization.py"), "r") as f: + doctext = f.read() + normalization_classes = re.findall(r"class\s+(\w+)\s*\(.*?nn\.Module.*?\):", doctext) + + for norm in normalization_classes: + if norm not in documented_normalizations: + raise ValueError( + f"{norm} should be in listed in the normalizations documentation but is not. Please update the documentation." + ) + + +if __name__ == "__main__": + check_attention_processor_classes() + check_activations() + check_normalizations() From 9398e0f04703676734c5c2c030255764078285ab Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Wed, 24 Apr 2024 12:37:09 +0530 Subject: [PATCH 02/18] add support listing to the workflows. --- .github/workflows/pr_test_peft_backend.yml | 1 + .github/workflows/pr_tests.yml | 1 + 2 files changed, 2 insertions(+) diff --git a/.github/workflows/pr_test_peft_backend.yml b/.github/workflows/pr_test_peft_backend.yml index b4915a3bf4d2..019316e82e31 100644 --- a/.github/workflows/pr_test_peft_backend.yml +++ b/.github/workflows/pr_test_peft_backend.yml @@ -55,6 +55,7 @@ jobs: run: | python utils/check_copies.py python utils/check_dummies.py + python utils/check_support_list.py make deps_table_check_updated - name: Check if failure if: ${{ failure() }} diff --git a/.github/workflows/pr_tests.yml b/.github/workflows/pr_tests.yml index b1bed6568aa4..455e894cb1c7 100644 --- a/.github/workflows/pr_tests.yml +++ b/.github/workflows/pr_tests.yml @@ -63,6 +63,7 @@ jobs: run: | python utils/check_copies.py python utils/check_dummies.py + python utils/check_support_list.py make deps_table_check_updated - name: Check if failure if: ${{ failure() }} From 57ca5bebf8f321297cbd861a92230650fa6b67e8 Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Wed, 24 Apr 2024 12:42:16 +0530 Subject: [PATCH 03/18] change to 2024. --- utils/check_support_list.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/check_support_list.py b/utils/check_support_list.py index 8a4880b31dea..5ec0877d4b52 100644 --- a/utils/check_support_list.py +++ b/utils/check_support_list.py @@ -1,5 +1,5 @@ # coding=utf-8 -# Copyright 2023 The HuggingFace Inc. team. +# Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
From b5c9aeb80d746f6689a25b63919cb5e82326ca1a Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Wed, 24 Apr 2024 16:25:55 +0530 Subject: [PATCH 04/18] small fixes. --- src/diffusers/models/normalization.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/diffusers/models/normalization.py b/src/diffusers/models/normalization.py index f01c5b262a7c..c018750fccdf 100644 --- a/src/diffusers/models/normalization.py +++ b/src/diffusers/models/normalization.py @@ -192,7 +192,9 @@ def forward(self, x: torch.Tensor, conditioning_embedding: torch.Tensor) -> torc # Has optional bias parameter compared to torch layer norm # TODO: replace with torch layernorm once min required torch version >= 2.1 class LayerNorm(nn.Module): - r"""LayerNorm with the bias parameter.""" + r""" + LayerNorm with the bias parameter. + """ def __init__(self, dim, eps: float = 1e-5, elementwise_affine: bool = True, bias: bool = True): super().__init__() @@ -216,7 +218,9 @@ def forward(self, input): class RMSNorm(nn.Module): - r"""RMS Norm as introduced in https://arxiv.org/abs/1910.07467 by Zhang et al.""" + r""" + RMS Norm as introduced in https://arxiv.org/abs/1910.07467 by Zhang et al. + """ def __init__(self, dim, eps: float, elementwise_affine: bool = True): super().__init__() @@ -250,7 +254,9 @@ def forward(self, hidden_states): class GlobalResponseNorm(nn.Module): - r"""Global response normalization as introduced in ConvNeXt-v2 (https://arxiv.org/abs/2301.00808).""" + r""" + Global response normalization as introduced in ConvNeXt-v2 (https://arxiv.org/abs/2301.00808). + """ # Taken from https://github.com/facebookresearch/ConvNeXt-V2/blob/3608f67cc1dae164790c5d0aead7bf2d73d9719b/models/utils.py#L105 def __init__(self, dim): From c6251666bdb6c30975cca12e4478bd5cd874a2e9 Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Thu, 25 Apr 2024 07:27:20 +0530 Subject: [PATCH 05/18] does adding detailed docstrings help? --- src/diffusers/models/normalization.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/src/diffusers/models/normalization.py b/src/diffusers/models/normalization.py index c018750fccdf..637a34c9f304 100644 --- a/src/diffusers/models/normalization.py +++ b/src/diffusers/models/normalization.py @@ -153,6 +153,14 @@ def forward(self, x: torch.Tensor, emb: torch.Tensor) -> torch.Tensor: class AdaLayerNormContinuous(nn.Module): r""" Adaptive normalization layer with a norm layer (layer_norm or rms_norm). + + Args: + embedding_dim (`int`): Embedding dimension to use during projection. + conditioning_embedding_dim (`int`): Dimension of the input condition. + elementwise_affine (`bool`): Boolean flag to denote if affine transformation should be applied. + eps (`int`): Epsilon factor. + bias (`bias`): Boolean flag to denote if bias should be use. + norm_type (`str`): Normalization layer to use. Values supported: "layer_norm", "rms_norm". """ def __init__( @@ -194,6 +202,12 @@ def forward(self, x: torch.Tensor, conditioning_embedding: torch.Tensor) -> torc class LayerNorm(nn.Module): r""" LayerNorm with the bias parameter. + + Args: + dim (`int`): Dimensionality to use for the parameters. + eps (`int`): Epsilon factor. + elementwise_affine (`bool`): Boolean flag to denote if affine transformation should be applied. + bias (`bias`): Boolean flag to denote if bias should be use. 
""" def __init__(self, dim, eps: float = 1e-5, elementwise_affine: bool = True, bias: bool = True): @@ -220,6 +234,11 @@ def forward(self, input): class RMSNorm(nn.Module): r""" RMS Norm as introduced in https://arxiv.org/abs/1910.07467 by Zhang et al. + + Args: + dim (`int`): Number of dimensions to use for `weights`. Only effective when `elementwise_affine` is True. + eps (`float`): Small value to use when calculating the reciprocal of the square-root. + elementwise_affine (`bool`): Boolean flag to denote if affine transformation should be applied. """ def __init__(self, dim, eps: float, elementwise_affine: bool = True): @@ -256,6 +275,9 @@ def forward(self, hidden_states): class GlobalResponseNorm(nn.Module): r""" Global response normalization as introduced in ConvNeXt-v2 (https://arxiv.org/abs/2301.00808). + + Args: + dim (`int`): Number of dimensions to use for the `gamma` and `beta`. """ # Taken from https://github.com/facebookresearch/ConvNeXt-V2/blob/3608f67cc1dae164790c5d0aead7bf2d73d9719b/models/utils.py#L105 From dac63dd73e91594dc07bc672433b660f6a6be818 Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Fri, 10 May 2024 14:54:42 +0200 Subject: [PATCH 06/18] uncomment image processor check --- utils/check_support_list.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/check_support_list.py b/utils/check_support_list.py index 1f2836e0b8b5..ac7931aa5099 100644 --- a/utils/check_support_list.py +++ b/utils/check_support_list.py @@ -99,6 +99,6 @@ def check_normalizations(): if __name__ == "__main__": check_attention_processors() - # check_image_processors() + check_image_processors() check_activations() check_normalizations() From 900cd1cb3d73e45fadd30d63573e70c718a0fbae Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Fri, 10 May 2024 15:05:53 +0200 Subject: [PATCH 07/18] quality --- utils/check_support_list.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/utils/check_support_list.py b/utils/check_support_list.py index ac7931aa5099..ddf5ef6e364b 100644 --- a/utils/check_support_list.py +++ b/utils/check_support_list.py @@ -47,6 +47,7 @@ def check_attention_processors(): f"{processor} should be in listed in the attention processor documentation but is not. Please update the documentation." ) + def check_image_processors(): with open(os.path.join(REPO_PATH, "docs/source/en/api/image_processor.md"), "r") as f: doctext = f.read() @@ -63,6 +64,7 @@ def check_image_processors(): f"{processor} should be in listed in the image processor documentation but is not. Please update the documentation." ) + def check_activations(): with open(os.path.join(REPO_PATH, "docs/source/en/api/activations.md"), "r") as f: doctext = f.read() From 844918670ee9adbce7205b49ba3341acba47de70 Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Tue, 14 May 2024 21:27:07 +0530 Subject: [PATCH 08/18] fix, thanks to @mishig. --- docs/source/en/api/normalization.md | 4 ---- utils/check_support_list.py | 2 ++ 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/docs/source/en/api/normalization.md b/docs/source/en/api/normalization.md index 5ad8f0072953..2ef9772508a6 100644 --- a/docs/source/en/api/normalization.md +++ b/docs/source/en/api/normalization.md @@ -34,10 +34,6 @@ Customized normalization layers for supporting various models in 🤗 Diffusers. 
[[autodoc]] models.normalization.AdaLayerNormContinuous -## LayerNorm - -[[autodoc]] models.normalization.LayerNorm - ## RMSNorm [[autodoc]] models.normalization.RMSNorm diff --git a/utils/check_support_list.py b/utils/check_support_list.py index ddf5ef6e364b..445312f36d7e 100644 --- a/utils/check_support_list.py +++ b/utils/check_support_list.py @@ -91,6 +91,8 @@ def check_normalizations(): with open(os.path.join(REPO_PATH, "src/diffusers/models/normalization.py"), "r") as f: doctext = f.read() normalization_classes = re.findall(r"class\s+(\w+)\s*\(.*?nn\.Module.*?\):", doctext) + # LayerNorm is an exception because adding doc for is confusing. + normalization_classes = [norm for norm in normalization_classes if norm != "LayerNorm"] for norm in normalization_classes: if norm not in documented_normalizations: From af2370ba1da755be6258cdec6e63836c43ab7be0 Mon Sep 17 00:00:00 2001 From: Sayak Paul Date: Wed, 15 May 2024 08:07:46 +0530 Subject: [PATCH 09/18] Apply suggestions from code review Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> --- src/diffusers/models/normalization.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/diffusers/models/normalization.py b/src/diffusers/models/normalization.py index 637a34c9f304..8557b0d4c7ba 100644 --- a/src/diffusers/models/normalization.py +++ b/src/diffusers/models/normalization.py @@ -157,10 +157,10 @@ class AdaLayerNormContinuous(nn.Module): Args: embedding_dim (`int`): Embedding dimension to use during projection. conditioning_embedding_dim (`int`): Dimension of the input condition. - elementwise_affine (`bool`): Boolean flag to denote if affine transformation should be applied. - eps (`int`): Epsilon factor. - bias (`bias`): Boolean flag to denote if bias should be use. - norm_type (`str`): Normalization layer to use. Values supported: "layer_norm", "rms_norm". + elementwise_affine (`bool`, defaults to `True`): Boolean flag to denote if affine transformation should be applied. + eps (`float`, defaults to 1e-5): Epsilon factor. + bias (`bias`, defaults to `True`): Boolean flag to denote if bias should be use. + norm_type (`str`, defaults to `"layer_norm"`): Normalization layer to use. Values supported: "layer_norm", "rms_norm". """ def __init__( @@ -205,9 +205,9 @@ class LayerNorm(nn.Module): Args: dim (`int`): Dimensionality to use for the parameters. - eps (`int`): Epsilon factor. - elementwise_affine (`bool`): Boolean flag to denote if affine transformation should be applied. - bias (`bias`): Boolean flag to denote if bias should be use. + eps (`float`, defaults to 1e-5): Epsilon factor. + elementwise_affine (`bool`, defaults to `True`): Boolean flag to denote if affine transformation should be applied. + bias (`bias`, defaults to `True`): Boolean flag to denote if bias should be use. """ def __init__(self, dim, eps: float = 1e-5, elementwise_affine: bool = True, bias: bool = True): @@ -238,7 +238,7 @@ class RMSNorm(nn.Module): Args: dim (`int`): Number of dimensions to use for `weights`. Only effective when `elementwise_affine` is True. eps (`float`): Small value to use when calculating the reciprocal of the square-root. - elementwise_affine (`bool`): Boolean flag to denote if affine transformation should be applied. + elementwise_affine (`bool`, defaults to `True`): Boolean flag to denote if affine transformation should be applied. 
""" def __init__(self, dim, eps: float, elementwise_affine: bool = True): From 15b2f5755e28e73858cffe1e850b353188e0e8ed Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Wed, 15 May 2024 13:16:55 +0530 Subject: [PATCH 10/18] style --- src/diffusers/models/normalization.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/diffusers/models/normalization.py b/src/diffusers/models/normalization.py index 8557b0d4c7ba..e7484aade163 100644 --- a/src/diffusers/models/normalization.py +++ b/src/diffusers/models/normalization.py @@ -157,10 +157,12 @@ class AdaLayerNormContinuous(nn.Module): Args: embedding_dim (`int`): Embedding dimension to use during projection. conditioning_embedding_dim (`int`): Dimension of the input condition. - elementwise_affine (`bool`, defaults to `True`): Boolean flag to denote if affine transformation should be applied. + elementwise_affine (`bool`, defaults to `True`): + Boolean flag to denote if affine transformation should be applied. eps (`float`, defaults to 1e-5): Epsilon factor. bias (`bias`, defaults to `True`): Boolean flag to denote if bias should be use. - norm_type (`str`, defaults to `"layer_norm"`): Normalization layer to use. Values supported: "layer_norm", "rms_norm". + norm_type (`str`, defaults to `"layer_norm"`): + Normalization layer to use. Values supported: "layer_norm", "rms_norm". """ def __init__( @@ -206,7 +208,8 @@ class LayerNorm(nn.Module): Args: dim (`int`): Dimensionality to use for the parameters. eps (`float`, defaults to 1e-5): Epsilon factor. - elementwise_affine (`bool`, defaults to `True`): Boolean flag to denote if affine transformation should be applied. + elementwise_affine (`bool`, defaults to `True`): + Boolean flag to denote if affine transformation should be applied. bias (`bias`, defaults to `True`): Boolean flag to denote if bias should be use. """ @@ -238,7 +241,8 @@ class RMSNorm(nn.Module): Args: dim (`int`): Number of dimensions to use for `weights`. Only effective when `elementwise_affine` is True. eps (`float`): Small value to use when calculating the reciprocal of the square-root. - elementwise_affine (`bool`, defaults to `True`): Boolean flag to denote if affine transformation should be applied. + elementwise_affine (`bool`, defaults to `True`): + Boolean flag to denote if affine transformation should be applied. 
""" def __init__(self, dim, eps: float, elementwise_affine: bool = True): From 4227392d31e2209b0c7535e20852f1977a9c98f0 Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Sun, 8 Dec 2024 13:56:38 +0530 Subject: [PATCH 11/18] JointAttnProcessor2_0 --- docs/source/en/api/attnprocessor.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/source/en/api/attnprocessor.md b/docs/source/en/api/attnprocessor.md index 1b5a9b623dfc..dc72d8913a1c 100644 --- a/docs/source/en/api/attnprocessor.md +++ b/docs/source/en/api/attnprocessor.md @@ -64,3 +64,6 @@ An attention processor is a class for applying different types of attention mech ## AttnProcessorNPU [[autodoc]] models.attention_processor.AttnProcessorNPU + +## JointAttnProcessor2_0 +[[autodoc]] models.attention_processor.JointAttnProcessor2_0 From 0034db2c458c3a87c1ac1f12baa2138c2792cf23 Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Sun, 8 Dec 2024 14:10:24 +0530 Subject: [PATCH 12/18] fixes --- docs/source/en/api/activations.md | 9 ++++ docs/source/en/api/attnprocessor.md | 73 +++++++++++++++++++++++++++++ docs/source/en/api/normalization.md | 25 ++++++++++ utils/check_support_list.py | 34 ++++++++++---- 4 files changed, 131 insertions(+), 10 deletions(-) diff --git a/docs/source/en/api/activations.md b/docs/source/en/api/activations.md index 3bef28a5ab0d..16fbf5057f96 100644 --- a/docs/source/en/api/activations.md +++ b/docs/source/en/api/activations.md @@ -25,3 +25,12 @@ Customized activation functions for supporting various models in 🤗 Diffusers. ## ApproximateGELU [[autodoc]] models.activations.ApproximateGELU + + +## SwiGLU + +[[autodoc]] models.activations.SwiGLU + +## FP32SiLU + +[[autodoc]] models.activations.FP32SiLU diff --git a/docs/source/en/api/attnprocessor.md b/docs/source/en/api/attnprocessor.md index dc72d8913a1c..0f0e92a51ff8 100644 --- a/docs/source/en/api/attnprocessor.md +++ b/docs/source/en/api/attnprocessor.md @@ -67,3 +67,76 @@ An attention processor is a class for applying different types of attention mech ## JointAttnProcessor2_0 [[autodoc]] models.attention_processor.JointAttnProcessor2_0 + +## JointAttnProcessor2_0 +[[autodoc]] models.attention_processor.PAGJointAttnProcessor2_0 + +## PAGCFGJointAttnProcessor2_0 +[[autodoc]] models.attention_processor.PAGCFGJointAttnProcessor2_0 + + +## FusedJointAttnProcessor2_0 +[[autodoc]] models.attention_processor.FusedJointAttnProcessor2_0 + +## AllegroAttnProcessor2_0 +[[autodoc]] models.attention_processor.AllegroAttnProcessor2_0 + +## AuraFlowAttnProcessor2_0 +[[autodoc]] models.attention_processor.AuraFlowAttnProcessor2_0 + +## MochiVaeAttnProcessor2_0 +[[autodoc]] models.attention_processor.MochiVaeAttnProcessor2_0 + +## PAGCFGIdentitySelfAttnProcessor2_0 +[[autodoc]] models.attention_processor.PAGCFGIdentitySelfAttnProcessor2_0 + +## FusedAuraFlowAttnProcessor2_0 +[[autodoc]] models.attention_processor.FusedAuraFlowAttnProcessor2_0 + +## FusedFluxAttnProcessor2_0 +[[autodoc]] models.attention_processor.FusedFluxAttnProcessor2_0 + +## SanaMultiscaleAttnProcessor2_0 +[[autodoc]] models.attention_processor.SanaMultiscaleAttnProcessor2_0 + +## PAGHunyuanAttnProcessor2_0 +[[autodoc]] models.attention_processor.PAGHunyuanAttnProcessor2_0 + +## HunyuanAttnProcessor2_0 +[[autodoc]] models.attention_processor.HunyuanAttnProcessor2_0 + +## FluxAttnProcessor2_0 +[[autodoc]] models.attention_processor.FluxAttnProcessor2_0 + +## PAGIdentitySelfAttnProcessor2_0 +[[autodoc]] models.attention_processor.PAGIdentitySelfAttnProcessor2_0 + +## FusedCogVideoXAttnProcessor2_0 +[[autodoc]] 
models.attention_processor.FusedCogVideoXAttnProcessor2_0 + +## MochiAttnProcessor2_0 +[[autodoc]] models.attention_processor.MochiAttnProcessor2_0 + +## StableAudioAttnProcessor2_0 +[[autodoc]] models.attention_processor.StableAudioAttnProcessor2_0 + +## XLAFlashAttnProcessor2_0 +[[autodoc]] models.attention_processor.XLAFlashAttnProcessor2_0 + +## FusedHunyuanAttnProcessor2_0 +[[autodoc]] models.attention_processor.FusedHunyuanAttnProcessor2_0 + +## IPAdapterXFormersAttnProcessor +[[autodoc]] models.attention_processor.IPAdapterXFormersAttnProcessor + +## LuminaAttnProcessor2_0 +[[autodoc]] models.attention_processor.LuminaAttnProcessor2_0 + +## PAGCFGHunyuanAttnProcessor2_0 +[[autodoc]] models.attention_processor.PAGCFGHunyuanAttnProcessor2_0 + +## FluxSingleAttnProcessor2_0 +[[autodoc]] models.attention_processor.FluxSingleAttnProcessor2_0 + +## CogVideoXAttnProcessor2_0 +[[autodoc]] models.attention_processor.CogVideoXAttnProcessor2_0 diff --git a/docs/source/en/api/normalization.md b/docs/source/en/api/normalization.md index 2ef9772508a6..cdd4d744f8a2 100644 --- a/docs/source/en/api/normalization.md +++ b/docs/source/en/api/normalization.md @@ -41,3 +41,28 @@ Customized normalization layers for supporting various models in 🤗 Diffusers. ## GlobalResponseNorm [[autodoc]] models.normalization.GlobalResponseNorm + + +## LuminaLayerNormContinuous +[[autodoc]] models.normalization.LuminaLayerNormContinuous + +## SD35AdaLayerNormZeroX +[[autodoc]] models.normalization.SD35AdaLayerNormZeroX + +## AdaLayerNormZeroSingle +[[autodoc]] models.normalization.AdaLayerNormZeroSingle + +## LuminaRMSNormZero +[[autodoc]] models.normalization.LuminaRMSNormZero + +## LpNorm +[[autodoc]] models.normalization.LpNorm + +## CogView3PlusAdaLayerNormZeroTextImage +[[autodoc]] models.normalization.CogView3PlusAdaLayerNormZeroTextImage + +## CogVideoXLayerNormZero +[[autodoc]] models.normalization.CogVideoXLayerNormZero + +## MochiRMSNormZero +[[autodoc]] models.normalization.MochiRMSNormZero \ No newline at end of file diff --git a/utils/check_support_list.py b/utils/check_support_list.py index 445312f36d7e..56037955f98d 100644 --- a/utils/check_support_list.py +++ b/utils/check_support_list.py @@ -41,11 +41,15 @@ def check_attention_processors(): processor_classes = re.findall(r"class\s+(\w+Processor(?:\d*_?\d*))[(:]", doctext) processor_classes = [proc for proc in processor_classes if "LoRA" not in proc and proc != "Attention"] + undocumented_attn_processors = set() for processor in processor_classes: if processor not in documented_attention_processors: - raise ValueError( - f"{processor} should be in listed in the attention processor documentation but is not. Please update the documentation." - ) + undocumented_attn_processors.add(processor) + + if undocumented_attn_processors: + raise ValueError( + f"The following attention processors should be in listed in the attention processor documentation but are not: {list(undocumented_attn_processors)}. Please update the documentation." + ) def check_image_processors(): @@ -58,10 +62,12 @@ def check_image_processors(): doctext = f.read() processor_classes = re.findall(r"class\s+(\w+Processor(?:\d*_?\d*))[(:]", doctext) + undocumented_img_processors = set() for processor in processor_classes: if processor not in documented_image_processors: + undocumented_img_processors.add(processor) raise ValueError( - f"{processor} should be in listed in the image processor documentation but is not. Please update the documentation." 
+ f"The following image processors should be in listed in the image processor documentation but are not: {list(undocumented_img_processors)}. Please update the documentation." ) @@ -75,11 +81,15 @@ def check_activations(): doctext = f.read() activation_classes = re.findall(r"class\s+(\w+)\s*\(.*?nn\.Module.*?\):", doctext) + undocumented_activations = set() for activation in activation_classes: if activation not in documented_activations: - raise ValueError( - f"{activation} should be in listed in the activations documentation but is not. Please update the documentation." - ) + undocumented_activations.add(activation) + + if undocumented_activations: + raise ValueError( + f"The following activations should be in listed in the activations documentation but are not: {list(undocumented_activations)}. Please update the documentation." + ) def check_normalizations(): @@ -94,11 +104,15 @@ def check_normalizations(): # LayerNorm is an exception because adding doc for is confusing. normalization_classes = [norm for norm in normalization_classes if norm != "LayerNorm"] + undocumented_norms = set() for norm in normalization_classes: if norm not in documented_normalizations: - raise ValueError( - f"{norm} should be in listed in the normalizations documentation but is not. Please update the documentation." - ) + undocumented_norms.add(norm) + + if undocumented_norms: + raise ValueError( + f"The following norms should be in listed in the normalizations documentation but are not: {list(undocumented_norms)}. Please update the documentation." + ) if __name__ == "__main__": From 005a2e91405a550ed23549049d276e2fac888d7b Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Tue, 17 Dec 2024 07:28:42 +0530 Subject: [PATCH 13/18] fixes --- docs/source/en/api/attnprocessor.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/source/en/api/attnprocessor.md b/docs/source/en/api/attnprocessor.md index 566ca8aeee08..36d3499f6490 100644 --- a/docs/source/en/api/attnprocessor.md +++ b/docs/source/en/api/attnprocessor.md @@ -247,3 +247,7 @@ An attention processor is a class for applying different types of attention mech ## XLAFlashAttnProcessor2_0 [[autodoc]] models.attention_processor.XLAFlashAttnProcessor2_0 + +## XFormersJointAttnProcessor + +[[autodoc]] models.attention_processor.XFormersJointAttnProcessor From b653eaa7e4899c972c8e527147a250a6d96a3707 Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Tue, 17 Dec 2024 07:34:37 +0530 Subject: [PATCH 14/18] fixes --- docs/source/en/api/activations.md | 4 ++++ utils/check_support_list.py | 22 ++++++++++++++++++++++ 2 files changed, 26 insertions(+) diff --git a/docs/source/en/api/activations.md b/docs/source/en/api/activations.md index 16fbf5057f96..140a2ae1a1b2 100644 --- a/docs/source/en/api/activations.md +++ b/docs/source/en/api/activations.md @@ -34,3 +34,7 @@ Customized activation functions for supporting various models in 🤗 Diffusers. 
## FP32SiLU [[autodoc]] models.activations.FP32SiLU + +## LinearActivation + +[[autodoc]] models.activations.LinearActivation diff --git a/utils/check_support_list.py b/utils/check_support_list.py index 56037955f98d..fad6f2f5fc4f 100644 --- a/utils/check_support_list.py +++ b/utils/check_support_list.py @@ -115,8 +115,30 @@ def check_normalizations(): ) +def check_lora_mixins(): + with open(os.path.join(REPO_PATH, "docs/source/en/api/loaders/lora.md"), "r") as f: + doctext = f.read() + matches = re.findall(r"\[\[autodoc\]\]\s([^\n]+)", doctext) + documented_loras = [match.split(".")[-1] for match in matches] + + with open(os.path.join(REPO_PATH, "src/diffusers/loaders/lora_pipeline.py"), "r") as f: + doctext = f.read() + lora_classes = re.findall(r"class\s+(\w+)\s*\(.*?nn\.Module.*?\):", doctext) + + undocumented_loras = set() + for lora in lora_classes: + if lora not in documented_loras: + undocumented_loras.add(lora) + + if undocumented_loras: + raise ValueError( + f"The following LoRA mixins should be in listed in the LoRA loader documentation but are not: {list(undocumented_loras)}. Please update the documentation." + ) + + if __name__ == "__main__": check_attention_processors() check_image_processors() check_activations() check_normalizations() + check_lora_mixins() From 75136e6f9912c782b078420c97c02b2f2c5dd9e5 Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Tue, 17 Dec 2024 07:42:44 +0530 Subject: [PATCH 15/18] fixes --- docs/source/en/api/attnprocessor.md | 106 ++-------------------------- 1 file changed, 4 insertions(+), 102 deletions(-) diff --git a/docs/source/en/api/attnprocessor.md b/docs/source/en/api/attnprocessor.md index 36d3499f6490..8abdc32f8e57 100644 --- a/docs/source/en/api/attnprocessor.md +++ b/docs/source/en/api/attnprocessor.md @@ -20,29 +20,11 @@ An attention processor is a class for applying different types of attention mech [[autodoc]] models.attention_processor.AttnProcessor2_0 -<<<<<<< HEAD -## FusedAttnProcessor2_0 -[[autodoc]] models.attention_processor.FusedAttnProcessor2_0 - -## XFormersAttnProcessor -[[autodoc]] models.attention_processor.XFormersAttnProcessor - -## AttnAddedKVProcessor -======= ->>>>>>> main [[autodoc]] models.attention_processor.AttnAddedKVProcessor [[autodoc]] models.attention_processor.AttnAddedKVProcessor2_0 -<<<<<<< HEAD -## XFormersAttnAddedKVProcessor -[[autodoc]] models.attention_processor.XFormersAttnAddedKVProcessor - -## CrossFrameAttnProcessor -[[autodoc]] pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor -======= [[autodoc]] models.attention_processor.AttnProcessorNPU ->>>>>>> main [[autodoc]] models.attention_processor.FusedAttnProcessor2_0 @@ -154,90 +136,6 @@ An attention processor is a class for applying different types of attention mech [[autodoc]] models.attention_processor.SlicedAttnAddedKVProcessor -## IPAdapterAttnProcessor -[[autodoc]] models.attention_processor.IPAdapterAttnProcessor - -## IPAdapterAttnProcessor2_0 -[[autodoc]] models.attention_processor.IPAdapterAttnProcessor2_0 - -## AttnProcessorNPU -[[autodoc]] models.attention_processor.AttnProcessorNPU - -## JointAttnProcessor2_0 -[[autodoc]] models.attention_processor.JointAttnProcessor2_0 - -## JointAttnProcessor2_0 -[[autodoc]] models.attention_processor.PAGJointAttnProcessor2_0 - -## PAGCFGJointAttnProcessor2_0 -[[autodoc]] models.attention_processor.PAGCFGJointAttnProcessor2_0 - - -## FusedJointAttnProcessor2_0 -[[autodoc]] models.attention_processor.FusedJointAttnProcessor2_0 - -## AllegroAttnProcessor2_0 -[[autodoc]] 
models.attention_processor.AllegroAttnProcessor2_0 - -## AuraFlowAttnProcessor2_0 -[[autodoc]] models.attention_processor.AuraFlowAttnProcessor2_0 - -## MochiVaeAttnProcessor2_0 -[[autodoc]] models.attention_processor.MochiVaeAttnProcessor2_0 - -## PAGCFGIdentitySelfAttnProcessor2_0 -[[autodoc]] models.attention_processor.PAGCFGIdentitySelfAttnProcessor2_0 - -## FusedAuraFlowAttnProcessor2_0 -[[autodoc]] models.attention_processor.FusedAuraFlowAttnProcessor2_0 - -## FusedFluxAttnProcessor2_0 -[[autodoc]] models.attention_processor.FusedFluxAttnProcessor2_0 - -## SanaMultiscaleAttnProcessor2_0 -[[autodoc]] models.attention_processor.SanaMultiscaleAttnProcessor2_0 - -## PAGHunyuanAttnProcessor2_0 -[[autodoc]] models.attention_processor.PAGHunyuanAttnProcessor2_0 - -## HunyuanAttnProcessor2_0 -[[autodoc]] models.attention_processor.HunyuanAttnProcessor2_0 - -## FluxAttnProcessor2_0 -[[autodoc]] models.attention_processor.FluxAttnProcessor2_0 - -## PAGIdentitySelfAttnProcessor2_0 -[[autodoc]] models.attention_processor.PAGIdentitySelfAttnProcessor2_0 - -## FusedCogVideoXAttnProcessor2_0 -[[autodoc]] models.attention_processor.FusedCogVideoXAttnProcessor2_0 - -## MochiAttnProcessor2_0 -[[autodoc]] models.attention_processor.MochiAttnProcessor2_0 - -## StableAudioAttnProcessor2_0 -[[autodoc]] models.attention_processor.StableAudioAttnProcessor2_0 - -## XLAFlashAttnProcessor2_0 -[[autodoc]] models.attention_processor.XLAFlashAttnProcessor2_0 - -## FusedHunyuanAttnProcessor2_0 -[[autodoc]] models.attention_processor.FusedHunyuanAttnProcessor2_0 - -## IPAdapterXFormersAttnProcessor -[[autodoc]] models.attention_processor.IPAdapterXFormersAttnProcessor - -## LuminaAttnProcessor2_0 -[[autodoc]] models.attention_processor.LuminaAttnProcessor2_0 - -## PAGCFGHunyuanAttnProcessor2_0 -[[autodoc]] models.attention_processor.PAGCFGHunyuanAttnProcessor2_0 - -## FluxSingleAttnProcessor2_0 -[[autodoc]] models.attention_processor.FluxSingleAttnProcessor2_0 - -## CogVideoXAttnProcessor2_0 -[[autodoc]] models.attention_processor.CogVideoXAttnProcessor2_0 ## XFormersAttnProcessor [[autodoc]] models.attention_processor.XFormersAttnProcessor @@ -251,3 +149,7 @@ An attention processor is a class for applying different types of attention mech ## XFormersJointAttnProcessor [[autodoc]] models.attention_processor.XFormersJointAttnProcessor + +## IPAdapterXFormersAttnProcessor + +[[autodoc]] models.attention_processor.IPAdapterXFormersAttnProcessor \ No newline at end of file From 7eb617af0abccaf279ac54d3d7d926c74c5a77d1 Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Tue, 17 Dec 2024 08:28:26 +0530 Subject: [PATCH 16/18] fixes --- tests/others/test_check_support_list.py | 68 ++++++++ utils/check_support_list.py | 202 +++++++++++------------- 2 files changed, 159 insertions(+), 111 deletions(-) create mode 100644 tests/others/test_check_support_list.py diff --git a/tests/others/test_check_support_list.py b/tests/others/test_check_support_list.py new file mode 100644 index 000000000000..0f6b134aad49 --- /dev/null +++ b/tests/others/test_check_support_list.py @@ -0,0 +1,68 @@ +import os +import sys +import unittest +from unittest.mock import mock_open, patch + + +git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) +sys.path.append(os.path.join(git_repo_path, "utils")) + +from check_support_list import check_documentation # noqa: E402 + + +class TestCheckSupportList(unittest.TestCase): + def setUp(self): + # Mock doc and source contents that we can reuse + self.doc_content = """# 
Documentation +## FooProcessor + +[[autodoc]] module.FooProcessor + +## BarProcessor + +[[autodoc]] module.BarProcessor +""" + self.source_content = """ +class FooProcessor(nn.Module): + pass + +class BarProcessor(nn.Module): + pass +""" + + def test_check_documentation_all_documented(self): + # In this test, both FooProcessor and BarProcessor are documented + with patch("builtins.open", mock_open(read_data=self.doc_content)) as doc_file: + doc_file.side_effect = [ + mock_open(read_data=self.doc_content).return_value, + mock_open(read_data=self.source_content).return_value, + ] + + undocumented = check_documentation( + doc_path="fake_doc.md", + src_path="fake_source.py", + doc_regex=r"\[\[autodoc\]\]\s([^\n]+)", + src_regex=r"class\s+(\w+Processor)\(.*?nn\.Module.*?\):", + ) + self.assertEqual(len(undocumented), 0, f"Expected no undocumented classes, got {undocumented}") + + def test_check_documentation_missing_class(self): + # In this test, only FooProcessor is documented, but BarProcessor is missing from the docs + doc_content_missing = """# Documentation +## FooProcessor + +[[autodoc]] module.FooProcessor +""" + with patch("builtins.open", mock_open(read_data=doc_content_missing)) as doc_file: + doc_file.side_effect = [ + mock_open(read_data=doc_content_missing).return_value, + mock_open(read_data=self.source_content).return_value, + ] + + undocumented = check_documentation( + doc_path="fake_doc.md", + src_path="fake_source.py", + doc_regex=r"\[\[autodoc\]\]\s([^\n]+)", + src_regex=r"class\s+(\w+Processor)\(.*?nn\.Module.*?\):", + ) + self.assertIn("BarProcessor", undocumented, f"BarProcessor should be undocumented, got {undocumented}") diff --git a/utils/check_support_list.py b/utils/check_support_list.py index fad6f2f5fc4f..89cfce62de0b 100644 --- a/utils/check_support_list.py +++ b/utils/check_support_list.py @@ -7,11 +7,6 @@ # # http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. """ Utility that checks that modules like attention processors are listed in the documentation file. @@ -21,124 +16,109 @@ It has no auto-fix mode. """ + import os import re -# All paths are set with the intent you should run this script from the root of the repo with the command -# python utils/check_doctest_list.py +# All paths are set with the intent that you run this script from the root of the repo REPO_PATH = "." 
-def check_attention_processors(): - with open(os.path.join(REPO_PATH, "docs/source/en/api/attnprocessor.md"), "r") as f: - doctext = f.read() - matches = re.findall(r"\[\[autodoc\]\]\s([^\n]+)", doctext) - documented_attention_processors = [match.split(".")[-1] for match in matches] - - with open(os.path.join(REPO_PATH, "src/diffusers/models/attention_processor.py"), "r") as f: - doctext = f.read() - processor_classes = re.findall(r"class\s+(\w+Processor(?:\d*_?\d*))[(:]", doctext) - processor_classes = [proc for proc in processor_classes if "LoRA" not in proc and proc != "Attention"] - - undocumented_attn_processors = set() - for processor in processor_classes: - if processor not in documented_attention_processors: - undocumented_attn_processors.add(processor) - - if undocumented_attn_processors: - raise ValueError( - f"The following attention processors should be in listed in the attention processor documentation but are not: {list(undocumented_attn_processors)}. Please update the documentation." - ) - - -def check_image_processors(): - with open(os.path.join(REPO_PATH, "docs/source/en/api/image_processor.md"), "r") as f: - doctext = f.read() - matches = re.findall(r"\[\[autodoc\]\]\s([^\n]+)", doctext) - documented_image_processors = [match.split(".")[-1] for match in matches] - - with open(os.path.join(REPO_PATH, "src/diffusers/image_processor.py"), "r") as f: +def read_documented_classes(doc_path, autodoc_regex=r"\[\[autodoc\]\]\s([^\n]+)"): + """ + Reads documented classes from a doc file using a regex to find lines like [[autodoc]] my.module.Class. + Returns a list of documented class names (just the class name portion). + """ + with open(os.path.join(REPO_PATH, doc_path), "r") as f: doctext = f.read() - processor_classes = re.findall(r"class\s+(\w+Processor(?:\d*_?\d*))[(:]", doctext) - - undocumented_img_processors = set() - for processor in processor_classes: - if processor not in documented_image_processors: - undocumented_img_processors.add(processor) - raise ValueError( - f"The following image processors should be in listed in the image processor documentation but are not: {list(undocumented_img_processors)}. Please update the documentation." - ) - - -def check_activations(): - with open(os.path.join(REPO_PATH, "docs/source/en/api/activations.md"), "r") as f: + matches = re.findall(autodoc_regex, doctext) + return [match.split(".")[-1] for match in matches] + + +def read_source_classes(src_path, class_regex, exclude_conditions=None): + """ + Reads class names from a source file using a regex that captures class definitions. + Optionally exclude classes based on a list of conditions (functions that take class name and return bool). 
+ """ + if exclude_conditions is None: + exclude_conditions = [] + with open(os.path.join(REPO_PATH, src_path), "r") as f: doctext = f.read() - matches = re.findall(r"\[\[autodoc\]\]\s([^\n]+)", doctext) - documented_activations = [match.split(".")[-1] for match in matches] + classes = re.findall(class_regex, doctext) + # Filter out classes that meet any of the exclude conditions + filtered_classes = [c for c in classes if not any(cond(c) for cond in exclude_conditions)] + return filtered_classes - with open(os.path.join(REPO_PATH, "src/diffusers/models/activations.py"), "r") as f: - doctext = f.read() - activation_classes = re.findall(r"class\s+(\w+)\s*\(.*?nn\.Module.*?\):", doctext) - undocumented_activations = set() - for activation in activation_classes: - if activation not in documented_activations: - undocumented_activations.add(activation) +def check_documentation(doc_path, src_path, doc_regex, src_regex, exclude_conditions=None): + """ + Generic function to check if all classes defined in `src_path` are documented in `doc_path`. + Returns a set of undocumented class names. + """ + documented = set(read_documented_classes(doc_path, doc_regex)) + source_classes = set(read_source_classes(src_path, src_regex, exclude_conditions=exclude_conditions)) - if undocumented_activations: - raise ValueError( - f"The following activations should be in listed in the activations documentation but are not: {list(undocumented_activations)}. Please update the documentation." - ) - - -def check_normalizations(): - with open(os.path.join(REPO_PATH, "docs/source/en/api/normalization.md"), "r") as f: - doctext = f.read() - matches = re.findall(r"\[\[autodoc\]\]\s([^\n]+)", doctext) - documented_normalizations = [match.split(".")[-1] for match in matches] - - with open(os.path.join(REPO_PATH, "src/diffusers/models/normalization.py"), "r") as f: - doctext = f.read() - normalization_classes = re.findall(r"class\s+(\w+)\s*\(.*?nn\.Module.*?\):", doctext) - # LayerNorm is an exception because adding doc for is confusing. - normalization_classes = [norm for norm in normalization_classes if norm != "LayerNorm"] - - undocumented_norms = set() - for norm in normalization_classes: - if norm not in documented_normalizations: - undocumented_norms.add(norm) - - if undocumented_norms: - raise ValueError( - f"The following norms should be in listed in the normalizations documentation but are not: {list(undocumented_norms)}. Please update the documentation." - ) - - -def check_lora_mixins(): - with open(os.path.join(REPO_PATH, "docs/source/en/api/loaders/lora.md"), "r") as f: - doctext = f.read() - matches = re.findall(r"\[\[autodoc\]\]\s([^\n]+)", doctext) - documented_loras = [match.split(".")[-1] for match in matches] - - with open(os.path.join(REPO_PATH, "src/diffusers/loaders/lora_pipeline.py"), "r") as f: - doctext = f.read() - lora_classes = re.findall(r"class\s+(\w+)\s*\(.*?nn\.Module.*?\):", doctext) - - undocumented_loras = set() - for lora in lora_classes: - if lora not in documented_loras: - undocumented_loras.add(lora) - - if undocumented_loras: - raise ValueError( - f"The following LoRA mixins should be in listed in the LoRA loader documentation but are not: {list(undocumented_loras)}. Please update the documentation." - ) + # Find which classes in source are not documented in a deterministic way. 
+ undocumented = sorted(source_classes - documented) + return undocumented if __name__ == "__main__": - check_attention_processors() - check_image_processors() - check_activations() - check_normalizations() - check_lora_mixins() + # Define the checks we need to perform + checks = { + "Attention Processors": { + "doc_path": "docs/source/en/api/attnprocessor.md", + "src_path": "src/diffusers/models/attention_processor.py", + "doc_regex": r"\[\[autodoc\]\]\s([^\n]+)", + "src_regex": r"class\s+(\w+Processor(?:\d*_?\d*))[:(]", + "exclude_conditions": [lambda c: "LoRA" in c, lambda c: c == "Attention"], + }, + "Image Processors": { + "doc_path": "docs/source/en/api/image_processor.md", + "src_path": "src/diffusers/image_processor.py", + "doc_regex": r"\[\[autodoc\]\]\s([^\n]+)", + "src_regex": r"class\s+(\w+Processor(?:\d*_?\d*))[:(]", + }, + "Activations": { + "doc_path": "docs/source/en/api/activations.md", + "src_path": "src/diffusers/models/activations.py", + "doc_regex": r"\[\[autodoc\]\]\s([^\n]+)", + "src_regex": r"class\s+(\w+)\s*\(.*?nn\.Module.*?\):", + }, + "Normalizations": { + "doc_path": "docs/source/en/api/normalization.md", + "src_path": "src/diffusers/models/normalization.py", + "doc_regex": r"\[\[autodoc\]\]\s([^\n]+)", + "src_regex": r"class\s+(\w+)\s*\(.*?nn\.Module.*?\):", + "exclude_conditions": [ + # Exclude LayerNorm as it's an intentional exception + lambda c: c == "LayerNorm" + ], + }, + "LoRA Mixins": { + "doc_path": "docs/source/en/api/loaders/lora.md", + "src_path": "src/diffusers/loaders/lora_pipeline.py", + "doc_regex": r"\[\[autodoc\]\]\s([^\n]+)", + "src_regex": r"class\s+(\w+)\s*\(.*?nn\.Module.*?\):", + }, + } + + missing_items = {} + for category, params in checks.items(): + undocumented = check_documentation( + doc_path=params["doc_path"], + src_path=params["src_path"], + doc_regex=params["doc_regex"], + src_regex=params["src_regex"], + exclude_conditions=params.get("exclude_conditions"), + ) + if undocumented: + missing_items[category] = undocumented + + # If we have any missing items, raise a single combined error + if missing_items: + error_msg = ["Some classes are not documented properly:\n"] + for category, classes in missing_items.items(): + error_msg.append(f"- {category}: {', '.join(sorted(classes))}") + raise ValueError("\n".join(error_msg)) From 53a336110ba996b29e828b1e3ac808140dd757d9 Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Thu, 20 Feb 2025 11:57:50 +0530 Subject: [PATCH 17/18] fixes --- docs/source/en/api/attnprocessor.md | 11 ++++++++++- docs/source/en/api/normalization.md | 5 ++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/docs/source/en/api/attnprocessor.md b/docs/source/en/api/attnprocessor.md index b07f6feeb1aa..638ecb973e5d 100644 --- a/docs/source/en/api/attnprocessor.md +++ b/docs/source/en/api/attnprocessor.md @@ -154,4 +154,13 @@ An attention processor is a class for applying different types of attention mech ## IPAdapterXFormersAttnProcessor -[[autodoc]] models.attention_processor.IPAdapterXFormersAttnProcessor \ No newline at end of file +[[autodoc]] models.attention_processor.IPAdapterXFormersAttnProcessor + +## FluxIPAdapterJointAttnProcessor2_0 + +[[autodoc]] models.attention_processor.FluxIPAdapterJointAttnProcessor2_0 + + +## XLAFluxFlashAttnProcessor2_0 + +[[autodoc]] models.attention_processor.XLAFluxFlashAttnProcessor2_0 \ No newline at end of file diff --git a/docs/source/en/api/normalization.md b/docs/source/en/api/normalization.md index cdd4d744f8a2..bc049524c73e 100644 --- 
a/docs/source/en/api/normalization.md +++ b/docs/source/en/api/normalization.md @@ -65,4 +65,7 @@ Customized normalization layers for supporting various models in 🤗 Diffusers. [[autodoc]] models.normalization.CogVideoXLayerNormZero ## MochiRMSNormZero -[[autodoc]] models.normalization.MochiRMSNormZero \ No newline at end of file +[[autodoc]] models.normalization.MochiRMSNormZero + +## MochiRMSNorm +[[autodoc]] models.normalization.MochiRMSNorm \ No newline at end of file From be989a68719f21628bb91b321ae6b94011a4eb02 Mon Sep 17 00:00:00 2001 From: Sayak Paul Date: Thu, 20 Feb 2025 12:15:59 +0530 Subject: [PATCH 18/18] Update docs/source/en/api/normalization.md Co-authored-by: hlky --- docs/source/en/api/normalization.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/en/api/normalization.md b/docs/source/en/api/normalization.md index bc049524c73e..05ae92a28dc8 100644 --- a/docs/source/en/api/normalization.md +++ b/docs/source/en/api/normalization.md @@ -65,7 +65,7 @@ Customized normalization layers for supporting various models in 🤗 Diffusers. [[autodoc]] models.normalization.CogVideoXLayerNormZero ## MochiRMSNormZero -[[autodoc]] models.normalization.MochiRMSNormZero +[[autodoc]] models.transformers.transformer_mochi.MochiRMSNormZero ## MochiRMSNorm [[autodoc]] models.normalization.MochiRMSNorm \ No newline at end of file
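After the refactor in the later patches, every check funnels through the generic `check_documentation(doc_path, src_path, doc_regex, src_regex, exclude_conditions=None)` helper driven by the `checks` table. As a rough usage sketch, the normalization check could be invoked directly as shown below; the paths, regexes, and the `LayerNorm` exclusion mirror the corresponding `checks` entry, and it assumes the script is run from the repository root (since `REPO_PATH` is `"."`) with `utils/` added to the import path, the same way the new test file does.

```python
import sys

# Assumption: running from the root of the diffusers repo so that REPO_PATH="." resolves,
# with utils/ appended to the import path (mirroring tests/others/test_check_support_list.py).
sys.path.append("utils")

from check_support_list import check_documentation

undocumented = check_documentation(
    doc_path="docs/source/en/api/normalization.md",
    src_path="src/diffusers/models/normalization.py",
    doc_regex=r"\[\[autodoc\]\]\s([^\n]+)",
    src_regex=r"class\s+(\w+)\s*\(.*?nn\.Module.*?\):",
    # LayerNorm is intentionally left undocumented, so it is excluded here as well.
    exclude_conditions=[lambda c: c == "LayerNorm"],
)
print(undocumented)  # expected to be an empty list once docs and source are in sync
```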