Commit 0034db2: "fixes"

1 parent 4227392 commit 0034db2

File tree (4 files changed: 135 additions, 12 deletions)

docs/source/en/api/activations.md
docs/source/en/api/attnprocessor.md
docs/source/en/api/normalization.md
utils/check_support_list.py

docs/source/en/api/activations.md

Lines changed: 9 additions & 0 deletions
@@ -25,3 +25,12 @@ Customized activation functions for supporting various models in 🤗 Diffusers.
 ## ApproximateGELU
 
 [[autodoc]] models.activations.ApproximateGELU
+
+
+## SwiGLU
+
+[[autodoc]] models.activations.SwiGLU
+
+## FP32SiLU
+
+[[autodoc]] models.activations.FP32SiLU
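For quick orientation, here is how the two newly documented activations can be exercised. A minimal sketch, assuming `SwiGLU` follows the same `(dim_in, dim_out)` constructor pattern as the other gated activations in `models/activations.py` and that `FP32SiLU` takes no arguments; treat both signatures as assumptions to verify against the source, not documented API.

```python
# Hedged usage sketch for the newly documented activations.
# Assumption: SwiGLU(dim_in, dim_out) projects to 2 * dim_out and gates one
# half with SiLU; FP32SiLU() upcasts to float32 internally and casts back.
import torch

from diffusers.models.activations import FP32SiLU, SwiGLU

hidden = torch.randn(2, 16, 64)          # (batch, seq_len, dim_in)
swiglu = SwiGLU(dim_in=64, dim_out=128)  # assumed signature
print(swiglu(hidden).shape)              # expected: torch.Size([2, 16, 128])

# FP32SiLU matters in half-precision models: the SiLU itself runs in
# float32 for numerical stability, and the result is cast back.
x_fp16 = torch.randn(4, 64, dtype=torch.float16)
print(FP32SiLU()(x_fp16).dtype)          # torch.float16
```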

docs/source/en/api/attnprocessor.md

Lines changed: 73 additions & 0 deletions
@@ -67,3 +67,76 @@ An attention processor is a class for applying different types of attention mechanisms.
 
 ## JointAttnProcessor2_0
 [[autodoc]] models.attention_processor.JointAttnProcessor2_0
+
+## PAGJointAttnProcessor2_0
+[[autodoc]] models.attention_processor.PAGJointAttnProcessor2_0
+
+## PAGCFGJointAttnProcessor2_0
+[[autodoc]] models.attention_processor.PAGCFGJointAttnProcessor2_0
+
+
+## FusedJointAttnProcessor2_0
+[[autodoc]] models.attention_processor.FusedJointAttnProcessor2_0
+
+## AllegroAttnProcessor2_0
+[[autodoc]] models.attention_processor.AllegroAttnProcessor2_0
+
+## AuraFlowAttnProcessor2_0
+[[autodoc]] models.attention_processor.AuraFlowAttnProcessor2_0
+
+## MochiVaeAttnProcessor2_0
+[[autodoc]] models.attention_processor.MochiVaeAttnProcessor2_0
+
+## PAGCFGIdentitySelfAttnProcessor2_0
+[[autodoc]] models.attention_processor.PAGCFGIdentitySelfAttnProcessor2_0
+
+## FusedAuraFlowAttnProcessor2_0
+[[autodoc]] models.attention_processor.FusedAuraFlowAttnProcessor2_0
+
+## FusedFluxAttnProcessor2_0
+[[autodoc]] models.attention_processor.FusedFluxAttnProcessor2_0
+
+## SanaMultiscaleAttnProcessor2_0
+[[autodoc]] models.attention_processor.SanaMultiscaleAttnProcessor2_0
+
+## PAGHunyuanAttnProcessor2_0
+[[autodoc]] models.attention_processor.PAGHunyuanAttnProcessor2_0
+
+## HunyuanAttnProcessor2_0
+[[autodoc]] models.attention_processor.HunyuanAttnProcessor2_0
+
+## FluxAttnProcessor2_0
+[[autodoc]] models.attention_processor.FluxAttnProcessor2_0
+
+## PAGIdentitySelfAttnProcessor2_0
+[[autodoc]] models.attention_processor.PAGIdentitySelfAttnProcessor2_0
+
+## FusedCogVideoXAttnProcessor2_0
+[[autodoc]] models.attention_processor.FusedCogVideoXAttnProcessor2_0
+
+## MochiAttnProcessor2_0
+[[autodoc]] models.attention_processor.MochiAttnProcessor2_0
+
+## StableAudioAttnProcessor2_0
+[[autodoc]] models.attention_processor.StableAudioAttnProcessor2_0
+
+## XLAFlashAttnProcessor2_0
+[[autodoc]] models.attention_processor.XLAFlashAttnProcessor2_0
+
+## FusedHunyuanAttnProcessor2_0
+[[autodoc]] models.attention_processor.FusedHunyuanAttnProcessor2_0
+
+## IPAdapterXFormersAttnProcessor
+[[autodoc]] models.attention_processor.IPAdapterXFormersAttnProcessor
+
+## LuminaAttnProcessor2_0
+[[autodoc]] models.attention_processor.LuminaAttnProcessor2_0
+
+## PAGCFGHunyuanAttnProcessor2_0
+[[autodoc]] models.attention_processor.PAGCFGHunyuanAttnProcessor2_0
+
+## FluxSingleAttnProcessor2_0
+[[autodoc]] models.attention_processor.FluxSingleAttnProcessor2_0
+
+## CogVideoXAttnProcessor2_0
+[[autodoc]] models.attention_processor.CogVideoXAttnProcessor2_0
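Attention processors are not usually instantiated and called by hand; they are installed on a model. A minimal sketch of the common pattern, assuming the model exposes the standard `set_attn_processor` helper and `attn_processors` mapping that diffusers attention-bearing models provide; the checkpoint name is illustrative only.

```python
# Hedged sketch: swapping the attention processor on a Flux transformer.
# The checkpoint name is illustrative; any Flux-style checkpoint works.
import torch

from diffusers import FluxTransformer2DModel
from diffusers.models.attention_processor import FluxAttnProcessor2_0

transformer = FluxTransformer2DModel.from_pretrained(
    "black-forest-labs/FLUX.1-dev", subfolder="transformer", torch_dtype=torch.bfloat16
)

# Install FluxAttnProcessor2_0 (backed by torch's scaled_dot_product_attention)
# on every attention module in the model.
transformer.set_attn_processor(FluxAttnProcessor2_0())

# attn_processors maps each attention module name to its processor,
# which makes the swap easy to verify.
print({type(p).__name__ for p in transformer.attn_processors.values()})
```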

docs/source/en/api/normalization.md

Lines changed: 25 additions & 0 deletions
@@ -41,3 +41,28 @@ Customized normalization layers for supporting various models in 🤗 Diffusers.
 ## GlobalResponseNorm
 
 [[autodoc]] models.normalization.GlobalResponseNorm
+
+
+## LuminaLayerNormContinuous
+[[autodoc]] models.normalization.LuminaLayerNormContinuous
+
+## SD35AdaLayerNormZeroX
+[[autodoc]] models.normalization.SD35AdaLayerNormZeroX
+
+## AdaLayerNormZeroSingle
+[[autodoc]] models.normalization.AdaLayerNormZeroSingle
+
+## LuminaRMSNormZero
+[[autodoc]] models.normalization.LuminaRMSNormZero
+
+## LpNorm
+[[autodoc]] models.normalization.LpNorm
+
+## CogView3PlusAdaLayerNormZeroTextImage
+[[autodoc]] models.normalization.CogView3PlusAdaLayerNormZeroTextImage
+
+## CogVideoXLayerNormZero
+[[autodoc]] models.normalization.CogVideoXLayerNormZero
+
+## MochiRMSNormZero
+[[autodoc]] models.normalization.MochiRMSNormZero
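As a reference point for the simplest of these layers, here is a sketch of what `LpNorm`-style normalization computes; the `(p, dim, eps)` parameters are an assumption based on the name, so verify them against `models/normalization.py` before relying on them.

```python
# Hedged sketch of LpNorm-style normalization: x / max(||x||_p, eps) along
# one dimension. This mirrors torch.nn.functional.normalize; the diffusers
# layer is assumed, not confirmed, to be a thin nn.Module wrapper around it.
import torch
import torch.nn.functional as F


def lp_normalize(x: torch.Tensor, p: float = 2.0, dim: int = -1, eps: float = 1e-12) -> torch.Tensor:
    return F.normalize(x, p=p, dim=dim, eps=eps)


x = torch.randn(2, 8)
y = lp_normalize(x)  # each row rescaled to unit L2 norm
print(torch.linalg.vector_norm(y, dim=-1))  # ~1.0 for every row
```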

utils/check_support_list.py

Lines changed: 28 additions & 12 deletions
@@ -41,11 +41,15 @@ def check_attention_processors():
     processor_classes = re.findall(r"class\s+(\w+Processor(?:\d*_?\d*))[(:]", doctext)
     processor_classes = [proc for proc in processor_classes if "LoRA" not in proc and proc != "Attention"]
 
+    undocumented_attn_processors = set()
     for processor in processor_classes:
         if processor not in documented_attention_processors:
-            raise ValueError(
-                f"{processor} should be in listed in the attention processor documentation but is not. Please update the documentation."
-            )
+            undocumented_attn_processors.add(processor)
+
+    if undocumented_attn_processors:
+        raise ValueError(
+            f"The following attention processors should be listed in the attention processor documentation but are not: {list(undocumented_attn_processors)}. Please update the documentation."
+        )
 
 
 def check_image_processors():
@@ -58,10 +62,14 @@ def check_image_processors():
         doctext = f.read()
     processor_classes = re.findall(r"class\s+(\w+Processor(?:\d*_?\d*))[(:]", doctext)
 
+    undocumented_img_processors = set()
     for processor in processor_classes:
         if processor not in documented_image_processors:
-            raise ValueError(
-                f"{processor} should be in listed in the image processor documentation but is not. Please update the documentation."
-            )
+            undocumented_img_processors.add(processor)
+
+    if undocumented_img_processors:
+        raise ValueError(
+            f"The following image processors should be listed in the image processor documentation but are not: {list(undocumented_img_processors)}. Please update the documentation."
+        )
 
 
@@ -75,11 +83,15 @@ def check_activations():
         doctext = f.read()
     activation_classes = re.findall(r"class\s+(\w+)\s*\(.*?nn\.Module.*?\):", doctext)
 
+    undocumented_activations = set()
     for activation in activation_classes:
         if activation not in documented_activations:
-            raise ValueError(
-                f"{activation} should be in listed in the activations documentation but is not. Please update the documentation."
-            )
+            undocumented_activations.add(activation)
+
+    if undocumented_activations:
+        raise ValueError(
+            f"The following activations should be listed in the activations documentation but are not: {list(undocumented_activations)}. Please update the documentation."
+        )
 
 
 def check_normalizations():
@@ -94,11 +106,15 @@ def check_normalizations():
     # LayerNorm is an exception because adding a doc for it is confusing.
     normalization_classes = [norm for norm in normalization_classes if norm != "LayerNorm"]
 
+    undocumented_norms = set()
     for norm in normalization_classes:
         if norm not in documented_normalizations:
-            raise ValueError(
-                f"{norm} should be in listed in the normalizations documentation but is not. Please update the documentation."
-            )
+            undocumented_norms.add(norm)
+
+    if undocumented_norms:
+        raise ValueError(
+            f"The following norms should be listed in the normalizations documentation but are not: {list(undocumented_norms)}. Please update the documentation."
+        )
 
 
 if __name__ == "__main__":
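The change is identical in all four checkers: collect every undocumented name first, then raise once with the complete list instead of failing on the first offender. Below is a self-contained sketch of that pattern, driven by the script's own class-name regex; the sample source text and the "documented" set are made up for illustration.

```python
# Self-contained demo of the collect-then-raise pattern above, using the
# script's own regex. The sample source and documented set are illustrative.
import re

sample_source = """
class FooAttnProcessor2_0:
    pass

class BarAttnProcessor:
    pass
"""

documented = {"FooAttnProcessor2_0"}  # pretend only Foo is documented

found = re.findall(r"class\s+(\w+Processor(?:\d*_?\d*))[(:]", sample_source)
undocumented = {name for name in found if name not in documented}

if undocumented:
    # One error listing every missing name, not one error per offender.
    raise ValueError(
        f"The following attention processors should be listed in the attention "
        f"processor documentation but are not: {sorted(undocumented)}. "
        "Please update the documentation."
    )
```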
