Commit 26e0f53

Rename Lumina(2)Text2ImgPipeline -> Lumina(2)Pipeline
1 parent 2bc82d6 commit 26e0f53
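
This commit drops the Text2Img suffix: `LuminaText2ImgPipeline` becomes `LuminaPipeline` and `Lumina2Text2ImgPipeline` becomes `Lumina2Pipeline`, with the old names kept importable as deprecated subclasses (see the `deprecate` calls added in `pipeline_lumina.py` and `pipeline_lumina2.py` below). A minimal sketch of the user-facing change, reusing the checkpoint IDs from the updated docs; the prompt is illustrative:

```python
import torch

# New class names introduced by this commit
from diffusers import Lumina2Pipeline, LuminaPipeline

pipe = Lumina2Pipeline.from_pretrained(
    "Alpha-VLLM/Lumina-Image-2.0", torch_dtype=torch.bfloat16
)
pipe.enable_model_cpu_offload()

image = pipe(prompt="a photo of a red panda in a bamboo forest").images[0]
image.save("lumina2.png")
```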

File tree

13 files changed: +91 additions, −41 deletions


docs/source/en/api/pipelines/lumina.md

Lines changed: 7 additions & 7 deletions
@@ -58,10 +58,10 @@ Use [`torch.compile`](https://huggingface.co/docs/diffusers/main/en/tutorials/fa
 First, load the pipeline:
 
 ```python
-from diffusers import LuminaText2ImgPipeline
+from diffusers import LuminaPipeline
 import torch
 
-pipeline = LuminaText2ImgPipeline.from_pretrained(
+pipeline = LuminaPipeline.from_pretrained(
     "Alpha-VLLM/Lumina-Next-SFT-diffusers", torch_dtype=torch.bfloat16
 ).to("cuda")
 ```
@@ -86,11 +86,11 @@ image = pipeline(prompt="Upper body of a young woman in a Victorian-era outfit w
 
 Quantization helps reduce the memory requirements of very large models by storing model weights in a lower precision data type. However, quantization may have varying impact on video quality depending on the video model.
 
-Refer to the [Quantization](../../quantization/overview) overview to learn more about supported quantization backends and selecting a quantization backend that supports your use case. The example below demonstrates how to load a quantized [`LuminaText2ImgPipeline`] for inference with bitsandbytes.
+Refer to the [Quantization](../../quantization/overview) overview to learn more about supported quantization backends and selecting a quantization backend that supports your use case. The example below demonstrates how to load a quantized [`LuminaPipeline`] for inference with bitsandbytes.
 
 ```py
 import torch
-from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig, Transformer2DModel, LuminaText2ImgPipeline
+from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig, Transformer2DModel, LuminaPipeline
 from transformers import BitsAndBytesConfig as BitsAndBytesConfig, T5EncoderModel
 
 quant_config = BitsAndBytesConfig(load_in_8bit=True)
@@ -109,7 +109,7 @@ transformer_8bit = Transformer2DModel.from_pretrained(
     torch_dtype=torch.float16,
 )
 
-pipeline = LuminaText2ImgPipeline.from_pretrained(
+pipeline = LuminaPipeline.from_pretrained(
     "Alpha-VLLM/Lumina-Next-SFT-diffusers",
     text_encoder=text_encoder_8bit,
     transformer=transformer_8bit,
@@ -122,9 +122,9 @@ image = pipeline(prompt).images[0]
 image.save("lumina.png")
 ```
 
-## LuminaText2ImgPipeline
+## LuminaPipeline
 
-[[autodoc]] LuminaText2ImgPipeline
+[[autodoc]] LuminaPipeline
 - all
 - __call__

docs/source/en/api/pipelines/lumina2.md

Lines changed: 6 additions & 6 deletions
@@ -32,14 +32,14 @@ Single file loading for Lumina Image 2.0 is available for the `Lumina2Transforme
 
 ```python
 import torch
-from diffusers import Lumina2Transformer2DModel, Lumina2Text2ImgPipeline
+from diffusers import Lumina2Transformer2DModel, Lumina2Pipeline
 
 ckpt_path = "https://huggingface.co/Alpha-VLLM/Lumina-Image-2.0/blob/main/consolidated.00-of-01.pth"
 transformer = Lumina2Transformer2DModel.from_single_file(
     ckpt_path, torch_dtype=torch.bfloat16
 )
 
-pipe = Lumina2Text2ImgPipeline.from_pretrained(
+pipe = Lumina2Pipeline.from_pretrained(
     "Alpha-VLLM/Lumina-Image-2.0", transformer=transformer, torch_dtype=torch.bfloat16
 )
 pipe.enable_model_cpu_offload()
@@ -56,7 +56,7 @@ image.save("lumina-single-file.png")
 GGUF Quantized checkpoints for the `Lumina2Transformer2DModel` can be loaded via `from_single_file` with the `GGUFQuantizationConfig`
 
 ```python
-from diffusers import Lumina2Transformer2DModel, Lumina2Text2ImgPipeline, GGUFQuantizationConfig
+from diffusers import Lumina2Transformer2DModel, Lumina2Pipeline, GGUFQuantizationConfig
 
 ckpt_path = "https://huggingface.co/calcuis/lumina-gguf/blob/main/lumina2-q4_0.gguf"
 transformer = Lumina2Transformer2DModel.from_single_file(
@@ -65,7 +65,7 @@ transformer = Lumina2Transformer2DModel.from_single_file(
     torch_dtype=torch.bfloat16,
 )
 
-pipe = Lumina2Text2ImgPipeline.from_pretrained(
+pipe = Lumina2Pipeline.from_pretrained(
     "Alpha-VLLM/Lumina-Image-2.0", transformer=transformer, torch_dtype=torch.bfloat16
 )
 pipe.enable_model_cpu_offload()
@@ -76,8 +76,8 @@ image = pipe(
 image.save("lumina-gguf.png")
 ```
 
-## Lumina2Text2ImgPipeline
+## Lumina2Pipeline
 
-[[autodoc]] Lumina2Text2ImgPipeline
+[[autodoc]] Lumina2Pipeline
 - all
 - __call__

scripts/convert_lumina_to_diffusers.py

Lines changed: 2 additions & 2 deletions
@@ -5,7 +5,7 @@
 from safetensors.torch import load_file
 from transformers import AutoModel, AutoTokenizer
 
-from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, LuminaNextDiT2DModel, LuminaText2ImgPipeline
+from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, LuminaNextDiT2DModel, LuminaPipeline
 
 
 def main(args):
@@ -115,7 +115,7 @@ def main(args):
     tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b")
     text_encoder = AutoModel.from_pretrained("google/gemma-2b")
 
-    pipeline = LuminaText2ImgPipeline(
+    pipeline = LuminaPipeline(
         tokenizer=tokenizer, text_encoder=text_encoder, transformer=transformer, vae=vae, scheduler=scheduler
     )
     pipeline.save_pretrained(args.dump_path)

src/diffusers/__init__.py

Lines changed: 4 additions & 0 deletions
@@ -341,7 +341,9 @@
         "LEditsPPPipelineStableDiffusionXL",
         "LTXImageToVideoPipeline",
         "LTXPipeline",
+        "Lumina2Pipeline",
         "Lumina2Text2ImgPipeline",
+        "LuminaPipeline",
         "LuminaText2ImgPipeline",
         "MarigoldDepthPipeline",
         "MarigoldNormalsPipeline",
@@ -840,7 +842,9 @@
         LEditsPPPipelineStableDiffusionXL,
         LTXImageToVideoPipeline,
         LTXPipeline,
+        Lumina2Pipeline,
         Lumina2Text2ImgPipeline,
+        LuminaPipeline,
         LuminaText2ImgPipeline,
         MarigoldDepthPipeline,
         MarigoldNormalsPipeline,

src/diffusers/pipelines/__init__.py

Lines changed: 4 additions & 4 deletions
@@ -256,8 +256,8 @@
     )
     _import_structure["latte"] = ["LattePipeline"]
     _import_structure["ltx"] = ["LTXPipeline", "LTXImageToVideoPipeline"]
-    _import_structure["lumina"] = ["LuminaText2ImgPipeline"]
-    _import_structure["lumina2"] = ["Lumina2Text2ImgPipeline"]
+    _import_structure["lumina"] = ["LuminaPipeline", "LuminaText2ImgPipeline"]
+    _import_structure["lumina2"] = ["Lumina2Pipeline", "Lumina2Text2ImgPipeline"]
     _import_structure["marigold"].extend(
         [
             "MarigoldDepthPipeline",
@@ -599,8 +599,8 @@
         LEditsPPPipelineStableDiffusionXL,
     )
     from .ltx import LTXImageToVideoPipeline, LTXPipeline
-    from .lumina import LuminaText2ImgPipeline
-    from .lumina2 import Lumina2Text2ImgPipeline
+    from .lumina import LuminaPipeline, LuminaText2ImgPipeline
+    from .lumina2 import Lumina2Pipeline, Lumina2Text2ImgPipeline
     from .marigold import (
         MarigoldDepthPipeline,
         MarigoldNormalsPipeline,

src/diffusers/pipelines/auto_pipeline.py

Lines changed: 4 additions & 4 deletions
@@ -65,8 +65,8 @@
 )
 from .kandinsky3 import Kandinsky3Img2ImgPipeline, Kandinsky3Pipeline
 from .latent_consistency_models import LatentConsistencyModelImg2ImgPipeline, LatentConsistencyModelPipeline
-from .lumina import LuminaText2ImgPipeline
-from .lumina2 import Lumina2Text2ImgPipeline
+from .lumina import LuminaPipeline
+from .lumina2 import Lumina2Pipeline
 from .pag import (
     HunyuanDiTPAGPipeline,
     PixArtSigmaPAGPipeline,
@@ -136,8 +136,8 @@
         ("flux", FluxPipeline),
         ("flux-control", FluxControlPipeline),
         ("flux-controlnet", FluxControlNetPipeline),
-        ("lumina", LuminaText2ImgPipeline),
-        ("lumina2", Lumina2Text2ImgPipeline),
+        ("lumina", LuminaPipeline),
+        ("lumina2", Lumina2Pipeline),
         ("cogview3", CogView3PlusPipeline),
         ("cogview4", CogView4Pipeline),
     ]
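
Because the text-to-image auto-pipeline mapping above now points `lumina` and `lumina2` at the renamed classes, `AutoPipelineForText2Image` hands back `LuminaPipeline`/`Lumina2Pipeline` instances for those checkpoints. A minimal sketch (not part of the diff), reusing the Lumina Image 2.0 checkpoint from the docs; the prompt is illustrative:

```python
import torch
from diffusers import AutoPipelineForText2Image

# Resolution goes through the ("lumina2", Lumina2Pipeline) entry shown above.
pipe = AutoPipelineForText2Image.from_pretrained(
    "Alpha-VLLM/Lumina-Image-2.0", torch_dtype=torch.bfloat16
)
pipe.enable_model_cpu_offload()

image = pipe(prompt="a tiny astronaut hatching from an egg on the moon").images[0]
image.save("lumina2-auto.png")
```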

src/diffusers/pipelines/lumina/__init__.py

Lines changed: 2 additions & 2 deletions
@@ -22,7 +22,7 @@
 
     _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
 else:
-    _import_structure["pipeline_lumina"] = ["LuminaText2ImgPipeline"]
+    _import_structure["pipeline_lumina"] = ["LuminaPipeline", "LuminaText2ImgPipeline"]
 
 if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
     try:
@@ -32,7 +32,7 @@
     except OptionalDependencyNotAvailable:
         from ...utils.dummy_torch_and_transformers_objects import *
     else:
-        from .pipeline_lumina import LuminaText2ImgPipeline
+        from .pipeline_lumina import LuminaPipeline, LuminaText2ImgPipeline
 
 else:
     import sys
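
For context, the `_import_structure` entries updated here feed the lazy-import scaffolding that closes each pipeline `__init__.py` in diffusers; the `else: import sys` branch at the end of the hunk is where it starts. Roughly, following the existing convention rather than anything added by this diff:

```python
import sys

from ...utils import _LazyModule

# Names registered in _import_structure (now including "LuminaPipeline") are
# resolved to real classes only on first attribute access, keeping
# `import diffusers` cheap when the torch/transformers extras are not needed.
sys.modules[__name__] = _LazyModule(
    __name__,
    globals()["__file__"],
    _import_structure,
    module_spec=__spec__,
)
```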

src/diffusers/pipelines/lumina/pipeline_lumina.py

Lines changed: 11 additions & 3 deletions
@@ -30,6 +30,7 @@
 from ...schedulers import FlowMatchEulerDiscreteScheduler
 from ...utils import (
     BACKENDS_MAPPING,
+    deprecate,
     is_bs4_available,
     is_ftfy_available,
     is_torch_xla_available,
@@ -60,9 +61,9 @@
     Examples:
         ```py
         >>> import torch
-        >>> from diffusers import LuminaText2ImgPipeline
+        >>> from diffusers import LuminaPipeline
 
-        >>> pipe = LuminaText2ImgPipeline.from_pretrained(
+        >>> pipe = LuminaPipeline.from_pretrained(
         ...     "Alpha-VLLM/Lumina-Next-SFT-diffusers", torch_dtype=torch.bfloat16
         ... )
         >>> # Enable memory optimizations.
@@ -134,7 +135,7 @@ def retrieve_timesteps(
     return timesteps, num_inference_steps
 
 
-class LuminaText2ImgPipeline(DiffusionPipeline):
+class LuminaPipeline(DiffusionPipeline):
     r"""
     Pipeline for text-to-image generation using Lumina-T2I.
 
@@ -935,3 +936,10 @@ def __call__(
             return (image,)
 
         return ImagePipelineOutput(images=image)
+
+
+class LuminaText2ImgPipeline(LuminaPipeline):
+    def __init__(self, *args, **kwargs):
+        deprecation_message = "`LuminaText2ImgPipeline` has been renamed to `LuminaPipeline` and will be removed in a future version. Please use `LuminaPipeline` instead."
+        deprecate("diffusers.pipelines.lumina.pipeline_lumina.LuminaText2ImgPipeline", "0.34", deprecation_message)
+        super().__init__(*args, **kwargs)
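
The old class survives as a thin subclass whose `__init__` calls `deprecate` before deferring to `LuminaPipeline`, so existing imports keep working while pointing users at the new name. A rough sketch of what callers see (warning text taken from `deprecation_message` above; the exact warning category comes from the `deprecate` helper):

```python
import torch

from diffusers import LuminaText2ImgPipeline  # deprecated alias of LuminaPipeline

# from_pretrained() builds the pipeline through __init__, so deprecate() fires
# and a warning along these lines is emitted:
#   `LuminaText2ImgPipeline` has been renamed to `LuminaPipeline` and will be
#   removed in a future version. Please use `LuminaPipeline` instead.
pipe = LuminaText2ImgPipeline.from_pretrained(
    "Alpha-VLLM/Lumina-Next-SFT-diffusers", torch_dtype=torch.bfloat16
)

# The result is still a LuminaPipeline (the alias subclasses it), so the rest
# of the API is unchanged.
image = pipe(prompt="Upper body of a young woman in a Victorian-era outfit").images[0]
```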

src/diffusers/pipelines/lumina2/__init__.py

Lines changed: 2 additions & 2 deletions
@@ -22,7 +22,7 @@
 
     _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
 else:
-    _import_structure["pipeline_lumina2"] = ["Lumina2Text2ImgPipeline"]
+    _import_structure["pipeline_lumina2"] = ["Lumina2Pipeline", "Lumina2Text2ImgPipeline"]
 
 if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
     try:
@@ -32,7 +32,7 @@
     except OptionalDependencyNotAvailable:
         from ...utils.dummy_torch_and_transformers_objects import *
     else:
-        from .pipeline_lumina2 import Lumina2Text2ImgPipeline
+        from .pipeline_lumina2 import Lumina2Pipeline, Lumina2Text2ImgPipeline
 
 else:
     import sys

src/diffusers/pipelines/lumina2/pipeline_lumina2.py

Lines changed: 11 additions & 3 deletions
@@ -24,6 +24,7 @@
 from ...models.transformers.transformer_lumina2 import Lumina2Transformer2DModel
 from ...schedulers import FlowMatchEulerDiscreteScheduler
 from ...utils import (
+    deprecate,
     is_torch_xla_available,
     logging,
     replace_example_docstring,
@@ -46,9 +47,9 @@
     Examples:
         ```py
         >>> import torch
-        >>> from diffusers import Lumina2Text2ImgPipeline
+        >>> from diffusers import Lumina2Pipeline
 
-        >>> pipe = Lumina2Text2ImgPipeline.from_pretrained("Alpha-VLLM/Lumina-Image-2.0", torch_dtype=torch.bfloat16)
+        >>> pipe = Lumina2Pipeline.from_pretrained("Alpha-VLLM/Lumina-Image-2.0", torch_dtype=torch.bfloat16)
         >>> # Enable memory optimizations.
         >>> pipe.enable_model_cpu_offload()
 
@@ -132,7 +133,7 @@ def retrieve_timesteps(
     return timesteps, num_inference_steps
 
 
-class Lumina2Text2ImgPipeline(DiffusionPipeline):
+class Lumina2Pipeline(DiffusionPipeline):
     r"""
     Pipeline for text-to-image generation using Lumina-T2I.
 
@@ -757,3 +758,10 @@ def __call__(
             return (image,)
 
         return ImagePipelineOutput(images=image)
+
+
+class Lumina2Text2ImgPipeline(Lumina2Pipeline):
+    def __init__(self, *args, **kwargs):
+        deprecation_message = "`Lumina2Text2ImgPipeline` has been renamed to `Lumina2Pipeline` and will be removed in a future version. Please use `Lumina2Pipeline` instead."
+        deprecate("diffusers.pipelines.lumina2.pipeline_lumina2.Lumina2Text2ImgPipeline", "0.34", deprecation_message)
+        super().__init__(*args, **kwargs)
