
Commit b67c879

Merge branch 'main' into modular-standard-repo
2 parents 796c992 + 638cc03

20 files changed (+324 −243 lines)

docs/source/en/_toctree.yml

Lines changed: 164 additions & 153 deletions
Large diffs are not rendered by default.

docs/source/en/optimization/fp16.md

Lines changed: 9 additions & 1 deletion
```diff
@@ -239,6 +239,12 @@ The `step()` function is [called](https://github.com/huggingface/diffusers/blob/
 
 In general, the `sigmas` should [stay on the CPU](https://github.com/huggingface/diffusers/blob/35a969d297cba69110d175ee79c59312b9f49e1e/src/diffusers/schedulers/scheduling_euler_discrete.py#L240) to avoid the communication sync and latency.
 
+<Tip>
+
+Refer to the [torch.compile and Diffusers: A Hands-On Guide to Peak Performance](https://pytorch.org/blog/torch-compile-and-diffusers-a-hands-on-guide-to-peak-performance/) blog post for maximizing performance with `torch.compile` for diffusion models.
+
+</Tip>
+
 ### Benchmarks
 
 Refer to the [diffusers/benchmarks](https://huggingface.co/datasets/diffusers/benchmarks) dataset to see inference latency and memory usage data for compiled pipelines.
@@ -298,4 +304,6 @@ pipeline.fuse_qkv_projections()
 
 - Read the [Presenting Flux Fast: Making Flux go brrr on H100s](https://pytorch.org/blog/presenting-flux-fast-making-flux-go-brrr-on-h100s/) blog post to learn more about how you can combine all of these optimizations with [TorchInductor](https://docs.pytorch.org/docs/stable/torch.compiler.html) and [AOTInductor](https://docs.pytorch.org/docs/stable/torch.compiler_aot_inductor.html) for a ~2.5x speedup using recipes from [flux-fast](https://github.com/huggingface/flux-fast).
 
-  These recipes support AMD hardware and [Flux.1 Kontext Dev](https://huggingface.co/black-forest-labs/FLUX.1-Kontext-dev).
+  These recipes support AMD hardware and [Flux.1 Kontext Dev](https://huggingface.co/black-forest-labs/FLUX.1-Kontext-dev).
+- Read the [torch.compile and Diffusers: A Hands-On Guide to Peak Performance](https://pytorch.org/blog/torch-compile-and-diffusers-a-hands-on-guide-to-peak-performance/) blog post
+  to maximize performance when using `torch.compile`.
```
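For readers skimming the diff: the pattern both linked blog posts build on is compiling the pipeline's denoiser. A minimal sketch (the model choice and `torch.compile` flags here are illustrative, not part of this commit):

```python
import torch
from diffusers import StableDiffusionXLPipeline

pipeline = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

# Compile only the denoiser; "max-autotune" trades a longer first-call warmup
# for faster steady-state inference.
pipeline.unet = torch.compile(pipeline.unet, mode="max-autotune", fullgraph=True)

image = pipeline("an astronaut riding a horse on the moon").images[0]
```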

docs/source/en/tutorials/tutorial_overview.md

Lines changed: 0 additions & 23 deletions
This file was deleted.

docs/source/en/using-diffusers/overview_techniques.md

Lines changed: 0 additions & 18 deletions
This file was deleted.

examples/dreambooth/train_dreambooth_lora_flux_kontext.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -1614,7 +1614,7 @@ def load_model_hook(models, input_dir):
         )
         if args.cond_image_column is not None:
             logger.info("I2I fine-tuning enabled.")
-            batch_sampler = BucketBatchSampler(train_dataset, batch_size=args.train_batch_size, drop_last=False)
+            batch_sampler = BucketBatchSampler(train_dataset, batch_size=args.train_batch_size, drop_last=True)
             train_dataloader = torch.utils.data.DataLoader(
                 train_dataset,
                 batch_sampler=batch_sampler,
```
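The switch to `drop_last=True` makes the sampler discard a trailing batch smaller than `train_batch_size`, keeping bucketed batches uniformly shaped. A minimal illustration with PyTorch's built-in `BatchSampler`, standing in here for the script's `BucketBatchSampler`:

```python
from torch.utils.data import BatchSampler, SequentialSampler

sampler = SequentialSampler(range(10))  # 10 samples, batch size 4

print(list(BatchSampler(sampler, batch_size=4, drop_last=False)))
# [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]  <- ragged final batch
print(list(BatchSampler(sampler, batch_size=4, drop_last=True)))
# [[0, 1, 2, 3], [4, 5, 6, 7]]          <- partial batch dropped
```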

src/diffusers/modular_pipelines/components_manager.py

Lines changed: 24 additions & 2 deletions
```diff
@@ -386,6 +386,7 @@ def add(self, name: str, component: Any, collection: Optional[str] = None):
         id(component) is Python's built-in unique identifier for the object
         """
         component_id = f"{name}_{id(component)}"
+        is_new_component = True
 
         # check for duplicated components
         for comp_id, comp in self.components.items():
@@ -394,6 +395,7 @@ def add(self, name: str, component: Any, collection: Optional[str] = None):
             if comp_name == name:
                 logger.warning(f"ComponentsManager: component '{name}' already exists as '{comp_id}'")
                 component_id = comp_id
+                is_new_component = False
                 break
             else:
                 logger.warning(
@@ -426,19 +428,39 @@ def add(self, name: str, component: Any, collection: Optional[str] = None):
                 logger.warning(
                     f"ComponentsManager: removing existing {name} from collection '{collection}': {comp_id}"
                 )
-                self.remove(comp_id)
+                # remove existing component from this collection (if it is not in any other collection, will be removed from ComponentsManager)
+                self.remove_from_collection(comp_id, collection)
+
             self.collections[collection].add(component_id)
             logger.info(
                 f"ComponentsManager: added component '{name}' in collection '{collection}': {component_id}"
             )
         else:
             logger.info(f"ComponentsManager: added component '{name}' as '{component_id}'")
 
-        if self._auto_offload_enabled:
+        if self._auto_offload_enabled and is_new_component:
             self.enable_auto_cpu_offload(self._auto_offload_device)
 
         return component_id
 
+    def remove_from_collection(self, component_id: str, collection: str):
+        """
+        Remove a component from a collection.
+        """
+        if collection not in self.collections:
+            logger.warning(f"Collection '{collection}' not found in ComponentsManager")
+            return
+        if component_id not in self.collections[collection]:
+            logger.warning(f"Component '{component_id}' not found in collection '{collection}'")
+            return
+        # remove from the collection
+        self.collections[collection].remove(component_id)
+        # check if this component is in any other collection
+        comp_colls = [coll for coll, comps in self.collections.items() if component_id in comps]
+        if not comp_colls:  # only if no other collection contains this component, remove it
+            logger.warning(f"ComponentsManager: removing component '{component_id}' from ComponentsManager")
+            self.remove(component_id)
+
     def remove(self, component_id: str = None):
         """
         Remove a component from the ComponentsManager.
```
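A sketch of the behavior this change enables, assuming the `ComponentsManager` API shown in this diff (the model and collection names are illustrative):

```python
from diffusers import ComponentsManager
from diffusers.models import AutoencoderKL

manager = ComponentsManager()
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")

# Register one component under two collections.
comp_id = manager.add("vae", vae, collection="project-a")
manager.add("vae", vae, collection="project-b")

# Previously this code path called remove() and dropped the component outright;
# now it only detaches it from "project-a" and keeps it registered while
# "project-b" still references it.
manager.remove_from_collection(comp_id, "project-a")
```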

src/diffusers/modular_pipelines/modular_pipeline.py

Lines changed: 12 additions & 2 deletions
```diff
@@ -332,6 +332,7 @@ class ModularPipelineBlocks(ConfigMixin, PushToHubMixin):
     """
 
     config_name = "config.json"
+    model_name = None
 
     @classmethod
     def _get_signature_keys(cls, obj):
@@ -342,6 +343,14 @@ def _get_signature_keys(cls, obj):
 
         return expected_modules, optional_parameters
 
+    @property
+    def expected_components(self) -> List[ComponentSpec]:
+        return []
+
+    @property
+    def expected_configs(self) -> List[ConfigSpec]:
+        return []
+
     @classmethod
     def from_pretrained(
         cls,
@@ -367,7 +376,9 @@ def from_pretrained(
             trust_remote_code, pretrained_model_name_or_path, has_remote_code
         )
         if not (has_remote_code and trust_remote_code):
-            raise ValueError("TODO")
+            raise ValueError(
+                "Selected model repository does not appear to have any custom code or does not have a valid `config.json` file."
+            )
 
         class_ref = config["auto_map"][cls.__name__]
         module_file, class_name = class_ref.split(".")
@@ -376,7 +387,6 @@ def from_pretrained(
             pretrained_model_name_or_path,
             module_file=module_file,
             class_name=class_name,
-            is_modular=True,
             **hub_kwargs,
             **kwargs,
         )
```
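With these defaults in place, a minimal block no longer has to declare empty component/config lists itself. A sketch, assuming the `ModularPipelineBlocks` base class shown above (`MyBlock` is hypothetical):

```python
from diffusers.modular_pipelines import ModularPipelineBlocks

class MyBlock(ModularPipelineBlocks):
    # expected_components and expected_configs now default to [] in the base
    # class, so a block that needs neither can simply omit them.
    pass

assert MyBlock().expected_components == []
assert MyBlock().expected_configs == []
```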

src/diffusers/modular_pipelines/modular_pipeline_utils.py

Lines changed: 3 additions & 1 deletion
```diff
@@ -93,7 +93,7 @@ class ComponentSpec:
     config: Optional[FrozenDict] = None
     # YiYi Notes: should we change it to pretrained_model_name_or_path for consistency? a bit long for a field name
     repo: Optional[Union[str, List[str]]] = field(default=None, metadata={"loading": True})
-    subfolder: Optional[str] = field(default=None, metadata={"loading": True})
+    subfolder: Optional[str] = field(default="", metadata={"loading": True})
     variant: Optional[str] = field(default=None, metadata={"loading": True})
     revision: Optional[str] = field(default=None, metadata={"loading": True})
     default_creation_method: Literal["from_config", "from_pretrained"] = "from_pretrained"
@@ -185,6 +185,8 @@ def load_id(self) -> str:
         Unique identifier for this spec's pretrained load, composed of repo|subfolder|variant|revision (no empty
         segments).
         """
+        if self.default_creation_method == "from_config":
+            return "null"
         parts = [getattr(self, k) for k in self.loading_fields()]
         parts = ["null" if p is None else p for p in parts]
         return "|".join(p for p in parts if p)
```
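Taken together, the two changes affect `load_id` roughly as sketched below (field values are illustrative, and `loading_fields()` is assumed to return repo, subfolder, variant, revision in docstring order):

```python
from diffusers.modular_pipelines.modular_pipeline_utils import ComponentSpec

# from_config specs now short-circuit: there is no pretrained load to identify.
spec = ComponentSpec(name="scheduler", default_creation_method="from_config")
assert spec.load_id == "null"

# With subfolder defaulting to "" instead of None, an unset subfolder is
# dropped as an empty segment rather than rendered as a "null" segment.
spec = ComponentSpec(name="unet", repo="org/repo")
assert spec.load_id == "org/repo|null|null"  # repo|variant|revision, subfolder skipped
```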

src/diffusers/pipelines/dit/pipeline_dit.py

Lines changed: 3 additions & 1 deletion
```diff
@@ -46,7 +46,9 @@ class DiTPipeline(DiffusionPipeline):
 
     Parameters:
         transformer ([`DiTTransformer2DModel`]):
-            A class conditioned `DiTTransformer2DModel` to denoise the encoded image latents.
+            A class conditioned `DiTTransformer2DModel` to denoise the encoded image latents. Initially published as
+            [`Transformer2DModel`](https://huggingface.co/facebook/DiT-XL-2-256/blob/main/transformer/config.json#L2)
+            in the config, but the mismatch can be ignored.
         vae ([`AutoencoderKL`]):
             Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
         scheduler ([`DDIMScheduler`]):
```
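The note is purely informational; loading is unaffected by the legacy class name recorded in the checkpoint config. For example:

```python
import torch
from diffusers import DiTPipeline

# The repo's transformer config says `Transformer2DModel`, but the pipeline
# still resolves and loads it as DiTTransformer2DModel.
pipeline = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
```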

src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py

Lines changed: 3 additions & 1 deletion
```diff
@@ -256,7 +256,9 @@ class PixArtAlphaPipeline(DiffusionPipeline):
             Tokenizer of class
             [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer).
         transformer ([`PixArtTransformer2DModel`]):
-            A text conditioned `PixArtTransformer2DModel` to denoise the encoded image latents.
+            A text conditioned `PixArtTransformer2DModel` to denoise the encoded image latents. Initially published as
+            [`Transformer2DModel`](https://huggingface.co/PixArt-alpha/PixArt-XL-2-1024-MS/blob/main/transformer/config.json#L2)
+            in the config, but the mismatch can be ignored.
         scheduler ([`SchedulerMixin`]):
             A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
     """
```
