Commit ba6c4a8

Merge branch 'main' into groupwise-offloading

2 parents: 954bb7d + d43ce14

2 files changed: +2 additions, -3 deletions

examples/dreambooth/train_dreambooth_lora_sana.py
Lines changed: 2 additions & 1 deletion

@@ -995,7 +995,8 @@ def main(args):
     if args.enable_npu_flash_attention:
         if is_torch_npu_available():
             logger.info("npu flash attention enabled.")
-            transformer.enable_npu_flash_attention()
+            for block in transformer.transformer_blocks:
+                block.attn2.set_use_npu_flash_attention(True)
         else:
             raise ValueError("npu flash attention requires torch_npu extensions and is supported only on npu device ")
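For context on the hunk above: the model-level `transformer.enable_npu_flash_attention()` helper call is replaced by toggling flash attention directly on each block's `attn2` attention module. Below is a minimal sketch of that per-block pattern, assuming `transformer_blocks` is a list of blocks that each expose a diffusers `Attention` module at `attn2`; the helper name is hypothetical and not part of this commit.

# Minimal sketch, not the training script itself. Assumes each block's
# `attn2` is a diffusers Attention module with `set_use_npu_flash_attention`.
from diffusers.utils import is_torch_npu_available

def enable_npu_flash_attention_per_block(transformer):  # hypothetical name
    if not is_torch_npu_available():
        # Same guard as the script: torch_npu and an NPU device are required.
        raise ValueError("npu flash attention requires torch_npu extensions")
    for block in transformer.transformer_blocks:
        block.attn2.set_use_npu_flash_attention(True)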

src/diffusers/loaders/transformer_flux.py
Lines changed: 0 additions & 2 deletions

@@ -177,5 +177,3 @@ def _load_ip_adapter_weights(self, state_dicts, low_cpu_mem_usage=False):
 
         self.encoder_hid_proj = MultiIPAdapterImageProjection(image_projection_layers)
         self.config.encoder_hid_dim_type = "ip_image_proj"
-
-        self.to(dtype=self.dtype, device=self.device)
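Note on the deletion: the removed call re-cast the entire model to the dtype and device it already reports, after the IP-adapter projection layers were attached. A hedged reading, stated as an assumption rather than anything the diff itself says: on a single device the blanket cast is redundant, and with hook-based offloading such as the groupwise offloading this branch targets, a model-wide `.to(...)` can move weights the offloading hooks expect to manage themselves. A narrower alternative, sketched under that assumption, casts only the newly attached submodule:

# Sketch only; this pattern is an assumption, not part of this commit.
self.encoder_hid_proj = MultiIPAdapterImageProjection(image_projection_layers)
self.encoder_hid_proj.to(dtype=self.dtype, device=self.device)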
