Commit d932f67

back

1 parent 95a103f commit d932f67

10 files changed: +20 -17 lines changed

src/diffusers/pipelines/audioldm2/pipeline_audioldm2.py

Lines changed: 1 addition & 1 deletion
@@ -788,7 +788,7 @@ def check_inputs(

         if transcription is None:
             if self.text_encoder_2.config.model_type == "vits":
-                raise ValueError("Cannot forward without transcription. Please make sure to have transcription")
+                raise ValueError("Cannot forward without transcription. Please make sure to" " have transcription")
         elif transcription is not None and (
             not isinstance(transcription, str) and not isinstance(transcription, list)
         ):
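The old and new lines are functionally identical: Python merges adjacent string literals at compile time, so only the source layout changes. A minimal sketch of the equivalence:

    # Adjacent string literals are concatenated by the Python compiler,
    # so the split form raises exactly the same message.
    single = "Cannot forward without transcription. Please make sure to have transcription"
    split = "Cannot forward without transcription. Please make sure to" " have transcription"
    assert single == split  # identical at runtime; only the diff's line layout differs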

src/diffusers/pipelines/shap_e/renderer.py

Lines changed: 6 additions & 6 deletions
@@ -983,9 +983,9 @@ def decode_to_mesh(
         fields = torch.cat(fields, dim=1)
         fields = fields.float()

-        assert len(fields.shape) == 3 and fields.shape[-1] == 1, (
-            f"expected [meta_batch x inner_batch] SDF results, but got {fields.shape}"
-        )
+        assert (
+            len(fields.shape) == 3 and fields.shape[-1] == 1
+        ), f"expected [meta_batch x inner_batch] SDF results, but got {fields.shape}"

         fields = fields.reshape(1, *([grid_size] * 3))

@@ -1039,9 +1039,9 @@ def decode_to_mesh(
         textures = textures.float()

         # 3.3 augument the mesh with texture data
-        assert len(textures.shape) == 3 and textures.shape[-1] == len(texture_channels), (
-            f"expected [meta_batch x inner_batch x texture_channels] field results, but got {textures.shape}"
-        )
+        assert len(textures.shape) == 3 and textures.shape[-1] == len(
+            texture_channels
+        ), f"expected [meta_batch x inner_batch x texture_channels] field results, but got {textures.shape}"

         for m, texture in zip(raw_meshes, textures):
             texture = texture[: len(m.verts)]
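Both assert layouts compile to the same statement (`assert <condition>, <message>`); only where the line breaks fall changes. A sketch of the check, using a hypothetical stand-in tensor in place of the decoder output:

    import torch

    # Hypothetical stand-in with the expected [meta_batch, inner_batch, 1] shape.
    fields = torch.zeros(2, 4, 1)
    # Same condition and message as the hunk above, in the post-commit layout.
    assert (
        len(fields.shape) == 3 and fields.shape[-1] == 1
    ), f"expected [meta_batch x inner_batch] SDF results, but got {fields.shape}"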

src/diffusers/pipelines/stable_audio/pipeline_stable_audio.py

Lines changed: 1 addition & 1 deletion
@@ -584,7 +584,7 @@ def __call__(

         if audio_end_in_s - audio_start_in_s > max_audio_length_in_s:
             raise ValueError(
-                f"The total audio length requested ({audio_end_in_s - audio_start_in_s}s) is longer than the model maximum possible length ({max_audio_length_in_s}). Make sure that 'audio_end_in_s-audio_start_in_s<={max_audio_length_in_s}'."
+                f"The total audio length requested ({audio_end_in_s-audio_start_in_s}s) is longer than the model maximum possible length ({max_audio_length_in_s}). Make sure that 'audio_end_in_s-audio_start_in_s<={max_audio_length_in_s}'."
             )

         waveform_start = int(audio_start_in_s * self.vae.config.sampling_rate)
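Here only the whitespace inside the f-string replacement field changes; the rendered message is byte-for-byte the same. A sketch with hypothetical values:

    # Hypothetical values; whitespace inside an f-string expression does not
    # affect the formatted result, only the source line width.
    audio_start_in_s, audio_end_in_s = 0.0, 10.0
    assert f"{audio_end_in_s - audio_start_in_s}s" == f"{audio_end_in_s-audio_start_in_s}s"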

src/diffusers/schedulers/__init__.py

Lines changed: 0 additions & 2 deletions
@@ -44,7 +44,6 @@
     _import_structure["scheduling_consistency_models"] = ["CMStochasticIterativeScheduler"]
     _import_structure["scheduling_ddim"] = ["DDIMScheduler"]
     _import_structure["scheduling_ddim_cogvideox"] = ["CogVideoXDDIMScheduler"]
-    _import_structure["scheduling_ddim_cogview4"] = ["CogView4DDIMScheduler"]
     _import_structure["scheduling_ddim_inverse"] = ["DDIMInverseScheduler"]
     _import_structure["scheduling_ddim_parallel"] = ["DDIMParallelScheduler"]
     _import_structure["scheduling_ddpm"] = ["DDPMScheduler"]
@@ -145,7 +144,6 @@
         from .scheduling_consistency_models import CMStochasticIterativeScheduler
         from .scheduling_ddim import DDIMScheduler
         from .scheduling_ddim_cogvideox import CogVideoXDDIMScheduler
-        from .scheduling_ddim_cogview4 import CogView4DDIMScheduler
         from .scheduling_ddim_inverse import DDIMInverseScheduler
         from .scheduling_ddim_parallel import DDIMParallelScheduler
         from .scheduling_ddpm import DDPMScheduler
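Unlike the formatting-only hunks elsewhere in this commit, this one changes behavior: `CogView4DDIMScheduler` is dropped from both the lazy-import table and the TYPE_CHECKING imports. Assuming no other module re-exports the class, importing it from the subpackage now fails:

    # Sketch, assuming nothing else re-exports the class after this commit.
    try:
        from diffusers.schedulers import CogView4DDIMScheduler
    except (ImportError, AttributeError) as err:  # lazy modules may surface either
        print(f"no longer exported: {err}")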

src/diffusers/schedulers/scheduling_consistency_models.py

Lines changed: 2 additions & 1 deletion
@@ -203,7 +203,8 @@ def set_timesteps(

         if timesteps[0] >= self.config.num_train_timesteps:
             raise ValueError(
-                f"`timesteps` must start before `self.config.train_timesteps`: {self.config.num_train_timesteps}."
+                f"`timesteps` must start before `self.config.train_timesteps`:"
+                f" {self.config.num_train_timesteps}."
             )

         timesteps = np.array(timesteps, dtype=np.int64)
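The identical message reflow recurs in the DDPM, DDPM-parallel, LCM, and TCD schedulers below; in every case the wording and the guard are unchanged. A sketch of the guard being exercised, assuming this scheduler's shipped default of `num_train_timesteps=40`:

    from diffusers import CMStochasticIterativeScheduler

    scheduler = CMStochasticIterativeScheduler()
    # A leading custom timestep >= num_train_timesteps trips the ValueError above.
    try:
        scheduler.set_timesteps(timesteps=[scheduler.config.num_train_timesteps, 0])
    except ValueError as err:
        print(err)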

src/diffusers/schedulers/scheduling_ddpm.py

Lines changed: 2 additions & 1 deletion
@@ -279,7 +279,8 @@ def set_timesteps(

         if timesteps[0] >= self.config.num_train_timesteps:
             raise ValueError(
-                f"`timesteps` must start before `self.config.train_timesteps`: {self.config.num_train_timesteps}."
+                f"`timesteps` must start before `self.config.train_timesteps`:"
+                f" {self.config.num_train_timesteps}."
             )

         timesteps = np.array(timesteps, dtype=np.int64)

src/diffusers/schedulers/scheduling_ddpm_parallel.py

Lines changed: 2 additions & 1 deletion
@@ -289,7 +289,8 @@ def set_timesteps(

         if timesteps[0] >= self.config.num_train_timesteps:
             raise ValueError(
-                f"`timesteps` must start before `self.config.train_timesteps`: {self.config.num_train_timesteps}."
+                f"`timesteps` must start before `self.config.train_timesteps`:"
+                f" {self.config.num_train_timesteps}."
             )

         timesteps = np.array(timesteps, dtype=np.int64)

src/diffusers/schedulers/scheduling_lcm.py

Lines changed: 2 additions & 1 deletion
@@ -413,7 +413,8 @@ def set_timesteps(

         if timesteps[0] >= self.config.num_train_timesteps:
             raise ValueError(
-                f"`timesteps` must start before `self.config.train_timesteps`: {self.config.num_train_timesteps}."
+                f"`timesteps` must start before `self.config.train_timesteps`:"
+                f" {self.config.num_train_timesteps}."
             )

         # Raise warning if timestep schedule does not start with self.config.num_train_timesteps - 1

src/diffusers/schedulers/scheduling_tcd.py

Lines changed: 2 additions & 1 deletion
@@ -431,7 +431,8 @@ def set_timesteps(

         if timesteps[0] >= self.config.num_train_timesteps:
             raise ValueError(
-                f"`timesteps` must start before `self.config.train_timesteps`: {self.config.num_train_timesteps}."
+                f"`timesteps` must start before `self.config.train_timesteps`:"
+                f" {self.config.num_train_timesteps}."
             )

         # Raise warning if timestep schedule does not start with self.config.num_train_timesteps - 1

src/diffusers/training_utils.py

Lines changed: 2 additions & 2 deletions
@@ -241,7 +241,7 @@ def _set_state_dict_into_text_encoder(
     """

     text_encoder_state_dict = {
-        f"{k.replace(prefix, '')}": v for k, v in lora_state_dict.items() if k.startswith(prefix)
+        f'{k.replace(prefix, "")}': v for k, v in lora_state_dict.items() if k.startswith(prefix)
     }
     text_encoder_state_dict = convert_state_dict_to_peft(convert_state_dict_to_diffusers(text_encoder_state_dict))
     set_peft_model_state_dict(text_encoder, text_encoder_state_dict, adapter_name="default")

@@ -578,7 +578,7 @@ def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
         """

         if self.temp_stored_params is None:
-            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights to `restore()`")
+            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`")
         if self.foreach:
             torch._foreach_copy_(
                 [param.data for param in parameters], [c_param.data for c_param in self.temp_stored_params]
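Both hunks are again quote-style and literal-splitting reshuffles with no behavioral change: the comprehension still strips the prefix from matching keys either way. A sketch with a toy state dict (keys hypothetical):

    # Toy example; both f-string spellings build the same dict.
    prefix = "text_encoder."
    lora_state_dict = {"text_encoder.q_proj.weight": 1.0, "unet.conv.weight": 2.0}
    stripped = {f'{k.replace(prefix, "")}': v for k, v in lora_state_dict.items() if k.startswith(prefix)}
    assert stripped == {"q_proj.weight": 1.0}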
