Commit f8d4891

additional styling
1 parent 57c15f9 commit f8d4891

2 files changed: 3 additions, 4 deletions

comfy/ldm/higgsv2/preprocess.py

Lines changed: 3 additions & 3 deletions
@@ -366,13 +366,13 @@ def convert_nan_to_none(obj):
             audio_contents.append(content)
         if role == "user" or role == "system":
             text_tokens = tokenizer.encode(
-                f"<|audio_bos|><|AUDIO|><|audio_eos|>",
+                "<|audio_bos|><|AUDIO|><|audio_eos|>",
                 add_special_tokens=False,
             )
             input_tokens.extend(text_tokens)
         elif role == "assistant":
             text_tokens = tokenizer.encode(
-                f"<|audio_out_bos|><|AUDIO_OUT|><|audio_eos|>",
+                "<|audio_out_bos|><|AUDIO_OUT|><|audio_eos|>",
                 add_special_tokens=False,
             )
             input_tokens.extend(text_tokens)
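
Both edits in this hunk drop an f prefix from strings that contain no {...} placeholders. An f-string without placeholders is just a plain string literal (pyflakes flags this as F541), so the change is purely stylistic. A minimal standalone check, not part of the commit:

# An f-string with no {...} fields evaluates to the same value as a plain literal.
assert f"<|audio_bos|><|AUDIO|><|audio_eos|>" == "<|audio_bos|><|AUDIO|><|audio_eos|>"

# The prefix only matters once a placeholder appears:
token = "<|AUDIO|>"
assert f"<|audio_bos|>{token}<|audio_eos|>" == "<|audio_bos|><|AUDIO|><|audio_eos|>"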
@@ -587,7 +587,7 @@ def __call__(self, batch: List[ChatMLDatasetSample]):
         # I tried to remove the for-loop in original implementation
         # but to do batching with padding caused problem so I turned it into a list compre.
         lengths = [seg.shape[1] for seg in audio_in_ids_l]
-        aug_lengths = [l + 2 for l in lengths]
+        aug_lengths = [length + 2 for length in lengths]
         audio_in_ids_start = torch.cumsum(
             torch.tensor([0] + aug_lengths[:-1], dtype=torch.long), dim=0
         )
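
The second hunk renames the loop variable l to length; single-letter l is the classic ambiguous name linters flag as E741, since it is easily misread as 1 or I. Behavior is identical. For context, this code computes start offsets for audio segments packed end to end: each length is padded by 2 (presumably for the delimiter tokens wrapped around each segment; the diff itself does not say), and an exclusive cumulative sum converts the padded lengths into start indices. A standalone sketch of the pattern:

import torch

lengths = [5, 3, 7]                               # per-segment token counts
aug_lengths = [length + 2 for length in lengths]  # +2 per segment (assumed: bos/eos delimiters)

# Exclusive cumsum: prepend 0 and drop the final length, so entry i is the
# offset at which segment i starts in the packed sequence.
starts = torch.cumsum(torch.tensor([0] + aug_lengths[:-1], dtype=torch.long), dim=0)
print(starts)  # tensor([ 0,  7, 12])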

comfy/ldm/higgsv2/tokenizer.py

Lines changed: 0 additions & 1 deletion
@@ -1,4 +1,3 @@
-import os
 import math
 import torch
 import torch.nn as nn
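
Taken together, the three changes match standard lint findings: F401 (unused import, the os removed here), F541 (f-string without placeholders), and E741 (ambiguous variable name). The commit does not say what tooling prompted them, but a linter such as ruff would report all three; a hypothetical invocation:

ruff check --select F401,F541,E741 comfy/ldm/higgsv2/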
