
Commit 195e0b0

Remove useless code. (#10223)
1 parent 187f436 commit 195e0b0

File tree

1 file changed: 0 additions, 11 deletions


comfy/ldm/ace/vae/music_dcae_pipeline.py

Lines changed: 0 additions & 11 deletions
@@ -23,8 +23,6 @@ def __init__(self, source_sample_rate=None, dcae_config={}, vocoder_config={}):
         else:
             self.source_sample_rate = source_sample_rate
 
-        # self.resampler = torchaudio.transforms.Resample(source_sample_rate, 44100)
-
         self.transform = transforms.Compose([
             transforms.Normalize(0.5, 0.5),
         ])
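Note (not part of the commit): the deleted comment referenced torchaudio's module-based resampler, while decode() further down resamples with the functional API. A minimal sketch of the two equivalent styles, using a made-up source sample rate rather than anything from the repo:

import torch
import torchaudio

source_sample_rate = 22050                      # assumed example rate
wav = torch.randn(1, source_sample_rate)        # one second of dummy audio

# Module style (what the deleted comment referenced): build once, reuse.
resampler = torchaudio.transforms.Resample(source_sample_rate, 44100)
wav_a = resampler(wav)

# Functional style (what decode() actually calls): no stored state.
wav_b = torchaudio.functional.resample(wav, source_sample_rate, 44100)
# Both produce 44.1 kHz audio of the same length.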
@@ -37,10 +35,6 @@ def __init__(self, source_sample_rate=None, dcae_config={}, vocoder_config={}):
         self.scale_factor = 0.1786
         self.shift_factor = -1.9091
 
-    def load_audio(self, audio_path):
-        audio, sr = torchaudio.load(audio_path)
-        return audio, sr
-
     def forward_mel(self, audios):
         mels = []
         for i in range(len(audios)):
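Note (not part of the commit): the deleted load_audio helper was a thin wrapper around torchaudio.load, so callers can do the same thing directly. A minimal sketch with a hypothetical path:

import torchaudio

audio_path = "example.wav"                # hypothetical file
audio, sr = torchaudio.load(audio_path)   # waveform tensor [channels, samples] and its sample rate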
@@ -73,10 +67,8 @@ def encode(self, audios, audio_lengths=None, sr=None):
             latent = self.dcae.encoder(mel.unsqueeze(0))
             latents.append(latent)
         latents = torch.cat(latents, dim=0)
-        # latent_lengths = (audio_lengths / sr * 44100 / 512 / self.time_dimention_multiple).long()
         latents = (latents - self.shift_factor) * self.scale_factor
         return latents
-        # return latents, latent_lengths
 
     @torch.no_grad()
     def decode(self, latents, audio_lengths=None, sr=None):
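Note (not part of the commit): the deleted latent_lengths comment encoded how a latent's time dimension follows from the audio length: seconds of audio, rescaled to 44.1 kHz samples, divided by the 512-sample mel hop and by the encoder's time_dimention_multiple. A worked sketch with an assumed multiple of 8 (the real value is defined elsewhere in the class, not in this diff):

import torch

audio_lengths = torch.tensor([441000])   # e.g. 10 s of audio
sr = 44100                               # sample rate of that audio
time_dimention_multiple = 8              # assumed DCAE time-downsampling factor

latent_lengths = (audio_lengths / sr * 44100 / 512 / time_dimention_multiple).long()
print(latent_lengths)                    # tensor([107]) with these assumed numbers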
@@ -91,17 +83,14 @@ def decode(self, latents, audio_lengths=None, sr=None):
             wav = self.vocoder.decode(mels[0]).squeeze(1)
 
             if sr is not None:
-                # resampler = torchaudio.transforms.Resample(44100, sr).to(latents.device).to(latents.dtype)
                 wav = torchaudio.functional.resample(wav, 44100, sr)
-                # wav = resampler(wav)
             else:
                 sr = 44100
             pred_wavs.append(wav)
 
         if audio_lengths is not None:
             pred_wavs = [wav[:, :length].cpu() for wav, length in zip(pred_wavs, audio_lengths)]
         return torch.stack(pred_wavs)
-        # return sr, pred_wavs
 
     def forward(self, audios, audio_lengths=None, sr=None):
         latents, latent_lengths = self.encode(audios=audios, audio_lengths=audio_lengths, sr=sr)
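Note (not part of the commit): the end of decode() trims each predicted waveform to its requested length and stacks the batch. A minimal sketch of that step with dummy tensors (torch.stack requires the trimmed lengths to match):

import torch

pred_wavs = [torch.randn(1, 44100 * 3) for _ in range(2)]   # two 3 s dummy outputs at 44.1 kHz
audio_lengths = [44100 * 2, 44100 * 2]                      # requested lengths (equal so stack works)

pred_wavs = [wav[:, :length].cpu() for wav, length in zip(pred_wavs, audio_lengths)]
batch = torch.stack(pred_wavs)                              # shape [2, 1, 88200]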
