@@ -1,11 +1,42 @@
-import torch, os, traceback, sys, warnings, shutil, numpy as np
+import os
+import shutil
+import sys
+import traceback
+import warnings
+
+import numpy as np
+import torch

 os.environ["no_proxy"] = "localhost, 127.0.0.1, ::1"
+import logging
 import threading
-from time import sleep
+from random import shuffle
 from subprocess import Popen
+from time import sleep
+
 import faiss
-from random import shuffle
+import ffmpeg
+import gradio as gr
+import soundfile as sf
+from config import Config
+from fairseq import checkpoint_utils
+from i18n import I18nAuto
+from infer_pack.models import (
+    SynthesizerTrnMs256NSFsid,
+    SynthesizerTrnMs256NSFsid_nono,
+    SynthesizerTrnMs768NSFsid,
+    SynthesizerTrnMs768NSFsid_nono,
+)
+from infer_pack.models_onnx import SynthesizerTrnMsNSFsidM
+from infer_uvr5 import _audio_pre_, _audio_pre_new
+from MDXNet import MDXNetDereverb
+from my_utils import load_audio
+from train.process_ckpt import change_info, extract_small_model, merge, show_info
+from vc_infer_pipeline import VC
+
+# from trainset_preprocess_pipeline import PreProcess
+
+logging.getLogger("numba").setLevel(logging.WARNING)

 now_dir = os.getcwd()
 sys.path.append(now_dir)
@@ -19,41 +50,43 @@
 os.environ["TEMP"] = tmp
 warnings.filterwarnings("ignore")
 torch.manual_seed(114514)
-from i18n import I18nAuto
-import ffmpeg
-from MDXNet import MDXNetDereverb

+
+config = Config()
 i18n = I18nAuto()
 i18n.print()
 # 判断是否有能用来训练和加速推理的N卡
 ngpu = torch.cuda.device_count()
 gpu_infos = []
 mem = []
-if (not torch.cuda.is_available()) or ngpu == 0:
-    if_gpu_ok = False
-else:
-    if_gpu_ok = False
+if_gpu_ok = False
+
+if torch.cuda.is_available() or ngpu != 0:
     for i in range(ngpu):
         gpu_name = torch.cuda.get_device_name(i)
-        if (
-            "10" in gpu_name
-            or "16" in gpu_name
-            or "20" in gpu_name
-            or "30" in gpu_name
-            or "40" in gpu_name
-            or "A2" in gpu_name.upper()
-            or "A3" in gpu_name.upper()
-            or "A4" in gpu_name.upper()
-            or "P4" in gpu_name.upper()
-            or "A50" in gpu_name.upper()
-            or "A60" in gpu_name.upper()
-            or "70" in gpu_name
-            or "80" in gpu_name
-            or "90" in gpu_name
-            or "M4" in gpu_name.upper()
-            or "T4" in gpu_name.upper()
-            or "TITAN" in gpu_name.upper()
-        ):  # A10#A100#V100#A40#P40#M40#K80#A4500
+        if any(
+            value in gpu_name.upper()
+            for value in [
+                "10",
+                "16",
+                "20",
+                "30",
+                "40",
+                "A2",
+                "A3",
+                "A4",
+                "P4",
+                "A50",
+                "A60",
+                "70",
+                "80",
+                "90",
+                "M4",
+                "T4",
+                "TITAN",
+            ]
+        ):
+            # A10#A100#V100#A40#P40#M40#K80#A4500
             if_gpu_ok = True  # 至少有一张能用的N卡
             gpu_infos.append("%s\t%s" % (i, gpu_name))
             mem.append(
@@ -65,32 +98,13 @@
                     + 0.4
                 )
             )
-if if_gpu_ok == True and len(gpu_infos) > 0:
+if if_gpu_ok and len(gpu_infos) > 0:
     gpu_info = "\n".join(gpu_infos)
     default_batch_size = min(mem) // 2
 else:
     gpu_info = i18n("很遗憾您这没有能用的显卡来支持您训练")
     default_batch_size = 1
 gpus = "-".join([i[0] for i in gpu_infos])
-from infer_pack.models import (
-    SynthesizerTrnMs256NSFsid,
-    SynthesizerTrnMs256NSFsid_nono,
-    SynthesizerTrnMs768NSFsid,
-    SynthesizerTrnMs768NSFsid_nono,
-)
-import soundfile as sf
-from fairseq import checkpoint_utils
-import gradio as gr
-import logging
-from vc_infer_pipeline import VC
-from config import Config
-from infer_uvr5 import _audio_pre_, _audio_pre_new
-from my_utils import load_audio
-from train.process_ckpt import show_info, change_info, merge, extract_small_model
-
-config = Config()
-# from trainset_preprocess_pipeline import PreProcess
-logging.getLogger("numba").setLevel(logging.WARNING)


 class ToolButton(gr.Button, gr.components.FormComponent):
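
Note on the GPU whitelist rewritten above: the new any(...) form is a plain substring test against the upper-cased device name, equivalent to the long or-chain it replaces. A minimal, self-contained sketch of the same idea (the helper name and sample device strings are illustrative, not part of this commit):

ACCEL_KEYWORDS = ["10", "16", "20", "30", "40", "A2", "A3", "A4", "P4",
                  "A50", "A60", "70", "80", "90", "M4", "T4", "TITAN"]

def looks_like_supported_gpu(gpu_name: str) -> bool:
    # any() short-circuits on the first keyword found in the upper-cased name
    return any(keyword in gpu_name.upper() for keyword in ACCEL_KEYWORDS)

print(looks_like_supported_gpu("NVIDIA GeForce RTX 3060"))  # True ("30" matches)
print(looks_like_supported_gpu("NVIDIA GeForce GT 710"))    # also True ("10" matches), substring checks are permissive

The permissiveness is intentional: A100, V100, P40, M40, K80 and A4500 from the trailing comment are all caught by the short substrings rather than by exact model names.
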
@@ -164,7 +178,7 @@ def vc_single(
         if audio_max > 1:
             audio /= audio_max
         times = [0, 0, 0]
-        if hubert_model == None:
+        if not hubert_model:
             load_hubert()
         if_f0 = cpt.get("f0", 1)
         file_index = (
@@ -203,7 +217,7 @@ def vc_single(
             protect,
             f0_file=f0_file,
         )
-        if resample_sr >= 16000 and tgt_sr != resample_sr:
+        if tgt_sr != resample_sr >= 16000:
             tgt_sr = resample_sr
         index_info = (
             "Using index:%s." % file_index
@@ -385,7 +399,7 @@ def get_vc(sid):
     global n_spk, tgt_sr, net_g, vc, cpt, version
     if sid == "" or sid == []:
         global hubert_model
-        if hubert_model != None:  # 考虑到轮询, 需要加个判断看是否 sid 是由有模型切换到无模型的
+        if hubert_model is not None:  # 考虑到轮询, 需要加个判断看是否 sid 是由有模型切换到无模型的
             print("clean_empty_cache")
             del net_g, n_spk, vc, hubert_model, tgt_sr  # ,cpt
             hubert_model = net_g = n_spk = vc = hubert_model = tgt_sr = None
@@ -471,7 +485,7 @@ def clean():

 def if_done(done, p):
     while 1:
-        if p.poll() == None:
+        if p.poll() is None:
             sleep(0.5)
         else:
             break
@@ -484,7 +498,7 @@ def if_done_multi(done, ps):
         # 只要有一个进程未结束都不停
         flag = 1
         for p in ps:
-            if p.poll() == None:
+            if p.poll() is None:
                 flag = 0
                 sleep(0.5)
                 break
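
The two hunks above switch p.poll() == None to the idiomatic identity check. Popen.poll() returns None while the child process is still running and its exit code afterwards, so these loops keep sleeping until the worker finishes. A small standalone sketch (the child command is hypothetical):

import sys
from subprocess import Popen
from time import sleep

p = Popen([sys.executable, "-c", "import time; time.sleep(1)"])  # hypothetical child process
while p.poll() is None:  # poll() returns None until the process exits
    sleep(0.5)
print("exit code:", p.returncode)
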
@@ -519,7 +533,7 @@ def preprocess_dataset(trainset_dir, exp_dir, sr, n_p):
         with open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "r") as f:
             yield (f.read())
         sleep(1)
-        if done[0] == True:
+        if done[0]:
             break
     with open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "r") as f:
         log = f.read()
@@ -557,7 +571,7 @@ def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir, version19):
             ) as f:
                 yield (f.read())
             sleep(1)
-            if done[0] == True:
+            if done[0]:
                 break
         with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f:
             log = f.read()
@@ -605,7 +619,7 @@ def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir, version19):
         with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f:
             yield (f.read())
         sleep(1)
-        if done[0] == True:
+        if done[0]:
             break
     with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f:
         log = f.read()
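
The three if done[0]: hunks above share one log-tailing pattern: a watcher thread flips done[0] once the subprocess exits, while the generator re-reads the log file and yields it to the Gradio UI every second. A stripped-down sketch of the pattern, with an illustrative path and flag (not the exact code):

from time import sleep

def tail_log(log_path, done):
    # done is a one-element list shared with the watcher thread, e.g. [False]
    while 1:
        with open(log_path, "r") as f:
            yield f.read()  # push the current log contents to the caller/UI
        sleep(1)
        if done[0]:  # the watcher sets done[0] = True when the subprocess exits
            break
    with open(log_path, "r") as f:
        yield f.read()  # final read so the last lines are not lost
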
@@ -616,51 +630,98 @@ def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir, version19):
 def change_sr2(sr2, if_f0_3, version19):
     path_str = "" if version19 == "v1" else "_v2"
     f0_str = "f0" if if_f0_3 else ""
-    if_pretrained_generator_exist = os.access("pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2), os.F_OK)
-    if_pretrained_discriminator_exist = os.access("pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2), os.F_OK)
-    if (if_pretrained_generator_exist == False):
-        print("pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2), "not exist, will not use pretrained model")
-    if (if_pretrained_discriminator_exist == False):
-        print("pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2), "not exist, will not use pretrained model")
+    if_pretrained_generator_exist = os.access(
+        "pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2), os.F_OK
+    )
+    if_pretrained_discriminator_exist = os.access(
+        "pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2), os.F_OK
+    )
+    if not if_pretrained_generator_exist:
+        print(
+            "pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2),
+            "not exist, will not use pretrained model",
+        )
+    if not if_pretrained_discriminator_exist:
+        print(
+            "pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2),
+            "not exist, will not use pretrained model",
+        )
     return (
-        ("pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2)) if if_pretrained_generator_exist else "",
-        ("pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2)) if if_pretrained_discriminator_exist else "",
-        {"visible": True, "__type__": "update"}
+        "pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2)
+        if if_pretrained_generator_exist
+        else "",
+        "pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2)
+        if if_pretrained_discriminator_exist
+        else "",
+        {"visible": True, "__type__": "update"},
     )

+
 def change_version19(sr2, if_f0_3, version19):
     path_str = "" if version19 == "v1" else "_v2"
     f0_str = "f0" if if_f0_3 else ""
-    if_pretrained_generator_exist = os.access("pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2), os.F_OK)
-    if_pretrained_discriminator_exist = os.access("pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2), os.F_OK)
-    if (if_pretrained_generator_exist == False):
-        print("pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2), "not exist, will not use pretrained model")
-    if (if_pretrained_discriminator_exist == False):
-        print("pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2), "not exist, will not use pretrained model")
+    if_pretrained_generator_exist = os.access(
+        "pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2), os.F_OK
+    )
+    if_pretrained_discriminator_exist = os.access(
+        "pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2), os.F_OK
+    )
+    if not if_pretrained_generator_exist:
+        print(
+            "pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2),
+            "not exist, will not use pretrained model",
+        )
+    if not if_pretrained_discriminator_exist:
+        print(
+            "pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2),
+            "not exist, will not use pretrained model",
+        )
     return (
-        ("pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2)) if if_pretrained_generator_exist else "",
-        ("pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2)) if if_pretrained_discriminator_exist else "",
+        "pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2)
+        if if_pretrained_generator_exist
+        else "",
+        "pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2)
+        if if_pretrained_discriminator_exist
+        else "",
     )


 def change_f0(if_f0_3, sr2, version19):  # f0method8,pretrained_G14,pretrained_D15
     path_str = "" if version19 == "v1" else "_v2"
-    if_pretrained_generator_exist = os.access("pretrained%s/f0G%s.pth" % (path_str, sr2), os.F_OK)
-    if_pretrained_discriminator_exist = os.access("pretrained%s/f0D%s.pth" % (path_str, sr2), os.F_OK)
-    if (if_pretrained_generator_exist == False):
-        print("pretrained%s/f0G%s.pth" % (path_str, sr2), "not exist, will not use pretrained model")
-    if (if_pretrained_discriminator_exist == False):
-        print("pretrained%s/f0D%s.pth" % (path_str, sr2), "not exist, will not use pretrained model")
+    if_pretrained_generator_exist = os.access(
+        "pretrained%s/f0G%s.pth" % (path_str, sr2), os.F_OK
+    )
+    if_pretrained_discriminator_exist = os.access(
+        "pretrained%s/f0D%s.pth" % (path_str, sr2), os.F_OK
+    )
+    if not if_pretrained_generator_exist:
+        print(
+            "pretrained%s/f0G%s.pth" % (path_str, sr2),
+            "not exist, will not use pretrained model",
+        )
+    if not if_pretrained_discriminator_exist:
+        print(
+            "pretrained%s/f0D%s.pth" % (path_str, sr2),
+            "not exist, will not use pretrained model",
+        )
     if if_f0_3:
         return (
             {"visible": True, "__type__": "update"},
-            "pretrained%s/f0G%s.pth" % (path_str, sr2) if if_pretrained_generator_exist else "",
-            "pretrained%s/f0D%s.pth" % (path_str, sr2) if if_pretrained_discriminator_exist else "",
+            "pretrained%s/f0G%s.pth" % (path_str, sr2)
+            if if_pretrained_generator_exist
+            else "",
+            "pretrained%s/f0D%s.pth" % (path_str, sr2)
+            if if_pretrained_discriminator_exist
+            else "",
         )
     return (
         {"visible": False, "__type__": "update"},
-        ("pretrained%s/G%s.pth" % (path_str, sr2)) if if_pretrained_generator_exist else "",
-        ("pretrained%s/D%s.pth" % (path_str, sr2)) if if_pretrained_discriminator_exist else "",
+        ("pretrained%s/G%s.pth" % (path_str, sr2))
+        if if_pretrained_generator_exist
+        else "",
+        ("pretrained%s/D%s.pth" % (path_str, sr2))
+        if if_pretrained_discriminator_exist
+        else "",
     )


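
The three helpers above (change_sr2, change_version19, change_f0) share one pattern: probe the pretrained checkpoint with os.access(path, os.F_OK), which is a pure existence test, warn if it is missing, and fall back to an empty string so training proceeds without a pretrained model. A compact sketch of that pattern, with an illustrative file name:

import os

def pick_pretrained(path):
    exists = os.access(path, os.F_OK)  # F_OK only checks that the file exists
    if not exists:
        print(path, "not exist, will not use pretrained model")
    return path if exists else ""  # "" tells the trainer to skip the pretrained weights

pretrained_g = pick_pretrained("pretrained_v2/f0G40k.pth")  # illustrative path
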
@@ -809,7 +870,7 @@ def train_index(exp_dir1, version19):
         if version19 == "v1"
         else "%s/3_feature768" % (exp_dir)
     )
-    if os.path.exists(feature_dir) == False:
+    if not os.path.exists(feature_dir):
         return "请先进行特征提取!"
     listdir_res = list(os.listdir(feature_dir))
     if len(listdir_res) == 0:
@@ -1014,7 +1075,7 @@ def get_info_str(strr):
     if gpus16:
         cmd = (
             config.python_cmd
-            + " train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -g %s -te %s -se %s %s %s -l %s -c %s -sw %s -v %s"
+            + " train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -g %s -te %s -se %s %s %s -l %s -c %s -sw %s -v %s"
             % (
                 exp_dir1,
                 sr2,
@@ -1098,10 +1159,7 @@ def get_info_str(strr):

 # ckpt_path2.change(change_info_,[ckpt_path2],[sr__,if_f0__])
 def change_info_(ckpt_path):
-    if (
-        os.path.exists(ckpt_path.replace(os.path.basename(ckpt_path), "train.log"))
-        == False
-    ):
+    if not os.path.exists(ckpt_path.replace(os.path.basename(ckpt_path), "train.log")):
         return {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}
     try:
         with open(
@@ -1116,8 +1174,6 @@ def change_info_(ckpt_path):
         return {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}


-from infer_pack.models_onnx import SynthesizerTrnMsNSFsidM
-

 def export_onnx(ModelPath, ExportedPath):
     cpt = torch.load(ModelPath, map_location="cpu")