import os
import sys

import numpy as np
import torch
from multiprocessing import cpu_count

now_dir = os.getcwd()
sys.path.append(now_dir)

####
# USAGE
#
# From your terminal (or CMD):
# python infer_cli.py [TRANSPOSE_VALUE] "[INPUT_PATH]" "[OUTPUT_PATH]" "[MODEL_PATH]" "[INDEX_FILE_PATH]" "[INFERENCE_DEVICE]" "[METHOD]"
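#
# A hypothetical example (paths and model names are placeholders for your own
# files; "harvest" is one of the supported F0 methods):
# python infer_cli.py 0 "input.wav" "output.wav" "weights/mymodel.pth" "logs/added_mymodel.index" "cuda:0" "harvest"
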
using_cli = False
device = "cuda:0"
is_half = False

if len(sys.argv) > 7:
    f0_up_key = int(sys.argv[1])  # transpose value (semitones)
    input_path = sys.argv[2]
    output_path = sys.argv[3]
    model_path = sys.argv[4]
    file_index = sys.argv[5]  # .index file
    device = sys.argv[6]
    f0_method = sys.argv[7]  # pm, harvest, or crepe

    using_cli = True

    # Additional parameters that could be exposed here in the future:
    # file_index2 = sys.argv[8]
    # index_rate = float(sys.argv[10])  # feature search ratio
    # filter_radius = float(sys.argv[11])  # median filter radius for pitch
    # resample_sr = float(sys.argv[12])  # resample audio in post-processing
    # rms_mix_rate = float(sys.argv[13])  # volume envelope mix rate
    print(sys.argv)

class Config:
    def __init__(self, device, is_half):
        self.device = device
        self.is_half = is_half
        self.n_cpu = 0
        self.gpu_name = None
        self.gpu_mem = None
        self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()

    def device_config(self) -> tuple:
        if torch.cuda.is_available() and self.device != "cpu":
            i_device = int(self.device.split(":")[-1])
            self.gpu_name = torch.cuda.get_device_name(i_device)
            if (
                ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
                or "P40" in self.gpu_name.upper()
                or "1060" in self.gpu_name
                or "1070" in self.gpu_name
                or "1080" in self.gpu_name
            ):
                print("16-series/10-series GPUs and P40 are forced to single precision")
                self.is_half = False
                # These cards cannot run fp16, so rewrite the bundled configs
                # to disable half precision and lower the preprocessing cutoff.
                for config_file in ["32k.json", "40k.json", "48k.json"]:
                    with open(f"configs/{config_file}", "r") as f:
                        strr = f.read().replace("true", "false")
                    with open(f"configs/{config_file}", "w") as f:
                        f.write(strr)
                with open("trainset_preprocess_pipeline_print.py", "r") as f:
                    strr = f.read().replace("3.7", "3.0")
                with open("trainset_preprocess_pipeline_print.py", "w") as f:
                    f.write(strr)
            else:
                self.gpu_name = None
            # Total VRAM in GiB (rounded)
            self.gpu_mem = int(
                torch.cuda.get_device_properties(i_device).total_memory
                / 1024
                / 1024
                / 1024
                + 0.4
            )
            if self.gpu_mem <= 4:
                with open("trainset_preprocess_pipeline_print.py", "r") as f:
                    strr = f.read().replace("3.7", "3.0")
                with open("trainset_preprocess_pipeline_print.py", "w") as f:
                    f.write(strr)
        elif torch.backends.mps.is_available():
            print("No supported NVIDIA GPU found, using MPS for inference")
            self.device = "mps"
        else:
            print("No supported NVIDIA GPU found, using CPU for inference")
            self.device = "cpu"
            self.is_half = False

        if self.n_cpu == 0:
            self.n_cpu = cpu_count()

        if self.is_half:
            # Settings for 6 GB of VRAM
            x_pad = 3
            x_query = 10
            x_center = 60
            x_max = 65
        else:
            # Settings for 5 GB of VRAM
            x_pad = 1
            x_query = 6
            x_center = 38
            x_max = 41

        if self.gpu_mem is not None and self.gpu_mem <= 4:
            x_pad = 1
            x_query = 5
            x_center = 30
            x_max = 32

        return x_pad, x_query, x_center, x_max

config = Config(device, is_half)

from vc_infer_pipeline import VC

# The 768-dim synthesizer variants are needed for v2 checkpoints (see get_vc).
from infer_pack.models import (
    SynthesizerTrnMs256NSFsid,
    SynthesizerTrnMs256NSFsid_nono,
    SynthesizerTrnMs768NSFsid,
    SynthesizerTrnMs768NSFsid_nono,
)
from my_utils import load_audio
from fairseq import checkpoint_utils
from scipy.io import wavfile

hubert_model = None

def load_hubert():
    # Load the HuBERT feature extractor used to encode the source audio.
    global hubert_model
    models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
        ["hubert_base.pt"],
        suffix="",
    )
    hubert_model = models[0]
    hubert_model = hubert_model.to(config.device)
    if config.is_half:
        hubert_model = hubert_model.half()
    else:
        hubert_model = hubert_model.float()
    hubert_model.eval()

def vc_single(
    sid=0,
    input_audio_path=None,
    f0_up_key=0,
    f0_file=None,
    f0_method="pm",
    file_index="",  # path to the .index file
    file_index2="",
    # file_big_npy,
    index_rate=1.0,
    filter_radius=3,
    resample_sr=0,
    rms_mix_rate=1.0,
    model_path="",
    output_path="",
    protect=0.33,
):
    global tgt_sr, net_g, vc, hubert_model, version
    get_vc(model_path)
    if input_audio_path is None:
        return "You need to upload an audio file", None

    f0_up_key = int(f0_up_key)
    audio = load_audio(input_audio_path, 16000)
    audio_max = np.abs(audio).max() / 0.95

    if audio_max > 1:
        audio /= audio_max
    times = [0, 0, 0]

    if hubert_model is None:
        load_hubert()

    if_f0 = cpt.get("f0", 1)

    # Normalize the index path and prefer the "added_*" index over "trained_*".
    file_index = (
        file_index.strip(" ")
        .strip('"')
        .strip("\n")
        .strip('"')
        .strip(" ")
        .replace("trained", "added")
        if file_index != ""
        else file_index2
    )

    audio_opt = vc.pipeline(
        hubert_model,
        net_g,
        sid,
        audio,
        input_audio_path,
        times,
        f0_up_key,
        f0_method,
        file_index,
        # file_big_npy,
        index_rate,
        if_f0,
        filter_radius,
        tgt_sr,
        resample_sr,
        rms_mix_rate,
        version,
        f0_file=f0_file,
        protect=protect,
    )
    wavfile.write(output_path, tgt_sr, audio_opt)
    return "processed"

def get_vc(model_path):
    global n_spk, tgt_sr, net_g, vc, cpt, device, is_half, version
    print("loading pth %s" % model_path)
    cpt = torch.load(model_path, map_location="cpu")
    tgt_sr = cpt["config"][-1]
    cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
    if_f0 = cpt.get("f0", 1)
    version = cpt.get("version", "v1")
    # Pick the synthesizer matching the checkpoint's version and F0 flag
    # (v2 checkpoints use the 768-dim feature variants).
    if version == "v1":
        if if_f0 == 1:
            net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half)
        else:
            net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
    else:
        if if_f0 == 1:
            net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=is_half)
        else:
            net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
    del net_g.enc_q
    print(net_g.load_state_dict(cpt["weight"], strict=False))
    net_g.eval().to(device)
    if is_half:
        net_g = net_g.half()
    else:
        net_g = net_g.float()
    vc = VC(tgt_sr, config)
    n_spk = cpt["config"][-3]
    # return {"visible": True, "maximum": n_spk, "__type__": "update"}

if using_cli:
    vc_single(
        sid=0,
        input_audio_path=input_path,
        f0_up_key=f0_up_key,
        f0_file=None,
        f0_method=f0_method,
        file_index=file_index,
        file_index2="",
        index_rate=1,
        filter_radius=3,
        resample_sr=0,
        rms_mix_rate=0,
        model_path=model_path,
        output_path=output_path,
    )
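
# The same conversion can be run from Python by importing this module and
# calling vc_single() directly; a hypothetical sketch (placeholder paths):
#
#   from infer_cli import vc_single
#   vc_single(
#       sid=0,
#       input_audio_path="input.wav",
#       f0_up_key=0,
#       f0_method="harvest",
#       file_index="logs/added_mymodel.index",
#       model_path="weights/mymodel.pth",
#       output_path="output.wav",
#   )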