Skip to content

Commit 0333064

Browse files
committed
fix(train): cannot extract feature on non-cuda devices (fix #123)
1 parent 55a981d commit 0333064

File tree

4 files changed

+43
-74
lines changed

4 files changed

+43
-74
lines changed

configs/config.py

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -53,6 +53,7 @@ def __init__(self):
5353
self.instead = ""
5454
self.preprocess_per = 3.7
5555
self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()
56+
self.default_batch_size = self.get_default_batch_size()
5657

5758
@staticmethod
5859
def load_config_json() -> dict:
@@ -136,6 +137,32 @@ def use_insecure_load():
136137
logging.warning("Using insecure weight loading for fairseq dictionary")
137138
except AttributeError:
138139
pass
140+
141+
@staticmethod
def get_default_batch_size() -> int:
    """Pick a default training batch size from available GPU memory.

    Returns:
        ``min(per_gpu_mem_gib) // 2`` across all visible CUDA devices,
        or 1 when no usable CUDA device is present.
    """
    if not torch.cuda.is_available():
        # TODO: add non-cuda multicards
        return 1
    # Check whether there is an NVIDIA card usable for training /
    # accelerated inference.
    ngpu = torch.cuda.device_count()
    if not ngpu:
        return 1
    # Whole-GiB memory per card; the +0.4 biases truncation upward so
    # e.g. a card reporting ~7.6 GiB counts as 8.
    mem = [
        int(
            torch.cuda.get_device_properties(i).total_memory
            / 1024 / 1024 / 1024 + 0.4
        )
        for i in range(ngpu)
    ]
    # NOTE(review): the original kept an `if_gpu_ok` flag, but it was set
    # unconditionally inside a loop that runs at least once (ngpu >= 1 is
    # guaranteed above), so the `else: 1` fallback was unreachable.
    return min(mem) // 2
139166

140167
def use_fp32_config(self):
141168
for config_file in version_config_list:

infer/lib/rvcmd.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -194,11 +194,11 @@ def download_all_assets(tmpdir: str, version="0.2.5"):
194194
if not architecture:
195195
logger.error(f"architecture {architecture} is not supported")
196196
exit(1)
197+
BASE_URL = "https://github.com/fumiama/RVC-Models-Downloader/releases/download/"
198+
suffix = "zip" if is_win else "tar.gz"
199+
RVCMD_URL = BASE_URL + f"v{version}/rvcmd_{system_type}_{architecture}.{suffix}"
200+
cmdfile = os.path.join(tmpdir, "rvcmd")
197201
try:
198-
BASE_URL = "https://github.com/fumiama/RVC-Models-Downloader/releases/download/"
199-
suffix = "zip" if is_win else "tar.gz"
200-
RVCMD_URL = BASE_URL + f"v{version}/rvcmd_{system_type}_{architecture}.{suffix}"
201-
cmdfile = os.path.join(tmpdir, "rvcmd")
202202
if is_win:
203203
download_and_extract_zip(RVCMD_URL, tmpdir)
204204
cmdfile += ".exe"

infer/modules/train/extract_feature_print.py

Lines changed: 8 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -10,19 +10,17 @@
1010
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
1111
os.environ["PYTORCH_MPS_HIGH_WATERMARK_RATIO"] = "0.0"
1212

13+
if len(sys.argv) != 8:
14+
sys.exit(0)
15+
1316
device = sys.argv[1]
1417
n_part = int(sys.argv[2])
1518
i_part = int(sys.argv[3])
16-
if len(sys.argv) == 7:
17-
exp_dir = sys.argv[4]
18-
version = sys.argv[5]
19-
is_half = sys.argv[6].lower() == "true"
20-
else:
21-
i_gpu = sys.argv[4]
22-
exp_dir = sys.argv[5]
23-
os.environ["CUDA_VISIBLE_DEVICES"] = str(i_gpu)
24-
version = sys.argv[6]
25-
is_half = sys.argv[7].lower() == "true"
19+
i_gpu = sys.argv[4]
20+
os.environ["CUDA_VISIBLE_DEVICES"] = str(i_gpu)
21+
exp_dir = sys.argv[5]
22+
version = sys.argv[6]
23+
is_half = sys.argv[7].lower() == "true"
2624

2725
import fairseq
2826
import numpy as np

web.py

Lines changed: 4 additions & 60 deletions
Original file line numberDiff line numberDiff line change
@@ -78,63 +78,6 @@ def forward_dml(ctx, x, scale):
7878

7979
i18n = I18nAuto()
8080
logger.info(i18n)
81-
# 判断是否有能用来训练和加速推理的N卡
82-
ngpu = torch.cuda.device_count()
83-
gpu_infos = []
84-
mem = []
85-
if_gpu_ok = False
86-
87-
if torch.cuda.is_available() or ngpu != 0:
88-
for i in range(ngpu):
89-
gpu_name = torch.cuda.get_device_name(i)
90-
if any(
91-
value in gpu_name.upper()
92-
for value in [
93-
"10",
94-
"16",
95-
"20",
96-
"30",
97-
"40",
98-
"A2",
99-
"A3",
100-
"A4",
101-
"P4",
102-
"A50",
103-
"500",
104-
"A60",
105-
"70",
106-
"80",
107-
"90",
108-
"M4",
109-
"T4",
110-
"TITAN",
111-
"4060",
112-
"L",
113-
"6000",
114-
]
115-
):
116-
# A10#A100#V100#A40#P40#M40#K80#A4500
117-
if_gpu_ok = True # 至少有一张能用的N卡
118-
gpu_infos.append("%s\t%s" % (i, gpu_name))
119-
mem.append(
120-
int(
121-
torch.cuda.get_device_properties(i).total_memory
122-
/ 1024
123-
/ 1024
124-
/ 1024
125-
+ 0.4
126-
)
127-
)
128-
if if_gpu_ok and len(gpu_infos) > 0:
129-
gpu_info = "\n".join(gpu_infos)
130-
default_batch_size = min(mem) // 2
131-
else:
132-
gpu_info = i18n(
133-
"Unfortunately, there is no compatible GPU available to support your training."
134-
)
135-
default_batch_size = 1
136-
gpus = "-".join([i[0] for i in gpu_infos])
137-
13881

13982
weight_root = os.getenv("weight_root")
14083
weight_uvr5_root = os.getenv("weight_uvr5_root")
@@ -314,6 +257,7 @@ def extract_f0_feature(n_p, f0method, if_f0, exp_dir, version19):
314257
exp_dir=sys.argv[4]
315258
os.environ["CUDA_VISIBLE_DEVICES"]=str(i_gpu)
316259
"""
260+
gpus = [config.device]
317261
leng = len(gpus)
318262
ps = []
319263
for idx, n_g in enumerate(gpus):
@@ -1201,7 +1145,7 @@ def change_info_(ckpt_path):
12011145
with gr.Column():
12021146
gpu_info9 = gr.Textbox(
12031147
label=i18n("GPU Information"),
1204-
value=gpu_info,
1148+
value=config.device,
12051149
)
12061150
f0method8 = gr.Radio(
12071151
label=i18n(
@@ -1254,7 +1198,7 @@ def change_info_(ckpt_path):
12541198
maximum=40,
12551199
step=1,
12561200
label=i18n("Batch size per GPU"),
1257-
value=default_batch_size,
1201+
value=config.default_batch_size,
12581202
interactive=True,
12591203
)
12601204
if_save_latest13 = gr.Radio(
@@ -1296,7 +1240,7 @@ def change_info_(ckpt_path):
12961240
label=i18n(
12971241
"Enter the GPU index(es) separated by '-', e.g., 0-1-2 to use GPU 0, 1, and 2"
12981242
),
1299-
value=gpus,
1243+
value="0",
13001244
interactive=True,
13011245
)
13021246
sr2.change(

0 commit comments

Comments
 (0)