2 changes: 1 addition & 1 deletion configs/__init__.py
@@ -1 +1 @@
-from .config import singleton_variable, Config, CPUConfig
+from .config import Singleton, Config, CPUConfig
35 changes: 24 additions & 11 deletions configs/config.py
@@ -22,18 +22,16 @@
]


-def singleton_variable(func):
-    def wrapper(*args, **kwargs):
-        if wrapper.instance is None:
-            wrapper.instance = func(*args, **kwargs)
-        return wrapper.instance
+class Singleton(type):
+    _instances = {}

-    wrapper.instance = None
-    return wrapper
+    def __call__(cls, *args, **kwargs):
+        if cls not in cls._instances:
+            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
+        return cls._instances[cls]


-@singleton_variable
-class Config:
+class Config(metaclass=Singleton):
    def __init__(self):
        self.device = "cuda:0"
        self.is_half = True
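Unlike the removed decorator, which replaced the decorated class with a wrapper function (so `Config` stopped being a type and `isinstance` checks against it broke), a metaclass keeps `Config` a real class while still caching the single instance. A minimal behavioral sketch, using only what the hunk above defines (`Demo` is a hypothetical stand-in):

```python
class Singleton(type):
    _instances = {}

    def __call__(cls, *args, **kwargs):
        # First call constructs and caches; later calls return the cached object.
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]


class Demo(metaclass=Singleton):  # hypothetical stand-in for Config
    pass


a, b = Demo(), Demo()
assert a is b               # one shared instance
assert isinstance(a, Demo)  # Demo is still a real class
```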
@@ -129,6 +127,16 @@ def has_xpu() -> bool:
        else:
            return False

+    @staticmethod
+    def use_insecure_load():
+        try:
+            from fairseq.data.dictionary import Dictionary
+
+            torch.serialization.add_safe_globals([Dictionary])
+            logging.warning("Using insecure weight loading for fairseq dictionary")
+        except AttributeError:
+            pass

    def use_fp32_config(self):
        for config_file in version_config_list:
            self.json_config[config_file]["train"]["fp16_run"] = False
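Context for this hunk: PyTorch 2.6 changed the default of `torch.load` to `weights_only=True`, which refuses to unpickle arbitrary Python classes, and fairseq checkpoints embed a `fairseq.data.dictionary.Dictionary`. `torch.serialization.add_safe_globals` (available since PyTorch 2.4, hence the `AttributeError` fallback on older builds) allow-lists that class for the restricted unpickler. A hedged sketch of the mechanism; the checkpoint path is illustrative, and a real fairseq checkpoint may embed further types needing the same treatment:

```python
import torch
from fairseq.data.dictionary import Dictionary

# Without the allow-list, PyTorch >= 2.6 fails roughly like this:
#   torch.load("hubert_base.pt")  -> UnpicklingError naming Dictionary

torch.serialization.add_safe_globals([Dictionary])
state = torch.load("hubert_base.pt")  # illustrative path
```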
@@ -210,15 +218,20 @@ def device_config(self):
        else:
            if self.instead:
                logger.info(f"Use {self.instead} instead")

        logger.info(
            "Half-precision floating-point: %s, device: %s"
            % (self.is_half, self.device)
        )

+        # Check if PyTorch is 2.6 or higher
+        if tuple(map(int, torch.__version__.split("+")[0].split("."))) >= (2, 6, 0):
+            self.use_insecure_load()

        return x_pad, x_query, x_center, x_max


-@singleton_variable
-class CPUConfig:
+class CPUConfig(metaclass=Singleton):
    def __init__(self):
        self.device = "cpu"
        self.is_half = False
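A small detail in the version check above: CUDA builds report local version tags such as `2.6.0+cu124`, so the tag must be stripped before the numeric comparison; that is what `split("+")[0]` does. Worked through on an illustrative version string:

```python
>>> version = "2.6.0+cu124"  # illustrative torch.__version__ value
>>> tuple(map(int, version.split("+")[0].split(".")))
(2, 6, 0)
>>> (2, 6, 0) >= (2, 6, 0)
True
```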
5 changes: 2 additions & 3 deletions i18n/i18n.py
@@ -1,7 +1,7 @@
import json
import locale
import os
-from configs import singleton_variable
+from configs import Singleton


def load_language_list(language):
@@ -10,8 +10,7 @@ def load_language_list(language):
    return language_list


-@singleton_variable
-class I18nAuto:
+class I18nAuto(metaclass=Singleton):
    def __init__(self, language=None):
        if language in ["Auto", None]:
            language = locale.getdefaultlocale(
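One behavioral consequence of the metaclass worth knowing when reading `I18nAuto` (an observation about the pattern, not something the diff states): the first construction wins, and constructor arguments passed on later calls are silently ignored. A sketch, reusing the `Singleton` imported above:

```python
from configs import Singleton  # same import as in i18n/i18n.py


class I18nDemo(metaclass=Singleton):  # hypothetical stand-in for I18nAuto
    def __init__(self, language=None):
        self.language = language


first = I18nDemo(language="en_US")
second = I18nDemo(language="ja_JP")
assert second is first
assert second.language == "en_US"  # the later language request is ignored
```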
4 changes: 4 additions & 0 deletions infer/modules/train/extract_f0_print.py
@@ -96,6 +96,10 @@ def go(self, paths, f0_method):
    # exp_dir=r"E:\codes\py39\dataset\mi-test"
    # n_p=16
    # f = open("%s/log_extract_f0.log"%exp_dir, "w")

+    from configs import Config
+    Config.use_insecure_load()

    printt(" ".join(sys.argv))
    featureInput = FeatureInput(is_half, device)
    paths = []
4 changes: 4 additions & 0 deletions infer/modules/train/extract_feature_print.py
@@ -23,11 +23,15 @@
    os.environ["CUDA_VISIBLE_DEVICES"] = str(i_gpu)
    version = sys.argv[6]
    is_half = sys.argv[7].lower() == "true"

import fairseq
import numpy as np
import torch
import torch.nn.functional as F

+from configs import Config
+Config.use_insecure_load()

if "privateuseone" not in device:
    device = "cpu"
    if torch.cuda.is_available():
2 changes: 2 additions & 0 deletions infer/modules/train/preprocess.py
@@ -142,4 +142,6 @@ def preprocess_trainset(inp_root, sr, n_p, exp_dir, per):


if __name__ == "__main__":
+    from configs import Config
+    Config.use_insecure_load()
    preprocess_trainset(inp_root, sr, n_p, exp_dir, per)
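Unlike `device_config`, which gates the call behind a PyTorch version check, the three training scripts above invoke `Config.use_insecure_load()` unconditionally. That still degrades gracefully: being a `@staticmethod` it needs no `Config()` instance, and on PyTorch builds that predate `torch.serialization.add_safe_globals` the attribute lookup raises `AttributeError`, which the method swallows. A sketch of that guard in isolation (the helper name is hypothetical):

```python
import torch


def maybe_allow_legacy_unpickling(types):
    """Allow-list classes for weights_only loading, if this torch has the API."""
    try:
        torch.serialization.add_safe_globals(list(types))
        return True
    except AttributeError:
        # torch < 2.4: no allow-list API, but torch.load is permissive anyway.
        return False
```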
14 changes: 8 additions & 6 deletions infer/modules/vc/hash.py
@@ -2,10 +2,12 @@
import torch
import hashlib
import pathlib

+from functools import lru_cache
from scipy.fft import fft
from pybase16384 import encode_to_string, decode_from_string

-from configs import CPUConfig, singleton_variable
+from configs import CPUConfig
from rvc.synthesizer import get_synthesizer

from .pipeline import Pipeline
@@ -29,27 +31,27 @@ def __exit__(self, type, value, traceback):
expand_factor = 65536 * 8


-@singleton_variable
+@lru_cache(None)  # None means the cache is unbounded
def original_audio_storage():
    return np.load(pathlib.Path(__file__).parent / "lgdsng.npz")


-@singleton_variable
+@lru_cache(None)
def original_audio():
    return original_audio_storage()["a"]


-@singleton_variable
+@lru_cache(None)
def original_audio_time_minus():
    return original_audio_storage()["t"]


-@singleton_variable
+@lru_cache(None)
def original_audio_freq_minus():
    return original_audio_storage()["f"]


-@singleton_variable
+@lru_cache(None)
def original_rmvpe_f0():
    x = original_audio_storage()
    return x["pitch"], x["pitchf"]
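The hash-module half of the refactor leans on the standard library instead: for a zero-argument function, `functools.lru_cache(None)` memoizes the single return value, which is exactly the job `singleton_variable` was doing here, so `original_audio_storage()` reads `lgdsng.npz` once and every later call returns the same object. A quick illustration (the counter is hypothetical):

```python
from functools import lru_cache

calls = 0


@lru_cache(None)  # maxsize=None: entries are never evicted
def expensive_load():
    global calls
    calls += 1
    return {"data": 42}


a = expensive_load()
b = expensive_load()
assert a is b and calls == 1  # body ran once; cached object returned after
```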