|
| 1 | +import phonemizer |
| 2 | +import re |
| 3 | +import torch |
| 4 | + |
| 5 | + |
def split_num(num):
    """Verbalize a matched number (regex callback): times, years, decades.

    Decimals pass through untouched (a later pass reads out the point);
    "5:30" becomes "5 30"; four-digit years are read as pairs ("1984" ->
    "19 84", "1990s" -> "19 90s").
    """
    num = num.group()
    # Leave decimals alone; they are verbalized by a separate pass.
    if "." in num:
        return num
    # Clock times "H:MM".
    if ":" in num:
        hour, minute = (int(part) for part in num.split(":"))
        if minute == 0:
            return f"{hour} o'clock"
        if minute < 10:
            return f"{hour} oh {minute}"
        return f"{hour} {minute}"
    # Four-digit years, optionally with a trailing "s" (decades).
    year = int(num[:4])
    if year < 1100 or year % 1000 < 10:
        # Early years and round ones like 2005 read fine as plain numbers.
        return num
    left, right = num[:2], int(num[2:4])
    suffix = "s" if num.endswith("s") else ""
    if 100 <= year % 1000 <= 999:
        if right == 0:
            return f"{left} hundred{suffix}"
        if right < 10:
            return f"{left} oh {right}{suffix}"
    return f"{left} {right}{suffix}"
| 28 | + |
| 29 | + |
def flip_money(m):
    """Verbalize a matched currency amount (regex callback).

    "$1" -> "1 dollar", "$2.50" -> "2 dollars and 50 cents",
    "£1.01" -> "1 pound and 1 penny". If the match ends in a letter
    (e.g. "$5 million") only the unit word is appended.
    """
    text = m.group()
    unit = "dollar" if text[0] == "$" else "pound"
    if text[-1].isalpha():
        # "$5 million" style: the amount already ends with a magnitude word.
        return f"{text[1:]} {unit}s"
    if "." not in text:
        plural = "" if text[1:] == "1" else "s"
        return f"{text[1:]} {unit}{plural}"
    whole, cents = text[1:].split(".")
    plural = "" if whole == "1" else "s"
    cents = int(cents.ljust(2, "0"))  # "$5.5" means 50 cents, not 5
    if text[0] == "$":
        coins = f"cent{'' if cents == 1 else 's'}"
    else:
        coins = "penny" if cents == 1 else "pence"
    return f"{whole} {unit}{plural} and {cents} {coins}"
| 47 | + |
| 48 | + |
def point_num(num):
    """Read a matched decimal aloud: "3.14" -> "3 point 1 4" (regex callback)."""
    whole, frac = num.group().split(".")
    digits = " ".join(frac)
    return f"{whole} point {digits}"
| 52 | + |
| 53 | + |
def normalize_text(text):
    """Normalize raw text for TTS: straighten quotes, map parentheses to
    guillemets, expand titles/times/years/currency/decimals into speakable
    words, and tidy whitespace. Rules run in a fixed order."""
    # Quote normalization (curly -> ASCII) and parens -> guillemets.
    for old, new in (
        (chr(8216), "'"),
        (chr(8217), "'"),
        ("«", chr(8220)),
        ("»", chr(8221)),
        (chr(8220), '"'),
        (chr(8221), '"'),
        ("(", "«"),
        (")", "»"),
    ):
        text = text.replace(old, new)
    # CJK punctuation -> ASCII counterpart plus a trailing space.
    for cjk, ascii_punct in zip("、。!,:;?", ",.!,:;?"):
        text = text.replace(cjk, ascii_punct + " ")
    # Ordered regex pipeline: whitespace cleanup, abbreviation expansion,
    # then number/money/decimal verbalization and pronunciation tweaks.
    rules = (
        (r"[^\S \n]", " "),
        (r" +", " "),
        (r"(?<=\n) +(?=\n)", ""),
        (r"\bD[Rr]\.(?= [A-Z])", "Doctor"),
        (r"\b(?:Mr\.|MR\.(?= [A-Z]))", "Mister"),
        (r"\b(?:Ms\.|MS\.(?= [A-Z]))", "Miss"),
        (r"\b(?:Mrs\.|MRS\.(?= [A-Z]))", "Mrs"),
        (r"\betc\.(?! [A-Z])", "etc"),
        (r"(?i)\b(y)eah?\b", r"\1e'a"),
        (
            r"\d*\.\d+|\b\d{4}s?\b|(?<!:)\b(?:[1-9]|1[0-2]):[0-5]\d\b(?!:)",
            split_num,
        ),
        (r"(?<=\d),(?=\d)", ""),
        (
            r"(?i)[$£]\d+(?:\.\d+)?(?: hundred| thousand| (?:[bm]|tr)illion)*\b"
            r"|[$£]\d+\.\d\d?\b",
            flip_money,
        ),
        (r"\d*\.\d+", point_num),
        (r"(?<=\d)-(?=\d)", " to "),
        (r"(?<=\d)S", " S"),
        (r"(?<=[BCDFGHJ-NP-TV-Z])'?s\b", "'S"),
        (r"(?<=X')S\b", "s"),
        (r"(?:[A-Za-z]\.){2,} [a-z]", lambda m: m.group().replace(".", "-")),
        (r"(?i)(?<=[A-Z])\.(?=[A-Z])", "-"),
    )
    for pattern, repl in rules:
        text = re.sub(pattern, repl, text)
    return text.strip()
| 89 | + |
| 90 | + |
def get_vocab():
    """Build the symbol -> token-id mapping used by tokenize().

    Id 0 is the pad symbol "$"; punctuation, Latin letters and IPA phoneme
    characters follow in a fixed order, so ids are stable across runs.
    """
    _pad = "$"
    _punctuation = ';:,.!?¡¿—…"«»“” '
    _letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
    _letters_ipa = "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻ"
    symbols = [_pad] + list(_punctuation) + list(_letters) + list(_letters_ipa)
    # enumerate() instead of the manual range(len(...)) index loop.
    return {symbol: index for index, symbol in enumerate(symbols)}
| 101 | + |
| 102 | + |
# Module-level symbol -> token-id table, built once at import time.
VOCAB = get_vocab()
| 104 | + |
| 105 | + |
def tokenize(ps):
    """Map a phoneme string to token ids, silently dropping unknown symbols."""
    ids = []
    for ch in ps:
        token_id = VOCAB.get(ch)
        if token_id is not None:
            ids.append(token_id)
    return ids
| 108 | + |
| 109 | + |
# Grapheme-to-phoneme backends keyed by language code:
#   "a" -> American English (en-us), "b" -> British English (en-gb).
# Built once at import time; requires the espeak backend to be available
# on the host (EspeakBackend construction will fail otherwise — TODO confirm).
phonemizers = dict(
    a=phonemizer.backend.EspeakBackend(
        language="en-us", preserve_punctuation=True, with_stress=True
    ),
    b=phonemizer.backend.EspeakBackend(
        language="en-gb", preserve_punctuation=True, with_stress=True
    ),
)
| 118 | + |
| 119 | + |
def phonemize(text, lang, norm=True):
    """Convert text to an IPA phoneme string for language `lang` ("a"/"b").

    Optionally normalizes the text first, then post-processes the espeak
    output and drops any character not present in VOCAB.
    """
    if norm:
        text = normalize_text(text)
    result = phonemizers[lang].phonemize([text])
    ps = result[0] if result else ""
    # First pair fixes espeak's reading of "kokoro";
    # see https://en.wiktionary.org/wiki/kokoro#English
    for old, new in (
        ("kəkˈoːɹoʊ", "kˈoʊkəɹoʊ"),
        ("kəkˈɔːɹəʊ", "kˈəʊkəɹəʊ"),
        ("ʲ", "j"),
        ("r", "ɹ"),
        ("x", "k"),
        ("ɬ", "l"),
    ):
        ps = ps.replace(old, new)
    # Insert a space before "hundred" when espeak fused it to the left.
    ps = re.sub(r"(?<=[a-zɹː])(?=hˈʌndɹɪd)", " ", ps)
    # Drop the stray space before a trailing "z".
    ps = re.sub(r' z(?=[;:,.!?¡¿—…"«»“” ]|$)', "z", ps)
    if lang == "a":
        # American pronunciation: "ninety" with a flapped t.
        ps = re.sub(r"(?<=nˈaɪn)ti(?!ː)", "di", ps)
    ps = "".join(ch for ch in ps if ch in VOCAB)
    return ps.strip()
| 134 | + |
| 135 | + |
def length_to_mask(lengths):
    """Build a boolean padding mask from a 1-D tensor of sequence lengths.

    Returns a (batch, max_len) tensor where True marks padded positions,
    i.e. indices at or beyond the sequence's length.
    """
    max_len = lengths.max()
    positions = torch.arange(max_len).unsqueeze(0)
    positions = positions.expand(lengths.shape[0], -1).type_as(lengths)
    # Position index i is padding when i + 1 > length.
    return positions + 1 > lengths.unsqueeze(1)
| 145 | + |
| 146 | + |
@torch.no_grad()
def forward(model, tokens, ref_s, speed):
    """Run one inference pass of the TTS model and return the decoded audio.

    Args:
        model: dict of sub-modules ("bert", "bert_encoder", "predictor",
            "text_encoder", "decoder").
        tokens: list of vocabulary token ids (without boundary tokens).
        ref_s: reference style tensor; dims 128+ condition the predictor,
            dims :128 feed the decoder.
        speed: scalar; predicted durations are divided by it
            (higher = faster speech).

    Returns:
        numpy array produced by the decoder (squeezed, moved to CPU) —
        presumably the synthesized waveform.
    """
    device = ref_s.device
    # Wrap the sequence with token id 0 (the pad symbol) on both ends.
    tokens = torch.LongTensor([[0, *tokens, 0]]).to(device)
    input_lengths = torch.LongTensor([tokens.shape[-1]]).to(device)
    text_mask = length_to_mask(input_lengths).to(device)
    # BERT attention expects 1 for valid positions, so invert the pad mask.
    bert_dur = model["bert"](tokens, attention_mask=(~text_mask).int())
    d_en = model["bert_encoder"](bert_dur).transpose(-1, -2)
    # Style slice used by the duration/prosody predictor.
    s = ref_s[:, 128:]
    d = model["predictor"].text_encoder(d_en, s, input_lengths, text_mask)
    x, _ = model["predictor"].lstm(d)
    duration = model["predictor"].duration_proj(x)
    duration = torch.sigmoid(duration).sum(axis=-1) / speed
    # Integer frame counts; every token keeps at least one frame.
    pred_dur = torch.round(duration).clamp(min=1).long()
    # Hard token->frame alignment matrix built from the predicted durations.
    pred_aln_trg = torch.zeros(input_lengths, pred_dur.sum().item())
    c_frame = 0
    for i in range(pred_aln_trg.size(0)):
        pred_aln_trg[i, c_frame : c_frame + pred_dur[0, i].item()] = 1
        c_frame += pred_dur[0, i].item()
    # Expand token-rate encodings to frame rate, then predict F0/noise.
    en = d.transpose(-1, -2) @ pred_aln_trg.unsqueeze(0).to(device)
    F0_pred, N_pred = model["predictor"].F0Ntrain(en, s)
    t_en = model["text_encoder"](tokens, input_lengths, text_mask)
    asr = t_en @ pred_aln_trg.unsqueeze(0).to(device)
    return (
        model["decoder"](asr, F0_pred, N_pred, ref_s[:, :128]).squeeze().cpu().numpy()
    )
| 173 | + |
| 174 | + |
def generate(model, text, voicepack, lang="a", speed=1, ps=None):
    """Synthesize audio for `text` via phonemize -> tokenize -> forward.

    `ps` overrides phonemization when given. Returns None if nothing
    tokenizes; sequences longer than 510 tokens are truncated. The voice
    embedding is selected by token count from `voicepack`.
    """
    if not ps:
        ps = phonemize(text, lang)
    tokens = tokenize(ps)
    if not tokens:
        return None
    if len(tokens) > 510:
        tokens = tokens[:510]
        print("Truncated to 510 tokens")
    ref_s = voicepack[len(tokens)]
    return forward(model, tokens, ref_s, speed)