
Commit 1436e68

test exported sense voice models (k2-fsa#1147)
1 parent 8de7144 commit 1436e68

3 files changed: +263 -0 lines changed


scripts/sense-voice/export-onnx.py

Lines changed: 1 addition & 0 deletions
@@ -158,6 +158,7 @@ def main():
     meta_data = {
         "lfr_window_size": lfr_window_size,
         "lfr_window_shift": lfr_window_shift,
+        "normalize_samples": 0,  # input should be in the range [-32768, 32767]
         "neg_mean": neg_mean,
         "inv_stddev": inv_stddev,
         "model_type": "sense_voice_ctc",

scripts/sense-voice/run.sh

Lines changed: 25 additions & 0 deletions
@@ -35,3 +35,28 @@ echo "pwd: $PWD"
 ./show-info.py
 
 ls -lh
+
+# Download test wavs
+curl -SL -O https://huggingface.co/csukuangfj/sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/resolve/main/test_wavs/en.wav
+curl -SL -O https://huggingface.co/csukuangfj/sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/resolve/main/test_wavs/zh.wav
+curl -SL -O https://huggingface.co/csukuangfj/sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/resolve/main/test_wavs/ja.wav
+curl -SL -O https://huggingface.co/csukuangfj/sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/resolve/main/test_wavs/ko.wav
+curl -SL -O https://huggingface.co/csukuangfj/sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/resolve/main/test_wavs/yue.wav
+
+for m in model.onnx model.int8.onnx; do
+  for w in en zh ja ko yue; do
+    echo "----------test $m $w.wav----------"
+
+    echo "without inverse text normalization, lang auto"
+    ./test.py --model $m --tokens ./tokens.txt --wav $w.wav --use-itn 0
+
+    echo "with inverse text normalization, lang auto"
+    ./test.py --model $m --tokens ./tokens.txt --wav $w.wav --use-itn 1
+
+    echo "without inverse text normalization, lang $w"
+    ./test.py --model $m --tokens ./tokens.txt --wav $w.wav --use-itn 0 --lang $w
+
+    echo "with inverse text normalization, lang $w"
+    ./test.py --model $m --tokens ./tokens.txt --wav $w.wav --use-itn 1 --lang $w
+  done
+done
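
Note that run.sh passes --wav and --lang, while test.py (below) declares --wave and --language; the shorter spellings still resolve because argparse accepts unambiguous prefixes by default (allow_abbrev=True). A minimal sketch of that behavior, using a stand-in parser rather than the actual one from test.py:

import argparse

# allow_abbrev defaults to True, so unambiguous option prefixes are accepted
parser = argparse.ArgumentParser()
parser.add_argument("--wave", type=str)
parser.add_argument("--language", type=str, default="auto")

args = parser.parse_args(["--wav", "en.wav", "--lang", "en"])
print(args.wave, args.language)  # -> en.wav en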

scripts/sense-voice/test.py

Lines changed: 237 additions & 0 deletions
@@ -0,0 +1,237 @@
#!/usr/bin/env python3
# Copyright 2024 Xiaomi Corp. (authors: Fangjun Kuang)

import argparse
from typing import Tuple

import kaldi_native_fbank as knf
import numpy as np
import onnxruntime
import onnxruntime as ort
import soundfile as sf
import torch


def get_args():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

    parser.add_argument(
        "--model",
        type=str,
        required=True,
        help="Path to model.onnx",
    )

    parser.add_argument(
        "--tokens",
        type=str,
        required=True,
        help="Path to tokens.txt",
    )

    parser.add_argument(
        "--wave",
        type=str,
        required=True,
        help="The input wave to be recognized",
    )

    parser.add_argument(
        "--language",
        type=str,
        default="auto",
        help="the language of the input wav file. Supported values: zh, en, ja, ko, yue, auto",
    )

    parser.add_argument(
        "--use-itn",
        type=int,
        default=0,
        help="1 to use inverse text normalization. 0 to not use inverse text normalization",
    )

    return parser.parse_args()


class OnnxModel:
    def __init__(self, filename):
        session_opts = ort.SessionOptions()
        session_opts.inter_op_num_threads = 1
        session_opts.intra_op_num_threads = 1

        self.session_opts = session_opts

        self.model = ort.InferenceSession(
            filename,
            sess_options=self.session_opts,
            providers=["CPUExecutionProvider"],
        )

        meta = self.model.get_modelmeta().custom_metadata_map

        self.window_size = int(meta["lfr_window_size"])  # lfr_m
        self.window_shift = int(meta["lfr_window_shift"])  # lfr_n

        lang_zh = int(meta["lang_zh"])
        lang_en = int(meta["lang_en"])
        lang_ja = int(meta["lang_ja"])
        lang_ko = int(meta["lang_ko"])
        lang_auto = int(meta["lang_auto"])

        self.lang_id = {
            "zh": lang_zh,
            "en": lang_en,
            "ja": lang_ja,
            "ko": lang_ko,
            "auto": lang_auto,
        }
        self.with_itn = int(meta["with_itn"])
        self.without_itn = int(meta["without_itn"])

        neg_mean = meta["neg_mean"].split(",")
        neg_mean = list(map(lambda x: float(x), neg_mean))

        inv_stddev = meta["inv_stddev"].split(",")
        inv_stddev = list(map(lambda x: float(x), inv_stddev))

        self.neg_mean = np.array(neg_mean, dtype=np.float32)
        self.inv_stddev = np.array(inv_stddev, dtype=np.float32)

    def __call__(self, x, x_length, language, text_norm):
        logits = self.model.run(
            [
                self.model.get_outputs()[0].name,
            ],
            {
                self.model.get_inputs()[0].name: x.numpy(),
                self.model.get_inputs()[1].name: x_length.numpy(),
                self.model.get_inputs()[2].name: language.numpy(),
                self.model.get_inputs()[3].name: text_norm.numpy(),
            },
        )[0]

        return torch.from_numpy(logits)


def load_audio(filename: str) -> Tuple[np.ndarray, int]:
    data, sample_rate = sf.read(
        filename,
        always_2d=True,
        dtype="float32",
    )
    data = data[:, 0]  # use only the first channel
    samples = np.ascontiguousarray(data)
    return samples, sample_rate


def load_tokens(filename):
    ans = dict()
    i = 0
    with open(filename, encoding="utf-8") as f:
        for line in f:
            ans[i] = line.strip().split()[0]
            i += 1
    return ans


def compute_feat(
    samples,
    sample_rate,
    neg_mean: np.ndarray,
    inv_stddev: np.ndarray,
    window_size: int = 7,  # lfr_m
    window_shift: int = 6,  # lfr_n
):
    opts = knf.FbankOptions()
    opts.frame_opts.dither = 0
    opts.frame_opts.snip_edges = False
    opts.frame_opts.window_type = "hamming"
    opts.frame_opts.samp_freq = sample_rate
    opts.mel_opts.num_bins = 80

    online_fbank = knf.OnlineFbank(opts)
    online_fbank.accept_waveform(sample_rate, (samples * 32768).tolist())
    online_fbank.input_finished()

    features = np.stack(
        [online_fbank.get_frame(i) for i in range(online_fbank.num_frames_ready)]
    )
    assert features.data.contiguous is True
    assert features.dtype == np.float32, features.dtype

    T = (features.shape[0] - window_size) // window_shift + 1
    features = np.lib.stride_tricks.as_strided(
        features,
        shape=(T, features.shape[1] * window_size),
        strides=((window_shift * features.shape[1]) * 4, 4),
    )

    features = (features + neg_mean) * inv_stddev

    return features


def main():
    args = get_args()
    print(vars(args))
    samples, sample_rate = load_audio(args.wave)
    if sample_rate != 16000:
        import librosa

        samples = librosa.resample(samples, orig_sr=sample_rate, target_sr=16000)
        sample_rate = 16000

    model = OnnxModel(filename=args.model)

    features = compute_feat(
        samples=samples,
        sample_rate=sample_rate,
        neg_mean=model.neg_mean,
        inv_stddev=model.inv_stddev,
        window_size=model.window_size,
        window_shift=model.window_shift,
    )

    features = torch.from_numpy(features).unsqueeze(0)
    features_length = torch.tensor([features.size(1)], dtype=torch.int32)

    language = model.lang_id["auto"]
    if args.language in model.lang_id:
        language = model.lang_id[args.language]
    else:
        print(f"Invalid language: '{args.language}'")
        print("Use auto")

    if args.use_itn:
        text_norm = model.with_itn
    else:
        text_norm = model.without_itn

    language = torch.tensor([language], dtype=torch.int32)
    text_norm = torch.tensor([text_norm], dtype=torch.int32)

    logits = model(
        x=features,
        x_length=features_length,
        language=language,
        text_norm=text_norm,
    )

    idx = logits.squeeze(0).argmax(dim=-1)
    # idx is of shape (T,)
    idx = torch.unique_consecutive(idx)

    blank_id = 0
    idx = idx[idx != blank_id].tolist()

    tokens = load_tokens(args.tokens)
    text = "".join([tokens[i] for i in idx])

    text = text.replace("▁", " ")
    print(text)


if __name__ == "__main__":
    main()
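
For reference, the as_strided call in compute_feat implements LFR (low frame rate) stacking: each output row is the concatenation of lfr_m consecutive 80-dim fbank frames, advancing by lfr_n frames per row. A small self-contained check of that equivalence, using random data in place of real fbank features:

import numpy as np

lfr_m, lfr_n, num_bins = 7, 6, 80  # window_size, window_shift, mel bins as in test.py
frames = np.random.rand(50, num_bins).astype(np.float32)  # stand-in for fbank output

T = (frames.shape[0] - lfr_m) // lfr_n + 1
stacked = np.lib.stride_tricks.as_strided(
    frames,
    shape=(T, num_bins * lfr_m),
    strides=((lfr_n * num_bins) * 4, 4),  # 4 bytes per float32
)

# row t should equal the concatenation of frames[t*lfr_n : t*lfr_n + lfr_m]
expected = np.stack(
    [frames[t * lfr_n : t * lfr_n + lfr_m].reshape(-1) for t in range(T)]
)
assert np.allclose(stacked, expected)
print(stacked.shape)  # (8, 560)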
