|
#!/usr/bin/env python3

"""
This file shows how to use a non-streaming SenseVoice CTC model from
https://github.com/FunAudioLLM/SenseVoice
to decode files.

Please download model files from
https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models

For instance,

wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
tar xvf sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
rm sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17.tar.bz2
"""
| 17 | + |
| 18 | +from pathlib import Path |
| 19 | + |
| 20 | +import sherpa_onnx |
| 21 | +import soundfile as sf |
| 22 | + |
| 23 | + |
def create_recognizer():
    """Create an offline SenseVoice recognizer and pick a test wave file.

    Returns:
        A tuple ``(recognizer, test_wav)`` where ``recognizer`` is a
        ``sherpa_onnx.OfflineRecognizer`` built from the SenseVoice model
        and ``test_wav`` is the path to a sample wave file shipped with it.

    Raises:
        ValueError: If the model, tokens, or test wave file does not exist.
    """
    model = "./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/model.int8.onnx"
    tokens = "./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/tokens.txt"
    test_wav = "./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/test_wavs/zh.wav"
    # Other sample files shipped with the model; uncomment one to try it.
    # test_wav = "./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/test_wavs/en.wav"
    # test_wav = "./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/test_wavs/ja.wav"
    # test_wav = "./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/test_wavs/ko.wav"
    # test_wav = "./sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2024-07-17/test_wavs/yue.wav"

    # Fix: the original check skipped the tokens file, so a missing
    # tokens.txt would surface as an opaque runtime error inside
    # sherpa-onnx instead of this actionable message.
    if not all(Path(f).is_file() for f in (model, tokens, test_wav)):
        raise ValueError(
            """Please download model files from
            https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
            """
        )
    return (
        sherpa_onnx.OfflineRecognizer.from_sense_voice(
            model=model,
            tokens=tokens,
            use_itn=True,  # apply inverse text normalization (e.g. digits)
            debug=True,
        ),
        test_wav,
    )
| 48 | + |
| 49 | + |
def main():
    """Decode one wave file with the SenseVoice recognizer and print the result."""
    recognizer, wav_path = create_recognizer()

    # Read as float32 in [-1, 1]; always_2d guarantees a (frames, channels)
    # array even for mono files, so channel selection below is uniform.
    samples, rate = sf.read(wav_path, dtype="float32", always_2d=True)
    mono = samples[:, 0]  # only use the first channel

    # mono is a 1-D float32 numpy array normalized to the range [-1, 1];
    # rate does not need to be 16000 Hz — the recognizer resamples.
    stream = recognizer.create_stream()
    stream.accept_waveform(rate, mono)
    recognizer.decode_stream(stream)

    print(wav_path)
    print(stream.result)


if __name__ == "__main__":
    main()
0 commit comments