-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathSTT.py
More file actions
116 lines (94 loc) · 3.93 KB
/
STT.py
File metadata and controls
116 lines (94 loc) · 3.93 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
""" # STT wrapper class
This class is a wrapper for Vosk STT library.
It can transcribe audio files in Persian and English.
It uses Vosk models for Persian and English.
It can also apply some basic audio enhancements.
## Usage:
>>> persian_stt = SpeechToText(lang="fa") # or "en" for english
>>> with open("audio.mp3", "rb") as f:
>>> text = persian_stt.transcribe_mp3_fileobj(f)
>>> print(text)
"""
import gc
import os
import wave
import json
from io import BytesIO
from pydub import AudioSegment
from vosk import Model, KaldiRecognizer, SetLogLevel
SetLogLevel(-1) # no log for vosk
class SpeechToText:
    """Wrapper around the Vosk speech-to-text library.

    Transcribes Persian ("fa") or English ("en") audio supplied as WAV or
    MP3 file-like objects. Before recognition the audio is converted to
    mono 16 kHz PCM and lightly enhanced (normalization plus a 4 kHz
    low-pass filter to reduce high-frequency noise).
    """

    # Known model directories per language. Only "small" models are
    # shipped, so `model_size` is currently a reserved parameter.
    _MODEL_DIRS = {
        'fa': 'vosk-model-small-fa-0.42',
        'en': 'vosk-model-small-en-us-0.15',
    }

    def __init__(self, lang: str, model_size: str = 'small'):
        """Load the Vosk model for `lang`.

        Args:
            lang: Language code, either 'fa' (Persian) or 'en' (English).
            model_size: Reserved for future use; only 'small' models are
                currently available.

        Raises:
            ValueError: If `lang` is not a supported language code.
            FileNotFoundError: If the model directory does not exist.
        """
        if lang not in self._MODEL_DIRS:
            raise ValueError(
                f"Unsupported language '{lang}'; expected 'fa' or 'en'.")
        model_dir = os.path.join('models', lang, self._MODEL_DIRS[lang])
        if not os.path.exists(model_dir):
            raise FileNotFoundError(
                f"Model directory '{model_dir}' not found.")
        self.model = Model(model_dir)

    def _transcribe_segment(self, audio, max_alternatives: int) -> str:
        """Run Vosk recognition on a pydub AudioSegment and return the text.

        Shared implementation behind the WAV and MP3 entry points: converts
        to mono 16 kHz, applies enhancement, exports to an in-memory WAV,
        and streams frames through a KaldiRecognizer.
        """
        # Vosk requires mono 16-bit PCM at 16 kHz.
        audio = audio.set_channels(1).set_frame_rate(16000)
        # Light enhancement: level normalization + 4 kHz low-pass filter.
        audio = audio.normalize().low_pass_filter(4000)
        # Export to an in-memory WAV for Vosk to consume.
        wav_io = BytesIO()
        audio.export(wav_io, format="wav")
        wav_io.seek(0)
        result_text = ""
        # Context manager ensures the wave reader is closed (the original
        # MP3 path leaked the handle).
        with wave.open(wav_io, "rb") as wf:
            if (wf.getnchannels() != 1 or wf.getsampwidth() != 2
                    or wf.getframerate() != 16000):
                raise ValueError("Audio must be mono PCM WAV at 16kHz.")
            rec = KaldiRecognizer(self.model, wf.getframerate())
            rec.SetWords(True)  # include word-level details
            rec.SetMaxAlternatives(max_alternatives)  # alternatives if > 0
            while True:
                data = wf.readframes(4000)
                if not data:
                    break
                if rec.AcceptWaveform(data):
                    result = json.loads(rec.Result())
                    result_text += result.get("text", "") + " "
            final_result = json.loads(rec.FinalResult())
            result_text += final_result.get("text", "")
        # Free the recognizer and large buffers promptly.
        del audio, wav_io, rec
        gc.collect()
        return result_text.strip()

    def transcribe_wav_fileobj(self, file_obj, max_alternatives: int = 0) -> str:
        """Transcribe a WAV file-like object.

        Args:
            file_obj: Binary file-like object containing WAV audio.
            max_alternatives: If > 0, ask Vosk for alternative hypotheses.

        Returns:
            The recognized text, whitespace-stripped.
        """
        audio = AudioSegment.from_file(file_obj, format="wav")
        return self._transcribe_segment(audio, max_alternatives)

    def transcribe_mp3_fileobj(self, file_obj, max_alternatives: int = 0) -> str:
        """Transcribe an MP3 file-like object.

        Args:
            file_obj: Binary file-like object containing MP3 audio.
            max_alternatives: If > 0, ask Vosk for alternative hypotheses.

        Returns:
            The recognized text, whitespace-stripped.
        """
        # BUG FIX: the original passed format="wav" here, decoding MP3
        # input with the wrong decoder. MP3 must be decoded as MP3.
        audio = AudioSegment.from_file(file_obj, format="mp3")
        return self._transcribe_segment(audio, max_alternatives)