
Commit 15ec70c

feat: Interface with Qwen Omni speech to text model
1 parent 33806f6 commit 15ec70c

File tree

7 files changed: 197 additions, 3 deletions

apps/locales/en_US/LC_MESSAGES/django.po

Lines changed: 9 additions & 0 deletions

@@ -8648,4 +8648,13 @@ msgid "Multiple dialects, supporting 23 dialects"
 msgstr ""
 
 msgid "This interface is used to recognize short audio files within 60 seconds. Supports Mandarin Chinese, English, Cantonese, Japanese, Vietnamese, Malay, Indonesian, Filipino, Thai, Portuguese, Turkish, Arabic, Hindi, French, German, and 23 Chinese dialects."
+msgstr ""
+
+msgid "CueWord"
+msgstr ""
+
+msgid "If not passed, the default value is What is this audio saying? Only answer the audio content"
+msgstr ""
+
+msgid "The Qwen Omni series model supports inputting multiple modalities of data, including video, audio, images, and text, and outputting audio and text."
 msgstr ""

apps/locales/zh_CN/LC_MESSAGES/django.po

Lines changed: 10 additions & 1 deletion

@@ -8774,4 +8774,13 @@ msgid "Multiple dialects, supporting 23 dialects"
 msgstr "多种方言,支持 23 种方言"
 
 msgid "This interface is used to recognize short audio files within 60 seconds. Supports Mandarin Chinese, English, Cantonese, Japanese, Vietnamese, Malay, Indonesian, Filipino, Thai, Portuguese, Turkish, Arabic, Hindi, French, German, and 23 Chinese dialects."
-msgstr "本接口用于识别 60 秒之内的短音频文件。支持中文普通话、英语、粤语、日语、越南语、马来语、印度尼西亚语、菲律宾语、泰语、葡萄牙语、土耳其语、阿拉伯语、印地语、法语、德语及 23 种汉语方言。"
+msgstr "本接口用于识别 60 秒之内的短音频文件。支持中文普通话、英语、粤语、日语、越南语、马来语、印度尼西亚语、菲律宾语、泰语、葡萄牙语、土耳其语、阿拉伯语、印地语、法语、德语及 23 种汉语方言。"
+
+msgid "CueWord"
+msgstr "提示词"
+
+msgid "If not passed, the default value is What is this audio saying? Only answer the audio content"
+msgstr "如果未传递,默认值为 这段音频在说什么,只回答音频的内容"
+
+msgid "The Qwen Omni series model supports inputting multiple modalities of data, including video, audio, images, and text, and outputting audio and text."
+msgstr "Qwen-Omni 系列模型支持输入多种模态的数据,包括视频、音频、图片、文本,并输出音频与文本"

apps/locales/zh_Hant/LC_MESSAGES/django.po

Lines changed: 10 additions & 1 deletion

@@ -8774,4 +8774,13 @@ msgid "Multiple dialects, supporting 23 dialects"
 msgstr "多種方言,支持 23 種方言"
 
 msgid "This interface is used to recognize short audio files within 60 seconds. Supports Mandarin Chinese, English, Cantonese, Japanese, Vietnamese, Malay, Indonesian, Filipino, Thai, Portuguese, Turkish, Arabic, Hindi, French, German, and 23 Chinese dialects."
-msgstr "本介面用於識別 60 秒之內的短音頻文件。支援中文普通話、英語、粵語、日語、越南語、馬來語、印度尼西亞語、菲律賓語、泰語、葡萄牙語、土耳其語、阿拉伯語、印地語、法語、德語及 23 種漢語方言。"
+msgstr "本介面用於識別 60 秒之內的短音頻文件。支援中文普通話、英語、粵語、日語、越南語、馬來語、印度尼西亞語、菲律賓語、泰語、葡萄牙語、土耳其語、阿拉伯語、印地語、法語、德語及 23 種漢語方言。"
+
+msgid "CueWord"
+msgstr "提示詞"
+
+msgid "If not passed, the default value is What is this audio saying? Only answer the audio content"
+msgstr "如果未傳遞,預設值為這段音訊在說什麼,只回答音訊的內容"
+
+msgid "The Qwen Omni series model supports inputting multiple modalities of data, including video, audio, images, and text, and outputting audio and text."
+msgstr "Qwen-Omni系列模型支持輸入多種模態的數據,包括視頻、音訊、圖片、文字,並輸出音訊與文字"

apps/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py

Lines changed: 7 additions & 1 deletion

@@ -15,6 +15,7 @@
     AliyunBaiLianEmbeddingCredential
 from models_provider.impl.aliyun_bai_lian_model_provider.credential.image import QwenVLModelCredential
 from models_provider.impl.aliyun_bai_lian_model_provider.credential.llm import BaiLianLLMModelCredential
+from models_provider.impl.aliyun_bai_lian_model_provider.credential.omi_stt import AliyunBaiLianOmiSTTModelCredential
 from models_provider.impl.aliyun_bai_lian_model_provider.credential.reranker import \
     AliyunBaiLianRerankerCredential
 from models_provider.impl.aliyun_bai_lian_model_provider.credential.stt import AliyunBaiLianSTTModelCredential
@@ -23,6 +24,7 @@
 from models_provider.impl.aliyun_bai_lian_model_provider.model.embedding import AliyunBaiLianEmbedding
 from models_provider.impl.aliyun_bai_lian_model_provider.model.image import QwenVLChatModel
 from models_provider.impl.aliyun_bai_lian_model_provider.model.llm import BaiLianChatModel
+from models_provider.impl.aliyun_bai_lian_model_provider.model.omi_stt import AliyunBaiLianOmiSpeechToText
 from models_provider.impl.aliyun_bai_lian_model_provider.model.reranker import AliyunBaiLianReranker
 from models_provider.impl.aliyun_bai_lian_model_provider.model.stt import AliyunBaiLianSpeechToText
 from models_provider.impl.aliyun_bai_lian_model_provider.model.tti import QwenTextToImageModel
@@ -33,6 +35,7 @@
 aliyun_bai_lian_model_credential = AliyunBaiLianRerankerCredential()
 aliyun_bai_lian_tts_model_credential = AliyunBaiLianTTSModelCredential()
 aliyun_bai_lian_stt_model_credential = AliyunBaiLianSTTModelCredential()
+aliyun_bai_lian_omi_stt_model_credential = AliyunBaiLianOmiSTTModelCredential()
 aliyun_bai_lian_embedding_model_credential = AliyunBaiLianEmbeddingCredential()
 aliyun_bai_lian_llm_model_credential = BaiLianLLMModelCredential()
 qwenvl_model_credential = QwenVLModelCredential()
@@ -73,7 +76,10 @@
     ModelInfo('qwen-plus', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,
               BaiLianChatModel),
     ModelInfo('qwen-max', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,
-              BaiLianChatModel)
+              BaiLianChatModel),
+    ModelInfo('qwen-omni-turbo',
+              _('The Qwen Omni series model supports inputting multiple modalities of data, including video, audio, images, and text, and outputting audio and text.'),
+              ModelTypeConst.STT, aliyun_bai_lian_omi_stt_model_credential, AliyunBaiLianOmiSpeechToText),
 ]
 
 module_info_vl_list = [
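
The new ModelInfo entry is what exposes 'qwen-omni-turbo' as a speech-to-text option: it pairs the model name with the AliyunBaiLianOmiSTTModelCredential form and the AliyunBaiLianOmiSpeechToText implementation, both added below. As a minimal sketch of how a registered entry might be resolved at runtime, assuming the provider class defined in this file is named AliyunBaiLianModelProvider (the class name and calling code are illustrative; provider.get_model is the same method the credential check in the next file relies on):

# Illustrative only: resolving the newly registered STT model by name.
# AliyunBaiLianModelProvider is an assumed name for the provider defined in this file.
provider = AliyunBaiLianModelProvider()
stt_model = provider.get_model(
    'STT',                          # model type registered via ModelTypeConst.STT
    'qwen-omni-turbo',              # name from the new ModelInfo entry
    {'api_key': 'sk-xxx'},          # credential validated by AliyunBaiLianOmiSTTModelCredential
)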

apps/models_provider/impl/aliyun_bai_lian_model_provider/credential/omi_stt.py

Lines changed: 72 additions & 0 deletions (new file)

# coding=utf-8
import traceback
from typing import Dict, Any

from common import forms
from common.exception.app_exception import AppApiException
from common.forms import BaseForm, PasswordInputField, TooltipLabel
from models_provider.base_model_provider import BaseModelCredential, ValidCode
from django.utils.translation import gettext as _


class AliyunBaiLianOmiSTTModelParams(BaseForm):
    CueWord = forms.TextInputField(
        TooltipLabel(_('CueWord'),
                     _('If not passed, the default value is What is this audio saying? Only answer the audio content')),
        required=True,
        default_value='这段音频在说什么,只回答音频的内容',
    )


class AliyunBaiLianOmiSTTModelCredential(BaseForm, BaseModelCredential):
    api_key = PasswordInputField("API key", required=True)

    def is_valid(self,
                 model_type: str,
                 model_name: str,
                 model_credential: Dict[str, Any],
                 model_params: Dict[str, Any],
                 provider,
                 raise_exception: bool = False
                 ) -> bool:
        # Reject model types the provider does not advertise.
        model_type_list = provider.get_model_type_list()
        if not any(mt.get('value') == model_type for mt in model_type_list):
            raise AppApiException(
                ValidCode.valid_error.value,
                _('{model_type} Model type is not supported').format(model_type=model_type)
            )

        # The only required credential field is the API key.
        required_keys = ['api_key']
        for key in required_keys:
            if key not in model_credential:
                if raise_exception:
                    raise AppApiException(
                        ValidCode.valid_error.value,
                        _('{key} is required').format(key=key)
                    )
                return False

        # Instantiating the model is enough to surface credential problems.
        try:
            model = provider.get_model(model_type, model_name, model_credential)
        except Exception as e:
            traceback.print_exc()
            if isinstance(e, AppApiException):
                raise e
            if raise_exception:
                raise AppApiException(
                    ValidCode.valid_error.value,
                    _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))
                )
            return False
        return True

    def encryption_dict(self, model: Dict[str, object]) -> Dict[str, object]:
        # Store the API key encrypted; everything else passes through unchanged.
        return {
            **model,
            'api_key': super().encryption(model.get('api_key', ''))
        }

    def get_model_params_setting_form(self, model_name):
        # Runtime parameters exposed to the UI (currently only the cue word / prompt).
        return AliyunBaiLianOmiSTTModelParams()
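
A rough sketch of how this credential form might be exercised during model setup; the call mirrors the is_valid signature defined above, while the provider instance and the API key value are placeholders (illustrative assumptions, not the actual MaxKB call site):

# Illustrative only: validating credentials and encrypting the stored key.
credential = AliyunBaiLianOmiSTTModelCredential()
ok = credential.is_valid(
    model_type='STT',
    model_name='qwen-omni-turbo',
    model_credential={'api_key': 'sk-xxx'},    # placeholder key
    model_params={},
    provider=provider,                         # assumed provider instance from the previous sketch
    raise_exception=False,
)
encrypted = credential.encryption_dict({'api_key': 'sk-xxx'})  # api_key is stored encrypted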

apps/models_provider/impl/aliyun_bai_lian_model_provider/model/omi_stt.py

Lines changed: 87 additions & 0 deletions (new file)

import base64
import os
import traceback
from typing import Dict

from openai import OpenAI

from common.utils.logger import maxkb_logger
from models_provider.base_model_provider import MaxKBBaseModel
from models_provider.impl.base_stt import BaseSpeechToText


class AliyunBaiLianOmiSpeechToText(MaxKBBaseModel, BaseSpeechToText):
    api_key: str
    model: str
    params: dict

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.api_key = kwargs.get('api_key')
        self.model = kwargs.get('model')
        self.params = kwargs.get('params')

    @staticmethod
    def is_cache_model():
        return False

    @staticmethod
    def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
        return AliyunBaiLianOmiSpeechToText(
            model=model_name,
            api_key=model_credential.get('api_key'),
            params=model_kwargs,
            **model_kwargs
        )

    def check_auth(self):
        # Validate the credential by transcribing a bundled sample clip.
        cwd = os.path.dirname(os.path.abspath(__file__))
        with open(f'{cwd}/iat_mp3_16k.mp3', 'rb') as audio_file:
            self.speech_to_text(audio_file)

    def speech_to_text(self, audio_file):
        try:
            client = OpenAI(
                # If no environment variable is configured, replace the line below with your
                # Alibaba Cloud BaiLian API key: api_key="sk-xxx"
                api_key=self.api_key,
                base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
            )

            base64_audio = base64.b64encode(audio_file.read()).decode("utf-8")

            completion = client.chat.completions.create(
                model="qwen-omni-turbo-0119",
                messages=[
                    {
                        "role": "user",
                        "content": [
                            {
                                "type": "input_audio",
                                "input_audio": {
                                    "data": f"data:;base64,{base64_audio}",
                                    "format": "mp3",
                                },
                            },
                            {"type": "text", "text": self.params.get('CueWord')},
                        ],
                    },
                ],
                # Output modalities; ["text","audio"] and ["text"] are currently supported.
                modalities=["text"],
                audio={"voice": "Cherry", "format": "mp3"},
                # stream must be set to True, otherwise the request fails.
                stream=True,
                stream_options={"include_usage": True},
            )
            # Collect the transcript fragments from the streamed chunks.
            result = []
            for chunk in completion:
                if chunk.choices and hasattr(chunk.choices[0].delta, 'audio'):
                    transcript = chunk.choices[0].delta.audio.get('transcript')
                    result.append(transcript)
            return "".join(result)

        except Exception as err:
            maxkb_logger.error(f"Error: {str(err)}: {traceback.format_exc()}")
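
End to end, the class above is intended to be driven roughly like this; new_instance and speech_to_text are the methods from this file, while the audio path and API key are placeholders (a sketch, not the actual MaxKB call flow):

# Illustrative only: transcribing a local MP3 with the new Omni STT model.
stt = AliyunBaiLianOmiSpeechToText.new_instance(
    'STT',
    'qwen-omni-turbo',
    {'api_key': 'sk-xxx'},                       # placeholder DashScope/BaiLian key
    CueWord='这段音频在说什么,只回答音频的内容',    # lands in self.params via **model_kwargs
)
with open('sample.mp3', 'rb') as audio_file:     # placeholder audio file
    print(stt.speech_to_text(audio_file))        # prints the recognized text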

apps/models_provider/impl/tencent_model_provider/model/stt.py

Lines changed: 2 additions & 0 deletions
@@ -1,6 +1,7 @@
 import base64
 import json
 import os
+import traceback
 from typing import Dict
 
 from tencentcloud.asr.v20190614 import asr_client, models
@@ -9,6 +10,7 @@
 from tencentcloud.common.profile.client_profile import ClientProfile
 from tencentcloud.common.profile.http_profile import HttpProfile
 
+from common.utils.logger import maxkb_logger
 from models_provider.base_model_provider import MaxKBBaseModel
 from models_provider.impl.base_stt import BaseSpeechToText
 
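The hunks for this file only add the traceback and maxkb_logger imports; the code that uses them is not shown in the diff. Presumably they back error logging in the existing Tencent STT methods, along the lines of this hypothetical sketch (the try/except and the helper are assumptions, mirroring the pattern used in the new Omni STT model above):

# Hypothetical use of the newly imported helpers inside a Tencent ASR call;
# the actual edited method body is not part of the hunks shown above.
import traceback
from common.utils.logger import maxkb_logger   # import added by this commit

def do_tencent_asr_request():
    # Stand-in for the existing Tencent ASR request in this file (hypothetical helper).
    raise RuntimeError("simulated ASR failure")

try:
    do_tencent_asr_request()
except Exception as err:
    # Log the full traceback through MaxKB's logger instead of swallowing it.
    maxkb_logger.error(f"Error: {str(err)}: {traceback.format_exc()}")
    raise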