Changes from 2 commits
17 changes: 15 additions & 2 deletions astrbot/core/pipeline/process_stage/method/llm_request.py
@@ -14,7 +14,7 @@
ResultContentType,
MessageChain,
)
from astrbot.core.message.components import Image
from astrbot.core.message.components import Image, Record
from astrbot.core import logger
from astrbot.core.utils.metrics import Metric
from astrbot.core.provider.entities import (
@@ -77,16 +77,29 @@ async def process(
)

else:
req = ProviderRequest(prompt="", image_urls=[])
req = ProviderRequest(prompt="", image_urls=[], audio_urls=[])
if self.provider_wake_prefix:
if not event.message_str.startswith(self.provider_wake_prefix):
return
req.prompt = event.message_str[len(self.provider_wake_prefix) :]
req.func_tool = self.ctx.plugin_manager.context.get_llm_tool_manager()

# Handle images and audio in the message
has_audio = False
for comp in event.message_obj.message:
if isinstance(comp, Image):
image_path = await comp.convert_to_file_path()
req.image_urls.append(image_path)
elif isinstance(comp, Record):
# Handle audio messages
audio_path = await comp.convert_to_file_path()
logger.info(f"检测到音频消息,路径: {audio_path}")
has_audio = True
req.audio_urls.append(audio_path)

# If the message is audio-only with no text, add a default prompt
if not req.prompt and has_audio:
req.prompt = "[用户发送的音频将其视为文本输入与其进行聊天]"
Member

Refactor this a bit:

            # Handle images and audio in the message
            for comp in event.message_obj.message:
                if isinstance(comp, Image):
                    # Handle image messages
                    image_path = await comp.convert_to_file_path()
                    req.image_urls.append(image_path)
                elif isinstance(comp, Record):
                    # Handle audio messages
                    audio_path = await comp.convert_to_file_path()
                    req.audio_urls.append(audio_path)
            
            # If the message is audio-only with no text, add a default prompt
            if not req.prompt and req.audio_urls:
                req.prompt = "[用户发送的音频将其视为文本输入与其进行聊天]"


# Fetch the conversation context
conversation_id = await self.conv_manager.get_curr_conversation_id(
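For reference, a condensed sketch of the flow this hunk adds, assuming an `event` shaped like the pipeline's message event; the helper name `build_request` is invented and wake-prefix handling is omitted, so this is not the actual pipeline code:

```python
from astrbot.core.message.components import Image, Record
from astrbot.core.provider.entities import ProviderRequest

async def build_request(event) -> ProviderRequest:
    # Collect image and audio attachments into the provider request.
    req = ProviderRequest(prompt=event.message_str, image_urls=[], audio_urls=[])
    for comp in event.message_obj.message:
        if isinstance(comp, Image):
            req.image_urls.append(await comp.convert_to_file_path())
        elif isinstance(comp, Record):
            req.audio_urls.append(await comp.convert_to_file_path())
    # Audio-only messages fall back to a default prompt so the LLM still receives text.
    if not req.prompt and req.audio_urls:
        req.prompt = "[用户发送的音频将其视为文本输入与其进行聊天]"
    return req
```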
2 changes: 2 additions & 0 deletions astrbot/core/provider/entities.py
@@ -96,6 +96,8 @@ class ProviderRequest:
"""会话 ID"""
image_urls: List[str] = None
"""图片 URL 列表"""
audio_urls: List[str] = None
"""音频 URL 列表"""
func_tool: FuncCall = None
"""可用的函数工具"""
contexts: List = None
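A minimal illustration of the new field; the file path is hypothetical, and explicit empty lists are passed (as the pipeline does above) because the fields default to `None` rather than `[]`:

```python
from astrbot.core.provider.entities import ProviderRequest

# Voice-only request as the pipeline would build it (illustrative values).
req = ProviderRequest(prompt="", image_urls=[], audio_urls=["/tmp/voice_message.wav"])
```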
128 changes: 101 additions & 27 deletions astrbot/core/provider/sources/gemini_source.py
@@ -3,6 +3,8 @@
import json
import logging
import random
import os
import mimetypes
from typing import Dict, List, Optional
from collections.abc import AsyncGenerator

@@ -193,6 +195,12 @@ def process_image_url(image_url_dict: dict) -> types.Part:
mime_type = url.split(":")[1].split(";")[0]
image_bytes = base64.b64decode(url.split(",", 1)[1])
return types.Part.from_bytes(data=image_bytes, mime_type=mime_type)

def process_inline_data(inline_data_dict: dict) -> types.Part:
"""处理内联数据,如音频""" # TODO: 处理视频?
mime_type = inline_data_dict["mime_type"]
data = inline_data_dict.get("data", "")
return types.Part.from_bytes(data=data, mime_type=mime_type)

def append_or_extend(contents: list[types.Content], part: list[types.Part], content_cls: type[types.Content]) -> None:
if contents and isinstance(contents[-1], content_cls):
@@ -212,12 +220,15 @@ def append_or_extend(contents: list[types.Content], part: list[types.Part], cont

if role == "user":
if isinstance(content, list):
parts = [
types.Part.from_text(text=item["text"] or " ")
if item["type"] == "text"
else process_image_url(item["image_url"])
for item in content
]
parts = []
for item in content:
if item["type"] == "text":
parts.append(types.Part.from_text(text=item["text"] or " "))
elif item["type"] == "image_url":
parts.append(process_image_url(item["image_url"]))
elif item["type"] == "inline_data":
# Handle inline data such as audio
parts.append(process_inline_data(item["inline_data"]))
else:
parts = [create_text_part(content)]
append_or_extend(gemini_contents, parts, types.UserContent)
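The item shapes this branch consumes look roughly like the following (sample values only; the raw bytes come from `encode_audio_data` further down): `text` items become `types.Part.from_text(...)`, `image_url` items go through `process_image_url`, and `inline_data` items go through `process_inline_data`, i.e. `types.Part.from_bytes(data=..., mime_type=...)`.

```python
# Sample user-content items (illustrative values).
content = [
    {"type": "text", "text": "please listen to this clip"},
    {
        "type": "inline_data",
        # data is raw bytes (not base64), matching what assemble_context stores.
        "inline_data": {"mime_type": "audio/wav", "data": b"RIFF....WAVEfmt "},
    },
]
```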
@@ -447,13 +458,14 @@ async def text_chat(
prompt: str,
session_id: str = None,
image_urls: List[str] = None,
audio_urls: List[str] = None,
func_tool: FuncCall = None,
contexts=[],
system_prompt=None,
tool_calls_result=None,
**kwargs,
) -> LLMResponse:
new_record = await self.assemble_context(prompt, image_urls)
new_record = await self.assemble_context(prompt, image_urls, audio_urls)
context_query = [*contexts, new_record]
if system_prompt:
context_query.insert(0, {"role": "system", "content": system_prompt})
@@ -486,14 +498,15 @@ async def text_chat_stream(
self,
prompt: str,
session_id: str = None,
image_urls: List[str] = [],
image_urls: List[str] = None,
audio_urls: List[str] = None,
func_tool: FuncCall = None,
contexts=[],
system_prompt=None,
tool_calls_result=None,
**kwargs,
) -> AsyncGenerator[LLMResponse, None]:
new_record = await self.assemble_context(prompt, image_urls)
new_record = await self.assemble_context(prompt, image_urls, audio_urls)
context_query = [*contexts, new_record]
if system_prompt:
context_query.insert(0, {"role": "system", "content": system_prompt})
@@ -545,30 +558,55 @@ def set_key(self, key):
self.chosen_api_key = key
self._init_client()

async def assemble_context(self, text: str, image_urls: List[str] = None):
async def assemble_context(self, text: str, image_urls: List[str] = None, audio_urls: List[str] = None):
"""
Assemble the context.
"""
if image_urls:
has_media = (image_urls and len(image_urls) > 0) or (audio_urls and len(audio_urls) > 0)

if has_media:
user_content = {
"role": "user",
"content": [{"type": "text", "text": text if text else "[图片]"}],
"content": [{"type": "text", "text": text if text else "[媒体内容]"}],
}
for image_url in image_urls:
if image_url.startswith("http"):
image_path = await download_image_by_url(image_url)
image_data = await self.encode_image_bs64(image_path)
elif image_url.startswith("file:///"):
image_path = image_url.replace("file:///", "")
image_data = await self.encode_image_bs64(image_path)
else:
image_data = await self.encode_image_bs64(image_url)
if not image_data:
logger.warning(f"图片 {image_url} 得到的结果为空,将忽略。")
continue
user_content["content"].append(
{"type": "image_url", "image_url": {"url": image_data}}
)

# Handle images
if image_urls:
for image_url in image_urls:
if image_url.startswith("http"):
image_path = await download_image_by_url(image_url)
image_data = await self.encode_image_bs64(image_path)
elif image_url.startswith("file:///"):
image_path = image_url.replace("file:///", "")
image_data = await self.encode_image_bs64(image_path)
else:
image_data = await self.encode_image_bs64(image_url)
if not image_data:
logger.warning(f"图片 {image_url} 得到的结果为空,将忽略。")
continue
user_content["content"].append(
{"type": "image_url", "image_url": {"url": image_data}}
)

# Handle audio
if audio_urls:
for audio_url in audio_urls:
audio_bytes, mime_type = await self.encode_audio_data(audio_url)
if not audio_bytes or not mime_type:
logger.warning(f"音频 {audio_url} 处理失败,将忽略。")
continue

# Append the audio data
user_content["content"].append(
{
"type": "inline_data",
"inline_data": {
"mime_type": mime_type,
"data": audio_bytes
}
}
)

return user_content
else:
return {"role": "user", "content": text}
@@ -584,5 +622,41 @@ async def encode_image_bs64(self, image_url: str) -> str:
return "data:image/jpeg;base64," + image_bs64
return ""

async def encode_audio_data(self, audio_url: str) -> tuple:
"""
Read an audio file and return its binary data

Returns:
tuple: (audio bytes, MIME type)
"""
try:
# Read the file bytes directly
with open(audio_url, "rb") as f:
audio_bytes = f.read()

# Infer the MIME type
mime_type = mimetypes.guess_type(audio_url)[0]
if not mime_type:
# Determine the MIME type from the file extension
extension = os.path.splitext(audio_url)[1].lower()
if extension == '.wav':
mime_type = 'audio/wav'
elif extension == '.mp3':
mime_type = 'audio/mpeg'
elif extension == '.ogg':
mime_type = 'audio/ogg'
elif extension == '.flac':
mime_type = 'audio/flac'
elif extension == '.m4a':
mime_type = 'audio/mp4'
else:
mime_type = 'audio/wav' # default

logger.info(f"音频文件处理成功: {audio_url},mime类型: {mime_type},大小: {len(audio_bytes)} 字节")
return audio_bytes, mime_type
except Exception as e:
logger.error(f"音频文件处理失败: {e}")
return None, None

Member

Refactor this a bit:

    async def encode_audio_data(self, audio_url: str) -> tuple[Optional[bytes], Optional[str]]:
        """
        Read an audio file and return its binary data

        Args:
            audio_url (str): Path to the audio file
        
        Returns:
            tuple: (audio bytes, MIME type)
        """
        # Infer the MIME type
        mime_type = mimetypes.guess_type(audio_url)[0]
        if not mime_type:
            extension_to_mime = {
                ".wav": "audio/wav",
                ".mp3": "audio/mpeg",
                ".ogg": "audio/ogg",
                ".flac": "audio/flac",
                ".m4a": "audio/mp4",
            }
            extension = os.path.splitext(audio_url)[1].lower()
            mime_type = extension_to_mime.get(extension, "application/octet-stream")

        try:
            # Read the file bytes directly
            with open(audio_url, "rb") as f:
                audio_bytes = f.read()

            logger.info(f"音频文件处理成功: {audio_url},mime类型: {mime_type},大小: {len(audio_bytes)} 字节")

async def terminate(self):
logger.info("Google GenAI 适配器已终止。")