diff --git a/astrbot/core/config/default.py b/astrbot/core/config/default.py
index 7a4c2b6b0..32f02875e 100644
--- a/astrbot/core/config/default.py
+++ b/astrbot/core/config/default.py
@@ -45,6 +45,7 @@
         "friend_message_needs_wake_prefix": False,
         "ignore_bot_self_message": False,
         "ignore_at_all": False,
+        "request_queue": False,
     },
     "provider": [],
     "provider_settings": {
@@ -468,6 +469,10 @@
             "type": "bool",
             "hint": "启用后,机器人会忽略 @ 全体成员 的消息事件。",
         },
+        "request_queue": {
+            "type": "bool",
+            "hint": "启用请求队列功能。当同一会话中同时收到多个请求时,会将这些请求排队依次处理,确保上下文连续性,避免并发处理导致的混乱。",
+        },
         "segmented_reply": {
             "type": "object",
             "items": {
@@ -1903,31 +1908,17 @@
                     "_special": "select_provider",
                     "hint": "留空代表不使用。可用于不支持视觉模态的聊天模型。",
                 },
-                "provider_stt_settings.enable": {
-                    "description": "默认启用语音转文本",
-                    "type": "bool",
-                },
                 "provider_stt_settings.provider_id": {
                     "description": "语音转文本模型",
                     "type": "string",
                     "hint": "留空代表不使用。",
                     "_special": "select_provider_stt",
-                    "condition": {
-                        "provider_stt_settings.enable": True,
-                    },
                 },
-                "provider_tts_settings.enable": {
-                    "description": "默认启用文本转语音",
-                    "type": "bool",
-                },
                 "provider_tts_settings.provider_id": {
                     "description": "文本转语音模型",
                     "type": "string",
                     "hint": "留空代表不使用。",
                     "_special": "select_provider_tts",
-                    "condition": {
-                        "provider_tts_settings.enable": True,
-                    },
                 },
                 "provider_settings.image_caption_prompt": {
                     "description": "图片转述提示词",
@@ -2216,6 +2207,11 @@
                 "description": "用户权限不足时是否回复",
                 "type": "bool",
             },
+            "platform_settings.request_queue": {
+                "description": "启用请求队列功能",
+                "type": "bool",
+                "hint": "当同一会话中同时收到多个请求时,会将这些请求排队依次处理,确保上下文连续性,避免并发处理导致的混乱。",
+            },
         },
     },
 },
diff --git a/astrbot/core/pipeline/process_stage/method/llm_request.py b/astrbot/core/pipeline/process_stage/method/llm_request.py
index 6035b21e9..dc62d4392 100644
--- a/astrbot/core/pipeline/process_stage/method/llm_request.py
+++ b/astrbot/core/pipeline/process_stage/method/llm_request.py
@@ -3,6 +3,7 @@
 """
 
 import asyncio
+from collections import defaultdict
 import copy
 import json
 import traceback
@@ -286,6 +287,12 @@ async def run_agent(
     )
 
 
+# Per-conversation locks used to serialize concurrent LLM requests into a queue.
+# NOTE(review): entries are never evicted, so this dict grows with the number of
+# distinct conversation ids ever seen — consider cleaning up idle entries.
+user_llm_locks = defaultdict(asyncio.Lock)
+
+
 class LLMRequestSubStage(Stage):
     async def initialize(self, ctx: PipelineContext) -> None:
         self.ctx = ctx
@@ -339,6 +346,12 @@ async def _get_session_conv(self, event: AstrMessageEvent):
         conversation = await conv_mgr.get_conversation(umo, cid)
         return conversation
 
+    def _unlock(self, cid: str):
+        # Release the per-conversation lock if it is currently held; no-op otherwise.
+        if cid in user_llm_locks and user_llm_locks[cid].locked():
+            user_llm_locks[cid].release()
+            logger.info(f"用户(cid: {cid}) 的请求已完成,锁已释放。")
+
     async def process(
         self, event: AstrMessageEvent, _nested: bool = False
     ) -> Union[None, AsyncGenerator[None, None]]:
@@ -390,8 +403,25 @@ async def process(
         if not req.prompt and not req.image_urls:
             return
 
+        # Request queue: serialize concurrent requests for the same conversation.
+        # .get() so configs created before this key existed don't raise KeyError.
+        if self.ctx.astrbot_config["platform_settings"].get("request_queue", False):
+            cid = req.conversation.cid
+            lock = user_llm_locks[cid]
+            if lock.locked():
+                logger.info(f"用户(cid: {cid}) 的新请求正在等待上一次请求完成...")
+            await lock.acquire()
+            try:
+                # Refresh to the latest context now that we hold the lock.
+                conversation = await self._get_session_conv(event)
+                req.conversation = conversation
+                req.contexts = json.loads(conversation.history)
+            except BaseException:
+                # Release on failure (incl. CancelledError) so the session
+                # is not deadlocked for all subsequent requests.
+                lock.release()
+                raise
+
         # 执行请求 LLM 前事件钩子。
         if await call_event_hook(event, EventType.OnLLMRequestEvent, req):
+            self._unlock(req.conversation.cid)
             return
 
         if isinstance(req.contexts, str):
@@ -505,6 +535,11 @@ async def process(
         if event.get_platform_name() == "webchat":
             asyncio.create_task(self._handle_webchat(event, req, provider))
 
+        # NOTE(review): any return/raise between lock.acquire() and this point
+        # (other than the OnLLMRequestEvent early-return above) leaves the lock
+        # held forever; a try/finally around the body would be more robust.
+        self._unlock(req.conversation.cid)
+
     async def _handle_webchat(
         self, event: AstrMessageEvent, req: ProviderRequest, prov: Provider
     ):