#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : OpenRouter provider for reasoning-capable models, surfacing the `reasoning` field returned alongside `content`

import json

from metagpt.configs.llm_config import LLMConfig, LLMType
from metagpt.const import USE_CONFIG_TIMEOUT
from metagpt.logs import log_llm_stream
from metagpt.provider.base_llm import BaseLLM
from metagpt.provider.general_api_requestor import GeneralAPIRequestor, OpenAIResponse
from metagpt.provider.llm_provider_registry import register_provider


@register_provider([LLMType.OPENROUTER_REASONING])
class OpenrouterReasoningLLM(BaseLLM):
    def __init__(self, config: LLMConfig):
        self.config = config
        self.model = self.config.model
        # fall back to the public OpenRouter endpoint when no base_url is configured
        self.base_url = config.base_url or "https://openrouter.ai/api/v1"
        self.client = GeneralAPIRequestor(base_url=self.base_url)
        self.http_method = "post"
        self.url_suffix = "/chat/completions"
        self.headers = {"Content-Type": "application/json", "Authorization": f"Bearer {self.config.api_key}"}
        self.reasoning_content = ""  # set after each call when the model returns a reasoning trace

    def decode(self, response: OpenAIResponse) -> dict:
        """Decode the raw bytes of an OpenAIResponse into a dict."""
        return json.loads(response.data.decode("utf-8"))

    def _const_kwargs(
        self, messages: list[dict], stream: bool = False, timeout=USE_CONFIG_TIMEOUT, **extra_kwargs
    ) -> dict:
        kwargs = {
            "messages": messages,
            "include_reasoning": True,  # ask OpenRouter to return the reasoning trace alongside the content
            "max_tokens": self.config.max_token,
            "temperature": self.config.temperature,
            "model": self.model,
            "stream": stream,
        }
        kwargs.update(extra_kwargs)  # let callers override or extend the defaults
        return kwargs

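    # Illustrative request body produced by _const_kwargs (values depend on the config):
    # {"model": "deepseek/deepseek-r1", "messages": [...], "include_reasoning": true,
    #  "max_tokens": 4096, "temperature": 0.0, "stream": false}
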
    def get_choice_text(self, rsp: dict) -> str:
        message = rsp["choices"][0]["message"]
        if "reasoning" in message:
            self.reasoning_content = message["reasoning"]
        return message["content"]

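    # Illustrative non-streaming response, reduced to the fields this provider
    # reads (the actual OpenRouter payload carries more keys):
    # {
    #     "choices": [{"message": {"content": "...", "reasoning": "..."}}],
    #     "usage": {"prompt_tokens": ..., "completion_tokens": ...},
    # }
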
    async def _achat_completion(self, messages: list[dict], timeout: int = USE_CONFIG_TIMEOUT) -> dict:
        payload = self._const_kwargs(messages)
        resp, _, _ = await self.client.arequest(
            url=self.url_suffix, method=self.http_method, params=payload, headers=self.headers
        )
        resp = self.decode(resp)
        self._update_costs(resp["usage"], model=self.model)
        return resp

    async def acompletion(self, messages: list[dict], timeout=USE_CONFIG_TIMEOUT) -> dict:
        return await self._achat_completion(messages, timeout=self.get_timeout(timeout))

    async def _achat_completion_stream(self, messages: list[dict], timeout: int = USE_CONFIG_TIMEOUT) -> str:
        # use a per-request copy so streaming does not permanently mutate self.headers
        headers = dict(self.headers)
        headers["Content-Type"] = "text/event-stream"  # adapt the header for SSE responses
        payload = self._const_kwargs(messages, stream=True)
        resp, _, _ = await self.client.arequest(
            url=self.url_suffix, method=self.http_method, params=payload, headers=headers, stream=True
        )
        collected_content = []
        collected_reasoning_content = []
        usage = {}
        async for chunk in resp:
            try:
                chunk = self.decode(chunk)
            except json.JSONDecodeError:
                continue  # skip keep-alive and other non-JSON SSE lines
            if not chunk:
                continue
            choices = chunk.get("choices") or [{}]
            delta = choices[0].get("delta", {})
            if delta.get("reasoning"):
                collected_reasoning_content.append(delta["reasoning"])
            elif delta.get("content"):
                collected_content.append(delta["content"])
                log_llm_stream(delta["content"])

            # usage, when reported, arrives on the final chunk; keep the last non-empty value
            usage = chunk.get("usage") or usage

        log_llm_stream("\n")
        self._update_costs(usage, model=self.model)
        full_content = "".join(collected_content)
        if collected_reasoning_content:
            self.reasoning_content = "".join(collected_reasoning_content)
        return full_content
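

# A minimal usage sketch (illustrative, not part of the provider). Assumes a
# valid OpenRouter API key; the model slug is a placeholder for any
# reasoning-capable model listed on OpenRouter.
if __name__ == "__main__":
    import asyncio

    async def main():
        config = LLMConfig(
            api_type=LLMType.OPENROUTER_REASONING,
            base_url="https://openrouter.ai/api/v1",
            api_key="sk-or-...",  # placeholder
            model="deepseek/deepseek-r1",  # placeholder model slug
        )
        llm = OpenrouterReasoningLLM(config)
        rsp = await llm.acompletion([{"role": "user", "content": "What is 17 * 24?"}])
        answer = llm.get_choice_text(rsp)  # also captures llm.reasoning_content
        print("reasoning:", llm.reasoning_content)
        print("answer:", answer)

    asyncio.run(main())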