from typing import Any, Dict, Iterator, Mapping, Optional, cast

from langchain_core.language_models import LanguageModelInput
from langchain_core.messages import (
    AIMessageChunk,
    BaseMessage,
    BaseMessageChunk,
    ChatMessageChunk,
    FunctionMessageChunk,
    HumanMessageChunk,
    SystemMessageChunk,
)
from langchain_core.messages.ai import UsageMetadata
from langchain_core.messages.tool import ToolMessageChunk, tool_call_chunk
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk
from langchain_core.runnables import RunnableConfig, ensure_config
from langchain_openai import ChatOpenAI
from langchain_openai.chat_models.base import _create_usage_metadata


def _convert_delta_to_message_chunk(
    _dict: Mapping[str, Any], default_class: type[BaseMessageChunk]
) -> BaseMessageChunk:
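    """Convert one raw streaming ``delta`` dict into a LangChain message chunk.

    Mirrors the private helper of the same name in
    ``langchain_openai.chat_models.base``, with one addition: a
    ``reasoning_content`` field in the delta (emitted by some
    OpenAI-compatible reasoning models) is preserved in
    ``additional_kwargs`` rather than dropped.

    Sketch with a hypothetical delta:

        delta = {"id": "chatcmpl-1", "role": "assistant",
                 "content": "Hi", "reasoning_content": "a greeting"}
        chunk = _convert_delta_to_message_chunk(delta, AIMessageChunk)
        # chunk.additional_kwargs["reasoning_content"] == "a greeting"
    """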
    id_ = _dict.get("id")
    role = cast(str, _dict.get("role"))
    content = cast(str, _dict.get("content") or "")
    additional_kwargs: dict = {}
    # Preserve provider-specific reasoning traces instead of dropping them.
    if "reasoning_content" in _dict:
        additional_kwargs["reasoning_content"] = _dict.get("reasoning_content")
    if _dict.get("function_call"):
        function_call = dict(_dict["function_call"])
        if "name" in function_call and function_call["name"] is None:
            function_call["name"] = ""
        additional_kwargs["function_call"] = function_call
    tool_call_chunks = []
    if raw_tool_calls := _dict.get("tool_calls"):
        additional_kwargs["tool_calls"] = raw_tool_calls
        try:
            tool_call_chunks = [
                tool_call_chunk(
                    name=rtc["function"].get("name"),
                    args=rtc["function"].get("arguments"),
                    id=rtc.get("id"),
                    index=rtc["index"],
                )
                for rtc in raw_tool_calls
            ]
        except KeyError:
            pass

    # Map the delta's role onto the corresponding message-chunk class,
    # falling back to default_class when no role is present.
    if role == "user" or default_class == HumanMessageChunk:
        return HumanMessageChunk(content=content, id=id_)
    elif role == "assistant" or default_class == AIMessageChunk:
        return AIMessageChunk(
            content=content,
            additional_kwargs=additional_kwargs,
            id=id_,
            tool_call_chunks=tool_call_chunks,  # type: ignore[arg-type]
        )
    elif role in ("system", "developer") or default_class == SystemMessageChunk:
        if role == "developer":
            additional_kwargs = {"__openai_role__": "developer"}
        else:
            additional_kwargs = {}
        return SystemMessageChunk(
            content=content, id=id_, additional_kwargs=additional_kwargs
        )
    elif role == "function" or default_class == FunctionMessageChunk:
        return FunctionMessageChunk(content=content, name=_dict["name"], id=id_)
    elif role == "tool" or default_class == ToolMessageChunk:
        return ToolMessageChunk(
            content=content, tool_call_id=_dict["tool_call_id"], id=id_
        )
    elif role or default_class == ChatMessageChunk:
        return ChatMessageChunk(content=content, role=role, id=id_)
    else:
        return default_class(content=content, id=id_)


class BaseChatOpenAI(ChatOpenAI):
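    """``ChatOpenAI`` variant that records token usage from each call.

    The usage metadata of the most recent ``invoke`` or stream is kept on
    the instance and exposed via ``get_last_generation_info()``. This makes
    instances stateful: concurrent calls on a shared instance will
    overwrite each other's usage data.
    """
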
    usage_metadata: dict = {}

    # custom_get_token_ids = custom_get_token_ids

    def get_last_generation_info(self) -> Optional[Dict[str, Any]]:
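        """Return the usage metadata recorded by the most recent call."""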
        return self.usage_metadata

    def _stream(self, *args: Any, **kwargs: Any) -> Iterator[ChatGenerationChunk]:
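        """Stream as ``ChatOpenAI._stream`` while capturing usage metadata."""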
        # Ask the backend to report token usage on the final streamed chunk.
        kwargs["stream_usage"] = True
        for chunk in super()._stream(*args, **kwargs):
            if chunk.message.usage_metadata is not None:
                self.usage_metadata = chunk.message.usage_metadata
            yield chunk

    def _convert_chunk_to_generation_chunk(
        self,
        chunk: dict,
        default_chunk_class: type,
        base_generation_info: Optional[dict],
    ) -> Optional[ChatGenerationChunk]:
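        """Convert one raw stream event into a ``ChatGenerationChunk``.

        Overrides the upstream conversion so deltas go through the local
        ``_convert_delta_to_message_chunk`` (keeping ``reasoning_content``)
        and so backends that omit ``usage`` or send a null ``delta`` are
        tolerated.
        """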
        if chunk.get("type") == "content.delta":  # from beta.chat.completions.stream
            return None
        token_usage = chunk.get("usage")
        choices = (
            chunk.get("choices", [])
            # from beta.chat.completions.stream
            or chunk.get("chunk", {}).get("choices", [])
        )

        # Some OpenAI-compatible backends emit a usage dict with no
        # prompt_tokens; treat that the same as no usage being reported.
        usage_metadata: Optional[UsageMetadata] = (
            _create_usage_metadata(token_usage)
            if token_usage and token_usage.get("prompt_tokens")
            else None
        )
        if len(choices) == 0:
            # logprobs is implicitly None
            return ChatGenerationChunk(
                message=default_chunk_class(content="", usage_metadata=usage_metadata)
            )

        choice = choices[0]
        if choice["delta"] is None:
            return None

        message_chunk = _convert_delta_to_message_chunk(
            choice["delta"], default_chunk_class
        )
        generation_info = {**base_generation_info} if base_generation_info else {}

        if finish_reason := choice.get("finish_reason"):
            generation_info["finish_reason"] = finish_reason
        if model_name := chunk.get("model"):
            generation_info["model_name"] = model_name
        if system_fingerprint := chunk.get("system_fingerprint"):
            generation_info["system_fingerprint"] = system_fingerprint
        if logprobs := choice.get("logprobs"):
            generation_info["logprobs"] = logprobs

        if usage_metadata and isinstance(message_chunk, AIMessageChunk):
            message_chunk.usage_metadata = usage_metadata

        return ChatGenerationChunk(
            message=message_chunk, generation_info=generation_info or None
        )

    def invoke(
        self,
        input: LanguageModelInput,
        config: Optional[RunnableConfig] = None,
        *,
        stop: Optional[list[str]] = None,
        **kwargs: Any,
    ) -> BaseMessage:
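        """Invoke the model and record its token usage on the instance.

        Follows ``BaseChatModel.invoke``, additionally storing the
        response's ``token_usage`` (or, failing that, ``usage_metadata``)
        for later retrieval via ``get_last_generation_info()``.
        """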
        config = ensure_config(config)
        chat_result = cast(
            ChatGeneration,
            self.generate_prompt(
                [self._convert_input(input)],
                stop=stop,
                callbacks=config.get("callbacks"),
                tags=config.get("tags"),
                metadata=config.get("metadata"),
                run_name=config.get("run_name"),
                run_id=config.pop("run_id", None),
                **kwargs,
            ).generations[0][0],
        ).message

        # Prefer the provider-reported token_usage when present; otherwise
        # fall back to the normalized usage_metadata on the message.
        self.usage_metadata = chat_result.response_metadata.get(
            "token_usage", chat_result.usage_metadata
        )
        return chat_result
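

if __name__ == "__main__":
    # Minimal usage sketch, not part of the module proper: the model name is
    # a placeholder and a valid OPENAI_API_KEY is assumed to be configured.
    llm = BaseChatOpenAI(model="gpt-4o-mini")

    # Streaming: usage metadata is captured from the final streamed chunk.
    for part in llm.stream("Say hi in one word."):
        print(part.content, end="", flush=True)
    print()
    print("stream usage:", llm.get_last_generation_info())

    # Non-streaming: invoke() records token_usage from response_metadata.
    reply = llm.invoke("Say hi in one word.")
    print(reply.content)
    print("invoke usage:", llm.get_last_generation_info())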