Skip to content

Commit 25fee6a

Browse files
author
harvey_xiang
committed
feat: add OpenAI request body logging
1 parent 00a1f04 commit 25fee6a

File tree

1 file changed

+27
-22
lines changed

1 file changed

+27
-22
lines changed

src/memos/llms/openai.py

Lines changed: 27 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -31,21 +31,23 @@ def __init__(self, config: OpenAILLMConfig):
3131
@timed_with_status(
3232
log_prefix="OpenAI LLM",
3333
log_extra_args=lambda self, messages, **kwargs: {
34-
"model_name_or_path": kwargs.get("model_name_or_path", self.config.model_name_or_path)
34+
"model_name_or_path": kwargs.get("model_name_or_path", self.config.model_name_or_path),
35+
"messages": messages,
3536
},
3637
)
3738
def generate(self, messages: MessageList, **kwargs) -> str:
3839
"""Generate a response from OpenAI LLM, optionally overriding generation params."""
39-
response = self.client.chat.completions.create(
40-
model=kwargs.get("model_name_or_path", self.config.model_name_or_path),
41-
messages=messages,
42-
temperature=kwargs.get("temperature", self.config.temperature),
43-
max_tokens=kwargs.get("max_tokens", self.config.max_tokens),
44-
top_p=kwargs.get("top_p", self.config.top_p),
45-
extra_body=kwargs.get("extra_body", self.config.extra_body),
46-
tools=kwargs.get("tools", NOT_GIVEN),
47-
timeout=kwargs.get("timeout", 30),
48-
)
40+
request_body = {
41+
"model": kwargs.get("model_name_or_path", self.config.model_name_or_path),
42+
"messages": messages,
43+
"temperature": kwargs.get("temperature", self.config.temperature),
44+
"max_tokens": kwargs.get("max_tokens", self.config.max_tokens),
45+
"top_p": kwargs.get("top_p", self.config.top_p),
46+
"extra_body": kwargs.get("extra_body", self.config.extra_body),
47+
"tools": kwargs.get("tools", NOT_GIVEN),
48+
}
49+
logger.info(f"OpenAI LLM Request body: {request_body}")
50+
response = self.client.chat.completions.create(**request_body)
4951
logger.info(f"Response from OpenAI: {response.model_dump_json()}")
5052
tool_calls = getattr(response.choices[0].message, "tool_calls", None)
5153
if isinstance(tool_calls, list) and len(tool_calls) > 0:
@@ -61,7 +63,7 @@ def generate(self, messages: MessageList, **kwargs) -> str:
6163
return response_content
6264

6365
@timed_with_status(
64-
log_prefix="OpenAI LLM",
66+
log_prefix="OpenAI LLM Stream",
6567
log_extra_args=lambda self, messages, **kwargs: {
6668
"model_name_or_path": self.config.model_name_or_path
6769
},
@@ -72,16 +74,19 @@ def generate_stream(self, messages: MessageList, **kwargs) -> Generator[str, Non
7274
logger.info("stream api not support tools")
7375
return
7476

75-
response = self.client.chat.completions.create(
76-
model=self.config.model_name_or_path,
77-
messages=messages,
78-
stream=True,
79-
temperature=kwargs.get("temperature", self.config.temperature),
80-
max_tokens=kwargs.get("max_tokens", self.config.max_tokens),
81-
top_p=kwargs.get("top_p", self.config.top_p),
82-
extra_body=kwargs.get("extra_body", self.config.extra_body),
83-
tools=kwargs.get("tools", NOT_GIVEN),
84-
)
77+
request_body = {
78+
"model": self.config.model_name_or_path,
79+
"messages": messages,
80+
"stream": True,
81+
"temperature": kwargs.get("temperature", self.config.temperature),
82+
"max_tokens": kwargs.get("max_tokens", self.config.max_tokens),
83+
"top_p": kwargs.get("top_p", self.config.top_p),
84+
"extra_body": kwargs.get("extra_body", self.config.extra_body),
85+
"tools": kwargs.get("tools", NOT_GIVEN),
86+
}
87+
88+
logger.info(f"OpenAI LLM Stream Request body: {request_body}")
89+
response = self.client.chat.completions.create(**request_body)
8590

8691
reasoning_started = False
8792

0 commit comments

Comments
 (0)