Skip to content

Commit 4c6a114

Browse files
CarltonXiang and harvey_xiang authored
feat: add openai request body log (#763)
* feat: timer false * feat: add openai request body log * feat: add openai request body log * feat: add openai request body log --------- Co-authored-by: harvey_xiang <[email protected]>
1 parent c716d1a commit 4c6a114

File tree

4 files changed

+110
-90
lines changed

4 files changed

+110
-90
lines changed

docker/.env.example

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -167,11 +167,6 @@ OSS_ACCESS_KEY_ID=
167167
OSS_ACCESS_KEY_SECRET=
168168
OSS_PUBLIC_BASE_URL=
169169

170-
## Logging / external sink
171-
CUSTOM_LOGGER_URL=
172-
CUSTOM_LOGGER_TOKEN=
173-
CUSTOM_LOGGER_WORKERS=2
174-
175170
## SDK / external client
176171
MEMOS_API_KEY=
177172
MEMOS_BASE_URL=https://memos.memtensor.cn/api/openmem/v1

src/memos/api/client.py

Lines changed: 78 additions & 60 deletions
Original file line numberDiff line numberDiff line change
@@ -10,18 +10,19 @@
1010
MemOSAddFeedBackResponse,
1111
MemOSAddKnowledgebaseFileResponse,
1212
MemOSAddResponse,
13+
MemOSChatResponse,
1314
MemOSCreateKnowledgebaseResponse,
1415
MemOSDeleteKnowledgebaseResponse,
1516
MemOSDeleteMemoryResponse,
1617
MemOSGetKnowledgebaseFileResponse,
1718
MemOSGetMemoryResponse,
1819
MemOSGetMessagesResponse,
1920
MemOSGetTaskStatusResponse,
20-
MemOSSearchResponse, MemOSChatResponse,
21+
MemOSSearchResponse,
2122
)
22-
2323
from memos.log import get_logger
2424

25+
2526
logger = get_logger(__name__)
2627

2728
MAX_RETRY_COUNT = 3
@@ -32,7 +33,7 @@ class MemOSClient:
3233

3334
def __init__(self, api_key: str | None = None, base_url: str | None = None):
3435
self.base_url = (
35-
base_url or os.getenv("MEMOS_BASE_URL") or "https://memos.memtensor.cn/api/openmem/v1"
36+
base_url or os.getenv("MEMOS_BASE_URL") or "https://memos.memtensor.cn/api/openmem/v1"
3637
)
3738
api_key = api_key or os.getenv("MEMOS_API_KEY")
3839

@@ -48,12 +49,12 @@ def _validate_required_params(self, **params):
4849
raise ValueError(f"{param_name} is required")
4950

5051
def get_message(
51-
self,
52-
user_id: str,
53-
conversation_id: str | None = None,
54-
conversation_limit_number: int = 6,
55-
message_limit_number: int = 6,
56-
source: str | None = None,
52+
self,
53+
user_id: str,
54+
conversation_id: str | None = None,
55+
conversation_limit_number: int = 6,
56+
message_limit_number: int = 6,
57+
source: str | None = None,
5758
) -> MemOSGetMessagesResponse | None:
5859
"""Get messages"""
5960
# Validate required parameters
@@ -82,18 +83,18 @@ def get_message(
8283
raise
8384

8485
def add_message(
85-
self,
86-
messages: list[dict[str, Any]],
87-
user_id: str,
88-
conversation_id: str,
89-
info: dict[str, Any] | None = None,
90-
source: str | None = None,
91-
app_id: str | None = None,
92-
agent_id: str | None = None,
93-
async_mode: bool = True,
94-
tags: list[str] | None = None,
95-
allow_public: bool = False,
96-
allow_knowledgebase_ids: list[str] | None = None,
86+
self,
87+
messages: list[dict[str, Any]],
88+
user_id: str,
89+
conversation_id: str,
90+
info: dict[str, Any] | None = None,
91+
source: str | None = None,
92+
app_id: str | None = None,
93+
agent_id: str | None = None,
94+
async_mode: bool = True,
95+
tags: list[str] | None = None,
96+
allow_public: bool = False,
97+
allow_knowledgebase_ids: list[str] | None = None,
9798
) -> MemOSAddResponse | None:
9899
"""Add message"""
99100
# Validate required parameters
@@ -130,18 +131,18 @@ def add_message(
130131
raise
131132

132133
def search_memory(
133-
self,
134-
query: str,
135-
user_id: str,
136-
conversation_id: str,
137-
memory_limit_number: int = 6,
138-
include_preference: bool = True,
139-
knowledgebase_ids: list[str] | None = None,
140-
filter: dict[str, Any] | None = None,
141-
source: str | None = None,
142-
include_tool_memory: bool = False,
143-
preference_limit_number: int = 6,
144-
tool_memory_limit_number: int = 6,
134+
self,
135+
query: str,
136+
user_id: str,
137+
conversation_id: str,
138+
memory_limit_number: int = 6,
139+
include_preference: bool = True,
140+
knowledgebase_ids: list[str] | None = None,
141+
filter: dict[str, Any] | None = None,
142+
source: str | None = None,
143+
include_tool_memory: bool = False,
144+
preference_limit_number: int = 6,
145+
tool_memory_limit_number: int = 6,
145146
) -> MemOSSearchResponse | None:
146147
"""Search memories"""
147148
# Validate required parameters
@@ -202,7 +203,7 @@ def get_memory(self, user_id: str, include_preference: str) -> MemOSGetMemoryRes
202203
raise
203204

204205
def create_knowledgebase(
205-
self, knowledgebase_name: str, knowledgebase_description: str
206+
self, knowledgebase_name: str, knowledgebase_description: str
206207
) -> MemOSCreateKnowledgebaseResponse | None:
207208
"""
208209
Create knowledgebase
@@ -234,7 +235,7 @@ def create_knowledgebase(
234235
raise
235236

236237
def delete_knowledgebase(
237-
self, knowledgebase_id: str
238+
self, knowledgebase_id: str
238239
) -> MemOSDeleteKnowledgebaseResponse | None:
239240
"""
240241
Delete knowledgebase
@@ -262,7 +263,7 @@ def delete_knowledgebase(
262263
raise
263264

264265
def add_knowledgebase_file_json(
265-
self, knowledgebase_id: str, file: list[dict[str, Any]]
266+
self, knowledgebase_id: str, file: list[dict[str, Any]]
266267
) -> MemOSAddKnowledgebaseFileResponse | None:
267268
"""
268269
add knowledgebase-file from json
@@ -291,7 +292,7 @@ def add_knowledgebase_file_json(
291292
raise
292293

293294
def add_knowledgebase_file_form(
294-
self, knowledgebase_id: str, files: list[str]
295+
self, knowledgebase_id: str, files: list[str]
295296
) -> MemOSAddKnowledgebaseFileResponse | None:
296297
"""
297298
add knowledgebase-file from form
@@ -328,7 +329,6 @@ def build_file_form_param(file_path):
328329
headers=headers,
329330
timeout=30,
330331
files=[build_file_form_param(file_path) for file_path in files],
331-
332332
)
333333
response.raise_for_status()
334334
response_data = response.json()
@@ -341,7 +341,7 @@ def build_file_form_param(file_path):
341341
raise
342342

343343
def delete_knowledgebase_file(
344-
self, file_ids: list[str]
344+
self, file_ids: list[str]
345345
) -> MemOSDeleteKnowledgebaseResponse | None:
346346
"""
347347
delete knowledgebase-file
@@ -369,7 +369,7 @@ def delete_knowledgebase_file(
369369
raise
370370

371371
def get_knowledgebase_file(
372-
self, file_ids: list[str]
372+
self, file_ids: list[str]
373373
) -> MemOSGetKnowledgebaseFileResponse | None:
374374
"""
375375
get knowledgebase-file
@@ -423,15 +423,15 @@ def get_task_status(self, task_id: str) -> MemOSGetTaskStatusResponse | None:
423423
raise
424424

425425
def add_feedback(
426-
self,
427-
user_id: str,
428-
conversation_id: str,
429-
feedback_content: str,
430-
agent_id: str | None = None,
431-
app_id: str | None = None,
432-
feedback_time: str | None = None,
433-
allow_public: bool = False,
434-
allow_knowledgebase_ids: list[str] | None = None,
426+
self,
427+
user_id: str,
428+
conversation_id: str,
429+
feedback_content: str,
430+
agent_id: str | None = None,
431+
app_id: str | None = None,
432+
feedback_time: str | None = None,
433+
allow_public: bool = False,
434+
allow_knowledgebase_ids: list[str] | None = None,
435435
) -> MemOSAddFeedBackResponse | None:
436436
"""Add feedback"""
437437
# Validate required parameters
@@ -465,7 +465,7 @@ def add_feedback(
465465
raise
466466

467467
def delete_memory(
468-
self, user_ids: list[str], memory_ids: list[str]
468+
self, user_ids: list[str], memory_ids: list[str]
469469
) -> MemOSDeleteMemoryResponse | None:
470470
"""delete_memory memories"""
471471
# Validate required parameters
@@ -492,18 +492,37 @@ def delete_memory(
492492
raise
493493

494494
def chat(
495-
self, user_id: str, conversation_id: str, query: str, internet_search: bool = False,
496-
force_stop: bool = False, use_mem_os_cube: bool = False, source: str | None = None,
497-
system_prompt: str | None = None, model_name: str | None = None, knowledgebase_ids: list[str] | None = None,
498-
filter: dict[str: Any] | None = None, add_message_on_answer: bool = False, app_id: str | None = None,
499-
agent_id: str | None = None, async_mode: bool = True, tags: list[str] | None = None,
500-
info: dict[str:Any] | None = None, allow_public: bool = False, max_tokens: int = 8192,
501-
temperature: float | None = None, top_p: float | None = None, include_preference: bool = True,
502-
preference_limit_number: int = 6, memory_limit_number: int = 6,
495+
self,
496+
user_id: str,
497+
conversation_id: str,
498+
query: str,
499+
internet_search: bool = False,
500+
force_stop: bool = False,
501+
use_mem_os_cube: bool = False,
502+
source: str | None = None,
503+
system_prompt: str | None = None,
504+
model_name: str | None = None,
505+
knowledgebase_ids: list[str] | None = None,
506+
filter: dict[str:Any] | None = None,
507+
add_message_on_answer: bool = False,
508+
app_id: str | None = None,
509+
agent_id: str | None = None,
510+
async_mode: bool = True,
511+
tags: list[str] | None = None,
512+
info: dict[str:Any] | None = None,
513+
allow_public: bool = False,
514+
max_tokens: int = 8192,
515+
temperature: float | None = None,
516+
top_p: float | None = None,
517+
include_preference: bool = True,
518+
preference_limit_number: int = 6,
519+
memory_limit_number: int = 6,
503520
) -> MemOSChatResponse | None:
504521
"""chat"""
505522
# Validate required parameters
506-
self._validate_required_params(user_id=user_id, conversation_id=conversation_id, query=query)
523+
self._validate_required_params(
524+
user_id=user_id, conversation_id=conversation_id, query=query
525+
)
507526

508527
url = f"{self.base_url}/chat"
509528
payload = {
@@ -531,7 +550,6 @@ def chat(
531550
"include_preference": include_preference,
532551
"preference_limit_number": preference_limit_number,
533552
"memory_limit_number": memory_limit_number,
534-
535553
}
536554

537555
for retry in range(MAX_RETRY_COUNT):

src/memos/api/product_models.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -874,6 +874,7 @@ class DeleteMessageData(BaseModel):
874874

875875
success: bool = Field(..., description="Operation success status")
876876

877+
877878
class ChatMessageData(BaseModel):
878879
"""Data model for chat Message based on actual API."""
879880

@@ -950,6 +951,7 @@ def success(self) -> bool:
950951
"""Convenient access to success status."""
951952
return self.data.success
952953

954+
953955
class MemOSChatResponse(BaseModel):
954956
"""Response model for chat operation based on actual API."""
955957

@@ -968,11 +970,11 @@ class MemOSGetTaskStatusResponse(BaseModel):
968970

969971
code: int = Field(..., description="Response status code")
970972
message: str = Field(..., description="Response message")
971-
data: list[GetTaskStatusMessageData] = Field(..., description="delete results data")
973+
data: list[GetTaskStatusMessageData] = Field(..., description="Task status data")
972974

973975
@property
974-
def data(self) -> list[GetTaskStatusMessageData]:
975-
"""Convenient access to task status."""
976+
def messages(self) -> list[GetTaskStatusMessageData]:
977+
"""Convenient access to task status messages."""
976978
return self.data
977979

978980

src/memos/llms/openai.py

Lines changed: 27 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -31,21 +31,23 @@ def __init__(self, config: OpenAILLMConfig):
3131
@timed_with_status(
3232
log_prefix="OpenAI LLM",
3333
log_extra_args=lambda self, messages, **kwargs: {
34-
"model_name_or_path": kwargs.get("model_name_or_path", self.config.model_name_or_path)
34+
"model_name_or_path": kwargs.get("model_name_or_path", self.config.model_name_or_path),
35+
"messages": messages,
3536
},
3637
)
3738
def generate(self, messages: MessageList, **kwargs) -> str:
3839
"""Generate a response from OpenAI LLM, optionally overriding generation params."""
39-
response = self.client.chat.completions.create(
40-
model=kwargs.get("model_name_or_path", self.config.model_name_or_path),
41-
messages=messages,
42-
temperature=kwargs.get("temperature", self.config.temperature),
43-
max_tokens=kwargs.get("max_tokens", self.config.max_tokens),
44-
top_p=kwargs.get("top_p", self.config.top_p),
45-
extra_body=kwargs.get("extra_body", self.config.extra_body),
46-
tools=kwargs.get("tools", NOT_GIVEN),
47-
timeout=kwargs.get("timeout", 30),
48-
)
40+
request_body = {
41+
"model": kwargs.get("model_name_or_path", self.config.model_name_or_path),
42+
"messages": messages,
43+
"temperature": kwargs.get("temperature", self.config.temperature),
44+
"max_tokens": kwargs.get("max_tokens", self.config.max_tokens),
45+
"top_p": kwargs.get("top_p", self.config.top_p),
46+
"extra_body": kwargs.get("extra_body", self.config.extra_body),
47+
"tools": kwargs.get("tools", NOT_GIVEN),
48+
}
49+
logger.info(f"OpenAI LLM Request body: {request_body}")
50+
response = self.client.chat.completions.create(**request_body)
4951
logger.info(f"Response from OpenAI: {response.model_dump_json()}")
5052
tool_calls = getattr(response.choices[0].message, "tool_calls", None)
5153
if isinstance(tool_calls, list) and len(tool_calls) > 0:
@@ -61,7 +63,7 @@ def generate(self, messages: MessageList, **kwargs) -> str:
6163
return response_content
6264

6365
@timed_with_status(
64-
log_prefix="OpenAI LLM",
66+
log_prefix="OpenAI LLM Stream",
6567
log_extra_args=lambda self, messages, **kwargs: {
6668
"model_name_or_path": self.config.model_name_or_path
6769
},
@@ -72,16 +74,19 @@ def generate_stream(self, messages: MessageList, **kwargs) -> Generator[str, Non
7274
logger.info("stream api not support tools")
7375
return
7476

75-
response = self.client.chat.completions.create(
76-
model=self.config.model_name_or_path,
77-
messages=messages,
78-
stream=True,
79-
temperature=kwargs.get("temperature", self.config.temperature),
80-
max_tokens=kwargs.get("max_tokens", self.config.max_tokens),
81-
top_p=kwargs.get("top_p", self.config.top_p),
82-
extra_body=kwargs.get("extra_body", self.config.extra_body),
83-
tools=kwargs.get("tools", NOT_GIVEN),
84-
)
77+
request_body = {
78+
"model": self.config.model_name_or_path,
79+
"messages": messages,
80+
"stream": True,
81+
"temperature": kwargs.get("temperature", self.config.temperature),
82+
"max_tokens": kwargs.get("max_tokens", self.config.max_tokens),
83+
"top_p": kwargs.get("top_p", self.config.top_p),
84+
"extra_body": kwargs.get("extra_body", self.config.extra_body),
85+
"tools": kwargs.get("tools", NOT_GIVEN),
86+
}
87+
88+
logger.info(f"OpenAI LLM Stream Request body: {request_body}")
89+
response = self.client.chat.completions.create(**request_body)
8590

8691
reasoning_started = False
8792

0 commit comments

Comments (0)