Skip to content

Commit f9a10d1

Browse files
CarltonXiang, CaralHsi, yuan.wang, and harvey_xiang
authored
Feat/log rotating (#507)
* Update API Reference link in README.md * hotfix bug in pref init * feat: log support time rotating * feat: log support time rotating * feat: log support time rotating * feat: delete useless log * feat: delete useless log * feat: add time log * feat: add time log --------- Co-authored-by: CaralHsi <[email protected]> Co-authored-by: yuan.wang <[email protected]> Co-authored-by: harvey_xiang <[email protected]>
1 parent 02284a4 commit f9a10d1

File tree

4 files changed

+14
-5
lines changed

src/memos/embedders/universal_api.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ def __init__(self, config: UniversalAPIEmbedderConfig):
2626
else:
2727
raise ValueError(f"Embeddings unsupported provider: {self.provider}")
2828

29-
@timed(log=True, log_prefix="EmbedderAPI")
29+
@timed(log=True, log_prefix="model_timed_embedding")
3030
def embed(self, texts: list[str]) -> list[list[float]]:
3131
if self.provider == "openai" or self.provider == "azure":
3232
try:

src/memos/llms/openai.py

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
import hashlib
22
import json
3+
import time
34

45
from collections.abc import Generator
56
from typing import ClassVar
@@ -57,12 +58,15 @@ def clear_cache(cls):
5758
cls._instances.clear()
5859
logger.info("OpenAI LLM instance cache cleared")
5960

60-
@timed(log=True, log_prefix="OpenAI LLM")
61+
@timed(log=True, log_prefix="model_timed_openai")
6162
def generate(self, messages: MessageList, **kwargs) -> str:
6263
"""Generate a response from OpenAI LLM, optionally overriding generation params."""
6364
temperature = kwargs.get("temperature", self.config.temperature)
6465
max_tokens = kwargs.get("max_tokens", self.config.max_tokens)
6566
top_p = kwargs.get("top_p", self.config.top_p)
67+
start_time = time.time()
68+
logger.info(f"openai model request start, model_name: {self.config.model_name_or_path}")
69+
6670
response = self.client.chat.completions.create(
6771
model=self.config.model_name_or_path,
6872
messages=messages,
@@ -71,7 +75,11 @@ def generate(self, messages: MessageList, **kwargs) -> str:
7175
max_tokens=max_tokens,
7276
top_p=top_p,
7377
)
74-
logger.info(f"Response from OpenAI: {response.model_dump_json()}")
78+
79+
end_time = time.time()
80+
logger.info(
81+
f"openai model request end, time_cost: {end_time - start_time:.0f} ms, response from OpenAI: {response.model_dump_json()}"
82+
)
7583
response_content = response.choices[0].message.content
7684
if self.config.remove_think_prefix:
7785
return remove_thinking_tags(response_content)

src/memos/reranker/http_bge.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -119,7 +119,7 @@ def __init__(
119119
self.warn_unknown_filter_keys = bool(warn_unknown_filter_keys)
120120
self._warned_missing_keys: set[str] = set()
121121

122-
@timed(log=True, log_prefix="RerankerAPI")
122+
@timed(log=True, log_prefix="model_timed_rerank")
123123
def rerank(
124124
self,
125125
query: str,

src/memos/utils.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,8 +17,9 @@ def wrapper(*args, **kwargs):
1717
start = time.perf_counter()
1818
result = fn(*args, **kwargs)
1919
elapsed = time.perf_counter() - start
20+
elapsed_ms = elapsed * 1000.0
2021
if log:
21-
logger.info(f"[TIMER] {log_prefix or fn.__name__} took {elapsed:.2f} seconds")
22+
logger.info(f"[TIMER] {log_prefix or fn.__name__} took {elapsed_ms:.0f} ms")
2223
return result
2324

2425
return wrapper

0 commit comments

Comments (0)