
Commit 4e8084a

Add basic logging capabilities (#18)
* wip add some basic logging capabilities
* Use rich for logging
* Do more to prevent logs from being dropped when a Live() display is active
* Throw error instead of warn if tool parameters aren't valid JSON
* Update readme
1 parent 3b7ccea commit 4e8084a


12 files changed: +138 −48 lines changed


README.md

Lines changed: 3 additions & 4 deletions
@@ -250,13 +250,12 @@ If the problem isn't self-evident, you can also reach into the `.last_turn()`, w
 </div>
 
 
-
-
-Also, since `chatlas` builds on top of packages like `anthropic` and `openai`, you can also enable their debug logging to get even more detailed information about what's going on under the hood:
+For monitoring issues in a production (or otherwise non-interactive) environment, you may want to enable logging. Since `chatlas` builds on top of packages like `anthropic` and `openai`, you can also enable their debug logging to get lower-level information, like HTTP requests and response codes.
 
 ```shell
-$ export ANTHROPIC_LOG=info
+$ export CHATLAS_LOG=info
 $ export OPENAI_LOG=info
+$ export ANTHROPIC_LOG=info
 ```
 
 ### Next steps
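
The new README text points at environment variables, but since the `CHATLAS_LOG=info` path (see `chatlas/_logging.py` below) just configures a standard-library logger named `chatlas`, the same records can also be surfaced with ordinary `logging` configuration. A minimal sketch, assuming only that the logger name is `chatlas`; unlike the environment-variable route, it does not install a RichHandler:

```python
# Minimal sketch: surface chatlas log records with plain stdlib logging,
# without setting CHATLAS_LOG. Assumes the logger is named "chatlas",
# as defined in chatlas/_logging.py; no RichHandler is installed here.
import logging

logging.basicConfig(level=logging.INFO, format="%(name)s - %(message)s")
logging.getLogger("chatlas").setLevel(logging.INFO)
```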

chatlas/_anthropic.py

Lines changed: 1 addition & 1 deletion
@@ -16,11 +16,11 @@
     ContentToolRequest,
     ContentToolResult,
 )
+from ._logging import log_model_default
 from ._provider import Provider
 from ._tokens import tokens_log
 from ._tools import Tool, basemodel_to_param_schema
 from ._turn import Turn, normalize_turns
-from ._utils import log_model_default
 
 if TYPE_CHECKING:
     from anthropic.types import (

chatlas/_chat.py

Lines changed: 3 additions & 0 deletions
@@ -34,6 +34,7 @@
     MarkdownDisplay,
     MockMarkdownDisplay,
 )
+from ._logging import log_tool_error
 from ._provider import Provider
 from ._tools import Tool
 from ._turn import Turn, user_turn
@@ -1035,6 +1036,7 @@ def _invoke_tool(
 
             return ContentToolResult(id_, result, None)
         except Exception as e:
+            log_tool_error(func.__name__, str(arguments), e)
             return ContentToolResult(id_, None, str(e))
 
     @staticmethod
@@ -1054,6 +1056,7 @@ async def _invoke_tool_async(
 
             return ContentToolResult(id_, result, None)
         except Exception as e:
+            log_tool_error(func.__name__, str(arguments), e)
            return ContentToolResult(id_, None, str(e))
 
     def _markdown_display(
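
The two `_invoke_tool` hunks only add a log call inside the existing `except` blocks, so a failing tool still yields a `ContentToolResult` carrying the error string; the log is a diagnostic side channel. A standalone sketch of that pattern is below (the `run_tool` and `add` names are hypothetical, not chatlas APIs):

```python
# Standalone sketch of the pattern in _invoke_tool / _invoke_tool_async in
# chatlas/_chat.py: log the failure, then return an error-carrying result
# instead of raising. `run_tool` and `add` are names made up for illustration.
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("chatlas")


def run_tool(func, arguments: dict):
    try:
        return ("ok", func(**arguments))
    except Exception as e:
        # Mirrors log_tool_error(): record the tool name, its arguments,
        # and the exception message at INFO level.
        logger.info(
            f"Error invoking tool function '{func.__name__}' with arguments: "
            f"{arguments}. The error message is: '{e}'"
        )
        return ("error", str(e))


def add(x: int, y: int) -> int:
    return x + y


print(run_tool(add, {"x": 1}))  # missing argument -> logged, error result returned
```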

chatlas/_display.py

Lines changed: 12 additions & 0 deletions
@@ -1,9 +1,12 @@
+import logging
 from abc import ABC, abstractmethod
 from typing import Any
 from uuid import uuid4
 
 from rich.live import Live
+from rich.logging import RichHandler
 
+from ._logging import logger
 from ._typing_extensions import TypedDict
 
 
@@ -64,6 +67,15 @@ def update(self, content: str):
 
     def __enter__(self):
         self.live.__enter__()
+        # Live() isn't smart enough to automatically display logs when they
+        # get handled while Live() is active. However, if the logging handler
+        # is a RichHandler, it can be told about the live console so it can
+        # add logs to the top of the Live console.
+        handlers = [*logging.getLogger().handlers, *logger.handlers]
+        for h in handlers:
+            if isinstance(h, RichHandler):
+                h.console = self.live.console
+
         return self
 
     def __exit__(self, exc_type, exc_value, traceback):
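
The `__enter__` hook relies on the fact that a `rich.live.Live` display renders anything printed or logged through its own console above the live region instead of dropping it, so re-pointing every `RichHandler` at `live.console` keeps log records visible while streamed output is being rendered. A minimal sketch of the same trick outside chatlas, assuming only that `rich` is installed:

```python
# Minimal sketch: point a RichHandler at the console that a Live() display is
# using, so log records emitted while Live() is active are rendered above the
# live region rather than dropped. Mirrors the __enter__ hook in _display.py.
import logging
import time

from rich.console import Console
from rich.live import Live
from rich.logging import RichHandler

handler = RichHandler()
logging.basicConfig(level=logging.INFO, handlers=[handler], format="%(message)s")
log = logging.getLogger("demo")

with Live("streaming...", console=Console(), refresh_per_second=10) as live:
    # Same reassignment as in _display.py: tell the handler about the live
    # console so its output is routed through (and above) the Live display.
    handler.console = live.console
    for i in range(3):
        log.info("log record %d while Live() is active", i)
        live.update(f"streaming... chunk {i}")
        time.sleep(0.2)
```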

chatlas/_github.py

Lines changed: 2 additions & 1 deletion
@@ -4,9 +4,10 @@
 from typing import TYPE_CHECKING, Optional
 
 from ._chat import Chat
+from ._logging import log_model_default
 from ._openai import ChatOpenAI
 from ._turn import Turn
-from ._utils import MISSING, MISSING_TYPE, log_model_default
+from ._utils import MISSING, MISSING_TYPE
 
 if TYPE_CHECKING:
     from ._openai import ChatCompletion

chatlas/_google.py

Lines changed: 1 addition & 1 deletion
@@ -15,10 +15,10 @@
     ContentToolRequest,
     ContentToolResult,
 )
+from ._logging import log_model_default
 from ._provider import Provider
 from ._tools import Tool, basemodel_to_param_schema
 from ._turn import Turn, normalize_turns
-from ._utils import log_model_default
 
 if TYPE_CHECKING:
     from google.generativeai.types.content_types import (

chatlas/_groq.py

Lines changed: 2 additions & 1 deletion
@@ -4,9 +4,10 @@
 from typing import TYPE_CHECKING, Optional
 
 from ._chat import Chat
+from ._logging import log_model_default
 from ._openai import ChatOpenAI
 from ._turn import Turn
-from ._utils import MISSING, MISSING_TYPE, log_model_default
+from ._utils import MISSING, MISSING_TYPE
 
 if TYPE_CHECKING:
     from ._openai import ChatCompletion

chatlas/_logging.py

Lines changed: 61 additions & 0 deletions
@@ -0,0 +1,61 @@
+import logging
+import os
+import warnings
+
+from rich.logging import RichHandler
+
+
+def _rich_handler() -> RichHandler:
+    formatter = logging.Formatter("%(name)s - %(message)s")
+    handler = RichHandler()
+    handler.setFormatter(formatter)
+    return handler
+
+
+logger = logging.getLogger("chatlas")
+
+if os.environ.get("CHATLAS_LOG") == "info":
+    # By adding a RichHandler to chatlas' logger, we can guarantee that its
+    # logs never get dropped, even if the root logger's handlers are not
+    # RichHandlers.
+    logger.setLevel(logging.INFO)
+    logger.addHandler(_rich_handler())
+    logger.propagate = False
+
+    # Add a RichHandler to the root logger if there are no handlers. Note that
+    # if chatlas is imported before other libraries that set up logging (like
+    # openai, anthropic, or httpx), this will ensure that logs from those
+    # libraries are also displayed in the rich console.
+    root = logging.getLogger()
+    if not root.handlers:
+        root.addHandler(_rich_handler())
+
+    # Warn if there are non-RichHandler handlers on the root logger.
+    # TODO: we could consider something a bit more aggressive here, like
+    # removing non-RichHandler handlers from the root logger, but that
+    # could be surprising to users.
+    bad_handlers = [
+        h.get_name() for h in root.handlers if not isinstance(h, RichHandler)
+    ]
+    if len(bad_handlers) > 0:
+        warnings.warn(
+            "When setting up logging handlers for CHATLAS_LOG, chatlas detected "
+            f"non-rich handler(s) on the root logger named {bad_handlers}. "
+            "As a result, logs handled by those handlers may be dropped when the "
+            "`echo` argument of `.chat()`, `.stream()`, etc., is something "
+            "other than 'none'. This problem can likely be fixed by importing "
+            "`chatlas` before other libraries that set up logging, or adding a "
+            "RichHandler to the root logger before loading other libraries.",
+        )
+
+
+def log_model_default(model: str) -> str:
+    logger.info(f"Defaulting to `model = '{model}'`.")
+    return model
+
+
+def log_tool_error(name: str, arguments: str, e: Exception):
+    logger.info(
+        f"Error invoking tool function '{name}' with arguments: {arguments}. "
+        f"The error message is: '{e}'",
+    )
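
Note that all of the handler wiring above runs at import time, so `CHATLAS_LOG` only has an effect if it is set before `chatlas` is imported. A hedged usage sketch (the `ChatOpenAI()` call is illustrative and assumes an OpenAI key is available; any code path that reaches `log_model_default` would produce the same kind of record):

```python
# Sketch: CHATLAS_LOG must be set before chatlas is imported, because the
# RichHandler wiring in chatlas/_logging.py happens at import time.
import os

os.environ["CHATLAS_LOG"] = "info"

import chatlas  # the "chatlas" logger now has a RichHandler and level INFO

# Constructing a provider without an explicit `model=` goes through
# log_model_default(), which emits an INFO record along the lines of:
#   Defaulting to `model = '...'`.
chat = chatlas.ChatOpenAI()  # illustrative; assumes OPENAI_API_KEY is set
```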

chatlas/_openai.py

Lines changed: 7 additions & 6 deletions
@@ -1,7 +1,6 @@
 from __future__ import annotations
 
 import json
-import warnings
 from typing import TYPE_CHECKING, Any, Literal, Optional, cast, overload
 
 from pydantic import BaseModel
@@ -16,12 +15,13 @@
     ContentToolRequest,
     ContentToolResult,
 )
+from ._logging import log_model_default
 from ._merge import merge_dicts
 from ._provider import Provider
 from ._tokens import tokens_log
 from ._tools import Tool, basemodel_to_param_schema
 from ._turn import Turn, normalize_turns
-from ._utils import MISSING, MISSING_TYPE, is_testing, log_model_default
+from ._utils import MISSING, MISSING_TYPE, is_testing
 
 if TYPE_CHECKING:
     from openai.types.chat import (
@@ -473,11 +473,12 @@ def _as_turn(
         try:
             args = json.loads(func.arguments) if func.arguments else {}
         except json.JSONDecodeError:
-            warnings.warn(
+            raise ValueError(
                 f"The model's completion included a tool request ({func.name}) "
-                "with invalid JSON for input arguments: '{func.arguments}'",
-                InvalidJSONParameterWarning,
-                stacklevel=2,
+                f"with invalid JSON for input arguments: '{func.arguments}'. "
+                "This can happen if the model hallucinates parameters not defined by "
+                "your function schema. Try revising your tool description and system "
+                "prompt to be more specific about the expected input arguments to this function."
             )
 
         contents.append(
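
The last hunk turns what used to be a warning into a hard failure: when `json.loads` cannot parse a tool call's arguments, a `ValueError` with remediation advice is raised instead. A standalone sketch of that control flow (the function name and the malformed argument string are made up for illustration):

```python
# Standalone sketch of the new behavior: invalid JSON in a tool request's
# arguments raises ValueError instead of emitting a warning.
# `func_name` and `raw_arguments` are made-up values for illustration.
import json

func_name = "get_weather"
raw_arguments = '{"city": "Oslo", }'  # trailing comma -> not valid JSON

try:
    args = json.loads(raw_arguments) if raw_arguments else {}
except json.JSONDecodeError as err:
    raise ValueError(
        f"The model's completion included a tool request ({func_name}) "
        f"with invalid JSON for input arguments: '{raw_arguments}'. "
        "This can happen if the model hallucinates parameters not defined by "
        "your function schema."
    ) from err
```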

chatlas/_perplexity.py

Lines changed: 2 additions & 1 deletion
@@ -4,9 +4,10 @@
 from typing import TYPE_CHECKING, Optional
 
 from ._chat import Chat
+from ._logging import log_model_default
 from ._openai import ChatOpenAI
 from ._turn import Turn
-from ._utils import MISSING, MISSING_TYPE, log_model_default
+from ._utils import MISSING, MISSING_TYPE
 
 if TYPE_CHECKING:
     from ._openai import ChatCompletion
