Skip to content

Commit 33a4502

Browse files
committed
wip add some basic logging capabilities
1 parent 3b7ccea commit 33a4502

File tree

10 files changed

+86
-39
lines changed

10 files changed

+86
-39
lines changed

chatlas/_anthropic.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,11 +16,11 @@
1616
ContentToolRequest,
1717
ContentToolResult,
1818
)
19+
from ._logging import log_model_default
1920
from ._provider import Provider
2021
from ._tokens import tokens_log
2122
from ._tools import Tool, basemodel_to_param_schema
2223
from ._turn import Turn, normalize_turns
23-
from ._utils import log_model_default
2424

2525
if TYPE_CHECKING:
2626
from anthropic.types import (

chatlas/_chat.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,7 @@
3434
MarkdownDisplay,
3535
MockMarkdownDisplay,
3636
)
37+
from ._logging import log_tool_error
3738
from ._provider import Provider
3839
from ._tools import Tool
3940
from ._turn import Turn, user_turn
@@ -1035,6 +1036,7 @@ def _invoke_tool(
10351036

10361037
return ContentToolResult(id_, result, None)
10371038
except Exception as e:
1039+
log_tool_error(func.__name__, str(arguments), e)
10381040
return ContentToolResult(id_, None, str(e))
10391041

10401042
@staticmethod
@@ -1054,6 +1056,7 @@ async def _invoke_tool_async(
10541056

10551057
return ContentToolResult(id_, result, None)
10561058
except Exception as e:
1059+
log_tool_error(func.__name__, str(arguments), e)
10571060
return ContentToolResult(id_, None, str(e))
10581061

10591062
def _markdown_display(

chatlas/_github.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,9 +4,10 @@
44
from typing import TYPE_CHECKING, Optional
55

66
from ._chat import Chat
7+
from ._logging import log_model_default
78
from ._openai import ChatOpenAI
89
from ._turn import Turn
9-
from ._utils import MISSING, MISSING_TYPE, log_model_default
10+
from ._utils import MISSING, MISSING_TYPE
1011

1112
if TYPE_CHECKING:
1213
from ._openai import ChatCompletion

chatlas/_google.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,10 +15,10 @@
1515
ContentToolRequest,
1616
ContentToolResult,
1717
)
18+
from ._logging import log_model_default
1819
from ._provider import Provider
1920
from ._tools import Tool, basemodel_to_param_schema
2021
from ._turn import Turn, normalize_turns
21-
from ._utils import log_model_default
2222

2323
if TYPE_CHECKING:
2424
from google.generativeai.types.content_types import (

chatlas/_groq.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,9 +4,10 @@
44
from typing import TYPE_CHECKING, Optional
55

66
from ._chat import Chat
7+
from ._logging import log_model_default
78
from ._openai import ChatOpenAI
89
from ._turn import Turn
9-
from ._utils import MISSING, MISSING_TYPE, log_model_default
10+
from ._utils import MISSING, MISSING_TYPE
1011

1112
if TYPE_CHECKING:
1213
from ._openai import ChatCompletion

chatlas/_logging.py

Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,29 @@
"""Basic logging setup for chatlas.

Importing this module configures the package-level ``chatlas`` logger.
By default it only receives a ``NullHandler`` (library convention: stay
silent unless the application configures logging). Setting the
``CHATLAS_LOG`` environment variable to ``info`` additionally attaches a
file handler that writes INFO-level records to ``chatlas.log``.
"""

import logging
import os

# Package-level logger; submodules log through the helper functions below.
logger = logging.getLogger("chatlas")
if len(logger.handlers) == 0:
    logger.addHandler(logging.NullHandler())

if os.getenv("CHATLAS_LOG", "").lower() == "info":
    # NOTE(review): stdout print at import time is a wip artifact — consider
    # removing or routing through the logger before release.
    print("Setting log level to INFO")
    formatter = logging.Formatter("%(asctime)s %(levelname)s - %(name)s - %(message)s")
    handler = logging.FileHandler("chatlas.log")
    handler.setFormatter(formatter)
    handler.setLevel(logging.INFO)
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)


def log_model_default(model: str) -> str:
    """Log that *model* was chosen as a default and return it unchanged."""
    # Lazy %-style args defer formatting until a handler actually emits.
    logger.info("Defaulting to `model = '%s'`.", model)
    return model


def log_tool_error(name: str, arguments: str, e: Exception) -> None:
    """Record a failed tool invocation: tool name, arguments, and the error.

    NOTE(review): logged at INFO because the error is also surfaced back to
    the chat model (see ``_invoke_tool``); bump to WARNING if that changes.
    """
    logger.info(
        "Error invoking tool function '%s' with arguments: %s. "
        "The error message is: '%s'",
        name,
        arguments,
        e,
    )

chatlas/_openai.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,12 +16,13 @@
1616
ContentToolRequest,
1717
ContentToolResult,
1818
)
19+
from ._logging import log_model_default
1920
from ._merge import merge_dicts
2021
from ._provider import Provider
2122
from ._tokens import tokens_log
2223
from ._tools import Tool, basemodel_to_param_schema
2324
from ._turn import Turn, normalize_turns
24-
from ._utils import MISSING, MISSING_TYPE, is_testing, log_model_default
25+
from ._utils import MISSING, MISSING_TYPE, is_testing
2526

2627
if TYPE_CHECKING:
2728
from openai.types.chat import (

chatlas/_perplexity.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,9 +4,10 @@
44
from typing import TYPE_CHECKING, Optional
55

66
from ._chat import Chat
7+
from ._logging import log_model_default
78
from ._openai import ChatOpenAI
89
from ._turn import Turn
9-
from ._utils import MISSING, MISSING_TYPE, log_model_default
10+
from ._utils import MISSING, MISSING_TYPE
1011

1112
if TYPE_CHECKING:
1213
from ._openai import ChatCompletion

chatlas/_utils.py

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -2,15 +2,12 @@
22

33
import functools
44
import inspect
5-
import logging
65
import os
76
import re
87
from typing import Awaitable, Callable, TypeVar, cast
98

109
from ._typing_extensions import ParamSpec, TypeGuard
1110

12-
logger = logging.getLogger("chatlas")
13-
1411
# --------------------------------------------------------------------
1512
# wrap_async() and is_async_callable() was copied from shiny/_utils.py
1613
# --------------------------------------------------------------------
@@ -80,11 +77,6 @@ class MISSING_TYPE:
8077
MISSING = MISSING_TYPE()
8178

8279

83-
def log_model_default(model: str) -> str:
84-
logger.info(f"Defaulting to `model = '{model}'`.")
85-
return model
86-
87-
8880
# --------------------------------------------------------------------
8981
# html_escape was copied from htmltools/_utils.py
9082
# --------------------------------------------------------------------

docs/tool-calling.qmd

Lines changed: 44 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -20,57 +20,58 @@ from chatlas import ChatOpenAI
2020

2121
### Motivating example
2222

23-
Let's take a look at an example where we really need an external tool. Chat models generally do not know the current time, which makes questions like these impossible.
23+
Let's take a look at an example where we really need an external tool. Chat models generally do not have access to "real-time" information, such as current events, weather, etc. Let's see what happens when we ask the chat model about the weather in a specific location:
2424

2525
```{python}
26-
chat = ChatOpenAI(model="gpt-4o")
27-
_ = chat.chat("How long ago exactly was the moment Neil Armstrong touched down on the moon?")
26+
chat = ChatOpenAI(model="gpt-4o-mini")
27+
_ = chat.chat("What's the weather like today in Duluth, MN?")
2828
```
29+
2930

30-
Unfortunately, the LLM hallucinates the current date. Let's give the chat model the ability to determine the current time and try again.
31+
Fortunately, the model is smart enough to know that it doesn't have access to real-time information, and it doesn't try to make up an answer. However, we can help it out by providing a tool that can fetch the weather for a given location.
3132

3233
### Defining a tool function
3334

34-
The first thing we'll do is define a Python function that returns the current time. This will be our tool.
35+
As it turns out, LLMs are pretty good at figuring out 'structure' like latitude and longitude from 'unstructured' things like a location name. So we can write a tool function that takes a latitude and longitude and returns the current temperature at that location. Here's an example of how you might write such a function using the [Open-Meteo API](https://open-meteo.com/):
3536

3637
```{python}
37-
def get_current_time(tz: str = "UTC") -> str:
38+
import requests
39+
40+
def get_current_temperature(latitude: float, longitude: float):
3841
"""
39-
Gets the current time in the given time zone.
42+
Get the current weather given a latitude and longitude.
4043
4144
Parameters
4245
----------
43-
tz
44-
The time zone to get the current time in. Defaults to "UTC".
45-
46-
Returns
47-
-------
48-
str
49-
The current time in the given time zone.
46+
latitude
47+
The latitude of the location.
48+
longitude
49+
The longitude of the location.
5050
"""
51-
from datetime import datetime
52-
from zoneinfo import ZoneInfo
53-
54-
return datetime.now(ZoneInfo(tz)).strftime("%Y-%m-%d %H:%M:%S %Z")
51+
lat_lng = f"latitude={latitude}&longitude={longitude}"
52+
url = f"https://api.open-meteo.com/v1/forecast?{lat_lng}&current=temperature_2m,wind_speed_10m&hourly=temperature_2m,relative_humidity_2m,wind_speed_10m"
53+
response = requests.get(url)
54+
json = response.json()
55+
return json["current"]
5556
```
5657

5758
Note that we've gone through the trouble of adding the following to our function:
5859

59-
- Type hints for arguments and the return value
60-
- A docstring that explains what the function does and what arguments it expects
60+
- Type hints for function arguments
61+
- A docstring that explains what the function does and what arguments it expects (as well as descriptions for the arguments themselves)
6162

62-
**Providing these hints and context is very important**, as it helps the chat model understand how to use your tool correctly!
63+
**Providing these hints and documentation is very important**, as it helps the chat model understand how to use your tool correctly!
6364

6465
Let's test it:
6566

6667
```{python}
67-
get_current_time()
68+
get_current_temperature(46.7867, -92.1005)
6869
```
6970

7071

7172
### Using the tool
7273

73-
In order for the LLM to make use of our tool, we need to register it with the chat object. This is done by calling the `register_tool` method on the chat object.
74+
In order for the LLM to make use of our tool, we need to register it with the `chat` object. This is done by calling the `register_tool` method on the chat object.
7475

7576
```{python}
7677
chat.register_tool(get_current_temperature)
@@ -79,12 +80,30 @@ chat.register_tool(get_current_time)
7980
Now let's retry our original question:
8081

8182
```{python}
82-
_ = chat.chat("How long ago exactly was the moment Neil Armstrong touched down on the moon?")
83+
_ = chat.chat("What's the weather like today in Duluth, MN?")
8384
```
8485

8586
That's correct! Without any further guidance, the chat model decided to call our tool function and successfully used its result in formulating its response.
8687

87-
This tool example was extremely simple, but you can imagine doing much more interesting things from tool functions: calling APIs, reading from or writing to a database, kicking off a complex simulation, or even calling a complementary GenAI model (like an image generator). Or if you are using chatlas in a Shiny app, you could use tools to set reactive values, setting off a chain of reactive updates.
88+
This tool example was extremely simple, but you can imagine doing much more interesting things from tool functions: calling APIs, reading from or writing to a database, kicking off a complex simulation, or even calling a complementary GenAI model (like an image generator). Or if you are using chatlas in a Shiny app, you could use tools to set reactive values, setting off a chain of reactive updates. This is precisely what the [sidebot dashboard](https://github.com/jcheng5/py-sidebot) does to allow for an AI assisted "drill-down" into the data.
89+
90+
### Troubleshooting
91+
92+
When the execution of a tool function fails, chatlas sends the exception message back to the chat model. This can be useful for gracefully handling errors in the chat model. However, this can also lead to confusion as to why a response did not come back as expected. If you encounter such a situation, you can set `echo="all"` in the `chat.chat()` method to see the full conversation, including tool calls and their results.
93+
94+
```{python}
95+
def get_current_temperature(latitude: float, longitude: float):
96+
"Get the current weather given a latitude and longitude."
97+
raise ValueError("Failed to get current temperature")
98+
99+
chat.tools = [get_current_temperature]
100+
101+
_ = chat.chat("What's the weather like today in Duluth, MN?")
102+
```
103+
104+
105+
106+
88107

89108
### Tool limitations
90109

0 commit comments

Comments
 (0)