Commit b44c387

alexmojaki and Copilot authored
Add experimental logfire.instrument_litellm() (#1237)
Co-authored-by: Copilot <[email protected]>
1 parent 3c0560b commit b44c387
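
For context, a minimal usage sketch of the new experimental API added by this commit (the model name and prompt are illustrative and mirror the test cassette further below):

import litellm
import logfire

logfire.configure()
logfire.instrument_litellm()  # new in this commit: wraps OpenInference's LiteLLMInstrumentor

response = litellm.completion(
    model='gpt-4o-mini',
    messages=[{'role': 'user', 'content': "What's the weather like in San Francisco?"}],
)
print(response.choices[0].message.content)

Each completion call is then exported as a span tagged 'LLM', with request_data/response_data and gen_ai.* attributes filled in by the processor-wrapper transform added below.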

10 files changed, +853 -9 lines changed


logfire-api/logfire_api/__init__.py

Lines changed: 3 additions & 0 deletions
@@ -148,6 +148,8 @@ def instrument_openai_agents(self, *args, **kwargs) -> None: ...
 
     def instrument_google_genai(self, *args, **kwargs) -> None: ...
 
+    def instrument_litellm(self, *args, **kwargs) -> None: ...
+
     def instrument_aiohttp_client(self, *args, **kwargs) -> None: ...
 
     def instrument_aiohttp_server(self, *args, **kwargs) -> None: ...
@@ -185,6 +187,7 @@ def shutdown(self, *args, **kwargs) -> None: ...
 instrument_openai_agents = DEFAULT_LOGFIRE_INSTANCE.instrument_openai_agents
 instrument_anthropic = DEFAULT_LOGFIRE_INSTANCE.instrument_anthropic
 instrument_google_genai = DEFAULT_LOGFIRE_INSTANCE.instrument_google_genai
+instrument_litellm = DEFAULT_LOGFIRE_INSTANCE.instrument_litellm
 instrument_asyncpg = DEFAULT_LOGFIRE_INSTANCE.instrument_asyncpg
 instrument_celery = DEFAULT_LOGFIRE_INSTANCE.instrument_celery
 instrument_httpx = DEFAULT_LOGFIRE_INSTANCE.instrument_httpx

logfire/__init__.py

Lines changed: 2 additions & 0 deletions
@@ -35,6 +35,7 @@
 instrument_openai_agents = DEFAULT_LOGFIRE_INSTANCE.instrument_openai_agents
 instrument_anthropic = DEFAULT_LOGFIRE_INSTANCE.instrument_anthropic
 instrument_google_genai = DEFAULT_LOGFIRE_INSTANCE.instrument_google_genai
+instrument_litellm = DEFAULT_LOGFIRE_INSTANCE.instrument_litellm
 instrument_asyncpg = DEFAULT_LOGFIRE_INSTANCE.instrument_asyncpg
 instrument_httpx = DEFAULT_LOGFIRE_INSTANCE.instrument_httpx
 instrument_celery = DEFAULT_LOGFIRE_INSTANCE.instrument_celery
@@ -127,6 +128,7 @@ def loguru_handler() -> Any:
     'instrument_openai_agents',
     'instrument_anthropic',
     'instrument_google_genai',
+    'instrument_litellm',
     'instrument_asyncpg',
     'instrument_httpx',
     'instrument_celery',

logfire/_internal/exporters/processor_wrapper.py

Lines changed: 58 additions & 1 deletion
@@ -3,7 +3,7 @@
 import json
 from contextlib import suppress
 from dataclasses import dataclass
-from typing import Any
+from typing import Any, cast
 from urllib.parse import parse_qs, urlparse
 
 from opentelemetry import context
@@ -18,6 +18,7 @@
     ATTRIBUTES_LOG_LEVEL_NUM_KEY,
     ATTRIBUTES_MESSAGE_KEY,
     ATTRIBUTES_MESSAGE_TEMPLATE_KEY,
+    ATTRIBUTES_TAGS_KEY,
     LEVEL_NUMBERS,
     log_level_attributes,
 )
@@ -83,6 +84,7 @@ def on_end(self, span: ReadableSpan) -> None:
         _transform_langchain_span(span_dict)
         _transform_google_genai_span(span_dict)
         _default_gen_ai_response_model(span_dict)
+        _transform_litellm_span(span_dict)
         self.scrubber.scrub_span(span_dict)
         span = ReadableSpan(**span_dict)
         super().on_end(span)
@@ -443,3 +445,58 @@ def _transform_google_genai_span(span: ReadableSpanDict):
         ATTRIBUTES_JSON_SCHEMA_KEY: attributes_json_schema(JsonSchemaProperties({'events': {'type': 'array'}})),
     }
     span['events'] = new_events
+
+
+def _transform_litellm_span(span: ReadableSpanDict):
+    scope = span['instrumentation_scope']
+    if not (scope and scope.name == 'openinference.instrumentation.litellm'):
+        return
+
+    attributes = span['attributes']
+    try:
+        output = json.loads(cast(str, attributes['output.value']))
+        new_attrs = {
+            'request_data': attributes['input.value'],
+            'response_data': json.dumps({'message': output['choices'][0]['message']}),
+        }
+    except Exception:  # pragma: no cover
+        return
+
+    try:
+        new_attrs.update(
+            {
+                'gen_ai.request.model': attributes['llm.model_name'],
+                'gen_ai.response.model': output['model'],
+                'gen_ai.usage.input_tokens': output['usage']['prompt_tokens'],
+                'gen_ai.usage.output_tokens': output['usage']['completion_tokens'],
+                'gen_ai.system': guess_system(output['model']),
+            }
+        )
+    except Exception:  # pragma: no cover
+        pass
+
+    span['attributes'] = {
+        **attributes,
+        **new_attrs,
+        ATTRIBUTES_TAGS_KEY: ['LLM'],
+        ATTRIBUTES_JSON_SCHEMA_KEY: attributes_json_schema(
+            JsonSchemaProperties(
+                {
+                    'request_data': {'type': 'object'},
+                    'response_data': {'type': 'object'},
+                }
+            )
+        ),
+    }
+
+
+def guess_system(model: str):
+    model_lower = model.lower()
+    if 'openai' in model_lower or 'gpt-4' in model_lower or 'gpt-3.5' in model_lower:
+        return 'openai'
+    elif 'google' in model_lower or 'gemini' in model_lower:
+        return 'google'
+    elif 'anthropic' in model_lower or 'claude' in model_lower:
+        return 'anthropic'
+    else:
+        return 'litellm'
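
The new guess_system helper above is a plain substring match on the model name; a quick illustration (the model strings here are examples chosen for this note, not from the commit):

from logfire._internal.exporters.processor_wrapper import guess_system

assert guess_system('gpt-4o-mini') == 'openai'           # contains 'gpt-4'
assert guess_system('gemini-1.5-flash') == 'google'      # contains 'gemini'
assert guess_system('claude-3-5-sonnet') == 'anthropic'  # contains 'claude'
assert guess_system('ollama/llama3') == 'litellm'        # fallback when no known provider matches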
logfire/_internal/integrations/litellm.py

Lines changed: 12 additions & 0 deletions
@@ -0,0 +1,12 @@
+from typing import Any
+
+from openinference.instrumentation.litellm import LiteLLMInstrumentor
+
+import logfire
+
+
+def instrument_litellm(logfire_instance: logfire.Logfire, **kwargs: Any):
+    LiteLLMInstrumentor().instrument(
+        tracer_provider=logfire_instance.config.get_tracer_provider(),
+        **kwargs,
+    )
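
The helper is deliberately thin: it points OpenInference's LiteLLMInstrumentor at Logfire's tracer provider and forwards any extra keyword arguments to its instrument() call. A sketch of the hand-rolled equivalent, assuming openinference-instrumentation-litellm is installed:

from openinference.instrumentation.litellm import LiteLLMInstrumentor

import logfire

logfire.configure()
LiteLLMInstrumentor().instrument(
    tracer_provider=logfire.DEFAULT_LOGFIRE_INSTANCE.config.get_tracer_provider(),
)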

logfire/_internal/main.py

Lines changed: 6 additions & 0 deletions
@@ -1270,6 +1270,12 @@ def instrument_google_genai(self):
         self._warn_if_not_initialized_for_instrumentation()
         instrument_google_genai(self)
 
+    def instrument_litellm(self, **kwargs: Any):
+        from .integrations.litellm import instrument_litellm
+
+        self._warn_if_not_initialized_for_instrumentation()
+        instrument_litellm(self, **kwargs)
+
     def instrument_asyncpg(self, **kwargs: Any) -> None:
         """Instrument the `asyncpg` module so that spans are automatically created for each query."""
         from .integrations.asyncpg import instrument_asyncpg

pyproject.toml

Lines changed: 2 additions & 0 deletions
@@ -176,6 +176,8 @@ dev = [
     "langgraph >= 0",
     "opentelemetry-instrumentation-google-genai >= 0",
     "google-genai >= 0",
+    "openinference-instrumentation-litellm >= 0",
+    "litellm >= 0",
 ]
 docs = [
     "black>=23.12.0",
Lines changed: 205 additions & 0 deletions
@@ -0,0 +1,205 @@
+interactions:
+- request:
+    body: '{"messages":[{"role":"user","content":"What''s the weather like in San
+      Francisco?"}],"model":"gpt-4o-mini","tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get
+      the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The
+      city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}]}'
+    headers:
+      accept:
+      - application/json
+      accept-encoding:
+      - gzip, deflate, zstd
+      connection:
+      - keep-alive
+      content-length:
+      - '453'
+      content-type:
+      - application/json
+      host:
+      - api.openai.com
+      user-agent:
+      - OpenAI/Python 1.93.1
+      x-stainless-arch:
+      - arm64
+      x-stainless-async:
+      - 'false'
+      x-stainless-lang:
+      - python
+      x-stainless-os:
+      - MacOS
+      x-stainless-package-version:
+      - 1.93.1
+      x-stainless-raw-response:
+      - 'true'
+      x-stainless-read-timeout:
+      - '600.0'
+      x-stainless-retry-count:
+      - '0'
+      x-stainless-runtime:
+      - CPython
+      x-stainless-runtime-version:
+      - 3.12.6
+    method: POST
+    uri: https://api.openai.com/v1/chat/completions
+  response:
+    body:
+      string: !!binary |
+        H4sIAAAAAAAAA4xT227bMAx991cIfI6HOE1z8VtWtGiHFbsUSIEthcHItK1NljxJ7pYG+ffBSmI7
+        aQfMD4bAo3NIHlLbgDEQKcQMeIGOl5UM35sR8Zd6sfws9POX5cdPXF+L6a/1rVabbzBoGHr9g7g7
+        st5xXVaSnNBqD3ND6KhRjaaX0XwWjWYTD5Q6JdnQ8sqFYx2WQolwNByNw+E0jGYHdqEFJwsx+x4w
+        xtjW/5s6VUp/IGbDwTFSkrWYE8TtJcbAaNlEAK0V1qFyMOhArpUj1ZSuail7gNNaJhyl7BLvv23v
+        3JmFUiYPjzd3j0V2dTdZ0G2Ny8n1fXT/1X7o5dtLbypfUFYr3prUw9t4fJaMMVBYem5OLuG1MaRc
+        8pvQFWTOZBgDNHldknJNC7BdgdQcG+EVxCt4QMVuDCouLNcDdrVYwQ5OFHbBW+ennkmGstqifO0e
+        KqWdz+Xtezogu3ZSUueV0Wt7RoVMKGGLxBBab0B/DsGxEF8C1CejhsrosnKJ0z/JJ50d1gK6bezA
+        aHYAnXYou/j8GD9RS1JyKPwmtMvHkReUdsxuCbFOhe4BQa/z18W8pb3vXqj8f+Q7gHOqHKVJZSgV
+        /LTh7pqh5q3+61rrsS8YLJlnwSlxgkwzjZQyrOX+BYHdWEdlkgmVk6mM8M8Isiq5GOPlGGl+wSHY
+        BX8BAAD//wMAg8CtdlQEAAA=
+    headers:
+      CF-RAY:
+      - 95bfec41cac93ea6-CPT
+      Connection:
+      - keep-alive
+      Content-Encoding:
+      - gzip
+      Content-Type:
+      - application/json
+      Date:
+      - Tue, 08 Jul 2025 13:28:07 GMT
+      Server:
+      - cloudflare
+      Set-Cookie:
+      - __cf_bm=WDKJW8lhFRVuz5bUhutK7ucHJ9lQB5w4NW96OOf.t8I-1751981287-1.0.1.1-nO7HS1sYueZMrObI9vX6K9j0Y8rK2uZWjvjVKtgHbnC7bQLA4oZt7RuX0qJTTNh0FDmx9SexoHPr5noSsLec1U_O_miEeIgbCJCXaVlGgwo;
+        path=/; expires=Tue, 08-Jul-25 13:58:07 GMT; domain=.api.openai.com; HttpOnly;
+        Secure; SameSite=None
+      - _cfuvid=9BG.keq4TGsu2dHVmb003JkWJfCxHwGPhOcH5lazT2s-1751981287838-0.0.1.1-604800000;
+        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
+      Transfer-Encoding:
+      - chunked
+      X-Content-Type-Options:
+      - nosniff
+      access-control-expose-headers:
+      - X-Request-ID
+      alt-svc:
+      - h3=":443"; ma=86400
+      cf-cache-status:
+      - DYNAMIC
+      openai-organization:
+      - pydantic-28gund
+      openai-processing-ms:
+      - '765'
+      openai-version:
+      - '2020-10-01'
+      strict-transport-security:
+      - max-age=31536000; includeSubDomains; preload
+      x-envoy-upstream-service-time:
+      - '770'
+      x-ratelimit-limit-requests:
+      - '10000'
+      x-ratelimit-limit-tokens:
+      - '10000000'
+      x-ratelimit-remaining-requests:
+      - '9999'
+      x-ratelimit-remaining-tokens:
+      - '9999986'
+      x-ratelimit-reset-requests:
+      - 6ms
+      x-ratelimit-reset-tokens:
+      - 0s
+      x-request-id:
+      - req_770abb3e3421437d2e4cf1c118192dac
+    status:
+      code: 200
+      message: OK
+- request:
+    body: '{"messages":[{"role":"user","content":"What''s the weather like in San
+      Francisco?"},{"role":"assistant","tool_calls":[{"function":{"arguments":"{\"location\":\"San
+      Francisco, CA\"}","name":"get_current_weather"},"id":"call_SWFIWhfCI6AeHuaV6EM1MRsJ","type":"function"}],"annotations":[]},{"tool_call_id":"call_SWFIWhfCI6AeHuaV6EM1MRsJ","role":"tool","name":"get_current_weather","content":"{\"location\":
+      \"San Francisco\", \"temperature\": \"72\", \"unit\": \"fahrenheit\"}"}],"model":"gpt-4o-mini"}'
+    headers:
+      accept:
+      - application/json
+      accept-encoding:
+      - gzip, deflate, zstd
+      connection:
+      - keep-alive
+      content-length:
+      - '498'
+      content-type:
+      - application/json
+      host:
+      - api.openai.com
+      user-agent:
+      - OpenAI/Python 1.93.1
+      x-stainless-arch:
+      - arm64
+      x-stainless-async:
+      - 'false'
+      x-stainless-lang:
+      - python
+      x-stainless-os:
+      - MacOS
+      x-stainless-package-version:
+      - 1.93.1
+      x-stainless-raw-response:
+      - 'true'
+      x-stainless-read-timeout:
+      - '600.0'
+      x-stainless-retry-count:
+      - '0'
+      x-stainless-runtime:
+      - CPython
+      x-stainless-runtime-version:
+      - 3.12.6
+    method: POST
+    uri: https://api.openai.com/v1/chat/completions
+  response:
+    body:
+      string: !!binary |
+        H4sIAAAAAAAAAwAAAP//jFLLbtswELzrK7Y8W4Ft2/Hj2IOLHIIAbYEWKAKBplbWxhRJLFdOg8D/
+        1m/Inj05ASVbaw3Kpio5cWI4vK7R+3rUR1rVzt+1k8/X6I3/4RG+Pn8Obm/vt+rr7Y0p7NG2vynk+
+        luG3D2jkhXVlfB0sCnnXw4ZRC7aqk8V8slpOsuWyA2pfoG1puyDpzKc1OUqzcTZLx4t0sjyxK08G
+        o1rDjwQA4Ll72zldgT/VGsajl0qNMeodqvW5CUCxt21F6RgpinaiRgNovBN03ehfKwTTMKMTEKwD
+        spaGEcjBF+1gw9oZisYDRVhkv39truCmhCffgEMsoPaMEAMaKsnAI2qpkKFA0WQjeAYNpWc0OsoI
+        LArUCHvnHz9cjsNYNlG3lrjG2gtAO+dFt5Z2RtyfkON5det3gf02vqGqkhzFKmfU0bt2zSg+qA49
+        JgD3ncXNK9dUYF8HycXvsfvuOuvl1BDsAGbXJ1C8aDvU+4jfquUnQy4iUkabCouBOeSpm4L8BZBc
+        7Pz3MO9p93uT2/2P/AAYg0GwyANjQeb1wkMbY3v2/2o7e9wNrCLygQzmQshtDgWWurH9Mar4FAXr
+        vCS3Qw5M/UWWIZ/O9HymcTU1KjkmfwAAAP//AwAEgtxMnwMAAA==
+    headers:
+      CF-RAY:
+      - 95bfec492c863ea6-CPT
+      Connection:
+      - keep-alive
+      Content-Encoding:
+      - gzip
+      Content-Type:
+      - application/json
+      Date:
+      - Tue, 08 Jul 2025 13:28:08 GMT
+      Server:
+      - cloudflare
+      Transfer-Encoding:
+      - chunked
+      X-Content-Type-Options:
+      - nosniff
+      access-control-expose-headers:
+      - X-Request-ID
+      alt-svc:
+      - h3=":443"; ma=86400
+      cf-cache-status:
+      - DYNAMIC
+      openai-organization:
+      - pydantic-28gund
+      openai-processing-ms:
+      - '634'
+      openai-version:
+      - '2020-10-01'
+      strict-transport-security:
+      - max-age=31536000; includeSubDomains; preload
+      x-envoy-upstream-service-time:
+      - '664'
+      x-request-id:
+      - req_dccddbdd89567dc6a98575f6c472e82a
+    status:
+      code: 200
+      message: OK
+version: 1
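
The cassette above records a two-step tool-calling exchange. A hedged reconstruction of the client code that would produce these two requests (the tool schema, prompt, and tool result are taken from the recorded bodies; the surrounding code is illustrative, not the commit's test):

import json

import litellm

tools = [{
    'type': 'function',
    'function': {
        'name': 'get_current_weather',
        'description': 'Get the current weather in a given location',
        'parameters': {
            'type': 'object',
            'properties': {
                'location': {'type': 'string', 'description': 'The city and state, e.g. San Francisco, CA'},
                'unit': {'type': 'string', 'enum': ['celsius', 'fahrenheit']},
            },
            'required': ['location'],
        },
    },
}]

messages = [{'role': 'user', 'content': "What's the weather like in San Francisco?"}]

# First call: the model answers with a get_current_weather tool call.
first = litellm.completion(model='gpt-4o-mini', messages=messages, tools=tools)
tool_call = first.choices[0].message.tool_calls[0]

# Echo the assistant tool call and supply a canned tool result, then ask again.
messages.append({
    'role': 'assistant',
    'tool_calls': [{
        'id': tool_call.id,
        'type': 'function',
        'function': {'name': 'get_current_weather', 'arguments': tool_call.function.arguments},
    }],
})
messages.append({
    'tool_call_id': tool_call.id,
    'role': 'tool',
    'name': 'get_current_weather',
    'content': json.dumps({'location': 'San Francisco', 'temperature': '72', 'unit': 'fahrenheit'}),
})

# Second call: the model turns the tool result into a final answer.
second = litellm.completion(model='gpt-4o-mini', messages=messages)
print(second.choices[0].message.content)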
