Skip to content

Commit 13e4f86

Browse files
authored
Merge pull request #13 from nianba/qipeng/agno
[ISSUE#9] Add support for agno, version 0.1b0.
2 parents cb8bce9 + 6f8f10c commit 13e4f86

File tree

16 files changed

+1069
-0
lines changed

16 files changed

+1069
-0
lines changed
Lines changed: 79 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,79 @@
1+
# OpenTelemetry Agno Instrumentation
2+
3+
The Agno instrumentation provides observability for Agno applications. This document gives usage examples and shows the resulting traces. For details on installing and using LoongSuite and Jaeger, please refer to the [LoongSuite Documentation](https://github.com/alibaba/loongsuite-python-agent/blob/main/README.md).
4+
5+
*Pull Request: [Add support for agno](https://github.com/alibaba/loongsuite-python-agent/pull/13).*
6+
## Installation
7+
8+
```shell
9+
git clone https://github.com/alibaba/loongsuite-python-agent.git
10+
pip install ./instrumentation-genai/opentelemetry-instrumentation-agno
11+
```
12+
13+
## RUN
14+
15+
### Build the Example
16+
17+
Follow the official [Agno Documentation](https://docs.agno.com/introduction) to create a sample file named `demo.py`
18+
```python
19+
import os
20+
os.environ["DEEPSEEK_API_KEY"] = "YOUR-API-KEY"
21+
from agno.agent import Agent
22+
from agno.models.deepseek import DeepSeek
23+
from agno.tools.reasoning import ReasoningTools
24+
from agno.tools.yfinance import YFinanceTools
25+
agent = Agent(
26+
model=DeepSeek(id="deepseek-reasoner"),
27+
tools=[
28+
ReasoningTools(add_instructions=True),
29+
],
30+
instructions=[
31+
"Use tables to display data",
32+
"Only output the report, no other text",
33+
],
34+
markdown=True,
35+
)
36+
agent.print_response(
37+
"Write a report on NVDA",
38+
stream=False,
39+
)
40+
```
41+
42+
### Collect Data
43+
44+
Run the `demo.py` script using OpenTelemetry
45+
46+
```shell
47+
export OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true
48+
49+
opentelemetry-instrument \
50+
--exporter_otlp_protocol grpc \
51+
--traces_exporter otlp \
52+
--exporter_otlp_insecure true \
53+
--exporter_otlp_endpoint YOUR-END-POINT \
54+
--service_name demo \
55+
56+
python demo.py
57+
```
58+
59+
## RESULT
60+
61+
Access the Jaeger UI to view the collected trace data. The trace information should contain:
62+
63+
### 1. Prompt
64+
65+
![prompt](_assets/img/agno_demo_prompt.png)
66+
67+
### 2. Reasoning & Response
68+
69+
![reasoning](_assets/img/agno_demo_reasoning.png)
70+
71+
![response](_assets/img/agno_demo_response.png)
72+
73+
### 3. ToolCalls
74+
75+
![toolcall](_assets/img/agno_demo_toolcall.png)
76+
77+
### 4. Other
78+
79+
We also collect other information of interest to users, including historical messages, token consumption, model types, etc.
1.23 MB
Loading
1.05 MB
Loading
1.09 MB
Loading
752 KB
Loading
Lines changed: 56 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,56 @@
1+
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[project]
name = "opentelemetry-instrumentation-agno"
dynamic = ["version"]
description = "OpenTelemetry Agno Instrumentation"
readme = "README.md"
license = "Apache-2.0"
requires-python = ">=3.8, <3.13"
authors = [
  { name = "LoongSuite Python Agent Authors", email = "[email protected]" },
]
# NOTE: the classifier list previously advertised Python 3.13, which
# contradicted `requires-python = ">=3.8, <3.13"`; the 3.13 classifier has
# been dropped so the two stay consistent.
classifiers = [
  "Development Status :: 5 - Production/Stable",
  "Intended Audience :: Developers",
  "License :: OSI Approved :: Apache Software License",
  "Programming Language :: Python",
  "Programming Language :: Python :: 3",
  "Programming Language :: Python :: 3.8",
  "Programming Language :: Python :: 3.9",
  "Programming Language :: Python :: 3.10",
  "Programming Language :: Python :: 3.11",
  "Programming Language :: Python :: 3.12",
]
# The instrumentation code imports opentelemetry.instrumentation,
# opentelemetry.trace, and opentelemetry.semconv, so those packages are
# runtime dependencies alongside wrapt.
dependencies = [
  "opentelemetry-api",
  "opentelemetry-instrumentation",
  "opentelemetry-semantic-conventions",
  "wrapt",
]

[project.optional-dependencies]
instruments = [
  "agno",
]
test = [
  "agno",
  "pytest",
  "opentelemetry-sdk",
]
type-check = []


[tool.hatch.version]
path = "src/opentelemetry/instrumentation/agno/version.py"

[tool.hatch.build.targets.sdist]
include = [
  "src",
]

[tool.hatch.build.targets.wheel]
packages = ["src/opentelemetry"]

[project.entry-points.opentelemetry_instrumentor]
agno = "opentelemetry.instrumentation.agno:AgnoInstrumentor"
Lines changed: 114 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,114 @@
1+
from typing import Any, Collection
2+
from wrapt import wrap_function_wrapper
3+
from opentelemetry.instrumentation.agno.package import _instruments
4+
from opentelemetry.instrumentation.agno._wrapper import (
5+
AgnoAgentWrapper,
6+
AgnoFunctionCallWrapper,
7+
AgnoModelWrapper,
8+
)
9+
from opentelemetry import trace as trace_api
10+
from opentelemetry.instrumentation.utils import unwrap
11+
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
12+
from opentelemetry.instrumentation.version import (
13+
__version__,
14+
)
15+
16+
"""OpenTelemetry exporters for Agno https://github.com/agno-agi/agno"""
17+
18+
_AGENT = "agno.agent"
19+
_MODULE = "agno.models.base"
20+
_TOOLKIT = "agno.tools.function"
21+
__all__ = ["AgnoInstrumentor"]
22+
23+
class AgnoInstrumentor(BaseInstrumentor):  # type: ignore
    """An instrumentor for agno.

    Installs wrappers around agno's agent run entry points, tool
    (function-call) execution, and model invocation methods so that each
    produces OpenTelemetry spans.
    """

    def instrumentation_dependencies(self) -> Collection[str]:
        """Return the package specifiers this instrumentation supports."""
        return _instruments

    def _instrument(self, **kwargs: Any) -> None:
        """Wrap agent, tool, and model entry points with tracing wrappers.

        Honors an optional ``tracer_provider`` keyword argument; falls back
        to the globally configured tracer provider otherwise.
        """
        if not (tracer_provider := kwargs.get("tracer_provider")):
            tracer_provider = trace_api.get_tracer_provider()
        tracer = trace_api.get_tracer(__name__, __version__, tracer_provider)

        agent_wrapper = AgnoAgentWrapper(tracer)
        function_call_wrapper = AgnoFunctionCallWrapper(tracer)
        model_wrapper = AgnoModelWrapper(tracer)

        # Wrap the agent run entry points (sync/async, streaming and not).
        wrap_function_wrapper(
            module=_AGENT,
            name="Agent._run",
            wrapper=agent_wrapper.run,
        )
        wrap_function_wrapper(
            module=_AGENT,
            name="Agent._arun",
            wrapper=agent_wrapper.arun,
        )
        wrap_function_wrapper(
            module=_AGENT,
            name="Agent._run_stream",
            wrapper=agent_wrapper.run_stream,
        )
        wrap_function_wrapper(
            module=_AGENT,
            name="Agent._arun_stream",
            wrapper=agent_wrapper.arun_stream,
        )

        # Wrap tool (function-call) execution, sync and async.
        wrap_function_wrapper(
            module=_TOOLKIT,
            name="FunctionCall.execute",
            wrapper=function_call_wrapper.execute,
        )
        wrap_function_wrapper(
            module=_TOOLKIT,
            name="FunctionCall.aexecute",
            wrapper=function_call_wrapper.aexecute,
        )

        # Wrap the model invocation methods (sync/async, streaming and not).
        wrap_function_wrapper(
            module=_MODULE,
            name="Model.response",
            wrapper=model_wrapper.response,
        )
        wrap_function_wrapper(
            module=_MODULE,
            name="Model.aresponse",
            wrapper=model_wrapper.aresponse,
        )
        wrap_function_wrapper(
            module=_MODULE,
            name="Model.response_stream",
            wrapper=model_wrapper.response_stream,
        )
        wrap_function_wrapper(
            module=_MODULE,
            name="Model.aresponse_stream",
            wrapper=model_wrapper.aresponse_stream,
        )

    def _uninstrument(self, **kwargs: Any) -> None:
        """Remove every wrapper installed by :meth:`_instrument`."""
        # agno modules are imported lazily so this package can be loaded
        # without agno installed.

        # Unwrap the agent run entry points
        import agno.agent

        unwrap(agno.agent.Agent, "_run")
        unwrap(agno.agent.Agent, "_arun")
        unwrap(agno.agent.Agent, "_run_stream")
        unwrap(agno.agent.Agent, "_arun_stream")

        # Unwrap the function call
        import agno.tools.function

        unwrap(agno.tools.function.FunctionCall, "execute")
        unwrap(agno.tools.function.FunctionCall, "aexecute")

        # Unwrap the model
        import agno.models.base

        unwrap(agno.models.base.Model, "response")
        unwrap(agno.models.base.Model, "aresponse")
        unwrap(agno.models.base.Model, "response_stream")
        unwrap(agno.models.base.Model, "aresponse_stream")
Lines changed: 139 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,139 @@
1+
import json
2+
from typing import (
3+
Any,
4+
Iterable,
5+
Tuple,
6+
Dict,
7+
List,
8+
)
9+
from opentelemetry.util.types import AttributeValue
10+
from opentelemetry.semconv._incubating.attributes import (
11+
gen_ai_attributes as GenAIAttributes,
12+
)
13+
14+
class AgentRunRequestExtractor(object):
    """Extracts span attributes from an agent-run request.

    Reads identifying fields off the agent (name, session id, knowledge
    class, tool names) and selected entries from the run keyword arguments
    (run response id, prompt messages, response format).
    """

    def extract(self, agent: Any, arguments: Dict[Any, Any]) -> Iterable[Tuple[str, AttributeValue]]:
        if agent.name:
            yield GenAIAttributes.GEN_AI_AGENT_NAME, f"{agent.name}"

        if agent.session_id:
            # NOTE(review): the session id is recorded under the agent-id
            # attribute key — confirm this mapping is intended.
            yield GenAIAttributes.GEN_AI_AGENT_ID, f"{agent.session_id}"

        if agent.knowledge:
            yield f"{GenAIAttributes.GEN_AI_AGENT_NAME}.knowledge", f"{agent.knowledge.__class__.__name__}"

        if agent.tools:
            # Imported lazily so this module can load without agno installed.
            from agno.tools.toolkit import Toolkit
            from agno.tools.function import Function

            tool_names = []
            for tool in agent.tools:
                if isinstance(tool, Function):
                    tool_names.append(tool.name)
                elif isinstance(tool, Toolkit):
                    # A toolkit bundles several functions; record each name.
                    tool_names.extend(tool.functions.keys())
                elif callable(tool):
                    tool_names.append(tool.__name__)
                else:
                    tool_names.append(str(tool))
            yield GenAIAttributes.GEN_AI_TOOL_NAME, ", ".join(tool_names)

        for key, value in arguments.items():
            if key == "run_response":
                yield GenAIAttributes.GEN_AI_RESPONSE_ID, f"{value.run_id}"
            elif key == "run_messages":
                for idx, message in enumerate(value.messages):
                    yield f"{GenAIAttributes.GEN_AI_PROMPT}.{idx}.message", f"{json.dumps(message.to_dict(), indent=2)}"
            elif key == "response_format":
                yield GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT, f"{value}"
class AgentRunResponseExtractor(object):
    """Extracts a span attribute from a completed agent run response.

    The full response JSON is recorded under the finish-reasons key.
    NOTE(review): this reuses GEN_AI_RESPONSE_FINISH_REASONS for the whole
    payload — confirm this key choice is intended.
    """

    def extract(self, response: Any) -> Iterable[Tuple[str, AttributeValue]]:
        serialized = response.to_json()
        yield GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS, f"{serialized}"
class FunctionCallRequestExtractor(object):
    """Extracts span attributes describing a pending tool/function call."""

    def extract(self, function_call: Any) -> Iterable[Tuple[str, AttributeValue]]:
        function = function_call.function

        if function.name:
            yield GenAIAttributes.GEN_AI_TOOL_NAME, f"{function.name}"

        if function.description:
            yield GenAIAttributes.GEN_AI_TOOL_DESCRIPTION, f"{function.description}"

        if function_call.call_id:
            yield GenAIAttributes.GEN_AI_TOOL_CALL_ID, f"{function_call.call_id}"

        if function_call.arguments:
            serialized_args = json.dumps(function_call.arguments, indent=2)
            yield f"{GenAIAttributes.GEN_AI_TOOL_TYPE}.arguments", f"{serialized_args}"
class FunctionCallResponseExtractor(object):
    """Extracts the tool/function call result as a span attribute."""

    def extract(self, response: Any) -> Iterable[Tuple[str, AttributeValue]]:
        result_text = f"{response.result}"
        yield f"{GenAIAttributes.GEN_AI_TOOL_TYPE}.response", result_text
class ModelRequestExtractor(object):
    """Extracts span attributes from a model invocation request.

    Records the model's request parameters (when present) plus the
    response format, prompt messages, and tool definitions found in the
    call's keyword arguments.
    """

    def extract(self, model: Any, arguments: Dict[Any, Any]) -> Iterable[Tuple[str, AttributeValue]]:
        # Each check overwrites the previous result, so later sources win:
        # get_request_params > get_request_kwargs > request_params > request_kwargs.
        # Different agno model classes expose different subsets of these.
        request_kwargs = {}
        if getattr(model, "request_kwargs", None):
            request_kwargs = model.request_kwargs
        if getattr(model, "request_params", None):
            request_kwargs = model.request_params
        if getattr(model, "get_request_kwargs", None):
            request_kwargs = model.get_request_kwargs()
        if getattr(model, "get_request_params", None):
            request_kwargs = model.get_request_params()

        if request_kwargs:
            # NOTE(review): the full request-kwargs JSON is stored under the
            # request-model attribute key — confirm this mapping is intended.
            yield GenAIAttributes.GEN_AI_REQUEST_MODEL, f"{json.dumps(request_kwargs, indent=2)}"

        for key, value in arguments.items():
            if key == "response_format":
                yield GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT, f"{value}"
            elif key == "messages":
                for idx, message in enumerate(value):
                    yield f"{GenAIAttributes.GEN_AI_PROMPT}.{idx}.message", f"{json.dumps(message.to_dict(), indent=2)}"
            elif key == "tools":
                for idx, tool in enumerate(value):
                    yield f"{GenAIAttributes.GEN_AI_TOOL_DESCRIPTION}.{idx}", f"{json.dumps(tool, indent=2)}"
class ModelResponseExtractor(object):
    """Extracts span attributes from a list of model responses.

    Text content from all responses is concatenated and emitted once at the
    end; every other field is emitted per response as it is encountered.

    NOTE(review): every key here is derived from
    GEN_AI_RESPONSE_FINISH_REASONS, which does not match that attribute's
    semantic-convention meaning — confirm this key scheme is intended.
    """

    def extract(self, responses: List[Any]) -> Iterable[Tuple[str, AttributeValue]]:
        content = ""
        for response in responses:
            # basic response fields
            if getattr(response, "role", None):
                yield f"{GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS}.role", response.role
            if getattr(response, "content", None):
                content += response.content
            if getattr(response, "audio", None):
                yield f"{GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS}.audio", json.dumps(response.audio.to_dict(), indent=2)
            if getattr(response, "image", None):
                yield f"{GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS}.image", json.dumps(response.image.to_dict(), indent=2)
            # renamed loop variable: "exec" shadowed the builtin of the same name
            for idx, tool_execution in enumerate(getattr(response, "tool_executions", []) or []):
                yield f"{GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS}.tool_executions.{idx}", json.dumps(tool_execution.to_dict(), indent=2)
            # other metadata
            if getattr(response, "event", None):
                yield f"{GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS}.event", response.event
            if getattr(response, "provider_data", None):
                yield f"{GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS}.provider_data", json.dumps(response.provider_data, indent=2)
            if getattr(response, "thinking", None):
                yield f"{GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS}.thinking", response.thinking
            if getattr(response, "redacted_thinking", None):
                yield f"{GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS}.redacted_thinking", response.redacted_thinking
            if getattr(response, "reasoning_content", None):
                yield f"{GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS}.reasoning_content", response.reasoning_content
            if getattr(response, "extra", None):
                yield f"{GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS}.extra", json.dumps(response.extra, indent=2)
        if content:
            yield f"{GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS}.content", f"{content}"

0 commit comments

Comments
 (0)