Skip to content

Commit 4330b17

Browse files
author
Nagkumar Arkalgud
committed
openai-agents: add is_recording() guards; update dependency to openai-agent>=0.1.0; README note; align tests
1 parent d40e568 commit 4330b17

30 files changed

+8427
-576
lines changed

instrumentation-genai/README.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
| --------------- | ------------------ | --------------- | -------------- |
44
| [opentelemetry-instrumentation-google-genai](./opentelemetry-instrumentation-google-genai) | google-genai >= 1.0.0 | No | development
55
| [opentelemetry-instrumentation-langchain](./opentelemetry-instrumentation-langchain) | langchain >= 0.3.21 | No | development
6+
| [opentelemetry-instrumentation-openai-agents](./opentelemetry-instrumentation-openai-agents) | openai-agent >= 0.1.0 | Yes | development
67
| [opentelemetry-instrumentation-openai-v2](./opentelemetry-instrumentation-openai-v2) | openai >= 1.26.0 | Yes | development
78
| [opentelemetry-instrumentation-vertexai](./opentelemetry-instrumentation-vertexai) | google-cloud-aiplatform >= 1.64 | No | development
89
| [opentelemetry-instrumentation-weaviate](./opentelemetry-instrumentation-weaviate) | weaviate-client >= 3.0.0,<5.0.0 | No | development
Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
1+
examples/.env
2+
examples/openai_agents_multi_agent_travel/.env

instrumentation-genai/opentelemetry-instrumentation-openai-agents/=

Whitespace-only changes.

instrumentation-genai/opentelemetry-instrumentation-openai-agents/README.rst

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,14 @@ Installation
1717

1818
pip install opentelemetry-instrumentation-openai-agents
1919

20+
Dependency note
21+
---------------
22+
23+
This instrumentation integrates with the OpenAI Agents framework via the
24+
`openai-agent <https://pypi.org/project/openai-agent/>`_ package. Ensure
25+
``openai-agent>=0.1.0`` is installed in environments where agent events are
26+
emitted; otherwise, the instrumentor will load but skip processor setup.
27+
2028
Usage
2129
-----
2230

Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,46 @@
1+
# Semantic-convention event definitions for GenAI telemetry.
# Each group declares one event type, its stability, and the attributes it carries.
groups:
  # Opt-in event carrying the full request/response details of one inference call.
  - id: event.gen_ai.client.inference.operation.details
    name: gen_ai.client.inference.operation.details
    type: event
    stability: development
    brief: >
      Describes the details of a GenAI completion request including chat history
      and parameters.
    note: >
      This event is opt-in and could be used to store input and output details
      independently from traces.
    extends: attributes.gen_ai.inference.client

  # Event recording the outcome of evaluating a GenAI completion.
  - id: event.gen_ai.evaluation.result
    name: gen_ai.evaluation.result
    type: event
    stability: development
    brief: >
      This event captures the result of evaluating GenAI output for quality, accuracy, or other characteristics.
      This event SHOULD be parented to GenAI operation span being evaluated when possible
      or set `gen_ai.response.id` when span id is not available.
    attributes:
      - ref: gen_ai.evaluation.name
        requirement_level: required
      - ref: gen_ai.evaluation.score.value
        requirement_level:
          conditionally_required: if applicable
      - ref: gen_ai.evaluation.score.label
        requirement_level:
          conditionally_required: if applicable
      - ref: gen_ai.evaluation.explanation
        requirement_level: recommended
      - ref: gen_ai.response.id
        requirement_level:
          recommended: when available
        note: |
          The unique identifier assigned to the specific
          completion being evaluated. This attribute helps correlate the evaluation
          event with the corresponding operation when span id is not available.
      - ref: error.type
        requirement_level:
          conditionally_required: "if the operation ended in an error"
        note: |
          The `error.type` SHOULD match the error code returned by the Generative AI Evaluation provider or the client library,
          the canonical name of exception that occurred, or another low-cardinality error identifier.
          Instrumentations SHOULD document the list of errors they report.

instrumentation-genai/opentelemetry-instrumentation-openai-agents/examples/basic_usage.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@
4747

4848
custom_client = AsyncAzureOpenAI(
4949
azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
50-
# azure_deployment=os.environ["MODEL_DEPLOYMENT_NAME"],
50+
azure_deployment="gpt-4.1-mini",
5151
api_version=os.environ["AZURE_OPENAI_API_VERSION"],
5252
api_key=os.environ["AZURE_OPENAI_API_KEY"],
5353
)
Lines changed: 247 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,247 @@
1+
"""OpenAI Agents + MCP HTTP example with full GenAI telemetry capture."""
2+
3+
from __future__ import annotations
4+
5+
import asyncio
6+
import logging
7+
import os
8+
from dataclasses import dataclass
9+
from typing import Callable
10+
from urllib.parse import urlparse
11+
12+
import azure.identity
13+
import openai
14+
from agents import Agent, OpenAIChatCompletionsModel, Runner, set_tracing_disabled
15+
from agents.mcp.server import MCPServerStreamableHttp
16+
from agents.model_settings import ModelSettings
17+
from dotenv import load_dotenv
18+
19+
from opentelemetry import trace
20+
from opentelemetry.instrumentation.openai_agents import OpenAIAgentsInstrumentor
21+
from opentelemetry.sdk.resources import Resource
22+
from opentelemetry.sdk.trace import TracerProvider
23+
from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter
24+
25+
# Load variables from .env, letting the file override values already present
# in the process environment (override=True).
load_dotenv(override=True)

# Keep third-party loggers quiet; only warnings and above reach the console.
logging.basicConfig(level=logging.WARNING)
28+
29+
30+
@dataclass
class _ApiConfig:
    """Helper describing how to create the OpenAI client.

    Bundles everything the script needs to talk to one OpenAI-compatible
    backend (GitHub Models, Azure OpenAI, or a local Ollama server).
    """

    # Zero-argument factory that returns a configured async OpenAI client.
    build_client: Callable[[], openai.AsyncOpenAI]
    # Model name (or Azure deployment name) passed to the agent's model.
    model_name: str
    # Endpoint base URL; also used to derive OTEL_GENAI_SERVER_* env vars.
    base_url: str
    # Provider identifier (e.g. "azure.ai.inference"); used to tag the root span.
    provider: str
38+
39+
40+
def _set_capture_env(provider: str, base_url: str) -> None:
    """Enable all GenAI capture toggles before instrumentation hooks.

    Every variable is applied via ``os.environ.setdefault`` so values already
    present in the environment always take precedence.  NOTE(review): the
    ``provider`` argument is currently unused; it is kept so the call-site
    signature stays stable.
    """
    # Boolean switches that turn on full content/metric capture.
    toggles = (
        "OTEL_INSTRUMENTATION_OPENAI_AGENTS_CAPTURE_CONTENT",
        "OTEL_INSTRUMENTATION_OPENAI_AGENTS_CAPTURE_METRICS",
        "OTEL_GENAI_CAPTURE_MESSAGES",
        "OTEL_GENAI_CAPTURE_SYSTEM_INSTRUCTIONS",
        "OTEL_GENAI_CAPTURE_TOOL_DEFINITIONS",
        "OTEL_GENAI_EMIT_OPERATION_DETAILS",
    )
    for toggle in toggles:
        os.environ.setdefault(toggle, "true")

    # Agent identity attributes, with example-specific fallbacks.
    identity_defaults = {
        "OTEL_GENAI_AGENT_NAME": "MCP Hotel Assistant",
        "OTEL_GENAI_AGENT_DESCRIPTION": (
            "Agent orchestrator that uses MCP to find hotels matching user criteria"
        ),
        "OTEL_GENAI_AGENT_ID": "mcp-hotel-assistant",
    }
    for key, fallback in identity_defaults.items():
        os.environ.setdefault(key, os.getenv(key, fallback))

    # Derive server-address telemetry hints from the endpoint URL.
    endpoint = urlparse(base_url)
    if endpoint.hostname:
        os.environ.setdefault("OTEL_GENAI_SERVER_ADDRESS", endpoint.hostname)
    if endpoint.port:
        os.environ.setdefault("OTEL_GENAI_SERVER_PORT", str(endpoint.port))
67+
68+
69+
def _github_config() -> _ApiConfig:
    """Build config for GitHub Models (OpenAI-compatible, GITHUB_TOKEN auth)."""
    base_url = os.getenv(
        "GITHUB_OPENAI_BASE_URL",
        "https://models.inference.ai.azure.com",
    ).rstrip("/")
    model_name = os.getenv("GITHUB_MODEL", "gpt-4o")
    api_key = os.environ["GITHUB_TOKEN"]  # required; KeyError if missing

    def _build_client() -> openai.AsyncOpenAI:
        return openai.AsyncOpenAI(base_url=base_url, api_key=api_key)

    return _ApiConfig(
        build_client=_build_client,
        model_name=model_name,
        base_url=base_url,
        provider="azure.ai.inference",
    )


def _azure_config() -> _ApiConfig:
    """Build config for Azure OpenAI using Entra ID (DefaultAzureCredential)."""
    endpoint = os.environ["AZURE_OPENAI_ENDPOINT"].rstrip("/")
    api_version = os.environ["AZURE_OPENAI_VERSION"]
    deployment = os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"]

    # Token provider fetches/refreshes bearer tokens for the Cognitive
    # Services scope on each request.
    credential = azure.identity.DefaultAzureCredential()
    token_provider = azure.identity.get_bearer_token_provider(
        credential,
        "https://cognitiveservices.azure.com/.default",
    )

    def _build_client() -> openai.AsyncAzureOpenAI:
        return openai.AsyncAzureOpenAI(
            api_version=api_version,
            azure_endpoint=endpoint,
            azure_ad_token_provider=token_provider,
        )

    return _ApiConfig(
        build_client=_build_client,
        model_name=deployment,
        base_url=endpoint,
        provider="azure.ai.openai",
    )


def _ollama_config() -> _ApiConfig:
    """Build config for a local Ollama server exposing the OpenAI API."""
    base_url = os.getenv("OLLAMA_BASE_URL", "http://localhost:11434/v1").rstrip("/")
    model_name = os.getenv("OLLAMA_MODEL", "llama3.1:latest")
    api_key = os.getenv("OLLAMA_API_KEY", "none")  # Ollama ignores the key

    def _build_client() -> openai.AsyncOpenAI:
        return openai.AsyncOpenAI(base_url=base_url, api_key=api_key)

    return _ApiConfig(
        build_client=_build_client,
        model_name=model_name,
        base_url=base_url,
        provider="self.hosted",
    )


def _resolve_api_config() -> _ApiConfig:
    """Return model/client configuration for the selected API host.

    Reads ``API_HOST`` (default ``"github"``, case-insensitive) and dispatches
    to the matching builder.

    Raises:
        ValueError: if ``API_HOST`` names an unsupported backend.
        KeyError: (from the builders) if a required env var is missing.
    """
    host = os.getenv("API_HOST", "github").lower()
    builders: dict[str, Callable[[], _ApiConfig]] = {
        "github": _github_config,
        "azure": _azure_config,
        "ollama": _ollama_config,
    }
    # Look up first, then call: a KeyError raised *inside* a builder (missing
    # env var) must not be mistaken for an unsupported host.
    builder = builders.get(host)
    if builder is None:
        raise ValueError(f"Unsupported API_HOST '{host}'")
    return builder()
133+
134+
135+
def _configure_otel() -> None:
    """Configure the tracer provider and exporters.

    Exports to Azure Monitor when APPLICATION_INSIGHTS_CONNECTION_STRING is
    set (and the optional exporter package is installed); otherwise falls
    back to printing spans on the console.
    """
    connection_string = os.getenv("APPLICATION_INSIGHTS_CONNECTION_STRING")
    service_info = {
        "service.name": "mcp-hotel-finder-service",
        "service.namespace": "mcp-orchestration",
        "service.version": os.getenv("SERVICE_VERSION", "1.0.0"),
    }
    provider = TracerProvider(resource=Resource.create(service_info))

    if not connection_string:
        # No connection string: console exporter plus a hint on how to enable
        # Application Insights export.
        provider.add_span_processor(BatchSpanProcessor(ConsoleSpanExporter()))
        print("[otel] Console span exporter configured")
        print(
            "[otel] Set APPLICATION_INSIGHTS_CONNECTION_STRING to export to "
            "Application Insights instead of the console",
        )
    else:
        try:
            from azure.monitor.opentelemetry.exporter import (  # type: ignore import-not-found
                AzureMonitorTraceExporter,
            )
        except ImportError:  # pragma: no cover - optional dependency
            # Exporter package missing: warn and degrade to the console.
            print(
                "Warning: Azure Monitor exporter not installed. "
                "Install with: pip install azure-monitor-opentelemetry-exporter",
            )
            provider.add_span_processor(
                BatchSpanProcessor(ConsoleSpanExporter())
            )
        else:
            provider.add_span_processor(
                BatchSpanProcessor(
                    AzureMonitorTraceExporter.from_connection_string(
                        connection_string
                    )
                )
            )
            print("[otel] Azure Monitor trace exporter configured")

    trace.set_tracer_provider(provider)
178+
179+
180+
# Ordering matters here: capture env vars must be set and the tracer provider
# installed *before* the instrumentor hooks in.
api_config = _resolve_api_config()
_set_capture_env(api_config.provider, api_config.base_url)
_configure_otel()

# Hook the OpenAI Agents framework into the provider installed above.
OpenAIAgentsInstrumentor().instrument(
    tracer_provider=trace.get_tracer_provider()
)

# Async OpenAI client for whichever backend API_HOST selected.
client = api_config.build_client()

# Keep the Agents SDK's own tracing pipeline enabled alongside OpenTelemetry.
set_tracing_disabled(False)

# MCP server reached over streamable HTTP; URL overridable via MCP_SERVER_URL.
mcp_server = MCPServerStreamableHttp(
    name="weather",
    params={"url": os.getenv("MCP_SERVER_URL", "http://localhost:8000/mcp/")},
)

# Agent that must use the MCP tools (tool_choice="required") on every turn.
agent = Agent(
    name="Assistant",
    instructions="Use the tools to achieve the task",
    mcp_servers=[mcp_server],
    model=OpenAIChatCompletionsModel(model=api_config.model_name, openai_client=client),
    model_settings=ModelSettings(tool_choice="required"),
)
204+
205+
206+
def _root_span_name(provider: str) -> str:
    """Return the example's root-span name, tagged with the active provider."""
    return "".join(("mcp_hotel_search[", provider, "]"))
208+
209+
210+
async def main() -> None:
    """Connect to the MCP server and run one traced hotel-search request."""
    tracer = trace.get_tracer(__name__)
    await mcp_server.connect()
    try:
        with tracer.start_as_current_span(
            _root_span_name(api_config.provider)
        ) as root_span:
            message = (
                "Find me a hotel in San Francisco for 2 nights starting from 2024-01-01. "
                "I need a hotel with free WiFi and a pool."
            )

            # Annotate the root span with the request context up front.
            for attr_key, attr_value in (
                ("user.request", message),
                ("mcp.server", mcp_server.name),
                ("mcp.url", mcp_server.params.get("url")),
                ("api.host", os.getenv("API_HOST", "github")),
                ("model.name", api_config.model_name),
            ):
                root_span.set_attribute(attr_key, attr_value)

            try:
                run_result = await Runner.run(starting_agent=agent, input=message)
                print(run_result.final_output)

                final_output = run_result.final_output
                # Cap the recorded response at 500 chars to bound span size.
                root_span.set_attribute(
                    "agent.response",
                    final_output[:500] if final_output else "",
                )
                root_span.set_attribute("request.success", True)
            except Exception as exc:  # pragma: no cover - defensive logging
                root_span.record_exception(exc)
                root_span.set_attribute("request.success", False)
                raise
    finally:
        # Always tear the MCP connection down, even on failure.
        await mcp_server.cleanup()
241+
242+
243+
if __name__ == "__main__":
    try:
        asyncio.run(main())
    finally:
        # Flush any buffered spans before exit; the SDK TracerProvider
        # installed by _configure_otel() supplies shutdown().
        trace.get_tracer_provider().shutdown()

0 commit comments

Comments
 (0)