forked from microsoft/agent-framework
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathadvanced_manual_setup_console_output.py
More file actions
133 lines (106 loc) · 5.23 KB
/
advanced_manual_setup_console_output.py
File metadata and controls
133 lines (106 loc) · 5.23 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
# Copyright (c) Microsoft. All rights reserved.
import asyncio
import logging
from random import randint
from typing import Annotated
from agent_framework import Message, tool
from agent_framework.observability import enable_instrumentation
from agent_framework.openai import OpenAIChatClient
from dotenv import load_dotenv
from opentelemetry._logs import set_logger_provider
from opentelemetry.metrics import set_meter_provider
from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
from opentelemetry.sdk._logs.export import BatchLogRecordProcessor, ConsoleLogExporter
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import ConsoleMetricExporter, PeriodicExportingMetricReader
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter
from opentelemetry.semconv._incubating.attributes.service_attributes import SERVICE_NAME
from opentelemetry.trace import set_tracer_provider
from pydantic import Field
# Load environment variables (e.g. OPENAI_API_KEY) from a .env file, if present.
load_dotenv()

"""
This sample shows how to manually configure to send traces, logs, and metrics to the console,
without using the `configure_otel_providers` helper function.
"""

# Shared OpenTelemetry resource; tags every span, log record, and metric
# emitted by this process with the service name "ManualSetup".
resource = Resource.create({SERVICE_NAME: "ManualSetup"})
def setup_logging():
    """Install a global OpenTelemetry logger provider that exports log records to the console."""
    provider = LoggerProvider(resource=resource)
    # The batch processor buffers records and hands them to the exporter,
    # which is responsible for sending the telemetry data to a particular
    # backend — here, the console.
    provider.add_log_record_processor(BatchLogRecordProcessor(ConsoleLogExporter()))
    # Register this provider as the application-wide default.
    set_logger_provider(provider)
    # Bridge the stdlib `logging` module into OTLP: a LoggingHandler converts
    # stdlib records and forwards them to the provider above.
    otel_handler = LoggingHandler()
    # Attach to the root logger (`getLogger()` with no arguments) so events
    # from all child loggers flow through this handler.
    root_logger = logging.getLogger()
    root_logger.addHandler(otel_handler)
    # NOTSET lets every record, regardless of severity, reach the handler.
    root_logger.setLevel(logging.NOTSET)
def setup_tracing():
    """Install a global OpenTelemetry tracer provider that exports spans to the console."""
    # The provider is a factory for tracers; tie it to this sample's resource.
    provider = TracerProvider(resource=resource)
    # The batch processor buffers finished spans and hands them to the
    # exporter, which writes them to the console.
    provider.add_span_processor(BatchSpanProcessor(ConsoleSpanExporter()))
    # Register this provider as the application-wide default.
    set_tracer_provider(provider)
def setup_metrics():
    """Install a global OpenTelemetry meter provider that exports metrics to the console."""
    # Periodically (every 5 seconds) flush accumulated metrics to the console.
    console_reader = PeriodicExportingMetricReader(ConsoleMetricExporter(), export_interval_millis=5000)
    # The provider is a factory for meters; tie it to this sample's resource.
    provider = MeterProvider(
        metric_readers=[console_reader],
        resource=resource,
    )
    # Register this provider as the application-wide default.
    set_meter_provider(provider)
# NOTE: approval_mode="never_require" is for sample brevity.
# Use "always_require" in production; see samples/02-agents/tools/function_tool_with_approval.py
# and samples/02-agents/tools/function_tool_with_approval_and_sessions.py.
@tool(approval_mode="never_require")
async def get_weather(
    location: Annotated[str, Field(description="The location to get the weather for.")],
) -> str:
    """Get the weather for a given location."""
    # Simulate network latency: sleep 0.0–1.0 seconds in 0.1 s steps.
    delay = randint(0, 10) / 10.0
    await asyncio.sleep(delay)
    # Pick a random condition and temperature for the fake report.
    conditions = ["sunny", "cloudy", "rainy", "stormy"]
    return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C."
async def run_chat_client() -> None:
    """Run an AI service and stream its reply to stdout.

    Sends one user message (which triggers tool calls to ``get_weather``)
    through an ``OpenAIChatClient``. Telemetry is collected for the service
    execution behind the scenes and sent to the configured telemetry backend;
    it includes information about the AI service execution.

    Remarks:
        When function calling is outside the OpenTelemetry loop,
        each of the calls to the model is handled as a separate span,
        while when OpenTelemetry is put last, a single span
        is shown, which might include one or more rounds of function calling.
        So for the scenario below, you should see the following:
        2 spans with gen_ai.operation.name=chat
            The first has finish_reason "tool_calls"
            The second has finish_reason "stop"
        2 spans with gen_ai.operation.name=execute_tool
    """
    # Reads connection settings (API key, model) from the environment.
    client = OpenAIChatClient()
    message = "What's the weather in Amsterdam and in Paris?"
    print(f"User: {message}")
    print("Assistant: ", end="")
    # Stream the response; each chunk may carry a fragment of assistant text.
    async for chunk in client.get_response([Message(role="user", text=message)], tools=get_weather, stream=True):
        if chunk.text:
            print(chunk.text, end="")
    # Terminate the streamed line.
    print("")
async def main():
    """Run the selected scenario(s)."""
    # Install console exporters for all three telemetry signals (logs,
    # traces, metrics) before any instrumented code runs.
    setup_logging()
    setup_tracing()
    setup_metrics()
    # Turn on the agent framework's telemetry instrumentation.
    enable_instrumentation()
    await run_chat_client()


if __name__ == "__main__":
    asyncio.run(main())