# --- .env.example (python/samples/demos/travel_planning_system/.env.example) ---
# APPINSIGHTS_CONNECTION_STRING="..."
# SEMANTICKERNEL_EXPERIMENTAL_GENAI_ENABLE_OTEL_DIAGNOSTICS=true
# SEMANTICKERNEL_EXPERIMENTAL_GENAI_ENABLE_OTEL_DIAGNOSTICS_SENSITIVE=true
# AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="gpt-4o-mini"
# AZURE_OPENAI_ENDPOINT="..."
# NOTE(review): the original .env.example is missing a trailing newline.

# --- agents.py (python/samples/demos/travel_planning_system/agents.py) ---
# Copyright (c) Microsoft. All rights reserved.

from semantic_kernel.agents import ChatCompletionAgent
from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion
from semantic_kernel.functions import kernel_function

# Preamble shared by every agent's instructions.
_BASE_SYSTEM_MSG = (
    "You are a helpful travel planning assistant. Always be professional and provide accurate information."
)


class FlightPlugin:
    """Kernel plugin that books flights (mock implementation for the demo)."""

    @kernel_function
    def book_flight(self, flight_id: str) -> str:
        """Book a specific flight."""
        return f"Successfully booked flight with ID {flight_id}. Your booking reference is FLX12345."


class HotelPlugin:
    """Kernel plugin that books hotels (mock implementation for the demo)."""

    @kernel_function
    def book_hotel(self, hotel_id: str) -> str:
        """Book a specific hotel."""
        return f"Successfully booked hotel with ID {hotel_id}. Your booking reference is HTX12345."


class PlanningPlugin:
    """Kernel plugin with read-only planning helpers; all results are hard-coded mock data."""

    @kernel_function
    def get_weather(self, location: str) -> str:
        """Get weather information for a location."""
        return f"Weather information for {location}: Sunny, 25°C."

    @kernel_function
    def search_hotels(self, location: str, check_in: str, check_out: str) -> str:
        """Search for available hotels."""
        available_hotels = [
            {"hotel_id": "HT123", "name": "Hotel Sunshine", "price": "$150/night", "accommodates": "2 people"},
            {"hotel_id": "HT456", "name": "Ocean View Resort", "price": "$200/night", "accommodates": "4 people"},
            {"hotel_id": "HT789", "name": "Mountain Retreat", "price": "$180/night", "accommodates": "2 people"},
        ]
        return f"Searching hotels in {location} from {check_in} to {check_out}:\n{available_hotels}"

    @kernel_function
    def search_flights(self, origin: str, destination: str, date: str) -> str:
        """Search for available flights."""
        available_flights = [
            {"flight_id": "FL123", "take-off-time": "10:00 AM", "arrival-time": "12:00 PM", "price": "$200"},
            {"flight_id": "FL456", "take-off-time": "2:00 PM", "arrival-time": "4:00 PM", "price": "$250"},
            {"flight_id": "FL789", "take-off-time": "6:00 PM", "arrival-time": "8:00 PM", "price": "$300"},
        ]
        return f"Available flights from {origin} to {destination} on {date}:\n{available_flights}"


def get_agents() -> dict[str, ChatCompletionAgent]:
    """Creates and returns a set of agents for the travel planning system.

    Returns:
        A mapping from agent name to its configured ChatCompletionAgent.
    """
    # One spec per agent: (name, description, instruction suffix appended to
    # _BASE_SYSTEM_MSG, plugin list or None when the agent has no plugins).
    specs = [
        (
            "conversation_manager",
            "Manages conversation flow and coordinates between agents",
            "You coordinate the conversation and ensure users get comprehensive help.",
            None,
        ),
        (
            "planner",
            "Creates comprehensive travel plans including flights, hotels, and activities",
            "You create detailed travel plans that include flights, hotels, and activities.",
            [PlanningPlugin()],
        ),
        (
            "router",
            "Routes tasks to appropriate specialized agents",
            "You analyze plans and delegate tasks to the right specialized agents.",
            None,
        ),
        (
            "destination_expert",
            "Expert in destination recommendations and local information",
            "You provide expert advice on destinations, attractions, and local experiences.",
            [PlanningPlugin()],
        ),
        (
            "flight_agent",
            "Specializes in flight booking",
            "You handle all flight-related tasks including booking.",
            [FlightPlugin()],
        ),
        (
            "hotel_agent",
            "Specializes in hotel booking",
            "You handle all hotel-related tasks including booking.",
            [HotelPlugin()],
        ),
    ]

    agents: dict[str, ChatCompletionAgent] = {}
    for name, description, suffix, plugins in specs:
        # Only pass `plugins` when the agent actually has any, mirroring the original calls.
        extra = {"plugins": plugins} if plugins is not None else {}
        agents[name] = ChatCompletionAgent(
            name=name,
            description=description,
            instructions=f"{_BASE_SYSTEM_MSG} {suffix}",
            service=AzureChatCompletion(),
            **extra,
        )
    return agents

# --- group_chat_based_system.py (python/samples/demos/travel_planning_system/group_chat_based_system.py) ---
# Copyright (c) Microsoft. All rights reserved.
import asyncio
import sys

from opentelemetry import trace

from samples.demos.travel_planning_system.agents import get_agents
from samples.demos.travel_planning_system.observability import enable_observability
from semantic_kernel.agents import (
    BooleanResult,
    ChatCompletionAgent,
    GroupChatManager,
    GroupChatOrchestration,
    MessageResult,
    StringResult,
)
from semantic_kernel.agents.runtime import InProcessRuntime
from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion, AzureChatPromptExecutionSettings
from semantic_kernel.contents import (
    AuthorRole,
    ChatHistory,
    ChatMessageContent,
    FunctionCallContent,
    FunctionResultContent,
    StreamingChatMessageContent,
)
from semantic_kernel.functions.kernel_arguments import KernelArguments

if sys.version_info >= (3, 12):
    from typing import override  # pragma: no cover
else:
    from typing_extensions import override  # pragma: no cover


# Flag to indicate if a new message is being received
is_new_message = True


def streaming_agent_response_callback(message: StreamingChatMessageContent, is_final: bool) -> None:
    """Observer function to print the streaming messages from the agents.

    Args:
        message (StreamingChatMessageContent): The streaming message content from the agent.
        is_final (bool): Indicates if this is the final part of the message.
    """
    global is_new_message
    if is_new_message:
        # Print the agent name once per message, before its first chunk.
        print(f"# {message.name}")
        is_new_message = False
    print(message.content, end="", flush=True)

    for item in message.items:
        if isinstance(item, FunctionCallContent):
            print(f"Calling '{item.name}' with arguments '{item.arguments}'", end="", flush=True)
        if isinstance(item, FunctionResultContent):
            print(f"Result from '{item.name}' is '{item.result}'", end="", flush=True)

    if is_final:
        print()
        is_new_message = True


def human_response_function(chat_history: ChatHistory) -> ChatMessageContent:
    """Prompt the user for input and return it as a user chat message.

    The prompt is wrapped in a span so the human-in-the-loop step shows up in traces.
    (Fixed: parameter was misspelled `chat_histoy` and the docstring was a
    copy-paste of the agent-response observer.)
    """
    tracer = trace.get_tracer(__name__)
    with tracer.start_as_current_span("human_in_the_loop"):
        user_input = input("User: ")
        return ChatMessageContent(role=AuthorRole.USER, content=user_input)


class AgentBaseGroupChatManager(GroupChatManager):
    """A group chat manager that uses a ChatCompletionAgent."""

    # The LLM-backed agent used to make all of the manager's decisions.
    agent: ChatCompletionAgent

    def __init__(self, **kwargs):
        """Initialize the base group chat manager with a ChatCompletionAgent."""
        agent = ChatCompletionAgent(
            name="Manager",
            description="The manager of the group chat, responsible for coordinating the agents.",
            instructions=(
                "You are the manager of the group chat. "
                "Your role is to coordinate the agents and ensure they satisfy the user's request."
            ),
            service=AzureChatCompletion(),
        )

        super().__init__(agent=agent, **kwargs)

    @override
    async def should_request_user_input(self, chat_history: ChatHistory) -> BooleanResult:
        """Determine if the manager should request user input based on the chat history."""
        if len(chat_history.messages) == 0:
            return BooleanResult(
                result=False,
                reason="No agents have spoken yet.",
            )

        last_message = chat_history.messages[-1]
        if last_message.role == AuthorRole.USER:
            return BooleanResult(
                result=False,
                reason="User input is not needed if the last message is from the user.",
            )

        # Ask the manager agent; response_format constrains the reply to BooleanResult JSON.
        messages = chat_history.messages[:]
        messages.append(ChatMessageContent(role=AuthorRole.USER, content="Does the group need further user input?"))

        settings = AzureChatPromptExecutionSettings()
        settings.response_format = BooleanResult

        response = await self.agent.get_response(messages, arguments=KernelArguments(settings=settings))
        return BooleanResult.model_validate_json(response.message.content)

    @override
    async def should_terminate(self, chat_history: ChatHistory) -> BooleanResult:
        """Provide concrete implementation for should_terminate."""
        # The base class checks round limits etc.; honor its decision first.
        should_terminate = await super().should_terminate(chat_history)
        if should_terminate.result:
            return should_terminate

        if len(chat_history.messages) == 0:
            return BooleanResult(
                result=False,
                reason="No agents have spoken yet.",
            )

        messages = chat_history.messages[:]
        messages.append(
            ChatMessageContent(
                role=AuthorRole.USER,
                content="Has the user's request been satisfied?",
            )
        )

        settings = AzureChatPromptExecutionSettings()
        settings.response_format = BooleanResult

        response = await self.agent.get_response(messages, arguments=KernelArguments(settings=settings))
        return BooleanResult.model_validate_json(response.message.content)

    @override
    async def select_next_agent(
        self,
        chat_history: ChatHistory,
        participant_descriptions: dict[str, str],
    ) -> StringResult:
        """Provide concrete implementation for selecting the next agent to speak.

        Raises:
            ValueError: If the manager agent picks a name that is not a participant.
        """
        messages = chat_history.messages[:]
        messages.append(
            ChatMessageContent(
                role=AuthorRole.USER,
                content=(
                    "Who should speak next based on the conversation? Pick one agent from the participants:\n"
                    + "\n".join([f"{k}: {v}" for k, v in participant_descriptions.items()])
                    + "\nPlease provide the agent's name."
                ),
            )
        )

        settings = AzureChatPromptExecutionSettings()
        settings.response_format = StringResult

        response = await self.agent.get_response(messages, arguments=KernelArguments(settings=settings))
        result = StringResult.model_validate_json(response.message.content)

        if result.result not in participant_descriptions:
            raise ValueError(
                f"Selected agent '{result.result}' is not in the list of participants: "
                f"{list(participant_descriptions.keys())}"
            )

        return result

    @override
    async def filter_results(
        self,
        chat_history: ChatHistory,
    ) -> MessageResult:
        """Provide concrete implementation for filtering results."""
        messages = chat_history.messages[:]
        messages.append(ChatMessageContent(role=AuthorRole.USER, content="Please summarize the conversation."))

        settings = AzureChatPromptExecutionSettings()
        settings.response_format = StringResult

        response = await self.agent.get_response(messages, arguments=KernelArguments(settings=settings))
        string_with_reason = StringResult.model_validate_json(response.message.content)

        return MessageResult(
            result=ChatMessageContent(
                role=AuthorRole.ASSISTANT,
                content=string_with_reason.result,
            ),
            reason=string_with_reason.reason,
        )


@enable_observability
async def main():
    """Main function to run the agents."""
    # 1. Create a Group Chat orchestration with multiple agents
    agents: dict[str, ChatCompletionAgent] = get_agents()
    group_chat_orchestration = GroupChatOrchestration(
        members=[
            agents["planner"],
            agents["flight_agent"],
            agents["hotel_agent"],
        ],
        manager=AgentBaseGroupChatManager(max_rounds=20, human_response_function=human_response_function),
        streaming_agent_response_callback=streaming_agent_response_callback,
    )

    # 2. Create a runtime and start it
    runtime = InProcessRuntime()
    runtime.start()

    # 3. Invoke the orchestration with a task and the runtime
    # (Fixed prompt typos: "bali" -> "Bali", "their have" -> "They have".)
    orchestration_result = await group_chat_orchestration.invoke(
        task=(
            "Plan a trip to Bali for 5 days including flights, hotels, and "
            "activities for a vegetarian family of 4 members. The family lives in Seattle, WA, USA. "
            "Their vacation starts on July 30th 2025. They have a strict budget of $5000 for the trip. "
            "Please provide a detailed plan and make the necessary hotel and flight bookings."
        ),
        runtime=runtime,
    )

    # 4. Wait for the results
    value = await orchestration_result.get()
    print(value)

    # 5. Stop the runtime after the invocation is complete
    await runtime.stop_when_idle()


if __name__ == "__main__":
    asyncio.run(main())

# --- handoff_based_system.py (python/samples/demos/travel_planning_system/handoff_based_system.py) ---
# Copyright (c) Microsoft. All rights reserved.
import asyncio

from opentelemetry import trace

from samples.demos.travel_planning_system.agents import get_agents
from samples.demos.travel_planning_system.observability import enable_observability
from semantic_kernel.agents import HandoffOrchestration
from semantic_kernel.agents.chat_completion.chat_completion_agent import ChatCompletionAgent
from semantic_kernel.agents.orchestration.handoffs import OrchestrationHandoffs
from semantic_kernel.agents.runtime import InProcessRuntime
from semantic_kernel.contents import AuthorRole, ChatMessageContent


def agent_response_callback(message: ChatMessageContent) -> None:
    """Observer function to print the messages from the agents."""
    if message.content:
        print(f"# {message.name}\n{message.content}")


def human_response_function() -> ChatMessageContent:
    """Prompt the user for input and return it as a user chat message.

    The prompt is wrapped in a span so the human-in-the-loop step shows up in traces.
    (Fixed: the docstring was a copy-paste of the agent-response observer.)
    """
    tracer = trace.get_tracer(__name__)
    with tracer.start_as_current_span("human_in_the_loop"):
        user_input = input("User: ")
        return ChatMessageContent(role=AuthorRole.USER, content=user_input)


def get_agents_and_handoffs():
    """Create agents and define handoffs for the travel planning system.

    Note: prompts need further refinement to ensure they are suitable for the agents.
    Note: the router agent seems unnecessary.

    Returns:
        A tuple of (list of agents, OrchestrationHandoffs describing allowed transfers).
    """
    # Prefix for every transfer description; discourages parallel tool calls.
    BASE_TRANSFER_DESCRIPTION = "Do not call this function in parallel with other functions."

    agents: dict[str, ChatCompletionAgent] = get_agents()

    conversation_manager = agents["conversation_manager"]
    planner = agents["planner"]
    router = agents["router"]
    destination_expert = agents["destination_expert"]
    flight_agent = agents["flight_agent"]
    hotel_agent = agents["hotel_agent"]

    # Transfer graph: manager fans out to everyone; planner -> router -> specialists;
    # each specialist hands back to the manager for out-of-scope questions.
    handoffs = (
        OrchestrationHandoffs()
        .add_many(
            source_agent=conversation_manager,
            target_agents={
                planner.name: f"{BASE_TRANSFER_DESCRIPTION} Transfer to this agent for trip planning.",
                router.name: (
                    f"{BASE_TRANSFER_DESCRIPTION} Transfer to this agent for routing tasks to specialized agents."
                ),
                destination_expert.name: (
                    f"{BASE_TRANSFER_DESCRIPTION} Transfer to this agent for destination expertise."
                ),
                flight_agent.name: f"{BASE_TRANSFER_DESCRIPTION} Transfer to this agent for flight-related tasks.",
                hotel_agent.name: f"{BASE_TRANSFER_DESCRIPTION} Transfer to this agent for hotel-related tasks.",
            },
        )
        .add(
            source_agent=planner,
            target_agent=router,
            description=f"{BASE_TRANSFER_DESCRIPTION} Transfer to this agent for routing tasks to specialized agents.",
        )
        .add_many(
            source_agent=router,
            target_agents={
                destination_expert.name: (
                    f"{BASE_TRANSFER_DESCRIPTION} Transfer to this agent for destination expertise."
                ),
                flight_agent.name: f"{BASE_TRANSFER_DESCRIPTION} Transfer to this agent for flight-related tasks.",
                hotel_agent.name: f"{BASE_TRANSFER_DESCRIPTION} Transfer to this agent for hotel-related tasks.",
            },
        )
        .add(
            source_agent=destination_expert,
            target_agent=conversation_manager,
            description=f"{BASE_TRANSFER_DESCRIPTION} Transfer to this agent for non-destination related questions.",
        )
        .add(
            source_agent=flight_agent,
            target_agent=conversation_manager,
            description=f"{BASE_TRANSFER_DESCRIPTION} Transfer to this agent for non-flight related questions.",
        )
        .add(
            source_agent=hotel_agent,
            target_agent=conversation_manager,
            description=f"{BASE_TRANSFER_DESCRIPTION} Transfer to this agent for non-hotel related questions.",
        )
    )

    return [
        conversation_manager,
        planner,
        router,
        destination_expert,
        flight_agent,
        hotel_agent,
    ], handoffs


@enable_observability
async def main():
    """Main function to run the agents."""
    # 1. Create a handoff orchestration with multiple agents
    agents, handoffs = get_agents_and_handoffs()
    handoff_orchestration = HandoffOrchestration(
        members=agents,
        handoffs=handoffs,
        agent_response_callback=agent_response_callback,
        human_response_function=human_response_function,
    )

    # 2. Create a runtime and start it
    runtime = InProcessRuntime()
    runtime.start()

    # 3. Invoke the orchestration with a task and the runtime
    # (Fixed prompt typo: "bali" -> "Bali".)
    orchestration_result = await handoff_orchestration.invoke(
        task=(
            "Plan a trip to Bali for 5 days including flights, hotels, and "
            "activities for a vegetarian family of 4 members."
        ),
        runtime=runtime,
    )

    # 4. Wait for the results
    value = await orchestration_result.get()
    print(value)

    # 5. Stop the runtime after the invocation is complete
    await runtime.stop_when_idle()


if __name__ == "__main__":
    asyncio.run(main())
# --- observability.py (python/samples/demos/travel_planning_system/observability.py) ---
# Copyright (c) Microsoft. All rights reserved.

import functools
import logging
import os

from azure.monitor.opentelemetry.exporter import AzureMonitorLogExporter, AzureMonitorTraceExporter
from dotenv import load_dotenv
from opentelemetry import trace
from opentelemetry._logs import set_logger_provider
from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.semconv.resource import ResourceAttributes
from opentelemetry.trace import set_tracer_provider
from opentelemetry.trace.span import format_trace_id

# NOTE(review): removed a leftover `import sys` plus an empty
# `if sys.version_info >= (3, 12): pass / else: pass` guard that imported nothing.

load_dotenv()

APPINSIGHTS_CONNECTION_STRING = os.getenv("APPINSIGHTS_CONNECTION_STRING")

# Resource describing this service in exported telemetry.
resource = Resource.create({ResourceAttributes.SERVICE_NAME: "TravelPlanningSystemDemo"})


def set_up_logging():
    """Route semantic_kernel log records to Azure Monitor via OpenTelemetry."""

    class KernelFilter(logging.Filter):
        """A filter to exclude log records from selected semantic_kernel namespaces."""

        # These are the namespaces that we want to exclude from logging for the purposes of this demo.
        namespaces_to_exclude: list[str] = [
            "semantic_kernel.functions.kernel_plugin",
            "semantic_kernel.prompt_template.kernel_prompt_template",
        ]

        def filter(self, record):
            # Generator form avoids materializing an intermediate list.
            return not any(record.name.startswith(namespace) for namespace in self.namespaces_to_exclude)

    exporters = [AzureMonitorLogExporter(connection_string=APPINSIGHTS_CONNECTION_STRING)]

    # Create and set a global logger provider for the application.
    logger_provider = LoggerProvider(resource=resource)
    # Log processors are initialized with an exporter which is responsible
    # for sending the telemetry data to a particular backend.
    for log_exporter in exporters:
        logger_provider.add_log_record_processor(BatchLogRecordProcessor(log_exporter))
    # Sets the global default logger provider
    set_logger_provider(logger_provider)

    # Create a logging handler to write logging records, in OTLP format, to the exporter.
    handler = LoggingHandler()
    # Add filters to the handler to only process records from semantic_kernel.
    handler.addFilter(logging.Filter("semantic_kernel"))
    handler.addFilter(KernelFilter())
    # Attach the handler to the root logger. `getLogger()` with no arguments returns the root logger.
    # Events from all child loggers will be processed by this handler.
    logger = logging.getLogger()
    logger.addHandler(handler)
    # Set the logging level to NOTSET to allow all records to be processed by the handler.
    logger.setLevel(logging.NOTSET)


# Kept as deliberate guidance: an optional processor that drops agent-runtime spans.
# class CustomBatchSpanProcessor(BatchSpanProcessor):
#     @override
#     def on_end(self, span: ReadableSpan):
#         if span.name.startswith("agent_runtime"):
#             # Skip spans that are part of the agent runtime.
#             return
#         super().on_end(span)


def set_up_tracing():
    """Export spans to Azure Monitor via OpenTelemetry."""
    exporters = [AzureMonitorTraceExporter(connection_string=APPINSIGHTS_CONNECTION_STRING)]

    # Initialize a trace provider for the application. This is a factory for creating tracers.
    tracer_provider = TracerProvider(resource=resource)
    # Span processors are initialized with an exporter which is responsible
    # for sending the telemetry data to a particular backend.
    for exporter in exporters:
        tracer_provider.add_span_processor(BatchSpanProcessor(exporter))
    # Sets the global default tracer provider
    set_tracer_provider(tracer_provider)


def enable_observability(func):
    """A decorator to enable observability for the demo.

    Sets up logging and tracing, then runs the wrapped coroutine inside a root
    span and prints the trace ID so the run can be located in Azure Monitor.
    """

    @functools.wraps(func)  # preserve the wrapped coroutine's name and docstring
    async def wrapper(*args, **kwargs):
        set_up_logging()
        set_up_tracing()

        tracer = trace.get_tracer(__name__)
        with tracer.start_as_current_span("main") as current_span:
            print(f"Trace ID: {format_trace_id(current_span.get_span_context().trace_id)}")
            return await func(*args, **kwargs)

    return wrapper

# --- .env.example (python/samples/getting_started_with_agents/multi_agent_orchestration/.env.example) ---
# APPINSIGHTS_CONNECTION_STRING="..."
# SEMANTICKERNEL_EXPERIMENTAL_GENAI_ENABLE_OTEL_DIAGNOSTICS=true
# SEMANTICKERNEL_EXPERIMENTAL_GENAI_ENABLE_OTEL_DIAGNOSTICS_SENSITIVE=true
# AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="gpt-4o-mini"
# AZURE_OPENAI_ENDPOINT="..."
# NOTE(review): this .env.example is also missing a trailing newline.

# --- observability.py (python/samples/getting_started_with_agents/multi_agent_orchestration/observability.py) ---
# Copyright (c) Microsoft. All rights reserved.
import functools
import logging
import os

from azure.monitor.opentelemetry.exporter import AzureMonitorLogExporter, AzureMonitorTraceExporter
from dotenv import load_dotenv
from opentelemetry import trace
from opentelemetry._logs import set_logger_provider
from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.semconv.resource import ResourceAttributes
from opentelemetry.trace import set_tracer_provider
from opentelemetry.trace.span import format_trace_id

load_dotenv()

APPINSIGHTS_CONNECTION_STRING = os.getenv("APPINSIGHTS_CONNECTION_STRING")

# Resource describing this service in exported telemetry.
# Fixed copy-paste defect: the original reused the travel demo's service name
# ("TravelPlanningSystemDemo") even though this module belongs to the
# multi_agent_orchestration getting-started samples.
resource = Resource.create({ResourceAttributes.SERVICE_NAME: "MultiAgentOrchestrationSamples"})


def set_up_logging():
    """Route semantic_kernel log records to Azure Monitor via OpenTelemetry."""

    class KernelFilter(logging.Filter):
        """A filter to exclude log records from selected semantic_kernel namespaces."""

        # These are the namespaces that we want to exclude from logging for the purposes of this demo.
        namespaces_to_exclude: list[str] = [
            "semantic_kernel.functions.kernel_plugin",
            "semantic_kernel.prompt_template.kernel_prompt_template",
        ]

        def filter(self, record):
            # Generator form avoids materializing an intermediate list.
            return not any(record.name.startswith(namespace) for namespace in self.namespaces_to_exclude)

    exporters = [AzureMonitorLogExporter(connection_string=APPINSIGHTS_CONNECTION_STRING)]

    # Create and set a global logger provider for the application.
    logger_provider = LoggerProvider(resource=resource)
    # Log processors are initialized with an exporter which is responsible
    # for sending the telemetry data to a particular backend.
    for log_exporter in exporters:
        logger_provider.add_log_record_processor(BatchLogRecordProcessor(log_exporter))
    # Sets the global default logger provider
    set_logger_provider(logger_provider)

    # Create a logging handler to write logging records, in OTLP format, to the exporter.
    handler = LoggingHandler()
    # Add filters to the handler to only process records from semantic_kernel.
    handler.addFilter(logging.Filter("semantic_kernel"))
    handler.addFilter(KernelFilter())
    # Attach the handler to the root logger. `getLogger()` with no arguments returns the root logger.
    # Events from all child loggers will be processed by this handler.
    logger = logging.getLogger()
    logger.addHandler(handler)
    # Set the logging level to NOTSET to allow all records to be processed by the handler.
    logger.setLevel(logging.NOTSET)


def set_up_tracing():
    """Export spans to Azure Monitor via OpenTelemetry."""
    exporters = [AzureMonitorTraceExporter(connection_string=APPINSIGHTS_CONNECTION_STRING)]

    # Initialize a trace provider for the application. This is a factory for creating tracers.
    tracer_provider = TracerProvider(resource=resource)
    # Span processors are initialized with an exporter which is responsible
    # for sending the telemetry data to a particular backend.
    for exporter in exporters:
        tracer_provider.add_span_processor(BatchSpanProcessor(exporter))
    # Sets the global default tracer provider
    set_tracer_provider(tracer_provider)


def enable_observability(func):
    """A decorator to enable observability for the samples.

    Sets up logging and tracing, then runs the wrapped coroutine inside a root
    span and prints the trace ID so the run can be located in Azure Monitor.
    """

    @functools.wraps(func)  # preserve the wrapped coroutine's name and docstring
    async def wrapper(*args, **kwargs):
        set_up_logging()
        set_up_tracing()

        tracer = trace.get_tracer(__name__)
        with tracer.start_as_current_span("main") as current_span:
            print(f"Trace ID: {format_trace_id(current_span.get_span_context().trace_id)}")
            return await func(*args, **kwargs)

    return wrapper

# --- Remaining patch hunks (step1_concurrent.py, and step1a/2/2a/2b/3/3a below) ---
# Each hunk only adds
#   from samples.getting_started_with_agents.multi_agent_orchestration.observability import enable_observability
# to the sample's imports and decorates its `main` with `@enable_observability`.
Create a concurrent orchestration with multiple agents diff --git a/python/samples/getting_started_with_agents/multi_agent_orchestration/step1a_concurrent_structured_outputs.py b/python/samples/getting_started_with_agents/multi_agent_orchestration/step1a_concurrent_structured_outputs.py index 66af8ccd0a5b..498eefd2c620 100644 --- a/python/samples/getting_started_with_agents/multi_agent_orchestration/step1a_concurrent_structured_outputs.py +++ b/python/samples/getting_started_with_agents/multi_agent_orchestration/step1a_concurrent_structured_outputs.py @@ -5,6 +5,7 @@ from pydantic import BaseModel +from samples.getting_started_with_agents.multi_agent_orchestration.observability import enable_observability from semantic_kernel.agents import Agent, ChatCompletionAgent, ConcurrentOrchestration from semantic_kernel.agents.orchestration.tools import structured_outputs_transform from semantic_kernel.agents.runtime import InProcessRuntime @@ -52,6 +53,7 @@ def get_agents() -> list[Agent]: return [theme_agent, sentiment_agent, entity_agent] +@enable_observability async def main(): """Main function to run the agents.""" # 1. 
Create a concurrent orchestration with multiple agents diff --git a/python/samples/getting_started_with_agents/multi_agent_orchestration/step2_sequential.py b/python/samples/getting_started_with_agents/multi_agent_orchestration/step2_sequential.py index 28390f0643b8..c21c5e423dff 100644 --- a/python/samples/getting_started_with_agents/multi_agent_orchestration/step2_sequential.py +++ b/python/samples/getting_started_with_agents/multi_agent_orchestration/step2_sequential.py @@ -2,6 +2,7 @@ import asyncio +from samples.getting_started_with_agents.multi_agent_orchestration.observability import enable_observability from semantic_kernel.agents import Agent, ChatCompletionAgent, SequentialOrchestration from semantic_kernel.agents.runtime import InProcessRuntime from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion @@ -60,6 +61,7 @@ def agent_response_callback(message: ChatMessageContent) -> None: print(f"# {message.name}\n{message.content}") +@enable_observability async def main(): """Main function to run the agents.""" # 1. 
Create a sequential orchestration with multiple agents and an agent diff --git a/python/samples/getting_started_with_agents/multi_agent_orchestration/step2a_sequential_cancellation_token.py b/python/samples/getting_started_with_agents/multi_agent_orchestration/step2a_sequential_cancellation_token.py index 67f8c76a15bb..117aad1b3c2c 100644 --- a/python/samples/getting_started_with_agents/multi_agent_orchestration/step2a_sequential_cancellation_token.py +++ b/python/samples/getting_started_with_agents/multi_agent_orchestration/step2a_sequential_cancellation_token.py @@ -3,6 +3,7 @@ import asyncio import logging +from samples.getting_started_with_agents.multi_agent_orchestration.observability import enable_observability from semantic_kernel.agents import Agent, ChatCompletionAgent, SequentialOrchestration from semantic_kernel.agents.runtime import InProcessRuntime from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion @@ -58,6 +59,7 @@ def get_agents() -> list[Agent]: return [concept_extractor_agent, writer_agent, format_proof_agent] +@enable_observability async def main(): """Main function to run the agents.""" # 1. 
Create a sequential orchestration with multiple agents diff --git a/python/samples/getting_started_with_agents/multi_agent_orchestration/step2b_sequential_streaming_agent_response_callback.py b/python/samples/getting_started_with_agents/multi_agent_orchestration/step2b_sequential_streaming_agent_response_callback.py index ad80c5d64a36..f2855a4e67ab 100644 --- a/python/samples/getting_started_with_agents/multi_agent_orchestration/step2b_sequential_streaming_agent_response_callback.py +++ b/python/samples/getting_started_with_agents/multi_agent_orchestration/step2b_sequential_streaming_agent_response_callback.py @@ -2,6 +2,7 @@ import asyncio +from samples.getting_started_with_agents.multi_agent_orchestration.observability import enable_observability from semantic_kernel.agents import Agent, ChatCompletionAgent, SequentialOrchestration from semantic_kernel.agents.runtime import InProcessRuntime from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion @@ -76,6 +77,7 @@ def streaming_agent_response_callback(message: StreamingChatMessageContent, is_f is_new_message = True +@enable_observability async def main(): """Main function to run the agents.""" # 1. 
Create a sequential orchestration with multiple agents and an agent diff --git a/python/samples/getting_started_with_agents/multi_agent_orchestration/step3_group_chat.py b/python/samples/getting_started_with_agents/multi_agent_orchestration/step3_group_chat.py index ea61ec69206e..cfa1968ca629 100644 --- a/python/samples/getting_started_with_agents/multi_agent_orchestration/step3_group_chat.py +++ b/python/samples/getting_started_with_agents/multi_agent_orchestration/step3_group_chat.py @@ -2,6 +2,7 @@ import asyncio +from samples.getting_started_with_agents.multi_agent_orchestration.observability import enable_observability from semantic_kernel.agents import Agent, ChatCompletionAgent, GroupChatOrchestration, RoundRobinGroupChatManager from semantic_kernel.agents.runtime import InProcessRuntime from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion @@ -56,6 +57,7 @@ def agent_response_callback(message: ChatMessageContent) -> None: print(f"**{message.name}**\n{message.content}") +@enable_observability async def main(): """Main function to run the agents.""" # 1. 
Create a group chat orchestration with a round robin manager diff --git a/python/samples/getting_started_with_agents/multi_agent_orchestration/step3a_group_chat_human_in_the_loop.py b/python/samples/getting_started_with_agents/multi_agent_orchestration/step3a_group_chat_human_in_the_loop.py index 675476ac5556..af53856d93c3 100644 --- a/python/samples/getting_started_with_agents/multi_agent_orchestration/step3a_group_chat_human_in_the_loop.py +++ b/python/samples/getting_started_with_agents/multi_agent_orchestration/step3a_group_chat_human_in_the_loop.py @@ -3,6 +3,7 @@ import asyncio import sys +from samples.getting_started_with_agents.multi_agent_orchestration.observability import enable_observability from semantic_kernel.agents import Agent, ChatCompletionAgent, GroupChatOrchestration from semantic_kernel.agents.orchestration.group_chat import BooleanResult, RoundRobinGroupChatManager from semantic_kernel.agents.runtime import InProcessRuntime @@ -97,6 +98,7 @@ async def human_response_function(chat_histoy: ChatHistory) -> ChatMessageConten return ChatMessageContent(role=AuthorRole.USER, content=user_input) +@enable_observability async def main(): """Main function to run the agents.""" # 1. 
Create a group chat orchestration with a round robin manager diff --git a/python/samples/getting_started_with_agents/multi_agent_orchestration/step3b_group_chat_with_chat_completion_manager.py b/python/samples/getting_started_with_agents/multi_agent_orchestration/step3b_group_chat_with_chat_completion_manager.py index f5c7133c333e..cfe3db6aa66e 100644 --- a/python/samples/getting_started_with_agents/multi_agent_orchestration/step3b_group_chat_with_chat_completion_manager.py +++ b/python/samples/getting_started_with_agents/multi_agent_orchestration/step3b_group_chat_with_chat_completion_manager.py @@ -3,6 +3,7 @@ import asyncio import sys +from samples.getting_started_with_agents.multi_agent_orchestration.observability import enable_observability from semantic_kernel.agents import Agent, ChatCompletionAgent, GroupChatOrchestration from semantic_kernel.agents.orchestration.group_chat import BooleanResult, GroupChatManager, MessageResult, StringResult from semantic_kernel.agents.runtime import InProcessRuntime @@ -303,6 +304,7 @@ def agent_response_callback(message: ChatMessageContent) -> None: print(f"**{message.name}**\n{message.content}") +@enable_observability async def main(): """Main function to run the agents.""" # 1. 
Create a group chat orchestration with the custom group chat manager diff --git a/python/samples/getting_started_with_agents/multi_agent_orchestration/step4_handoff.py b/python/samples/getting_started_with_agents/multi_agent_orchestration/step4_handoff.py index d0a20ad00560..26bf7c69eca8 100644 --- a/python/samples/getting_started_with_agents/multi_agent_orchestration/step4_handoff.py +++ b/python/samples/getting_started_with_agents/multi_agent_orchestration/step4_handoff.py @@ -2,6 +2,7 @@ import asyncio +from samples.getting_started_with_agents.multi_agent_orchestration.observability import enable_observability from semantic_kernel.agents import Agent, ChatCompletionAgent, HandoffOrchestration, OrchestrationHandoffs from semantic_kernel.agents.runtime import InProcessRuntime from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion @@ -140,6 +141,7 @@ def human_response_function() -> ChatMessageContent: return ChatMessageContent(role=AuthorRole.USER, content=user_input) +@enable_observability async def main(): """Main function to run the agents.""" # 1. 
Create a handoff orchestration with multiple agents diff --git a/python/samples/getting_started_with_agents/multi_agent_orchestration/step4a_handoff_structured_inputs.py b/python/samples/getting_started_with_agents/multi_agent_orchestration/step4a_handoff_structured_inputs.py index 8fba8356811c..021e68c4c1e1 100644 --- a/python/samples/getting_started_with_agents/multi_agent_orchestration/step4a_handoff_structured_inputs.py +++ b/python/samples/getting_started_with_agents/multi_agent_orchestration/step4a_handoff_structured_inputs.py @@ -5,6 +5,7 @@ from pydantic import BaseModel +from samples.getting_started_with_agents.multi_agent_orchestration.observability import enable_observability from semantic_kernel.agents import Agent, ChatCompletionAgent, HandoffOrchestration, OrchestrationHandoffs from semantic_kernel.agents.runtime import InProcessRuntime from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion @@ -147,6 +148,7 @@ def custom_input_transform(input_message: GithubIssue) -> ChatMessageContent: return ChatMessageContent(role=AuthorRole.USER, content=input_message.model_dump_json()) +@enable_observability async def main(): """Main function to run the agents.""" # 1. 
Create a handoff orchestration with multiple agents diff --git a/python/samples/getting_started_with_agents/multi_agent_orchestration/step5_magentic.py b/python/samples/getting_started_with_agents/multi_agent_orchestration/step5_magentic.py index 5f1a95e010af..2e39cb0883ec 100644 --- a/python/samples/getting_started_with_agents/multi_agent_orchestration/step5_magentic.py +++ b/python/samples/getting_started_with_agents/multi_agent_orchestration/step5_magentic.py @@ -2,6 +2,7 @@ import asyncio +from samples.getting_started_with_agents.multi_agent_orchestration.observability import enable_observability from semantic_kernel.agents import ( Agent, ChatCompletionAgent, @@ -70,6 +71,7 @@ def agent_response_callback(message: ChatMessageContent) -> None: print(f"**{message.name}**\n{message.content}") +@enable_observability async def main(): """Main function to run the agents.""" # 1. Create a Magentic orchestration with two agents and a Magentic manager