
Commit d8ec8ca ("init changes")
1 parent: 3b8de8e

File tree: 19 files changed (+1799, -18 lines)


src/backend/app_kernel.py

Lines changed: 122 additions & 18 deletions
@@ -2,41 +2,32 @@
 import asyncio
 import logging
 import os
+# Azure monitoring
+import re
 import uuid
 from typing import Dict, List, Optional
 
 # Semantic Kernel imports
 from app_config import config
 from auth.auth_utils import get_authenticated_user_details
-
-# Azure monitoring
-import re
-from dateutil import parser
 from azure.monitor.opentelemetry import configure_azure_monitor
 from config_kernel import Config
+from dateutil import parser
 from event_utils import track_event_if_configured
-
 # FastAPI imports
 from fastapi import FastAPI, HTTPException, Query, Request
 from fastapi.middleware.cors import CORSMiddleware
 from kernel_agents.agent_factory import AgentFactory
-
 # Local imports
 from middleware.health_check import HealthCheckMiddleware
-from models.messages_kernel import (
-    AgentMessage,
-    AgentType,
-    HumanClarification,
-    HumanFeedback,
-    InputTask,
-    PlanWithSteps,
-    Step,
-    UserLanguage
-)
-
+from models.messages_kernel import (AgentMessage, AgentType,
+                                    HumanClarification, HumanFeedback,
+                                    InputTask, PlanWithSteps, Step,
+                                    UserLanguage)
 # Updated import for KernelArguments
 from utils_kernel import initialize_runtime_and_context, rai_success
-
+from v3.orchestration.manager import OnboardingOrchestrationManager
+from v3.scenarios.onboarding_cases import MagenticScenarios
 
 # Check if the Application Insights Instrumentation Key is set in the environment variables
 connection_string = os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING")
@@ -281,6 +272,119 @@ async def input_task_endpoint(input_task: InputTask, request: Request):
         )
         raise HTTPException(status_code=400, detail=f"Error creating plan: {error_msg}") from e
 
+@app.post("/api/input_task_2")
+async def input_task_endpoint_2(input_task: InputTask, request: Request):
+    """
+    Receive the initial input task from the user.
+    """
+    # Fix 1: Properly await the async rai_success function
+    if not await rai_success(input_task.description, True):
+        print("RAI failed")
+
+        track_event_if_configured(
+            "RAI failed",
+            {
+                "status": "Plan not created",
+                "description": input_task.description,
+                "session_id": input_task.session_id,
+            },
+        )
+
+        return {
+            "status": "Plan not created",
+        }
+    authenticated_user = get_authenticated_user_details(request_headers=request.headers)
+    user_id = authenticated_user["user_principal_id"]
+
+    if not user_id:
+        track_event_if_configured(
+            "UserIdNotFound", {"status_code": 400, "detail": "no user"}
+        )
+        raise HTTPException(status_code=400, detail="no user")
+
+    # Generate session ID if not provided
+    if not input_task.session_id:
+        input_task.session_id = str(uuid.uuid4())
+
+    try:
+        # Create all agents instead of just the planner agent
+        # This ensures other agents are created first and the planner has access to them
+        # <? Do we need to do this every time? >
+        kernel, memory_store = await initialize_runtime_and_context(
+            input_task.session_id, user_id
+        )
+        client = None
+        try:
+            client = config.get_ai_project_client()
+        except Exception as client_exc:
+            logging.error(f"Error creating AIProjectClient: {client_exc}")
+
+        agents = await AgentFactory.create_all_agents(
+            session_id=input_task.session_id,
+            user_id=user_id,
+            memory_store=memory_store,
+            client=client,
+        )
+
+        group_chat_manager = agents[AgentType.GROUP_CHAT_MANAGER.value]
+
+        # Convert input task to JSON for the kernel function, add user_id here
+
+        # Use the planner to handle the task
+        await group_chat_manager.handle_input_task(input_task)
+
+        # Get plan from memory store
+        plan = await memory_store.get_plan_by_session(input_task.session_id)
+
+        if not plan:  # If the plan is not found, raise an error
+            track_event_if_configured(
+                "PlanNotFound",
+                {
+                    "status": "Plan not found",
+                    "session_id": input_task.session_id,
+                    "description": input_task.description,
+                },
+            )
+            raise HTTPException(status_code=404, detail="Plan not found")
+        # Log custom event for successful input task processing
+        track_event_if_configured(
+            "InputTaskProcessed",
+            {
+                "status": f"Plan created with ID: {plan.id}",
+                "session_id": input_task.session_id,
+                "plan_id": plan.id,
+                "description": input_task.description,
+            },
+        )
+        if client:
+            try:
+                client.close()
+            except Exception as e:
+                logging.error(f"Error sending to AIProjectClient: {e}")
+        return {
+            "status": f"Plan created with ID: {plan.id}",
+            "session_id": input_task.session_id,
+            "plan_id": plan.id,
+            "description": input_task.description,
+        }
+
+    except Exception as e:
+        # Extract clean error message for rate limit errors
+        error_msg = str(e)
+        if "Rate limit is exceeded" in error_msg:
+            match = re.search(r"Rate limit is exceeded\. Try again in (\d+) seconds?\.", error_msg)
+            if match:
+                error_msg = f"Rate limit is exceeded. Try again in {match.group(1)} seconds."
+
+        track_event_if_configured(
+            "InputTaskError",
+            {
+                "session_id": input_task.session_id,
+                "description": input_task.description,
+                "error": str(e),
+            },
+        )
+        raise HTTPException(status_code=400, detail=f"Error creating plan: {error_msg}") from e
 
 @app.post("/api/human_feedback")
 async def human_feedback_endpoint(human_feedback: HumanFeedback, request: Request):
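For orientation, a minimal client-side sketch of how the new /api/input_task_2 endpoint might be exercised during local development. Only the path and the InputTask fields (session_id, description) come from the diff above; the host, port, and auth header name are assumptions about the local setup (get_authenticated_user_details typically reads the EasyAuth-style principal headers).

# Hypothetical local test of the new endpoint; host/port and header name are assumptions.
import uuid
import requests

payload = {
    "session_id": str(uuid.uuid4()),  # optional: the server generates one if omitted
    "description": "Onboard a new employee starting next Monday",
}
headers = {
    # Assumed EasyAuth-style principal header consumed by get_authenticated_user_details.
    "x-ms-client-principal-id": "local-test-user",
}

resp = requests.post("http://localhost:8000/api/input_task_2", json=payload, headers=headers)
print(resp.status_code, resp.json())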
Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
+# Callbacks package for handling agent responses and streaming
Lines changed: 15 additions & 0 deletions
@@ -0,0 +1,15 @@
+
+class DebugGlobalAccess:
+    """Class to manage global access to the Magentic orchestration manager."""
+
+    _managers = []
+
+    @classmethod
+    def add_manager(cls, manager):
+        """Add a new manager to the global list."""
+        cls._managers.append(manager)
+
+    @classmethod
+    def get_managers(cls):
+        """Get the list of all managers."""
+        return cls._managers
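A brief usage sketch of the debug helper above: the class is just a process-wide registry backed by a class-level list. The import path and the stand-in manager object are hypothetical (the diff does not show the new file's name).

# Hypothetical usage; the module path below is an assumption, not shown in the diff.
from v3.callbacks.debug_global_access import DebugGlobalAccess

class FakeManager:
    """Stand-in for an orchestration manager, for illustration only."""
    def __init__(self, name: str):
        self.name = name

DebugGlobalAccess.add_manager(FakeManager("onboarding-session-1"))
DebugGlobalAccess.add_manager(FakeManager("onboarding-session-2"))

# Later (e.g. from a debug endpoint or a REPL), inspect everything registered so far.
for manager in DebugGlobalAccess.get_managers():
    print(manager.name)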
Lines changed: 140 additions & 0 deletions
@@ -0,0 +1,140 @@
+"""
+Enhanced response callbacks for employee onboarding agent system.
+Provides detailed monitoring and response handling for different agent types.
+"""
+
+import sys
+from semantic_kernel.contents import ChatMessageContent, StreamingChatMessageContent
+
+def enhanced_agent_response_callback(message: ChatMessageContent) -> None:
+    """Enhanced callback to monitor agent responses with detailed information."""
+
+    # Get basic message information
+    message_type = type(message).__name__
+    role = getattr(message, 'role', 'unknown')
+    metadata = getattr(message, 'metadata', {})
+    agent_name = message.name or "Unknown Agent"
+
+    # Handle different agent types with specific formatting
+    if "Coder" in agent_name:
+        _handle_coder_response(message, agent_name, message_type)
+    elif "Reasoning" in agent_name:
+        _handle_reasoning_response(message, agent_name, message_type, role)
+    elif "Research" in agent_name or "Enhanced" in agent_name:
+        _handle_research_response(message, agent_name, message_type, role, metadata)
+    else:
+        _handle_default_response(message, agent_name, message_type, role)
+
+def _handle_coder_response(message, agent_name, message_type):
+    """Handle coder agent responses with code execution details."""
+    if hasattr(message, 'items') and message.items and len(message.items) > 0:
+        for item in message.items:
+            if hasattr(item, 'text') and item.text:
+                print(item.text, end='', flush=True)
+
+    content = message.content or ""
+    if content.strip():
+        print(content, end='', flush=True)
+
+def _handle_reasoning_response(message, agent_name, message_type, role):
+    """Handle reasoning agent responses with logical process details."""
+    print(f"\n🧠 **{agent_name}** [{message_type}] (role: {role})")
+    print("-" * (len(agent_name) + len(message_type) + 15))
+
+    if hasattr(message, 'items') and message.items:
+        for item in message.items:
+            if hasattr(item, 'function_name') and item.function_name:
+                print(f"🔧 Function Call: {item.function_name}")
+            if hasattr(item, 'text') and item.text:
+                print(item.text, end='', flush=True)
+
+    content = message.content or ""
+    if content.strip():
+        print(f"💭 Reasoning: {content}")
+
+    sys.stdout.flush()
+    print()
+
+def _handle_research_response(message, agent_name, message_type, role, metadata):
+    """Handle research agent responses with search result details."""
+    print(f"\n🔍 **{agent_name}** [{message_type}] (role: {role})")
+    print("-" * (len(agent_name) + len(message_type) + 15))
+
+    if metadata:
+        print(f"📋 Metadata: {metadata}")
+
+    # Show detailed search results if available
+    if hasattr(message, 'items') and message.items and len(message.items) > 0:
+        print(f"🔧 Found {len(message.items)} items in message")
+
+        for i, item in enumerate(message.items):
+            print(f" Item {i+1}:")
+
+            if hasattr(item, 'function_name'):
+                print(f" Function Name: {item.function_name}")
+
+            if hasattr(item, 'arguments'):
+                print(f" Arguments: {item.arguments}")
+
+            if hasattr(item, 'text') and item.text:
+                print(f" Text: {item.text[:200]}...")
+
+            # Extract Bing search results
+            if hasattr(item, 'response_metadata'):
+                _parse_search_results(item.response_metadata)
+
+    content = message.content or ""
+    if content.strip():
+        print(f"💬 Content: {content}")
+    else:
+        print("💬 Content: [Empty]")
+
+    sys.stdout.flush()
+    print()
+
+def _parse_search_results(response_meta):
+    """Parse and display Bing search results from metadata."""
+    print(f" Response Metadata: {str(response_meta)[:300]}...")
+
+    if isinstance(response_meta, str):
+        try:
+            import json
+            parsed_meta = json.loads(response_meta)
+            if 'webPages' in parsed_meta:
+                web_pages = parsed_meta.get('webPages', {})
+                total_docs = web_pages.get('totalEstimatedMatches', 0)
+                available_docs = len(web_pages.get('value', []))
+                print(f" 📊 BING SEARCH RESULTS: {available_docs} docs returned, ~{total_docs} total matches")
+
+                # Show first few results
+                for j, page in enumerate(web_pages.get('value', [])[:3]):
+                    title = page.get('name', 'No title')[:50]
+                    url = page.get('url', 'No URL')[:80]
+                    print(f" Result {j+1}: {title} - {url}")
+        except Exception as parse_error:
+            print(f" ⚠️ Could not parse search results: {parse_error}")
+
+def _handle_default_response(message, agent_name, message_type, role):
+    """Handle default agent responses."""
+    print(f"\n🤖 **{agent_name}** [{message_type}] (role: {role})")
+    print("-" * (len(agent_name) + len(message_type) + 15))
+
+    content = message.content or ""
+    if content.strip():
+        print(f"💬 Content: {content}")
+    else:
+        print("💬 Content: [Empty]")
+
+    sys.stdout.flush()
+    print()
+
+async def streaming_agent_response_callback(streaming_message: StreamingChatMessageContent, is_final: bool) -> None:
+    """Simple streaming callback to show real-time agent responses."""
+
+    # Print streaming content as it arrives
+    if hasattr(streaming_message, 'content') and streaming_message.content:
+        print(streaming_message.content, end='', flush=True)
+
+    # Add a newline when the streaming is complete for this message
+    if is_final:
+        print()
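To make the expected metadata shape concrete, here is a small self-contained sketch that feeds _parse_search_results a synthetic Bing-style webPages payload. The keys mirror what the function reads; the data itself is invented, and the snippet assumes it runs in (or imports from) the callbacks module defined above.

# Synthetic example of the metadata string _parse_search_results understands.
import json

fake_bing_metadata = json.dumps({
    "webPages": {
        "totalEstimatedMatches": 1200,
        "value": [
            {"name": "Employee onboarding checklist", "url": "https://example.com/onboarding"},
            {"name": "IT equipment request guide", "url": "https://example.com/it-requests"},
        ],
    }
})

# Prints a truncated metadata dump, a summary line, and the first few (title, url) pairs.
_parse_search_results(fake_bing_metadata)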

src/backend/v3/config/__init__.py

Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
+# Configuration package for Magentic Example
