Commit be3f7f4

fixing input task
1 parent 99ad0dd commit be3f7f4

3 files changed: +54 -47 lines changed

src/backend/app_kernel.py

Lines changed: 15 additions & 14 deletions
@@ -40,15 +40,15 @@
 from models.agent_types import AgentType
 from kernel_agents.agent_factory import AgentFactory

-# Check if the Application Insights Instrumentation Key is set in the environment variables
-instrumentation_key = os.getenv("APPLICATIONINSIGHTS_INSTRUMENTATION_KEY")
-if instrumentation_key:
-    # Configure Application Insights if the Instrumentation Key is found
-    configure_azure_monitor(connection_string=instrumentation_key)
-    logging.info("Application Insights configured with the provided Instrumentation Key")
-else:
-    # Log a warning if the Instrumentation Key is not found
-    logging.warning("No Application Insights Instrumentation Key found. Skipping configuration")
+# # Check if the Application Insights Instrumentation Key is set in the environment variables
+# instrumentation_key = os.getenv("APPLICATIONINSIGHTS_INSTRUMENTATION_KEY")
+# if instrumentation_key:
+#     # Configure Application Insights if the Instrumentation Key is found
+#     configure_azure_monitor(connection_string=instrumentation_key)
+#     logging.info("Application Insights configured with the provided Instrumentation Key")
+# else:
+#     # Log a warning if the Instrumentation Key is not found
+#     logging.warning("No Application Insights Instrumentation Key found. Skipping configuration")

 # Configure logging
 logging.basicConfig(level=logging.INFO)

@@ -59,10 +59,10 @@
 )
 logging.getLogger("azure.identity.aio._internal").setLevel(logging.WARNING)

-# Suppress info logs from OpenTelemetry exporter
-logging.getLogger("azure.monitor.opentelemetry.exporter.export._base").setLevel(
-    logging.WARNING
-)
+# # Suppress info logs from OpenTelemetry exporter
+# logging.getLogger("azure.monitor.opentelemetry.exporter.export._base").setLevel(
+#     logging.WARNING
+# )

 # Initialize the FastAPI app
 app = FastAPI()

@@ -132,9 +132,10 @@ async def input_task_endpoint(input_task: InputTask, request: Request):
     input_task_data["user_id"] = user_id
     input_task_json = json.dumps(input_task_data)

+    logging.info(f"Input task: {input_task}")
     # Use the planner to handle the task
     result = await planner_agent.handle_input_task(
-        KernelArguments(input_task_json=input_task_json)
+        input_task
     )

     print(f"Result: {result}")
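The net effect of the app_kernel.py change is that the endpoint no longer serializes the request into JSON and wraps it in KernelArguments; it hands the validated InputTask model straight to the planner. A hedged, self-contained sketch of old-vs-new calling convention, where PlannerAgentStub stands in for the real PlannerAgent and the InputTask fields are inferred from this diff:

    # Sketch only: stand-in types, not the backend's actual classes.
    import asyncio
    from typing import Optional
    from pydantic import BaseModel

    class InputTask(BaseModel):  # stand-in; the real model lives in the backend's models package
        session_id: str
        description: str
        user_id: Optional[str] = None

    class PlannerAgentStub:
        async def handle_input_task(self, input_task: InputTask) -> str:
            # New signature: receives the Pydantic model directly,
            # instead of KernelArguments(input_task_json=json.dumps(...)).
            return f"Plan requested for: {input_task.description}"

    async def main() -> None:
        planner_agent = PlannerAgentStub()
        task = InputTask(session_id="session-1", description="Summarize last week's sales")
        print(await planner_agent.handle_input_task(task))

    asyncio.run(main())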

src/backend/kernel_agents/agent_factory.py

Lines changed: 13 additions & 2 deletions
@@ -25,8 +25,12 @@
 from kernel_agents.group_chat_manager import GroupChatManager
 from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig
 from context.cosmos_memory_kernel import CosmosMemoryContext
+from models.messages_kernel import PlannerResponsePlan

-
+from azure.ai.projects.models import (
+    ResponseFormatJsonSchema,
+    ResponseFormatJsonSchemaType,
+)
 logger = logging.getLogger(__name__)


@@ -348,7 +352,14 @@ async def create_all_agents(
             session_id=session_id,
             user_id=user_id,
             temperature=temperature,
-            agent_instances=agent_instances  # Pass agent instances to the planner
+            agent_instances=agent_instances,  # Pass agent instances to the planner
+            response_format=ResponseFormatJsonSchemaType(
+                json_schema=ResponseFormatJsonSchema(
+                    name=PlannerResponsePlan.__name__,
+                    description=f"respond with {PlannerResponsePlan.__name__.lower()}",
+                    schema=PlannerResponsePlan.model_json_schema(),
+                )
+            )
         )
         agents[planner_agent_type] = planner_agent
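The planner is now constructed with a response_format built from PlannerResponsePlan.model_json_schema(), so the Azure AI agent is asked to reply with JSON matching that schema. PlannerResponsePlan itself comes from models.messages_kernel and is not part of this diff; judging from the StructuredOutputPlan model deleted from planner_agent.py below, it plausibly looks like the following sketch (field names taken from the removed model; the step model's name is an assumption):

    # Hypothetical sketch of PlannerResponsePlan, assumed to mirror the removed StructuredOutputPlan.
    from typing import List, Optional
    from pydantic import BaseModel, Field

    class PlannerResponseStep(BaseModel):  # name assumed
        action: str = Field(description="Detailed description of the step action")
        agent: str = Field(description="Name of the agent to execute this step")

    class PlannerResponsePlan(BaseModel):
        initial_goal: str = Field(description="The goal of the plan")
        steps: List[PlannerResponseStep] = Field(description="List of steps to achieve the goal")
        summary_plan_and_steps: str = Field(description="Brief summary of the plan and steps")
        human_clarification_request: Optional[str] = Field(None, description="Any additional information needed from the human")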

src/backend/kernel_agents/planner_agent.py

Lines changed: 26 additions & 31 deletions
@@ -15,6 +15,7 @@
     AgentMessage,
     InputTask,
     Plan,
+    PlannerResponsePlan,
     Step,
     StepStatus,
     PlanStatus,

@@ -23,16 +24,6 @@
 from event_utils import track_event_if_configured
 from app_config import config

-# Define structured output models
-class StructuredOutputStep(BaseModel):
-    action: str = Field(description="Detailed description of the step action")
-    agent: str = Field(description="Name of the agent to execute this step")
-
-class StructuredOutputPlan(BaseModel):
-    initial_goal: str = Field(description="The goal of the plan")
-    steps: List[StructuredOutputStep] = Field(description="List of steps to achieve the goal")
-    summary_plan_and_steps: str = Field(description="Brief summary of the plan and steps")
-    human_clarification_request: Optional[str] = Field(None, description="Any additional information needed from the human")

 class PlannerAgent(BaseAgent):
     """Planner agent implementation using Semantic Kernel.
@@ -125,7 +116,7 @@ async def async_init(self) -> None:
             logging.error(f"Failed to create Azure AI Agent for PlannerAgent: {e}")
             raise

-    async def handle_input_task(self, kernel_arguments: KernelArguments) -> str:
+    async def handle_input_task(self, input_task: InputTask) -> str:
         """Handle the initial input task from the user.

         Args:

@@ -135,15 +126,19 @@ async def handle_input_task(self, kernel_arguments: KernelArguments) -> str:
             Status message
         """
         # Parse the input task
-        input_task_json = kernel_arguments["input_task_json"]
-        input_task = InputTask.parse_raw(input_task_json)
+        logging.info("Handling input task")
+
+        logging.info(f"Parsed input task: {input_task}")

         # Generate a structured plan with steps
+
+        logging.info(f"Received input task: {input_task.description}")
+        logging.info(f"Session ID: {input_task.session_id}, User ID: {self._user_id}")
         plan, steps = await self._create_structured_plan(input_task)

-        print(f"Plan created: {plan}")
+        logging.info(f"Plan created: {plan}")
+        logging.info(f"Steps created: {steps}")

-        print(f"Steps created: {steps}")


         if steps:
@@ -273,8 +268,13 @@ async def _create_structured_plan(self, input_task: InputTask) -> Tuple[Plan, Li
         """
         try:
             # Generate the instruction for the LLM
+            logging.info("Generating instruction for the LLM")
+            logging.debug(f"Input: {input_task}")
+            logging.debug(f"Available agents: {self._available_agents}")
+
             instruction = self._generate_instruction(input_task.description)

+            logging.info(f"Generated instruction: {instruction}")
             # Log the input task for debugging
             logging.info(f"Creating plan for task: '{input_task.description}'")
             logging.info(f"Using available agents: {self._available_agents}")

@@ -294,17 +294,17 @@ async def _create_structured_plan(self, input_task: InputTask) -> Tuple[Plan, Li
             kernel_args = KernelArguments()
             kernel_args["input"] = f"TASK: {input_task.description}\n\n{instruction}"

-            print(f"Kernel arguments: {kernel_args}")
+            logging.debug(f"Kernel arguments: {kernel_args}")

             # Call invoke with proper keyword arguments
             response_content = ""

-            # Use keyword arguments instead of positional arguments
-            # Set a lower temperature to ensure consistent results
+            # Ensure we're using the right pattern for Azure AI agents with semantic kernel
+            # Properly handle async generation
             async_generator = self._azure_ai_agent.invoke(
                 arguments=kernel_args,
                 settings={
-                    "temperature": 0.0
+                    "temperature": 0.0,  # Keep temperature low for consistent planning
                 }
             )
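The "properly handle async generation" comment refers to iterating the async generator returned by invoke and concatenating every non-None chunk, as the next hunk shows. A self-contained sketch of that accumulation pattern, with a fake generator standing in for self._azure_ai_agent.invoke(...):

    # Sketch of the chunk-accumulation pattern; fake_agent_invoke is a stand-in, not a real API.
    import asyncio
    from typing import AsyncIterator

    async def fake_agent_invoke() -> AsyncIterator[str]:
        # Yields partial output chunks, like the agent's streaming response.
        for chunk in ['{"initial_goal": "demo", ', '"steps": []}']:
            yield chunk

    async def collect_response() -> str:
        response_content = ""
        async for chunk in fake_agent_invoke():
            if chunk is not None:
                response_content += str(chunk)
        return response_content

    print(asyncio.run(collect_response()))  # -> '{"initial_goal": "demo", "steps": []}'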

@@ -313,13 +313,8 @@ async def _create_structured_plan(self, input_task: InputTask) -> Tuple[Plan, Li
                 if chunk is not None:
                     response_content += str(chunk)

-            print(f"Response content: {response_content}")
-
-            # Debug the response
-            logging.info(f"Response content length: {len(response_content)}")
-            logging.debug(f"Response content first 500 chars: {response_content[:500]}")
-            # Log more of the response for debugging
-            logging.info(f"Full response: {response_content}")
+
+            logging.info(f"Response content: {response_content}")

             # Check if response is empty or whitespace
             if not response_content or response_content.isspace():

@@ -329,7 +324,7 @@ async def _create_structured_plan(self, input_task: InputTask) -> Tuple[Plan, Li
             try:
                 # First try to parse using Pydantic model
                 try:
-                    parsed_result = StructuredOutputPlan.parse_raw(response_content)
+                    parsed_result = PlannerResponsePlan.parse_raw(response_content)
                 except Exception as e1:
                     logging.warning(f"Failed to parse direct JSON with Pydantic: {str(e1)}")

@@ -339,12 +334,12 @@ async def _create_structured_plan(self, input_task: InputTask) -> Tuple[Plan, Li
                         json_content = json_match.group(1)
                         logging.info(f"Found JSON content in markdown code block, length: {len(json_content)}")
                         try:
-                            parsed_result = StructuredOutputPlan.parse_raw(json_content)
+                            parsed_result = PlannerResponsePlan.parse_raw(json_content)
                         except Exception as e2:
                             logging.warning(f"Failed to parse extracted JSON with Pydantic: {str(e2)}")
                             # Try conventional JSON parsing as fallback
                             json_data = json.loads(json_content)
-                            parsed_result = StructuredOutputPlan.parse_obj(json_data)
+                            parsed_result = PlannerResponsePlan.parse_obj(json_data)
                     else:
                         # Try to extract JSON without code blocks - maybe it's embedded in text
                         # Look for patterns like { ... } that contain "initial_goal" and "steps"

@@ -356,12 +351,12 @@ async def _create_structured_plan(self, input_task: InputTask) -> Tuple[Plan, Li
                             logging.info(f"Found potential JSON in text, length: {len(potential_json)}")
                             try:
                                 json_data = json.loads(potential_json)
-                                parsed_result = StructuredOutputPlan.parse_obj(json_data)
+                                parsed_result = PlannerResponsePlan.parse_obj(json_data)
                             except Exception as e3:
                                 logging.warning(f"Failed to parse potential JSON: {str(e3)}")
                                 # If all extraction attempts fail, try parsing the whole response as JSON
                                 json_data = json.loads(response_content)
-                                parsed_result = StructuredOutputPlan.parse_obj(json_data)
+                                parsed_result = PlannerResponsePlan.parse_obj(json_data)
                         else:
                             # If we can't find JSON patterns, create a fallback plan from the text
                             logging.info("Using fallback plan creation from text response")
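Taken together, these parsing hunks try, in order: the raw response as JSON via the Pydantic model, JSON inside a markdown code fence, a brace-delimited object embedded in surrounding text, and finally a fallback plan built from plain text. A condensed sketch of that chain as a standalone helper (helper name and regex patterns are illustrative; the real logic stays inline in _create_structured_plan):

    # Hedged sketch of the fallback parsing chain; patterns and helper name are assumptions.
    import json
    import re
    from typing import Optional
    from models.messages_kernel import PlannerResponsePlan

    def parse_planner_response(response_content: str) -> Optional[PlannerResponsePlan]:
        # 1. Try the raw response as JSON matching the Pydantic model.
        try:
            return PlannerResponsePlan.parse_raw(response_content)
        except Exception:
            pass
        # 2. Try JSON wrapped in a ```json ... ``` markdown code block.
        match = re.search(r"```(?:json)?\s*(.*?)\s*```", response_content, re.DOTALL)
        if match:
            try:
                return PlannerResponsePlan.parse_obj(json.loads(match.group(1)))
            except Exception:
                pass
        # 3. Try a brace-delimited object embedded in surrounding text.
        match = re.search(r"\{.*\}", response_content, re.DOTALL)
        if match:
            try:
                return PlannerResponsePlan.parse_obj(json.loads(match.group(0)))
            except Exception:
                pass
        # 4. Give up; the caller builds a fallback plan from the plain text.
        return None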
