diff --git a/.vscode/launch.json b/.vscode/launch.json index 0a65a8163..e03a0815d 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -41,6 +41,30 @@ "LOGGING_CONFIG_PATH": "${workspaceFolder}/preset/logging_config.yaml" } }, + { + "name": "SAM (workflow)", + "type": "debugpy", + "request": "launch", + "module": "solace_ai_connector.main", + "console": "integratedTerminal", + "args": "--envfile .env examples/agents/orchestrator_example.yaml examples/agents/test_agent_example.yaml examples/agents/jira_bug_triage_workflow.yaml examples/gateways/webui_gateway_example.yaml examples/agents/advanced_workflow_test.yaml", + "justMyCode": false, + "env": { + "LOGGING_CONFIG_PATH": "${workspaceFolder}/preset/logging_config.yaml" + } + }, + { + "name": "SAM (new node types)", + "type": "debugpy", + "request": "launch", + "module": "solace_ai_connector.main", + "console": "integratedTerminal", + "args": "--envfile .env examples/agents/orchestrator_example.yaml examples/gateways/webui_gateway_example.yaml examples/agents/all_node_types_workflow.yaml", + "justMyCode": false, + "env": { + "LOGGING_CONFIG_PATH": "${workspaceFolder}/preset/logging_config.yaml" + } + }, { "name": "SAM (preset)", "type": "debugpy", diff --git a/examples/agents/all_node_types_workflow.yaml b/examples/agents/all_node_types_workflow.yaml new file mode 100644 index 000000000..4e3a7d094 --- /dev/null +++ b/examples/agents/all_node_types_workflow.yaml @@ -0,0 +1,1151 @@ +log: + stdout_log_level: INFO + log_file_level: DEBUG + log_file: all_node_types_workflow.log + +!include ../shared_config.yaml + +apps: + # ============================================================================ + # SUPPORTING AGENTS + # These agents are used by the workflow to demonstrate all node types + # ============================================================================ + + # ---------------------------------------------------------------------------- + # AGENT: Order Validator + # Validates order structure and returns validation result + # ---------------------------------------------------------------------------- + - name: order_validator_app + app_base_path: . + app_module: solace_agent_mesh.agent.sac.app + broker: + <<: *broker_connection + + app_config: + namespace: ${NAMESPACE} + agent_name: "OrderValidator" + model: *planning_model + + instruction: | + You validate orders. + 1. Read 'order_id' and 'items' from input + 2. Check if order_id is present and items array is not empty + 3. Create a JSON artifact with: {"valid": true/false, "order_id": , "item_count": , "message": } + 4. End with: «result:artifact= status=success» + + IMPORTANT: When invoked by a workflow, use the exact output filename specified in the workflow instructions. 
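+      # Illustrative example only; the filename and values below are hypothetical.
+      # For an input like {"order_id": "ORD-1001", "items": [{...}, {...}]}, the
+      # validator's JSON artifact might contain:
+      #   {"valid": true, "order_id": "ORD-1001", "item_count": 2,
+      #    "message": "Order structure is valid"}
+      # and the response would close with a line such as:
+      #   «result:artifact=order_validation.json status=success»
+      # where order_validation.json stands in for whatever output filename the
+      # invoking workflow specifies.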
+ + input_schema: + type: object + properties: + order_id: {type: string} + items: {type: array, items: {type: object}} + required: [order_id, items] + + output_schema: + type: object + properties: + valid: {type: boolean} + order_id: {type: string} + item_count: {type: integer} + message: {type: string} + required: [valid, order_id] + + tools: + - tool_type: builtin-group + group_name: "artifact_management" + + session_service: + <<: *default_session_service + artifact_service: + <<: *default_artifact_service + + agent_card: + description: "Validates order structure and data" + skills: [{id: "validate_order", name: "Validate Order", description: "Validates orders", tags: ["validation"]}] + agent_card_publishing: { interval_seconds: 10 } + agent_discovery: { enabled: false } + + # ---------------------------------------------------------------------------- + # AGENT: Customer Enricher + # Enriches order with customer data (runs in parallel with inventory check) + # ---------------------------------------------------------------------------- + - name: customer_enricher_app + app_base_path: . + app_module: solace_agent_mesh.agent.sac.app + broker: + <<: *broker_connection + + app_config: + namespace: ${NAMESPACE} + agent_name: "CustomerEnricher" + model: *planning_model + + instruction: | + You enrich orders with customer data. + 1. Read 'customer_id' from input + 2. Create a JSON artifact with mock customer data: {"customer_id": , "customer_name": "Customer ", "tier": "gold", "discount_pct": 10} + 3. End with: «result:artifact= status=success» + + IMPORTANT: When invoked by a workflow, use the exact output filename specified in the workflow instructions. + + input_schema: + type: object + properties: + customer_id: {type: string} + required: [customer_id] + + output_schema: + type: object + properties: + customer_id: {type: string} + customer_name: {type: string} + tier: {type: string} + discount_pct: {type: integer} + required: [customer_id, customer_name] + + tools: + - tool_type: builtin-group + group_name: "artifact_management" + + session_service: + <<: *default_session_service + artifact_service: + <<: *default_artifact_service + + agent_card: + description: "Enriches order with customer information" + skills: [{id: "enrich_customer", name: "Enrich Customer", description: "Gets customer data", tags: ["enrichment"]}] + agent_card_publishing: { interval_seconds: 10 } + agent_discovery: { enabled: false } + + # ---------------------------------------------------------------------------- + # AGENT: Discount Calculator + # Calculates discount based on customer tier (sequential after CustomerEnricher) + # ---------------------------------------------------------------------------- + - name: discount_calculator_app + app_base_path: . + app_module: solace_agent_mesh.agent.sac.app + broker: + <<: *broker_connection + + app_config: + namespace: ${NAMESPACE} + agent_name: "DiscountCalculator" + model: *planning_model + + instruction: | + You calculate discounts based on customer tier. + 1. Read 'customer_tier' from input + 2. Calculate discount: gold=15%, silver=10%, bronze=5%, default=0% + 3. Create a JSON artifact with: {"customer_tier": , "discount_percentage": , "discount_code": "TIER_"} + 4. End with: «result:artifact= status=success» + + IMPORTANT: When invoked by a workflow, use the exact output filename specified in the workflow instructions. 
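+      # Illustrative example only; values are hypothetical. Given
+      # {"customer_tier": "gold"}, the tier table above yields a 15% discount,
+      # so the artifact might contain:
+      #   {"customer_tier": "gold", "discount_percentage": 15,
+      #    "discount_code": "TIER_GOLD"}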
+ + input_schema: + type: object + properties: + customer_tier: {type: string} + required: [customer_tier] + + output_schema: + type: object + properties: + customer_tier: {type: string} + discount_percentage: {type: number} + discount_code: {type: string} + required: [customer_tier, discount_percentage] + + tools: + - tool_type: builtin-group + group_name: "artifact_management" + + session_service: + <<: *default_session_service + artifact_service: + <<: *default_artifact_service + + agent_card: + description: "Calculates discount based on customer tier" + skills: [{id: "calc_discount", name: "Calculate Discount", description: "Calculates tier discount", tags: ["pricing"]}] + agent_card_publishing: { interval_seconds: 10 } + agent_discovery: { enabled: false } + + # ---------------------------------------------------------------------------- + # AGENT: Inventory Checker + # Checks inventory availability (runs in parallel with customer enrichment) + # ---------------------------------------------------------------------------- + - name: inventory_checker_app + app_base_path: . + app_module: solace_agent_mesh.agent.sac.app + broker: + <<: *broker_connection + + app_config: + namespace: ${NAMESPACE} + agent_name: "InventoryChecker" + model: *planning_model + + instruction: | + You check inventory availability. + 1. Read 'items' from input (array of item objects) + 2. Create a JSON artifact with: {"all_available": true, "checked_items": , "unavailable_items": []} + 3. End with: «result:artifact= status=success» + + IMPORTANT: When invoked by a workflow, use the exact output filename specified in the workflow instructions. + + input_schema: + type: object + properties: + items: {type: array, items: {type: object}} + required: [items] + + output_schema: + type: object + properties: + all_available: {type: boolean} + checked_items: {type: integer} + unavailable_items: {type: array, items: {type: string}} + required: [all_available, checked_items] + + tools: + - tool_type: builtin-group + group_name: "artifact_management" + + session_service: + <<: *default_session_service + artifact_service: + <<: *default_artifact_service + + agent_card: + description: "Checks inventory availability for items" + skills: [{id: "check_inventory", name: "Check Inventory", description: "Checks stock", tags: ["inventory"]}] + agent_card_publishing: { interval_seconds: 10 } + agent_discovery: { enabled: false } + + # ---------------------------------------------------------------------------- + # AGENT: Item Processor + # Processes individual items (used by map node) + # ---------------------------------------------------------------------------- + - name: item_processor_app + app_base_path: . + app_module: solace_agent_mesh.agent.sac.app + broker: + <<: *broker_connection + + app_config: + namespace: ${NAMESPACE} + agent_name: "ItemProcessor" + model: *planning_model + + instruction: | + You process individual order items. + 1. Read 'item_id', 'quantity', and 'price' from input + 2. Calculate line_total = quantity * price + 3. Create a JSON artifact with: {"item_id": , "quantity": , "price": , "line_total": , "processed": true} + 4. End with: «result:artifact= status=success» + + IMPORTANT: When invoked by a workflow, use the exact output filename specified in the workflow instructions. 
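+      # Illustrative example only; values are hypothetical. For
+      # {"item_id": "SKU-7", "quantity": 3, "price": 19.99}, line_total is
+      # 3 * 19.99 = 59.97, so the artifact might contain:
+      #   {"item_id": "SKU-7", "quantity": 3, "price": 19.99,
+      #    "line_total": 59.97, "processed": true}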
+ + input_schema: + type: object + properties: + item_id: {type: string} + quantity: {type: integer} + price: {type: number} + required: [item_id, quantity, price] + + output_schema: + type: object + properties: + item_id: {type: string} + quantity: {type: integer} + price: {type: number} + line_total: {type: number} + processed: {type: boolean} + required: [item_id, line_total, processed] + + tools: + - tool_type: builtin-group + group_name: "artifact_management" + + session_service: + <<: *default_session_service + artifact_service: + <<: *default_artifact_service + + agent_card: + description: "Processes individual order line items" + skills: [{id: "process_item", name: "Process Item", description: "Processes items", tags: ["processing"]}] + agent_card_publishing: { interval_seconds: 10 } + agent_discovery: { enabled: false } + + # ---------------------------------------------------------------------------- + # AGENT: Price Calculator + # Calculates total price with discounts + # ---------------------------------------------------------------------------- + - name: price_calculator_app + app_base_path: . + app_module: solace_agent_mesh.agent.sac.app + broker: + <<: *broker_connection + + app_config: + namespace: ${NAMESPACE} + agent_name: "PriceCalculator" + model: *planning_model + + instruction: | + You calculate order totals. + 1. Read 'subtotal' and 'discount_pct' from input + 2. Calculate discount_amount = subtotal * (discount_pct / 100) + 3. Calculate final_total = subtotal - discount_amount + 4. Create a JSON artifact with: {"subtotal": , "discount_pct": , "discount_amount": , "final_total": } + 5. End with: «result:artifact= status=success» + + IMPORTANT: When invoked by a workflow, use the exact output filename specified in the workflow instructions. + + input_schema: + type: object + properties: + subtotal: {type: number} + discount_pct: {type: integer} + required: [subtotal, discount_pct] + + output_schema: + type: object + properties: + subtotal: {type: number} + discount_pct: {type: integer} + discount_amount: {type: number} + final_total: {type: number} + required: [subtotal, final_total] + + tools: + - tool_type: builtin-group + group_name: "artifact_management" + + session_service: + <<: *default_session_service + artifact_service: + <<: *default_artifact_service + + agent_card: + description: "Calculates order totals with discounts" + skills: [{id: "calculate_price", name: "Calculate Price", description: "Calculates totals", tags: ["pricing"]}] + agent_card_publishing: { interval_seconds: 10 } + agent_discovery: { enabled: false } + + # ---------------------------------------------------------------------------- + # AGENT: Manual Approver + # Handles orders requiring manual approval + # ---------------------------------------------------------------------------- + - name: manual_approver_app + app_base_path: . + app_module: solace_agent_mesh.agent.sac.app + broker: + <<: *broker_connection + + app_config: + namespace: ${NAMESPACE} + agent_name: "ManualApprover" + model: *planning_model + + instruction: | + You handle manual order approval. + 1. Read 'order_id' and 'total' from input + 2. Create a JSON artifact with: {"approved": true, "approval_type": "manual", "approver": "system", "order_id": } + 3. End with: «result:artifact= status=success» + + IMPORTANT: When invoked by a workflow, use the exact output filename specified in the workflow instructions. 
+ + input_schema: + type: object + properties: + order_id: {type: string} + total: {type: number} + required: [order_id, total] + + output_schema: + type: object + properties: + approved: {type: boolean} + approval_type: {type: string} + approver: {type: string} + order_id: {type: string} + required: [approved, approval_type] + + tools: + - tool_type: builtin-group + group_name: "artifact_management" + + session_service: + <<: *default_session_service + artifact_service: + <<: *default_artifact_service + + agent_card: + description: "Handles manual order approval" + skills: [{id: "manual_approve", name: "Manual Approve", description: "Manual approval", tags: ["approval"]}] + agent_card_publishing: { interval_seconds: 10 } + agent_discovery: { enabled: false } + + # ---------------------------------------------------------------------------- + # AGENT: Auto Approver + # Handles orders that can be auto-approved + # ---------------------------------------------------------------------------- + - name: auto_approver_app + app_base_path: . + app_module: solace_agent_mesh.agent.sac.app + broker: + <<: *broker_connection + + app_config: + namespace: ${NAMESPACE} + agent_name: "AutoApprover" + model: *planning_model + + instruction: | + You handle automatic order approval. + 1. Read 'order_id' from input + 2. Create a JSON artifact with: {"approved": true, "approval_type": "automatic", "approver": "auto-system", "order_id": } + 3. End with: «result:artifact= status=success» + + IMPORTANT: When invoked by a workflow, use the exact output filename specified in the workflow instructions. + + input_schema: + type: object + properties: + order_id: {type: string} + required: [order_id] + + output_schema: + type: object + properties: + approved: {type: boolean} + approval_type: {type: string} + approver: {type: string} + order_id: {type: string} + required: [approved, approval_type] + + tools: + - tool_type: builtin-group + group_name: "artifact_management" + + session_service: + <<: *default_session_service + artifact_service: + <<: *default_artifact_service + + agent_card: + description: "Handles automatic order approval" + skills: [{id: "auto_approve", name: "Auto Approve", description: "Auto approval", tags: ["approval"]}] + agent_card_publishing: { interval_seconds: 10 } + agent_discovery: { enabled: false } + + # ---------------------------------------------------------------------------- + # AGENT: Priority Handlers (Express, Standard, Economy) + # Different shipping handlers based on priority + # ---------------------------------------------------------------------------- + - name: express_shipper_app + app_base_path: . + app_module: solace_agent_mesh.agent.sac.app + broker: + <<: *broker_connection + + app_config: + namespace: ${NAMESPACE} + agent_name: "ExpressShipper" + model: *planning_model + + instruction: | + You handle express shipping. + 1. Read 'order_id' from input + 2. Create a JSON artifact with: {"shipping_method": "express", "estimated_days": 1, "tracking_id": "EXP-", "order_id": } + 3. End with: «result:artifact= status=success» + + IMPORTANT: When invoked by a workflow, use the exact output filename specified in the workflow instructions. 
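+      # Illustrative example only; values are hypothetical. For order ORD-1001
+      # the artifact might contain:
+      #   {"shipping_method": "express", "estimated_days": 1,
+      #    "tracking_id": "EXP-ORD-1001", "order_id": "ORD-1001"}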
+ + input_schema: + type: object + properties: + order_id: {type: string} + required: [order_id] + + output_schema: + type: object + properties: + shipping_method: {type: string} + estimated_days: {type: integer} + tracking_id: {type: string} + order_id: {type: string} + required: [shipping_method, estimated_days] + + tools: + - tool_type: builtin-group + group_name: "artifact_management" + + session_service: + <<: *default_session_service + artifact_service: + <<: *default_artifact_service + + agent_card: + description: "Handles express shipping" + skills: [{id: "express_ship", name: "Express Ship", description: "Express shipping", tags: ["shipping"]}] + agent_card_publishing: { interval_seconds: 10 } + agent_discovery: { enabled: false } + + - name: standard_shipper_app + app_base_path: . + app_module: solace_agent_mesh.agent.sac.app + broker: + <<: *broker_connection + + app_config: + namespace: ${NAMESPACE} + agent_name: "StandardShipper" + model: *planning_model + + instruction: | + You handle standard shipping. + 1. Read 'order_id' from input + 2. Create a JSON artifact with: {"shipping_method": "standard", "estimated_days": 5, "tracking_id": "STD-", "order_id": } + 3. End with: «result:artifact= status=success» + + IMPORTANT: When invoked by a workflow, use the exact output filename specified in the workflow instructions. + + input_schema: + type: object + properties: + order_id: {type: string} + required: [order_id] + + output_schema: + type: object + properties: + shipping_method: {type: string} + estimated_days: {type: integer} + tracking_id: {type: string} + order_id: {type: string} + required: [shipping_method, estimated_days] + + tools: + - tool_type: builtin-group + group_name: "artifact_management" + + session_service: + <<: *default_session_service + artifact_service: + <<: *default_artifact_service + + agent_card: + description: "Handles standard shipping" + skills: [{id: "standard_ship", name: "Standard Ship", description: "Standard shipping", tags: ["shipping"]}] + agent_card_publishing: { interval_seconds: 10 } + agent_discovery: { enabled: false } + + - name: economy_shipper_app + app_base_path: . + app_module: solace_agent_mesh.agent.sac.app + broker: + <<: *broker_connection + + app_config: + namespace: ${NAMESPACE} + agent_name: "EconomyShipper" + model: *planning_model + + instruction: | + You handle economy shipping. + 1. Read 'order_id' from input + 2. Create a JSON artifact with: {"shipping_method": "economy", "estimated_days": 10, "tracking_id": "ECO-", "order_id": } + 3. End with: «result:artifact= status=success» + + IMPORTANT: When invoked by a workflow, use the exact output filename specified in the workflow instructions. 
+ + input_schema: + type: object + properties: + order_id: {type: string} + required: [order_id] + + output_schema: + type: object + properties: + shipping_method: {type: string} + estimated_days: {type: integer} + tracking_id: {type: string} + order_id: {type: string} + required: [shipping_method, estimated_days] + + tools: + - tool_type: builtin-group + group_name: "artifact_management" + + session_service: + <<: *default_session_service + artifact_service: + <<: *default_artifact_service + + agent_card: + description: "Handles economy shipping" + skills: [{id: "economy_ship", name: "Economy Ship", description: "Economy shipping", tags: ["shipping"]}] + agent_card_publishing: { interval_seconds: 10 } + agent_discovery: { enabled: false } + + # ---------------------------------------------------------------------------- + # AGENT: Notification Sender + # Sends order notifications + # ---------------------------------------------------------------------------- + - name: notification_sender_app + app_base_path: . + app_module: solace_agent_mesh.agent.sac.app + broker: + <<: *broker_connection + + app_config: + namespace: ${NAMESPACE} + agent_name: "NotificationSender" + model: *planning_model + + instruction: | + You send order notifications. + 1. Read 'order_id', 'customer_name', and 'message' from input + 2. Create a JSON artifact with: {"notification_sent": true, "channel": "email", "recipient": , "order_id": , "message": } + 3. End with: «result:artifact= status=success» + + IMPORTANT: When invoked by a workflow, use the exact output filename specified in the workflow instructions. + + input_schema: + type: object + properties: + order_id: {type: string} + customer_name: {type: string} + message: {type: string} + required: [order_id, message] + + output_schema: + type: object + properties: + notification_sent: {type: boolean} + channel: {type: string} + recipient: {type: string} + order_id: {type: string} + required: [notification_sent, channel] + + tools: + - tool_type: builtin-group + group_name: "artifact_management" + + session_service: + <<: *default_session_service + artifact_service: + <<: *default_artifact_service + + agent_card: + description: "Sends customer notifications" + skills: [{id: "send_notification", name: "Send Notification", description: "Sends notifications", tags: ["notification"]}] + agent_card_publishing: { interval_seconds: 10 } + agent_discovery: { enabled: false } + + # ---------------------------------------------------------------------------- + # AGENT: Order Finalizer + # Creates final order summary + # ---------------------------------------------------------------------------- + - name: order_finalizer_app + app_base_path: . + app_module: solace_agent_mesh.agent.sac.app + broker: + <<: *broker_connection + + app_config: + namespace: ${NAMESPACE} + agent_name: "OrderFinalizer" + model: *planning_model + + instruction: | + You create final order summaries. + 1. Read all input fields: order_id, customer_name, final_total, shipping_method, tracking_id, approval_type + 2. Create a JSON artifact with a complete order summary combining all this information + 3. Include: {"order_id": , "status": "completed", "customer_name": , "final_total": , "shipping": {"method": , "tracking_id": }, "approval": {"type": }} + 4. End with: «result:artifact= status=success» + + IMPORTANT: When invoked by a workflow, use the exact output filename specified in the workflow instructions. 
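+      # Illustrative example only; values are hypothetical. A finished summary
+      # artifact combining the upstream outputs might contain:
+      #   {"order_id": "ORD-1001", "status": "completed",
+      #    "customer_name": "Customer CUST-42", "final_total": 85.0,
+      #    "shipping": {"method": "express", "tracking_id": "EXP-ORD-1001"},
+      #    "approval": {"type": "automatic"}}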
+ + input_schema: + type: object + properties: + order_id: {type: string} + customer_name: {type: string} + final_total: {type: number} + shipping_method: {type: string} + tracking_id: {type: string} + approval_type: {type: string} + required: [order_id] + + output_schema: + type: object + properties: + order_id: {type: string} + status: {type: string} + customer_name: {type: string} + final_total: {type: number} + shipping: + type: object + properties: + method: {type: string} + tracking_id: {type: string} + approval: + type: object + properties: + type: {type: string} + required: [order_id, status] + + tools: + - tool_type: builtin-group + group_name: "artifact_management" + + session_service: + <<: *default_session_service + artifact_service: + <<: *default_artifact_service + + agent_card: + description: "Creates final order summaries" + skills: [{id: "finalize_order", name: "Finalize Order", description: "Finalizes orders", tags: ["finalization"]}] + agent_card_publishing: { interval_seconds: 10 } + agent_discovery: { enabled: false } + + # ---------------------------------------------------------------------------- + # AGENT: Status Checker (for loop demonstration) + # Checks if a condition is met (simulates polling) + # ---------------------------------------------------------------------------- + - name: status_checker_app + app_base_path: . + app_module: solace_agent_mesh.agent.sac.app + broker: + <<: *broker_connection + + app_config: + namespace: ${NAMESPACE} + agent_name: "StatusChecker" + model: *planning_model + + instruction: | + You check status for polling scenarios. + 1. Read 'order_id' and 'iteration' from input + 2. If iteration >= 2, set ready = true, otherwise ready = false + 3. Create a JSON artifact with: {"order_id": , "iteration": , "ready": , "status_message": "Check "} + 4. End with: «result:artifact= status=success» + + IMPORTANT: When invoked by a workflow, use the exact output filename specified in the workflow instructions. + + input_schema: + type: object + properties: + order_id: {type: string} + iteration: {type: integer} + required: [order_id, iteration] + + output_schema: + type: object + properties: + order_id: {type: string} + iteration: {type: integer} + ready: {type: boolean} + status_message: {type: string} + required: [order_id, ready] + + tools: + - tool_type: builtin-group + group_name: "artifact_management" + + session_service: + <<: *default_session_service + artifact_service: + <<: *default_artifact_service + + agent_card: + description: "Checks status for polling" + skills: [{id: "check_status", name: "Check Status", description: "Checks status", tags: ["status"]}] + agent_card_publishing: { interval_seconds: 10 } + agent_discovery: { enabled: false } + + + # ============================================================================ + # WORKFLOW: Complete Order Processing Pipeline + # Demonstrates ALL node types: agent, conditional, switch, map, loop + # Parallelism is achieved through dependency-based scheduling (implicit fork) + # ============================================================================ + - name: complete_order_workflow + app_base_path: . + app_module: solace_agent_mesh.workflow.app + broker: + <<: *broker_connection + + app_config: + namespace: ${NAMESPACE} + agent_name: "CompleteOrderWorkflow" + display_name: "Complete Order Processing Workflow" + + workflow: + description: | + A comprehensive order processing workflow that demonstrates ALL workflow node types: + + 1. AGENT NODE: Validates the incoming order + 2. 
PARALLEL AGENTS: Runs customer enrichment and inventory check in parallel + (Achieved through same dependency - both depend on validate_order) + 3. MAP NODE: Processes each item in the order individually + 4. AGENT NODE: Calculates final price with discounts + 5. CONDITIONAL NODE: Routes to manual or auto approval based on total + 6. SWITCH NODE: Selects shipping method based on priority + 7. LOOP NODE: Polls for readiness status + 8. AGENT NODE: Sends notification + 9. AGENT NODE: Finalizes the order + + Input: Order with items, customer_id, and shipping_priority + Output: Complete order summary with all processing details + + input_schema: + type: object + properties: + order_id: + type: string + description: "Unique order identifier" + customer_id: + type: string + description: "Customer identifier" + items: + type: array + description: "List of order items" + items: + type: object + properties: + item_id: + type: string + description: "Item identifier" + quantity: + type: integer + description: "Quantity ordered" + price: + type: number + description: "Price per unit" + required: [item_id, quantity, price] + shipping_priority: + type: string + enum: ["express", "standard", "economy"] + description: "Shipping priority level" + required: [order_id, customer_id, items, shipping_priority] + + output_schema: + type: object + properties: + order_id: {type: string} + status: {type: string} + customer_name: {type: string} + final_total: {type: number} + shipping_method: {type: string} + tracking_id: {type: string} + approval_type: {type: string} + required: [order_id, status] + + nodes: + # ==================================================================== + # STEP 1: AGENT NODE - Validate the order + # ==================================================================== + - id: validate_order + type: agent + agent_name: "OrderValidator" + input: + order_id: "{{workflow.input.order_id}}" + items: "{{workflow.input.items}}" + + # ==================================================================== + # STEP 2: PARALLEL AGENTS - Customer enrichment and inventory check + # Demonstrates parallel execution through implicit fork + # Both agents have the same dependency (validate_order), so they + # run in parallel automatically + # ==================================================================== + - id: customer_enrichment + type: agent + agent_name: "CustomerEnricher" + depends_on: [validate_order] + input: + customer_id: "{{workflow.input.customer_id}}" + + # Sequential node in the customer branch - calculates discount based on customer tier + - id: customer_discount_calc + type: agent + agent_name: "DiscountCalculator" + depends_on: [customer_enrichment] + input: + customer_tier: "{{customer_enrichment.output.tier}}" + + - id: inventory_check + type: agent + agent_name: "InventoryChecker" + depends_on: [validate_order] + input: + items: "{{workflow.input.items}}" + + # ==================================================================== + # STEP 3: MAP NODE - Process each item individually + # Demonstrates iteration over array items + # Depends on both parallel branches (implicit join - waits for all) + # ==================================================================== + - id: process_items + type: map + depends_on: [customer_discount_calc, inventory_check] + items: "{{workflow.input.items}}" + node: process_single_item + + # Inner node for map - processes a single item + - id: process_single_item + type: agent + agent_name: "ItemProcessor" + input: + item_id: "{{_map_item.item_id}}" + 
quantity: "{{_map_item.quantity}}" + price: "{{_map_item.price}}" + + # ==================================================================== + # STEP 4: AGENT NODE - Calculate total price + # ==================================================================== + - id: calculate_price + type: agent + agent_name: "PriceCalculator" + depends_on: [process_items] + input: + subtotal: 100.00 + discount_pct: "{{customer_enrichment.output.discount_pct}}" + + # ==================================================================== + # STEP 5: CONDITIONAL NODE - Approval routing + # Demonstrates binary branching based on order total + # Orders over $500 require manual approval + # ==================================================================== + - id: approval_decision + type: conditional + depends_on: [calculate_price] + condition: "{{calculate_price.output.final_total}} > 500" + true_branch: manual_approval + false_branch: auto_approval + + # Manual approval branch + - id: manual_approval + type: agent + agent_name: "ManualApprover" + depends_on: [approval_decision] + input: + order_id: "{{workflow.input.order_id}}" + total: "{{calculate_price.output.final_total}}" + + # Auto approval branch + - id: auto_approval + type: agent + agent_name: "AutoApprover" + depends_on: [approval_decision] + input: + order_id: "{{workflow.input.order_id}}" + + # ==================================================================== + # STEP 6: SWITCH NODE - Select shipping method + # Demonstrates multi-way branching based on priority + # Depends on both approval branches (implicit join - waits for the + # one that executes, since the other is skipped by conditional) + # ==================================================================== + - id: shipping_selection + type: switch + depends_on: [manual_approval, auto_approval] + cases: + - condition: "'{{workflow.input.shipping_priority}}' == 'express'" + node: ship_express + - condition: "'{{workflow.input.shipping_priority}}' == 'standard'" + node: ship_standard + default: ship_economy + + # Express shipping handler + - id: ship_express + type: agent + agent_name: "ExpressShipper" + depends_on: [shipping_selection] + input: + order_id: "{{workflow.input.order_id}}" + + # Standard shipping handler + - id: ship_standard + type: agent + agent_name: "StandardShipper" + depends_on: [shipping_selection] + input: + order_id: "{{workflow.input.order_id}}" + + # Economy shipping handler (default) + - id: ship_economy + type: agent + agent_name: "EconomyShipper" + depends_on: [shipping_selection] + input: + order_id: "{{workflow.input.order_id}}" + + # ==================================================================== + # STEP 7: LOOP NODE - Poll for readiness + # Demonstrates iteration until condition is met + # Depends on all shipping branches (implicit join - waits for the + # one that executes, since others are skipped by switch) + # ==================================================================== + - id: poll_ready + type: loop + depends_on: [ship_express, ship_standard, ship_economy] + node: check_status + condition: "{{check_status.output.ready}} == false" + max_iterations: 5 + + # Inner node for loop - checks status + - id: check_status + type: agent + agent_name: "StatusChecker" + input: + order_id: "{{workflow.input.order_id}}" + iteration: "{{_loop_iteration}}" + + # ==================================================================== + # STEP 8: AGENT NODE - Send notification + # ==================================================================== + - id: 
send_notification + type: agent + agent_name: "NotificationSender" + depends_on: [poll_ready] + input: + order_id: "{{workflow.input.order_id}}" + customer_name: "{{customer_enrichment.output.customer_name}}" + message: "Your order has been processed and shipped!" + + # ==================================================================== + # STEP 9: AGENT NODE - Finalize order + # ==================================================================== + - id: finalize_order + type: agent + agent_name: "OrderFinalizer" + depends_on: [send_notification] + input: + order_id: "{{workflow.input.order_id}}" + customer_name: "{{customer_enrichment.output.customer_name}}" + final_total: "{{calculate_price.output.final_total}}" + shipping_method: + coalesce: + - "{{ship_express.output.shipping_method}}" + - "{{ship_standard.output.shipping_method}}" + - "{{ship_economy.output.shipping_method}}" + tracking_id: + coalesce: + - "{{ship_express.output.tracking_id}}" + - "{{ship_standard.output.tracking_id}}" + - "{{ship_economy.output.tracking_id}}" + approval_type: + coalesce: + - "{{manual_approval.output.approval_type}}" + - "{{auto_approval.output.approval_type}}" + + # ==================================================================== + # OUTPUT MAPPING + # Maps node outputs to workflow output schema + # ==================================================================== + output_mapping: + order_id: "{{finalize_order.output.order_id}}" + status: "{{finalize_order.output.status}}" + customer_name: "{{finalize_order.output.customer_name}}" + final_total: "{{finalize_order.output.final_total}}" + shipping_method: "{{finalize_order.output.shipping.method}}" + tracking_id: "{{finalize_order.output.shipping.tracking_id}}" + approval_type: "{{finalize_order.output.approval.type}}" + + # ==================================================================== + # WORKFLOW METADATA + # ==================================================================== + skills: + - id: "process_order" + name: "Process Order" + description: "Processes a complete order through validation, enrichment, pricing, approval, and shipping" + tags: ["order", "processing", "workflow"] + + session_service: + <<: *default_session_service + artifact_service: + <<: *default_artifact_service + + agent_card_publishing: { interval_seconds: 10 } + agent_discovery: { enabled: false } + + # ============================================================================ + # WORKFLOW: Simple Loop Demo + # A simpler workflow that just demonstrates the loop node + # ============================================================================ + - name: simple_loop_workflow + app_base_path: . + app_module: solace_agent_mesh.workflow.app + broker: + <<: *broker_connection + + app_config: + namespace: ${NAMESPACE} + agent_name: "SimpleLoopWorkflow" + display_name: "Simple Loop Workflow" + + workflow: + description: | + A simple workflow that demonstrates the loop node by polling + a status checker until it reports ready. 
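+      # Illustrative example only; values are hypothetical. Invoking this
+      # workflow with {"task_id": "TASK-7"} re-runs check_task_status until its
+      # 'ready' flag becomes true or max_iterations (5) is reached, and the
+      # mapped output might look like:
+      #   {"task_id": "TASK-7", "iterations_completed": 3,
+      #    "final_status": "Check 3"}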
+ + input_schema: + type: object + properties: + task_id: + type: string + description: "Task identifier to check status for" + required: [task_id] + + output_schema: + type: object + properties: + task_id: {type: string} + iterations_completed: {type: integer} + final_status: {type: string} + required: [task_id, iterations_completed] + + nodes: + # Loop node - polls until ready or max iterations + - id: poll_status + type: loop + node: check_task_status + condition: "{{check_task_status.output.ready}} == false" + max_iterations: 5 + + # Inner node for loop - checks status + - id: check_task_status + type: agent + agent_name: "StatusChecker" + input: + order_id: "{{workflow.input.task_id}}" + iteration: "{{_loop_iteration}}" + + output_mapping: + task_id: "{{workflow.input.task_id}}" + iterations_completed: "{{poll_status.output.iterations_completed}}" + final_status: "{{check_task_status.output.status_message}}" + + skills: + - id: "poll_status" + name: "Poll Status" + description: "Polls status until ready" + tags: ["loop", "polling"] + + session_service: + <<: *default_session_service + artifact_service: + <<: *default_artifact_service + + agent_card_publishing: { interval_seconds: 10 } + agent_discovery: { enabled: false } diff --git a/examples/agents/jira_bug_triage_workflow.yaml b/examples/agents/jira_bug_triage_workflow.yaml new file mode 100644 index 000000000..7fc940ad4 --- /dev/null +++ b/examples/agents/jira_bug_triage_workflow.yaml @@ -0,0 +1,568 @@ +# Jira Bug Triage Workflow - Demo Configuration +# +# This workflow demonstrates a realistic bug triage process: +# 1. Detect duplicate issues using similarity analysis +# 2. Conditionally branch based on duplicate detection +# 3. Perform code analysis if not a duplicate +# 4. Generate appropriate Jira comments +# +# DEMO MODE: All agents operate in demonstration mode and generate realistic +# but fake data. No actual Jira or code repository access is required. + +log: + stdout_log_level: INFO + log_file_level: DEBUG + log_file: jira_workflow.log + +# Shared configuration +shared_config: + - broker_connection: &broker_connection + dev_mode: ${SOLACE_DEV_MODE, false} + broker_url: ${SOLACE_BROKER_URL, ws://localhost:8008} + broker_username: ${SOLACE_BROKER_USERNAME, default} + broker_password: ${SOLACE_BROKER_PASSWORD, default} + broker_vpn: ${SOLACE_BROKER_VPN, default} + temporary_queue: ${USE_TEMPORARY_QUEUES, true} + + - models: + planning: &planning_model + model: ${LLM_SERVICE_PLANNING_MODEL_NAME} + api_base: ${LLM_SERVICE_ENDPOINT} + api_key: ${LLM_SERVICE_API_KEY} + parallel_tool_calls: true + max_tokens: ${MAX_TOKENS, 16000} + temperature: 0.7 + cache_strategy: "5m" + + - services: + session_service: &default_session_service + type: "sql" + default_behavior: "PERSISTENT" + database_url: ${SESSION_DATABASE_URL, sqlite:///session.db} + + artifact_service: &default_artifact_service + type: "filesystem" + base_path: "/tmp/samv2" + artifact_scope: namespace + +apps: + # ============================================================================ + # AGENT 1: DuplicateDetectorAgent + # Analyzes Jira issues to detect duplicates and find similar issues + # ============================================================================ + - name: duplicate_detector_agent + app_base_path: . 
+ app_module: solace_agent_mesh.agent.sac.app + broker: + <<: *broker_connection + + app_config: + namespace: ${NAMESPACE} + agent_name: "DuplicateDetector" + model: *planning_model + + instruction: | + You are a Jira duplicate detection agent operating in DEMONSTRATION MODE. + + CRITICAL: You do NOT have access to real Jira systems. You must generate + realistic but fake data based on the Jira ID provided. + + Your task: + 1. Generate a realistic bug report for the given Jira ID + 2. Create exactly 2 plausible similar issues with IDs from the same project (keep it minimal for testing) + 3. Analyze for potential duplication (25% chance of duplicate) + 4. Save all data as JSON artifacts + 5. Return structured output matching the exact schema + + When generating fake data: + - Use the project prefix from the input (e.g., DATAGO-12345 → DATAGO project) + - Create realistic bug descriptions related to common software issues: + * NullPointerExceptions + * Memory leaks + * API timeout issues + * Data validation errors + * Race conditions + - Generate similar issue IDs with different numbers (e.g., DATAGO-11234, DATAGO-10567) + - Make similarities plausible (same component, similar error messages) + + CRITICAL OUTPUT REQUIREMENTS: + 1. Create artifact "issue_.json" with JSON structure containing: + - jira_id: string (e.g., "PROJECT-12345") + - title: string (brief bug title) + - description: string (detailed description) + - priority: string (High|Medium|Low) + - reporter: string (username) + - created_date: string (ISO 8601 timestamp) + - labels: array of strings + - component: string (component name) + + 2. Create artifact "similar_.json" for each similar issue + + 3. Decide on duplication (check the force_duplicate_result input parameter): + - If force_duplicate_result = "duplicate": ALWAYS set is_duplicate = true, confidence 70-95 + - If force_duplicate_result = "not_duplicate": ALWAYS set is_duplicate = false, confidence 10-40 + - If force_duplicate_result = "random" OR not provided: + * 75% of time: is_duplicate = false (to showcase full workflow), confidence 10-40 + * 25% of time: is_duplicate = true, confidence 70-95 + + 4. You MUST end your response with: + «result:artifact=duplicate_analysis_.json:v0 status=success» + + The result artifact must contain your structured output matching the schema exactly. 
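+      # Illustrative example only; names and values are hypothetical. For
+      # DATAGO-12345, the structured output saved in
+      # duplicate_analysis_DATAGO-12345.json might be:
+      #   {"issue_details_artifact": "issue_DATAGO-12345.json",
+      #    "similar_issues": ["similar_DATAGO-11234.json", "similar_DATAGO-10567.json"],
+      #    "is_duplicate": false, "duplicate_confidence": 25,
+      #    "recommendation": "No strong match found; proceed with full triage."}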
+ + input_schema: + type: object + properties: + jira_id: + type: string + pattern: "^[A-Z]+-\\d+$" + description: "Jira issue ID (e.g., DATAGO-12345)" + force_duplicate_result: + type: string + enum: ["duplicate", "not_duplicate", "random"] + description: "Control duplicate detection: 'duplicate', 'not_duplicate', or 'random' (default)" + required: [jira_id] + + output_schema: + type: object + properties: + issue_details_artifact: + type: string + description: "Artifact name containing the main issue details" + similar_issues: + type: array + items: + type: string + description: "Array of artifact names for similar issues" + is_duplicate: + type: boolean + description: "Whether this issue is likely a duplicate" + duplicate_confidence: + type: number + minimum: 0 + maximum: 100 + description: "Confidence score for duplicate determination" + recommendation: + type: string + description: "Explanation of the duplicate determination" + required: [issue_details_artifact, similar_issues, is_duplicate, duplicate_confidence, recommendation] + + tools: + - tool_type: builtin-group + group_name: "artifact_management" + + session_service: + <<: *default_session_service + + artifact_service: + <<: *default_artifact_service + + agent_card: + description: "Detects duplicate Jira issues using similarity analysis" + skills: [{id: "detect_dup", name: "Detect Duplicates", description: "Analyzes issues for duplicates", tags: ["jira", "analysis"]}] + agent_card_publishing: { interval_seconds: 10 } + agent_discovery: { enabled: false } + + # ============================================================================ + # AGENT 2: CodeAnalyzerAgent + # Performs code analysis to identify root cause and propose fixes + # ============================================================================ + - name: code_analyzer_agent + app_base_path: . + app_module: solace_agent_mesh.agent.sac.app + broker: + <<: *broker_connection + + app_config: + namespace: ${NAMESPACE} + agent_name: "CodeAnalyzer" + model: *planning_model + + instruction: | + You are a code analysis agent operating in DEMONSTRATION MODE. + + CRITICAL: You do NOT have access to real code repositories or Claude Code. + You must generate realistic but fake code analysis based on the issue description. + + Your task: + 1. Read the issue details from the provided artifact + 2. Review similar issues if helpful for context + 3. Generate a plausible root cause analysis + 4. Identify likely affected files (realistic paths) + 5. Create actual code snippets showing: + - Current (buggy) code + - Proposed fix with proper syntax + 6. Estimate fix complexity + + When generating fake analysis: + - Use realistic file paths for the project type (e.g., src/main/java/, lib/python/, etc.) + - Create actual code snippets in appropriate languages (Python, Java, JavaScript, etc.) + - Make code snippets realistic (5-15 lines each) + - Include proper syntax, imports, and context + - Show clear before/after differences + + Example code change structure (JSON format): + - file: string (e.g., "src/pipeline/processor.py") + - line_numbers: array of integers (e.g., [45, 50]) + - current_code: string (the buggy code) + - suggested_code: string (the fixed code) + - explanation: string (description of the change) + + CRITICAL OUTPUT REQUIREMENTS: + 1. Create artifact "code_analysis_.json" with complete analysis + + 2. 
The analysis artifact should contain: + - root_cause: Detailed explanation + - affected_files: Array of file paths + - proposed_fix: Object with approach, file_changes array, estimated_complexity + - related_issues_insights: What you learned from similar issues + + 3. You MUST end your response with: + «result:artifact=code_analysis_.json:v0 status=success» + + The result artifact must contain your structured output matching the schema exactly. + + input_schema: + type: object + properties: + jira_id: + type: string + description: "Jira issue ID for reference" + issue_details_artifact: + type: string + description: "Artifact name containing the main issue details" + similar_issues_artifacts: + type: array + items: + type: string + description: "Array of artifact names for similar issues" + required: [jira_id, issue_details_artifact, similar_issues_artifacts] + + output_schema: + type: object + properties: + analysis_artifact: + type: string + description: "Artifact name containing the complete code analysis" + summary: + type: string + description: "Brief summary of findings" + complexity: + type: string + enum: [low, medium, high] + description: "Estimated fix complexity" + required: [analysis_artifact, summary, complexity] + + tools: + - tool_type: builtin-group + group_name: "artifact_management" + + session_service: + <<: *default_session_service + + artifact_service: + <<: *default_artifact_service + + agent_card: + description: "Analyzes code to identify root causes and propose fixes" + skills: [{id: "analyze_code", name: "Analyze Code", description: "Performs code analysis", tags: ["code", "analysis"]}] + agent_card_publishing: { interval_seconds: 10 } + agent_discovery: { enabled: false } + + # ============================================================================ + # AGENT 3: JiraCommenterAgent + # Generates formatted Jira comments based on workflow results + # ============================================================================ + - name: jira_commenter_agent + app_base_path: . + app_module: solace_agent_mesh.agent.sac.app + broker: + <<: *broker_connection + + app_config: + namespace: ${NAMESPACE} + agent_name: "JiraCommenter" + model: *planning_model + + instruction: | + You are a Jira comment generator agent operating in DEMONSTRATION MODE. + + Your task is to create well-formatted Jira comments in two modes: + + MODE 1 - DUPLICATE DETECTION: + When mode = "duplicate": + - Read the issue details artifact + - Create a professional comment explaining the duplication + - Reference the similar issues found + - Include the confidence score + - Suggest closing as duplicate + + Example duplicate comment format: + ``` + h2. Duplicate Issue Detected + + This issue appears to be a duplicate of existing issues in the system. + + *Confidence Score:* 85% + + *Similar Issues:* + * [DATAGO-11234] - NullPointerException in data processor + * [DATAGO-10567] - Data processing pipeline throws NPE + + *Recommendation:* + Review the linked issues above. If this is indeed a duplicate, consider + closing this issue and adding any unique information to the existing issue. 
+ + _Analysis performed by automated workflow_ + ``` + + MODE 2 - CODE ANALYSIS: + When mode = "analysis": + - Read the issue details artifact + - Read the code analysis artifact + - Create a detailed technical comment with: + * Root cause explanation + * Affected files + * Code snippets in Jira code blocks (use the code markup syntax) + * Proposed fix with before/after code + * Complexity estimate + + Example analysis comment format (using Jira markup): + ``` + h2. Automated Code Analysis Results + + h3. Root Cause + The issue is caused by missing null validation in the data processing pipeline... + + h3. Affected Files + * src/pipeline/processor.py - Lines 45-50 + * src/utils/validator.py - Lines 120-125 + + h3. Proposed Fix + + *Complexity:* Low + + *File: processor.py* + + # Current code (buggy) + def process(data): + result = data.transform() + return result + + + + # Proposed fix + def process(data): + if data is None: + logger.warning('Null data received') + return None + result = data.transform() + return result + + + h3. Recommendation + Apply the suggested null check to prevent NullPointerExceptions... + + _Analysis performed by automated workflow_ + ``` + + CRITICAL OUTPUT REQUIREMENTS: + 1. Generate well-formatted Jira markup (use h2, h3, Jira code blocks, *, etc.) + 2. Include all relevant information from the artifacts + 3. Keep comments professional and actionable + 4. You MUST end your response with: + «result:artifact=jira_comment_.json:v0 status=success» + + The result artifact must be a JSON object with a "comment_text" field containing your formatted Jira comment. + + input_schema: + type: object + properties: + jira_id: + type: string + description: "Jira issue ID" + mode: + type: string + enum: [duplicate, analysis] + description: "Comment generation mode" + issue_details_artifact: + type: string + description: "Artifact name containing the main issue details" + recommendation: + type: string + description: "Recommendation text (for duplicate mode)" + duplicate_confidence: + type: number + description: "Confidence score (for duplicate mode)" + analysis_artifact: + type: string + description: "Artifact name containing code analysis (for analysis mode)" + required: [jira_id, mode, issue_details_artifact] + + output_schema: + type: object + properties: + comment_text: + type: string + description: "Formatted Jira comment text" + required: [comment_text] + + tools: + - tool_type: builtin-group + group_name: "artifact_management" + + session_service: + <<: *default_session_service + + artifact_service: + <<: *default_artifact_service + + agent_card: + description: "Generates formatted Jira comments from analysis results" + skills: [{id: "gen_comment", name: "Generate Comment", description: "Creates Jira comments", tags: ["jira", "formatting"]}] + agent_card_publishing: { interval_seconds: 10 } + agent_discovery: { enabled: false } + + # ============================================================================ + # WORKFLOW: JiraBugTriageWorkflow + # Orchestrates the complete bug triage process + # ============================================================================ + - name: jira_bug_triage_workflow + app_module: solace_agent_mesh.workflow.app + broker: + <<: *broker_connection + + app_config: + namespace: ${NAMESPACE} + agent_name: "JiraBugTriageWorkflow" + display_name: "Jira Bug Triage Workflow" + + # Workflow execution timeouts + max_workflow_execution_time_seconds: 1800 + default_node_timeout_seconds: 300 + + workflow: + description: | + Automated Jira bug 
triage workflow that: + 1. Analyzes incoming bugs for duplicates + 2. Performs code analysis for unique issues + 3. Generates appropriate Jira comments + + Input: Jira issue ID (e.g., DATAGO-12345) + Output: Formatted Jira comment with analysis or duplicate detection results + + This is a demonstration workflow - all agents generate realistic fake data. + + input_schema: + type: object + properties: + jira_id: + type: string + pattern: "^[A-Z]+-\\d+$" + description: "Jira issue ID in format PROJECT-12345" + force_duplicate_result: + type: string + enum: ["duplicate", "not_duplicate", "random"] + description: "Optional: Control duplicate detection result - 'duplicate' forces duplicate detection, 'not_duplicate' forces unique issue, 'random' uses default 25% chance (default: random)" + required: [jira_id] + + output_schema: + type: object + properties: + jira_id: + type: string + description: "Original Jira issue ID" + final_comment: + type: string + description: "Generated Jira comment text" + workflow_path: + type: string + description: "Which path the workflow took (full_analysis or duplicate_detected)" + is_duplicate: + type: boolean + description: "Whether issue was detected as duplicate" + required: [jira_id, final_comment, workflow_path] + + nodes: + # ============================================ + # Node 1: Detect Duplicates + # ============================================ + - id: detect_duplicates + type: agent + agent_name: "DuplicateDetector" + input: + jira_id: "{{workflow.input.jira_id}}" + force_duplicate_result: + coalesce: + - "{{workflow.input.force_duplicate_result}}" + - "random" + + # ============================================ + # Node 2: Check if Duplicate + # ============================================ + - id: check_duplicate + type: conditional + depends_on: [detect_duplicates] + condition: "{{detect_duplicates.output.is_duplicate}} == false" + true_branch: analyze_code + false_branch: create_duplicate_comment + + # ============================================ + # Node 3A: Code Analysis (if not duplicate) + # ============================================ + - id: analyze_code + type: agent + agent_name: "CodeAnalyzer" + depends_on: [check_duplicate] + input: + jira_id: "{{workflow.input.jira_id}}" + issue_details_artifact: "{{detect_duplicates.output.issue_details_artifact}}" + similar_issues_artifacts: "{{detect_duplicates.output.similar_issues}}" + + # ============================================ + # Node 3B: Create Duplicate Comment (if duplicate) + # ============================================ + - id: create_duplicate_comment + type: agent + agent_name: "JiraCommenter" + depends_on: [check_duplicate] + input: + jira_id: "{{workflow.input.jira_id}}" + mode: "duplicate" + issue_details_artifact: "{{detect_duplicates.output.issue_details_artifact}}" + recommendation: "{{detect_duplicates.output.recommendation}}" + duplicate_confidence: "{{detect_duplicates.output.duplicate_confidence}}" + + # ============================================ + # Node 4: Create Analysis Comment (after code analysis) + # ============================================ + - id: create_analysis_comment + type: agent + agent_name: "JiraCommenter" + depends_on: [analyze_code] + input: + jira_id: "{{workflow.input.jira_id}}" + mode: "analysis" + issue_details_artifact: "{{detect_duplicates.output.issue_details_artifact}}" + analysis_artifact: "{{analyze_code.output.analysis_artifact}}" + + output_mapping: + jira_id: "{{workflow.input.jira_id}}" + final_comment: + coalesce: + - 
"{{create_analysis_comment.output.comment_text}}" + - "{{create_duplicate_comment.output.comment_text}}" + workflow_path: + coalesce: + - "{{create_analysis_comment.output.comment_text}}" + - "{{create_duplicate_comment.output.comment_text}}" + is_duplicate: "{{detect_duplicates.output.is_duplicate}}" + + session_service: + <<: *default_session_service + + artifact_service: + <<: *default_artifact_service + + agent_card_publishing: { interval_seconds: 10 } + agent_discovery: { enabled: false } diff --git a/examples/agents/new_node_types_test.yaml b/examples/agents/new_node_types_test.yaml new file mode 100644 index 000000000..d72c0b0b7 --- /dev/null +++ b/examples/agents/new_node_types_test.yaml @@ -0,0 +1,368 @@ +log: + stdout_log_level: INFO + log_file_level: DEBUG + log_file: new_node_types_test.log + +!include ../shared_config.yaml + +apps: + # ============================================================================ + # AGENT: Echo Agent + # Simple agent that echoes back input with a prefix + # ============================================================================ + - name: echo_agent_app + app_base_path: . + app_module: solace_agent_mesh.agent.sac.app + broker: + <<: *broker_connection + + app_config: + namespace: ${NAMESPACE} + agent_name: "EchoAgent" + model: *planning_model + + instruction: | + You are an echo agent. Read the input message and create a JSON artifact with the echoed response. + 1. Read 'message' from input + 2. Create a JSON artifact with: {"echoed": "Echo: " + message, "timestamp": current_time} + 3. End with: «result:artifact= status=success» + + IMPORTANT: When invoked by a workflow, use the exact output filename specified in the workflow instructions. + + input_schema: + type: object + properties: + message: {type: string} + required: [message] + + output_schema: + type: object + properties: + echoed: {type: string} + timestamp: {type: string} + required: [echoed] + + tools: + - tool_type: builtin-group + group_name: "artifact_management" + + session_service: + <<: *default_session_service + artifact_service: + <<: *default_artifact_service + + agent_card: + description: "Echoes back the input message" + skills: [{id: "echo", name: "Echo", description: "Echoes input", tags: ["echo"]}] + agent_card_publishing: { interval_seconds: 10 } + agent_discovery: { enabled: false } + + # ============================================================================ + # AGENT: Category Classifier + # Classifies input into categories: A, B, C, or unknown + # ============================================================================ + - name: classifier_agent_app + app_base_path: . + app_module: solace_agent_mesh.agent.sac.app + broker: + <<: *broker_connection + + app_config: + namespace: ${NAMESPACE} + agent_name: "Classifier" + model: *planning_model + + instruction: | + You classify inputs into categories. + 1. Read 'value' from input (a number) + 2. Classify: value < 10 -> "A", value < 50 -> "B", value < 100 -> "C", else -> "unknown" + 3. Create a JSON artifact with: {"category": , "original_value": } + 4. End with: «result:artifact= status=success» + + IMPORTANT: When invoked by a workflow, use the exact output filename specified in the workflow instructions. 
+ + input_schema: + type: object + properties: + value: {type: integer} + required: [value] + + output_schema: + type: object + properties: + category: {type: string, enum: ["A", "B", "C", "unknown"]} + original_value: {type: integer} + required: [category, original_value] + + tools: + - tool_type: builtin-group + group_name: "artifact_management" + + session_service: + <<: *default_session_service + artifact_service: + <<: *default_artifact_service + + agent_card: + description: "Classifies values into categories" + skills: [{id: "classify", name: "Classify", description: "Classifies values", tags: ["classify"]}] + agent_card_publishing: { interval_seconds: 10 } + agent_discovery: { enabled: false } + + # ============================================================================ + # AGENT: Handler A/B/C + # Different handlers for different categories + # ============================================================================ + - name: handler_a_app + app_base_path: . + app_module: solace_agent_mesh.agent.sac.app + broker: + <<: *broker_connection + + app_config: + namespace: ${NAMESPACE} + agent_name: "HandlerA" + model: *planning_model + + instruction: | + You handle Category A items. + 1. Create a JSON artifact with: {"handler": "A", "action": "processed with high priority"} + 2. End with: «result:artifact= status=success» + + IMPORTANT: When invoked by a workflow, use the exact output filename specified in the workflow instructions. + + output_schema: + type: object + properties: + handler: {type: string} + action: {type: string} + required: [handler, action] + + tools: + - tool_type: builtin-group + group_name: "artifact_management" + + session_service: + <<: *default_session_service + artifact_service: + <<: *default_artifact_service + + agent_card: + description: "Handles Category A" + skills: [{id: "handle_a", name: "Handle A", description: "Handles A", tags: ["handler"]}] + agent_card_publishing: { interval_seconds: 10 } + agent_discovery: { enabled: false } + + - name: handler_b_app + app_base_path: . + app_module: solace_agent_mesh.agent.sac.app + broker: + <<: *broker_connection + + app_config: + namespace: ${NAMESPACE} + agent_name: "HandlerB" + model: *planning_model + + instruction: | + You handle Category B items. + 1. Create a JSON artifact with: {"handler": "B", "action": "processed with normal priority"} + 2. End with: «result:artifact= status=success» + + IMPORTANT: When invoked by a workflow, use the exact output filename specified in the workflow instructions. + + output_schema: + type: object + properties: + handler: {type: string} + action: {type: string} + required: [handler, action] + + tools: + - tool_type: builtin-group + group_name: "artifact_management" + + session_service: + <<: *default_session_service + artifact_service: + <<: *default_artifact_service + + agent_card: + description: "Handles Category B" + skills: [{id: "handle_b", name: "Handle B", description: "Handles B", tags: ["handler"]}] + agent_card_publishing: { interval_seconds: 10 } + agent_discovery: { enabled: false } + + - name: handler_c_app + app_base_path: . + app_module: solace_agent_mesh.agent.sac.app + broker: + <<: *broker_connection + + app_config: + namespace: ${NAMESPACE} + agent_name: "HandlerC" + model: *planning_model + + instruction: | + You handle Category C items. + 1. Create a JSON artifact with: {"handler": "C", "action": "processed with low priority"} + 2. 
End with: «result:artifact= status=success» + + IMPORTANT: When invoked by a workflow, use the exact output filename specified in the workflow instructions. + + output_schema: + type: object + properties: + handler: {type: string} + action: {type: string} + required: [handler, action] + + tools: + - tool_type: builtin-group + group_name: "artifact_management" + + session_service: + <<: *default_session_service + artifact_service: + <<: *default_artifact_service + + agent_card: + description: "Handles Category C" + skills: [{id: "handle_c", name: "Handle C", description: "Handles C", tags: ["handler"]}] + agent_card_publishing: { interval_seconds: 10 } + agent_discovery: { enabled: false } + + - name: handler_default_app + app_base_path: . + app_module: solace_agent_mesh.agent.sac.app + broker: + <<: *broker_connection + + app_config: + namespace: ${NAMESPACE} + agent_name: "HandlerDefault" + model: *planning_model + + instruction: | + You handle unknown/default items. + 1. Create a JSON artifact with: {"handler": "Default", "action": "processed as fallback"} + 2. End with: «result:artifact= status=success» + + IMPORTANT: When invoked by a workflow, use the exact output filename specified in the workflow instructions. + + output_schema: + type: object + properties: + handler: {type: string} + action: {type: string} + required: [handler, action] + + tools: + - tool_type: builtin-group + group_name: "artifact_management" + + session_service: + <<: *default_session_service + artifact_service: + <<: *default_artifact_service + + agent_card: + description: "Handles unknown categories" + skills: [{id: "handle_default", name: "Handle Default", description: "Handles default", tags: ["handler"]}] + agent_card_publishing: { interval_seconds: 10 } + agent_discovery: { enabled: false } + + # ============================================================================ + # WORKFLOW: Switch Node Test + # Tests the new SwitchNode for multi-way branching + # ============================================================================ + - name: switch_test_workflow + app_base_path: . + app_module: solace_agent_mesh.workflow.app + broker: + <<: *broker_connection + + app_config: + namespace: ${NAMESPACE} + agent_name: "SwitchTestWorkflow" + display_name: "Switch Node Test Workflow" + + workflow: + description: | + Tests the SwitchNode for multi-way branching. + + Takes a numeric value, classifies it, then routes to the appropriate handler + based on the category using a switch statement. 
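+ # End-to-end routing: value < 10 -> category A -> HandlerA (high priority); 10-49 -> B -> HandlerB (normal priority); 50-99 -> C -> HandlerC (low priority); otherwise -> unknown -> HandlerDefault (fallback).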
+ + input_schema: + type: object + properties: + value: {type: integer, description: "A number to classify and route"} + required: [value] + + output_schema: + type: object + properties: + category: {type: string} + handler_result: {type: string} + required: [category, handler_result] + + nodes: + # Step 1: Classify the value + - id: classify + type: agent + agent_name: "Classifier" + input: + value: "{{workflow.input.value}}" + + # Step 2: Switch based on category + - id: route_by_category + type: switch + depends_on: [classify] + cases: + - condition: "'{{classify.output.category}}' == 'A'" + node: handle_a + - condition: "'{{classify.output.category}}' == 'B'" + node: handle_b + - condition: "'{{classify.output.category}}' == 'C'" + node: handle_c + default: handle_default + + # Step 3a-d: Handlers for each category + - id: handle_a + type: agent + agent_name: "HandlerA" + depends_on: [route_by_category] + + - id: handle_b + type: agent + agent_name: "HandlerB" + depends_on: [route_by_category] + + - id: handle_c + type: agent + agent_name: "HandlerC" + depends_on: [route_by_category] + + - id: handle_default + type: agent + agent_name: "HandlerDefault" + depends_on: [route_by_category] + + output_mapping: + category: "{{classify.output.category}}" + handler_result: + coalesce: + - "{{handle_a.output.action}}" + - "{{handle_b.output.action}}" + - "{{handle_c.output.action}}" + - "{{handle_default.output.action}}" + + session_service: + <<: *default_session_service + artifact_service: + <<: *default_artifact_service + + agent_card_publishing: { interval_seconds: 10 } + agent_discovery: { enabled: false } diff --git a/examples/agents/test_agent_example.yaml b/examples/agents/test_agent_example.yaml index 165ecef03..b76c961b3 100644 --- a/examples/agents/test_agent_example.yaml +++ b/examples/agents/test_agent_example.yaml @@ -101,3 +101,93 @@ apps: inter_agent_communication: allow_list: ["*"] # Allows this agent to communicate with any other agent. request_timeout_seconds: 120 # How long to wait for a response from a peer agent. + + + - name: test_agent_app_2 + # Specifies the base path for resolving other module paths. + app_base_path: . + # The Python module that contains the main application entry point for an agent. + app_module: src.solace_agent_mesh.agent.sac.app + # Inherits broker connection settings from the shared_config.yaml file. + broker: + <<: *broker_connection + + # Configuration specific to this agent application. + app_config: + # The namespace for this agent, used for topic hierarchy and resource isolation. + namespace: ${NAMESPACE} + # Whether the agent supports streaming responses back to the client. + supports_streaming: true + # The unique name of the agent within the mesh. + agent_name: "TestAgentB" + # A user-friendly name for display purposes. + display_name: "Test Agent B" + # The alias for the LLM model configuration to use (defined in shared_config.yaml). + model: *planning_model + # The core instructions that define the agent's persona and primary goal. + instruction: | + You are an agent that helps test the system. You will do as asked to ensure that + all aspects of agent to agent communication are working. You will also output + very frequent and verbose status updates. + + + # The list of tools available to this agent. + tools: + # A simple tool that waits for a specified duration. + - tool_type: builtin + tool_name: "time_delay" + # A tool designed to always return an error, for testing failure scenarios. 
+ - tool_type: builtin + tool_name: "always_fail_tool" + # A tool for testing how the system handles tool calls that don't complete. + - tool_type: builtin + tool_name: "dangling_tool_call_test_tool" + # A group of built-in tools for managing files (e.g., save, load, list). + - tool_type: builtin-group + group_name: "artifact_management" + # A group of built-in tools for data analysis (e.g., jq, sql). + - tool_type: builtin-group + group_name: "data_analysis" + + # Configuration for the service that manages conversation history. + session_service: + type: "memory" # Stores session history in memory. + default_behavior: "PERSISTENT" # Retains history across multiple tasks. + + # Configuration for the service that manages file artifacts. + artifact_service: + type: "filesystem" # Stores artifacts on the local filesystem. + base_path: "/tmp/samv2" # The directory where artifacts will be saved. + artifact_scope: namespace # Artifacts are shared across all agents in the same namespace. + + # How the agent should handle artifacts in its context. "reference" means it will see metadata, not full content. + artifact_handling_mode: "reference" + # Enables the agent to resolve dynamic {{embeds}} in its responses. + enable_embed_resolution: true + # Allows the content of artifacts to be automatically included in the LLM prompt. + enable_artifact_content_instruction: true + + # The agent's "business card" for discovery by other agents. + agent_card: + description: | + Use this agent to test the system. It can do basic artifact management and will + generate lots of status updates. + defaultInputModes: ["text"] + defaultOutputModes: ["text", "file"] + # A list of specific skills the agent possesses. + skills: + - id: time_delay + name: Time Delay + description: "Delays for a specified number of seconds." + examples: + - "Delay for 5 seconds." + - "Wait for 10 seconds." + + # How often (in seconds) the agent publishes its agent_card for discovery. + agent_card_publishing: { interval_seconds: 10 } + # Enables this agent to be discovered by other agents. + agent_discovery: { enabled: true } + # Configuration for communication with other agents. + inter_agent_communication: + allow_list: [] # Allows this agent to communicate with any other agent. + request_timeout_seconds: 120 # How long to wait for a response from a peer agent. 
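The workflow examples above merge mutually exclusive branches through coalesce entries in output_mapping: only the branch that actually ran leaves an output behind, so the first {{node.output.field}} reference that resolves wins. Below is a minimal, illustrative Python sketch of that behaviour; the resolve and coalesce helpers and the in-memory outputs dict are assumptions for illustration only, not the real solace_agent_mesh template engine.

import re

# Matches a whole-string placeholder such as "{{handle_b.output.action}}".
PLACEHOLDER = re.compile(r"^\{\{\s*(.+?)\s*\}\}$")


def resolve(expr, outputs):
    """Resolve a '{{node.output.field}}' reference; return None if any segment is missing."""
    match = PLACEHOLDER.match(expr) if isinstance(expr, str) else None
    if not match:
        return expr  # a plain literal such as "random" passes through unchanged
    value = outputs
    for part in match.group(1).split("."):
        if not isinstance(value, dict) or part not in value:
            return None  # the node never ran, so it left no output behind
        value = value[part]
    return value


def coalesce(options, outputs):
    """Return the first option that resolves to a non-empty value (None otherwise)."""
    for option in options:
        value = resolve(option, outputs)
        if value not in (None, ""):
            return value
    return None


# Example: the switch routed to handle_b, so only HandlerB produced an output.
outputs = {"handle_b": {"output": {"action": "processed with normal priority"}}}
print(coalesce(
    [
        "{{handle_a.output.action}}",
        "{{handle_b.output.action}}",
        "{{handle_c.output.action}}",
        "{{handle_default.output.action}}",
    ],
    outputs,
))  # -> processed with normal priority

The same fall-through is what lets the jira workflow's final_comment and the conftest conditional/switch workflows list every branch's output field in one coalesce block.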
diff --git a/pyproject.toml b/pyproject.toml index 267a76734..81b67e016 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -182,6 +182,11 @@ markers = [ "proxy: marks tests related to the A2A proxy functionality", "streaming: marks tests related to streaming messages", "artifacts: marks tests related to artifact handling and services", + "workflows: marks tests related to workflow execution", + "conditional: marks tests for conditional workflow branching", + "map: marks tests for map (parallel iteration) workflows", + "switch: marks tests for switch (multi-way) workflow branching", + "loop: marks tests for loop workflow iteration", "api: marks tests related to the API", "tasks: marks tests related to task management", "builtin_artifact_embeds: marks tests related to the builtin_artifact_embeds", diff --git a/src/solace_agent_mesh/common/sac/sam_component_base.py b/src/solace_agent_mesh/common/sac/sam_component_base.py index ac2709543..608e4537f 100644 --- a/src/solace_agent_mesh/common/sac/sam_component_base.py +++ b/src/solace_agent_mesh/common/sac/sam_component_base.py @@ -146,6 +146,14 @@ def process_event(self, event): message: SolaceMessage = event.data topic = message.get_topic() + # DEBUG: Log all incoming messages at process_event level + log.debug( + "%s [PROCESS_EVENT_DEBUG] MESSAGE event received | topic=%s | component=%s", + self.log_identifier, + topic, + self._get_component_id() if hasattr(self, '_get_component_id') else 'unknown' + ) + if not topic: log.warning( "%s Received message without topic. Ignoring.", @@ -463,8 +471,9 @@ def publish_a2a_message( payload=payload, topic=topic, user_properties=user_properties ) - log.debug( - "%s [publish_a2a_message] Successfully called app.send_message on topic '%s'", + # DEBUG: Upgrade to INFO for troubleshooting message routing + log.info( + "%s [MSG_DEBUG] Published message to topic: %s", self.log_identifier, topic ) else: diff --git a/src/solace_agent_mesh/gateway/http_sse/component.py b/src/solace_agent_mesh/gateway/http_sse/component.py index a1f803e24..b68fc3a9c 100644 --- a/src/solace_agent_mesh/gateway/http_sse/component.py +++ b/src/solace_agent_mesh/gateway/http_sse/component.py @@ -47,6 +47,7 @@ class BaseArtifactService: JSONRPCResponse, Task, TaskArtifactUpdateEvent, + TaskState, TaskStatusUpdateEvent, ) @@ -1704,6 +1705,20 @@ def _infer_visualization_event_details( if result.metadata else None ) + task_status = a2a.get_task_status(result) + # Guard against task_status being an Enum (TaskState) instead of TaskStatus object + if ( + task_status + and not isinstance(task_status, TaskState) + and hasattr(task_status, "message") + ): + data_parts = a2a.get_data_parts_from_message( + task_status.message + ) + if data_parts: + details["debug_type"] = data_parts[0].data.get( + "type", "task_result" + ) elif isinstance(result, TaskArtifactUpdateEvent): artifact = a2a.get_artifact_from_artifact_update(result) if artifact: @@ -1739,6 +1754,11 @@ def _infer_visualization_event_details( if message.metadata else None ) + data_parts = a2a.get_data_parts_from_message(message) + if data_parts: + details["debug_type"] = data_parts[0].data.get( + "type", method + ) elif method == "tasks/cancel": details["task_id"] = a2a.get_task_id_from_cancel_request( rpc_request diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 00a250385..e9eb12096 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -1136,6 +1136,459 @@ def create_agent_config( "broker": {"dev_mode": True}, "app_module": 
"solace_agent_mesh.agent.sac.app", }, + { + "name": "TestSimpleWorkflowApp", + "app_config": { + "namespace": "test_namespace", + "agent_name": "SimpleTestWorkflow", + "display_name": "Simple Test Workflow", + "artifact_scope": "namespace", + "workflow": { + "description": "A simple 2-node workflow for testing", + "input_schema": { + "type": "object", + "properties": { + "input_text": {"type": "string"} + }, + "required": ["input_text"] + }, + "output_schema": { + "type": "object", + "properties": { + "final_result": {"type": "string"} + }, + "required": ["final_result"] + }, + "nodes": [ + { + "id": "step_1", + "type": "agent", + "agent_name": "TestPeerAgentA", + "input": { + "task_description": "{{workflow.input.input_text}}" + } + }, + { + "id": "step_2", + "type": "agent", + "agent_name": "TestPeerAgentB", + "depends_on": ["step_1"], + "input": { + "task_description": "Process the output from step 1" + } + } + ], + "output_mapping": { + "final_result": "{{step_2.output}}" + } + }, + "session_service": {"type": "memory", "default_behavior": "RUN_BASED"}, + "artifact_service": {"type": "test_in_memory"}, + "agent_card_publishing": {"interval_seconds": 1}, + "agent_discovery": {"enabled": True}, + }, + "broker": {"dev_mode": True}, + "app_module": "solace_agent_mesh.workflow.app", + }, + { + "name": "TestStructuredWorkflowApp", + "app_config": { + "namespace": "test_namespace", + "agent_name": "StructuredTestWorkflow", + "display_name": "Structured Test Workflow with Validation", + "artifact_scope": "namespace", + "workflow": { + "description": "A workflow with structured input/output schemas for validation testing", + "input_schema": { + "type": "object", + "properties": { + "customer_name": {"type": "string"}, + "order_id": {"type": "string"}, + "amount": {"type": "integer"} + }, + "required": ["customer_name", "order_id", "amount"] + }, + "output_schema": { + "type": "object", + "properties": { + "customer_name": {"type": "string"}, + "order_id": {"type": "string"}, + "amount": {"type": "integer"}, + "status": {"type": "string"}, + "processed": {"type": "boolean"} + }, + "required": ["customer_name", "order_id", "amount", "status", "processed"] + }, + "nodes": [ + { + "id": "validate_order", + "type": "agent", + "agent_name": "TestPeerAgentA", + "input": { + "customer_name": "{{workflow.input.customer_name}}", + "order_id": "{{workflow.input.order_id}}", + "amount": "{{workflow.input.amount}}" + }, + "input_schema_override": { + "type": "object", + "properties": { + "customer_name": {"type": "string"}, + "order_id": {"type": "string"}, + "amount": {"type": "integer"} + }, + "required": ["customer_name", "order_id", "amount"] + }, + "output_schema_override": { + "type": "object", + "properties": { + "customer_name": {"type": "string"}, + "order_id": {"type": "string"}, + "amount": {"type": "integer"}, + "status": {"type": "string"} + }, + "required": ["customer_name", "order_id", "amount", "status"] + } + }, + { + "id": "process_order", + "type": "agent", + "agent_name": "TestPeerAgentB", + "depends_on": ["validate_order"], + "input": { + "customer_name": "{{validate_order.output.customer_name}}", + "order_id": "{{validate_order.output.order_id}}", + "amount": "{{validate_order.output.amount}}", + "status": "{{validate_order.output.status}}" + }, + "input_schema_override": { + "type": "object", + "properties": { + "customer_name": {"type": "string"}, + "order_id": {"type": "string"}, + "amount": {"type": "integer"}, + "status": {"type": "string"} + }, + "required": ["customer_name", 
"order_id", "amount", "status"] + }, + "output_schema_override": { + "type": "object", + "properties": { + "customer_name": {"type": "string"}, + "order_id": {"type": "string"}, + "amount": {"type": "integer"}, + "status": {"type": "string"}, + "processed": {"type": "boolean"} + }, + "required": ["customer_name", "order_id", "amount", "status", "processed"] + } + } + ], + "output_mapping": { + "customer_name": "{{process_order.output.customer_name}}", + "order_id": "{{process_order.output.order_id}}", + "amount": "{{process_order.output.amount}}", + "status": "{{process_order.output.status}}", + "processed": "{{process_order.output.processed}}" + } + }, + "session_service": {"type": "memory", "default_behavior": "RUN_BASED"}, + "artifact_service": {"type": "test_in_memory"}, + "agent_card_publishing": {"interval_seconds": 1}, + "agent_discovery": {"enabled": True}, + }, + "broker": {"dev_mode": True}, + "app_module": "solace_agent_mesh.workflow.app", + }, + { + "name": "TestConditionalWorkflowApp", + "app_config": { + "namespace": "test_namespace", + "agent_name": "ConditionalTestWorkflow", + "display_name": "Conditional Test Workflow", + "artifact_scope": "namespace", + "workflow": { + "description": "A workflow with conditional branching based on status", + "input_schema": { + "type": "object", + "properties": { + "input_text": {"type": "string"}, + "should_succeed": {"type": "boolean"}, + }, + "required": ["input_text", "should_succeed"], + }, + "output_schema": { + "type": "object", + "properties": { + "result": {"type": "string"}, + "path_taken": {"type": "string"}, + }, + }, + "nodes": [ + { + "id": "check_status", + "type": "agent", + "agent_name": "TestPeerAgentA", + "input": { + "task": "Check status", + "should_succeed": "{{workflow.input.should_succeed}}", + }, + }, + { + "id": "branch", + "type": "conditional", + "condition": "'{{check_status.output.status}}' == 'success'", + "true_branch": "success_path", + "false_branch": "failure_path", + "depends_on": ["check_status"], + }, + { + "id": "success_path", + "type": "agent", + "agent_name": "TestPeerAgentB", + "depends_on": ["branch"], + "input": {"task": "Handle success"}, + }, + { + "id": "failure_path", + "type": "agent", + "agent_name": "TestPeerAgentC", + "depends_on": ["branch"], + "input": {"task": "Handle failure"}, + }, + ], + "output_mapping": { + "result": { + "coalesce": [ + "{{success_path.output.result}}", + "{{failure_path.output.result}}", + ] + }, + "path_taken": { + "coalesce": [ + "{{success_path.output.path}}", + "{{failure_path.output.path}}", + ] + }, + }, + }, + "session_service": {"type": "memory", "default_behavior": "RUN_BASED"}, + "artifact_service": {"type": "test_in_memory"}, + "agent_card_publishing": {"interval_seconds": 1}, + "agent_discovery": {"enabled": True}, + }, + "broker": {"dev_mode": True}, + "app_module": "solace_agent_mesh.workflow.app", + }, + { + "name": "TestMapWorkflowApp", + "app_config": { + "namespace": "test_namespace", + "agent_name": "MapTestWorkflow", + "display_name": "Map Test Workflow", + "artifact_scope": "namespace", + "workflow": { + "description": "A workflow that iterates over a list of items", + "input_schema": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": {"type": "string"}, + }, + }, + "required": ["items"], + }, + "output_schema": { + "type": "object", + "properties": { + "results": { + "type": "array", + "items": {"type": "object"}, + }, + }, + }, + "nodes": [ + { + "id": "process_items", + "type": "map", + "node": 
"process_single_item", + "items": "{{workflow.input.items}}", + }, + { + "id": "process_single_item", + "type": "agent", + "agent_name": "TestPeerAgentA", + "input": { + "item": "{{_map_item}}", + "index": "{{_map_index}}", + }, + }, + ], + "output_mapping": { + "results": "{{process_items.output}}", + }, + }, + "session_service": {"type": "memory", "default_behavior": "RUN_BASED"}, + "artifact_service": {"type": "test_in_memory"}, + "agent_card_publishing": {"interval_seconds": 1}, + "agent_discovery": {"enabled": True}, + }, + "broker": {"dev_mode": True}, + "app_module": "solace_agent_mesh.workflow.app", + }, + { + "name": "TestSwitchWorkflowApp", + "app_config": { + "namespace": "test_namespace", + "agent_name": "SwitchTestWorkflow", + "display_name": "Switch Test Workflow", + "artifact_scope": "namespace", + "workflow": { + "description": "A workflow with switch (multi-way) branching", + "input_schema": { + "type": "object", + "properties": { + "action": {"type": "string"}, + }, + "required": ["action"], + }, + "output_schema": { + "type": "object", + "properties": { + "result": {"type": "string"}, + "action_taken": {"type": "string"}, + }, + }, + "nodes": [ + { + "id": "route_action", + "type": "switch", + "cases": [ + { + "condition": "'{{workflow.input.action}}' == 'create'", + "node": "create_handler", + }, + { + "condition": "'{{workflow.input.action}}' == 'update'", + "node": "update_handler", + }, + { + "condition": "'{{workflow.input.action}}' == 'delete'", + "node": "delete_handler", + }, + ], + "default": "default_handler", + }, + { + "id": "create_handler", + "type": "agent", + "agent_name": "TestPeerAgentA", + "depends_on": ["route_action"], + "input": {"task": "Create resource"}, + }, + { + "id": "update_handler", + "type": "agent", + "agent_name": "TestPeerAgentB", + "depends_on": ["route_action"], + "input": {"task": "Update resource"}, + }, + { + "id": "delete_handler", + "type": "agent", + "agent_name": "TestPeerAgentC", + "depends_on": ["route_action"], + "input": {"task": "Delete resource"}, + }, + { + "id": "default_handler", + "type": "agent", + "agent_name": "TestPeerAgentA", + "depends_on": ["route_action"], + "input": {"task": "Handle unknown action"}, + }, + ], + "output_mapping": { + "result": { + "coalesce": [ + "{{create_handler.output.result}}", + "{{update_handler.output.result}}", + "{{delete_handler.output.result}}", + "{{default_handler.output.result}}", + ] + }, + "action_taken": { + "coalesce": [ + "{{create_handler.output.action}}", + "{{update_handler.output.action}}", + "{{delete_handler.output.action}}", + "{{default_handler.output.action}}", + ] + }, + }, + }, + "session_service": {"type": "memory", "default_behavior": "RUN_BASED"}, + "artifact_service": {"type": "test_in_memory"}, + "agent_card_publishing": {"interval_seconds": 1}, + "agent_discovery": {"enabled": True}, + }, + "broker": {"dev_mode": True}, + "app_module": "solace_agent_mesh.workflow.app", + }, + { + "name": "TestLoopWorkflowApp", + "app_config": { + "namespace": "test_namespace", + "agent_name": "LoopTestWorkflow", + "display_name": "Loop Test Workflow", + "artifact_scope": "namespace", + "workflow": { + "description": "A workflow with loop iteration", + "input_schema": { + "type": "object", + "properties": { + "max_count": {"type": "integer"}, + }, + "required": ["max_count"], + }, + "output_schema": { + "type": "object", + "properties": { + "final_count": {"type": "integer"}, + "iterations": {"type": "integer"}, + }, + }, + "nodes": [ + { + "id": "count_loop", + "type": 
"loop", + "node": "increment_counter", + "condition": "{{increment_counter.output.count}} < {{workflow.input.max_count}}", + "max_iterations": 10, + }, + { + "id": "increment_counter", + "type": "agent", + "agent_name": "TestPeerAgentA", + "input": { + "task": "Increment counter", + "current_iteration": "{{_loop_iteration}}", + }, + }, + ], + "output_mapping": { + "final_count": "{{increment_counter.output.count}}", + "iterations": "{{count_loop.output.iterations_completed}}", + }, + }, + "session_service": {"type": "memory", "default_behavior": "RUN_BASED"}, + "artifact_service": {"type": "test_in_memory"}, + "agent_card_publishing": {"interval_seconds": 1}, + "agent_discovery": {"enabled": True}, + }, + "broker": {"dev_mode": True}, + "app_module": "solace_agent_mesh.workflow.app", + }, { "name": "TestA2AProxyApp", "app_config": { @@ -1172,6 +1625,13 @@ def create_agent_config( wrapped_service=test_artifact_service_instance, component=component ), ) + # Also patch for workflow and gateway components that import from agent.adk.services + session_monkeypatch.setattr( + "solace_agent_mesh.agent.adk.services.initialize_artifact_service", + lambda component: ScopedArtifactServiceWrapper( + wrapped_service=test_artifact_service_instance, component=component + ), + ) log_level_str = request.config.getoption("--log-cli-level") or "INFO" @@ -1385,6 +1845,30 @@ def peer_d_component(peer_agent_d_app_under_test: SamAgentApp) -> SamAgentCompon return get_component_from_app(peer_agent_d_app_under_test) +@pytest.fixture(scope="session") +def test_simple_workflow_app( + shared_solace_connector: SolaceAiConnector, +): + """Retrieves the TestSimpleWorkflowApp instance.""" + from solace_agent_mesh.workflow.app import WorkflowApp + app_instance = shared_solace_connector.get_app("TestSimpleWorkflowApp") + assert isinstance( + app_instance, WorkflowApp + ), "Failed to retrieve TestSimpleWorkflowApp." + yield app_instance + + +@pytest.fixture(scope="session") +def test_simple_workflow_component(test_simple_workflow_app): + """Retrieves the SimpleTestWorkflow component instance.""" + from solace_agent_mesh.workflow.component import WorkflowComponent + component = get_component_from_app(test_simple_workflow_app) + assert isinstance( + component, WorkflowComponent + ), "Failed to retrieve WorkflowComponent from TestSimpleWorkflowApp." 
+ return component + + @pytest.fixture(scope="session") def combined_dynamic_agent_component( combined_dynamic_agent_app_under_test: SamAgentApp, diff --git a/tests/integration/scenarios_declarative/test_data/workflows/test_conditional_workflow_false_branch.yaml b/tests/integration/scenarios_declarative/test_data/workflows/test_conditional_workflow_false_branch.yaml new file mode 100644 index 000000000..e4a9031e0 --- /dev/null +++ b/tests/integration/scenarios_declarative/test_data/workflows/test_conditional_workflow_false_branch.yaml @@ -0,0 +1,82 @@ +test_case_id: "conditional_workflow_false_branch_001" +description: "Tests a conditional workflow where the false branch is taken based on failure status" +tags: ["all", "workflows", "conditional"] +skip_intermediate_events: true +expected_completion_timeout_seconds: 30 + +test_runner_config_overrides: + agent_config: + artifact_scope: "namespace" + +setup_artifacts: + - filename: "workflow_input.json" + content: '{"input_text": "Test conditional failure", "should_succeed": false}' + mime_type: "application/json" + +gateway_input: + target_agent_name: "ConditionalTestWorkflow" + user_identity: "workflow_test_user@example.com" + external_context: + a2a_session_id: "conditional_false_test_session" + prompt: + parts: + - text: "Run the conditional workflow" + invoked_with_artifacts: + - filename: "workflow_input.json" + version: 0 + +llm_interactions: + # Step 1: TestPeerAgentA (check_status) - returns failure status + - static_response: + choices: + - message: + role: "assistant" + content: | + «««save_artifact: filename="check_status_output.json" mime_type="application/json" description="Status check result" + {"status": "failure", "message": "Checks failed"} + »»» + expected_request: + tools_present: ["load_artifact"] + + # Step 2: System processes the artifact save notification + - static_response: + choices: + - message: + role: "assistant" + content: "Status check complete. «result:artifact=check_status_output.json:0 status=success»" + expected_request: + expected_tool_responses_in_llm_messages: + - tool_name: "_notify_artifact_save" + response_json_matches: + filename: "check_status_output.json" + status: "success" + + # Step 3: TestPeerAgentC (failure_path) - handles failure + # Note: success_path is skipped because condition was false + - static_response: + choices: + - message: + role: "assistant" + content: | + «««save_artifact: filename="failure_output.json" mime_type="application/json" description="Failure path result" + {"result": "Handled failure gracefully", "path": "failure"} + »»» + expected_request: + tools_present: ["load_artifact"] + + # Step 4: System processes the artifact save for failure path + - static_response: + choices: + - message: + role: "assistant" + content: "Failure path complete. 
«result:artifact=failure_output.json:0 status=success»" + expected_request: + expected_tool_responses_in_llm_messages: + - tool_name: "_notify_artifact_save" + response_json_matches: + filename: "failure_output.json" + status: "success" + +expected_gateway_output: + - type: "final_response" + task_state: "completed" diff --git a/tests/integration/scenarios_declarative/test_data/workflows/test_conditional_workflow_true_branch.yaml b/tests/integration/scenarios_declarative/test_data/workflows/test_conditional_workflow_true_branch.yaml new file mode 100644 index 000000000..db93f430c --- /dev/null +++ b/tests/integration/scenarios_declarative/test_data/workflows/test_conditional_workflow_true_branch.yaml @@ -0,0 +1,82 @@ +test_case_id: "conditional_workflow_true_branch_001" +description: "Tests a conditional workflow where the true branch is taken based on success status" +tags: ["all", "workflows", "conditional"] +skip_intermediate_events: true +expected_completion_timeout_seconds: 30 + +test_runner_config_overrides: + agent_config: + artifact_scope: "namespace" + +setup_artifacts: + - filename: "workflow_input.json" + content: '{"input_text": "Test conditional", "should_succeed": true}' + mime_type: "application/json" + +gateway_input: + target_agent_name: "ConditionalTestWorkflow" + user_identity: "workflow_test_user@example.com" + external_context: + a2a_session_id: "conditional_test_session" + prompt: + parts: + - text: "Run the conditional workflow" + invoked_with_artifacts: + - filename: "workflow_input.json" + version: 0 + +llm_interactions: + # Step 1: TestPeerAgentA (check_status) - returns success status + - static_response: + choices: + - message: + role: "assistant" + content: | + «««save_artifact: filename="check_status_output.json" mime_type="application/json" description="Status check result" + {"status": "success", "message": "All checks passed"} + »»» + expected_request: + tools_present: ["load_artifact"] + + # Step 2: System processes the artifact save notification + - static_response: + choices: + - message: + role: "assistant" + content: "Status check complete. «result:artifact=check_status_output.json:0 status=success»" + expected_request: + expected_tool_responses_in_llm_messages: + - tool_name: "_notify_artifact_save" + response_json_matches: + filename: "check_status_output.json" + status: "success" + + # Step 3: TestPeerAgentB (success_path) - handles success + # Note: failure_path is skipped because condition was true + - static_response: + choices: + - message: + role: "assistant" + content: | + «««save_artifact: filename="success_output.json" mime_type="application/json" description="Success path result" + {"result": "Successfully processed", "path": "success"} + »»» + expected_request: + tools_present: ["load_artifact"] + + # Step 4: System processes the artifact save for success path + - static_response: + choices: + - message: + role: "assistant" + content: "Success path complete. 
«result:artifact=success_output.json:0 status=success»" + expected_request: + expected_tool_responses_in_llm_messages: + - tool_name: "_notify_artifact_save" + response_json_matches: + filename: "success_output.json" + status: "success" + +expected_gateway_output: + - type: "final_response" + task_state: "completed" diff --git a/tests/integration/scenarios_declarative/test_data/workflows/test_loop_workflow.yaml b/tests/integration/scenarios_declarative/test_data/workflows/test_loop_workflow.yaml new file mode 100644 index 000000000..5a05a3f70 --- /dev/null +++ b/tests/integration/scenarios_declarative/test_data/workflows/test_loop_workflow.yaml @@ -0,0 +1,65 @@ +test_case_id: "loop_workflow_001" +description: "Tests a loop workflow that iterates until condition is false" +tags: ["all", "workflows", "loop"] +skip_intermediate_events: true +expected_completion_timeout_seconds: 60 + +test_runner_config_overrides: + agent_config: + artifact_scope: "namespace" + +setup_artifacts: + - filename: "workflow_input.json" + content: '{"max_count": 2}' + mime_type: "application/json" + +gateway_input: + target_agent_name: "LoopTestWorkflow" + user_identity: "workflow_test_user@example.com" + external_context: + a2a_session_id: "loop_test_session" + prompt: + parts: + - text: "Run the counting loop" + invoked_with_artifacts: + - filename: "workflow_input.json" + version: 0 + +llm_interactions: + # Iteration 1: First increment (count goes to 1) + - static_response: + choices: + - message: + role: "assistant" + content: | + «««save_artifact: filename="counter_iter_0.json" mime_type="application/json" description="Counter state" + {"count": 1, "iteration": 0} + »»» + + - static_response: + choices: + - message: + role: "assistant" + content: "Counter incremented. «result:artifact=counter_iter_0.json:0 status=success»" + + # Iteration 2: Second increment (count goes to 2, which equals max_count) + - static_response: + choices: + - message: + role: "assistant" + content: | + «««save_artifact: filename="counter_iter_1.json" mime_type="application/json" description="Counter state" + {"count": 2, "iteration": 1} + »»» + + - static_response: + choices: + - message: + role: "assistant" + content: "Counter incremented. 
«result:artifact=counter_iter_1.json:0 status=success»" + + # Loop should stop because count (2) >= max_count (2) + +expected_gateway_output: + - type: "final_response" + task_state: "completed" diff --git a/tests/integration/scenarios_declarative/test_data/workflows/test_map_workflow.yaml b/tests/integration/scenarios_declarative/test_data/workflows/test_map_workflow.yaml new file mode 100644 index 000000000..025d7d32f --- /dev/null +++ b/tests/integration/scenarios_declarative/test_data/workflows/test_map_workflow.yaml @@ -0,0 +1,64 @@ +test_case_id: "map_workflow_001" +description: "Tests a map workflow that iterates over a list of items" +tags: ["all", "workflows", "map"] +skip_intermediate_events: true +expected_completion_timeout_seconds: 60 + +test_runner_config_overrides: + agent_config: + artifact_scope: "namespace" + +setup_artifacts: + - filename: "workflow_input.json" + content: '{"items": ["item_a", "item_b"]}' + mime_type: "application/json" + +gateway_input: + target_agent_name: "MapTestWorkflow" + user_identity: "workflow_test_user@example.com" + external_context: + a2a_session_id: "map_test_session" + prompt: + parts: + - text: "Process all items" + invoked_with_artifacts: + - filename: "workflow_input.json" + version: 0 + +llm_interactions: + # Iteration 1: Process first item + # Note: Map iterations may run in parallel, order is not guaranteed + - static_response: + choices: + - message: + role: "assistant" + content: | + «««save_artifact: filename="item_0_output.json" mime_type="application/json" description="Processed item" + {"processed_item": "item_a", "index": 0, "status": "processed"} + »»» + + - static_response: + choices: + - message: + role: "assistant" + content: "Item processed. «result:artifact=item_0_output.json:0 status=success»" + + # Iteration 2: Process second item + - static_response: + choices: + - message: + role: "assistant" + content: | + «««save_artifact: filename="item_1_output.json" mime_type="application/json" description="Processed item" + {"processed_item": "item_b", "index": 1, "status": "processed"} + »»» + + - static_response: + choices: + - message: + role: "assistant" + content: "Item processed. 
«result:artifact=item_1_output.json:0 status=success»" + +expected_gateway_output: + - type: "final_response" + task_state: "completed" diff --git a/tests/integration/scenarios_declarative/test_data/workflows/test_simple_two_node_workflow.yaml b/tests/integration/scenarios_declarative/test_data/workflows/test_simple_two_node_workflow.yaml new file mode 100644 index 000000000..b4b2e15f1 --- /dev/null +++ b/tests/integration/scenarios_declarative/test_data/workflows/test_simple_two_node_workflow.yaml @@ -0,0 +1,81 @@ +test_case_id: "simple_workflow_two_nodes_001" +description: "Tests a simple 2-node workflow where Peer A processes input and Peer B processes A's output" +tags: ["all", "workflows"] +skip_intermediate_events: true +expected_completion_timeout_seconds: 20 + +test_runner_config_overrides: + agent_config: + artifact_scope: "namespace" + +setup_artifacts: + - filename: "workflow_input.json" + content: '{"input_text": "Process this test data"}' + mime_type: "application/json" + +gateway_input: + target_agent_name: "SimpleTestWorkflow" + user_identity: "workflow_test_user@example.com" + external_context: + a2a_session_id: "workflow_test_session" + prompt: + parts: + - text: "Run the workflow" + invoked_with_artifacts: + - filename: "workflow_input.json" + version: 0 + +llm_interactions: + # Step 1: TestPeerAgentA receives the workflow input + - static_response: + choices: + - message: + role: "assistant" + content: | + «««save_artifact: filename="step1_output.json" mime_type="application/json" description="Output from step 1" + {"processed": "Step 1 completed", "data": "processed_data"} + »»» + expected_request: + tools_present: ["load_artifact"] + + # Step 2: System processes the fenced block and injects _notify_artifact_save + - static_response: + choices: + - message: + role: "assistant" + content: "Step 1 output has been saved. «result:artifact=step1_output.json:0 status=success»" + expected_request: + expected_tool_responses_in_llm_messages: + - tool_name: "_notify_artifact_save" + response_json_matches: + filename: "step1_output.json" + status: "success" + + # Step 3: TestPeerAgentB receives the task + - static_response: + choices: + - message: + role: "assistant" + content: | + «««save_artifact: filename="step2_output.json" mime_type="application/json" description="Final workflow output" + {"final_result": "Workflow completed successfully"} + »»» + expected_request: + tools_present: ["load_artifact"] + + # Step 4: System processes the fenced block for step 2 + - static_response: + choices: + - message: + role: "assistant" + content: "Step 2 output has been saved. Workflow complete. 
«result:artifact=step2_output.json:0 status=success»" + expected_request: + expected_tool_responses_in_llm_messages: + - tool_name: "_notify_artifact_save" + response_json_matches: + filename: "step2_output.json" + status: "success" + +expected_gateway_output: + - type: "final_response" + task_state: "completed" diff --git a/tests/integration/scenarios_declarative/test_data/workflows/test_switch_workflow_create_case.yaml b/tests/integration/scenarios_declarative/test_data/workflows/test_switch_workflow_create_case.yaml new file mode 100644 index 000000000..b240ee7e3 --- /dev/null +++ b/tests/integration/scenarios_declarative/test_data/workflows/test_switch_workflow_create_case.yaml @@ -0,0 +1,50 @@ +test_case_id: "switch_workflow_create_case_001" +description: "Tests a switch workflow where the 'create' case is matched" +tags: ["all", "workflows", "switch"] +skip_intermediate_events: true +expected_completion_timeout_seconds: 30 + +test_runner_config_overrides: + agent_config: + artifact_scope: "namespace" + +setup_artifacts: + - filename: "workflow_input.json" + content: '{"action": "create"}' + mime_type: "application/json" + +gateway_input: + target_agent_name: "SwitchTestWorkflow" + user_identity: "workflow_test_user@example.com" + external_context: + a2a_session_id: "switch_create_test_session" + prompt: + parts: + - text: "Route the action" + invoked_with_artifacts: + - filename: "workflow_input.json" + version: 0 + +llm_interactions: + # create_handler (TestPeerAgentA): Process create action + - static_response: + choices: + - message: + role: "assistant" + content: | + «««save_artifact: filename="create_output.json" mime_type="application/json" description="Create result" + {"result": "Resource created successfully", "action": "create"} + »»» + expected_request: + tools_present: ["load_artifact"] + + # System processes the artifact save notification + - static_response: + choices: + - message: + role: "assistant" + content: "Create action completed. 
«result:artifact=create_output.json:0 status=success»" + +expected_gateway_output: + - type: "final_response" + task_state: "completed" diff --git a/tests/integration/scenarios_declarative/test_data/workflows/test_switch_workflow_default_case.yaml b/tests/integration/scenarios_declarative/test_data/workflows/test_switch_workflow_default_case.yaml new file mode 100644 index 000000000..c2c365541 --- /dev/null +++ b/tests/integration/scenarios_declarative/test_data/workflows/test_switch_workflow_default_case.yaml @@ -0,0 +1,50 @@ +test_case_id: "switch_workflow_default_case_001" +description: "Tests a switch workflow where no case matches and default is used" +tags: ["all", "workflows", "switch"] +skip_intermediate_events: true +expected_completion_timeout_seconds: 30 + +test_runner_config_overrides: + agent_config: + artifact_scope: "namespace" + +setup_artifacts: + - filename: "workflow_input.json" + content: '{"action": "unknown_action"}' + mime_type: "application/json" + +gateway_input: + target_agent_name: "SwitchTestWorkflow" + user_identity: "workflow_test_user@example.com" + external_context: + a2a_session_id: "switch_default_test_session" + prompt: + parts: + - text: "Route the action" + invoked_with_artifacts: + - filename: "workflow_input.json" + version: 0 + +llm_interactions: + # default_handler (TestPeerAgentA): Handle unknown action + - static_response: + choices: + - message: + role: "assistant" + content: | + «««save_artifact: filename="default_output.json" mime_type="application/json" description="Default handler result" + {"result": "Handled unknown action", "action": "default"} + »»» + expected_request: + tools_present: ["load_artifact"] + + # System processes the artifact save notification + - static_response: + choices: + - message: + role: "assistant" + content: "Default action completed. 
«result:artifact=default_output.json:0 status=success»" + +expected_gateway_output: + - type: "final_response" + task_state: "completed" diff --git a/tests/integration/scenarios_declarative/test_data/workflows/test_workflow_with_structured_input.yaml b/tests/integration/scenarios_declarative/test_data/workflows/test_workflow_with_structured_input.yaml new file mode 100644 index 000000000..e0d55ed2c --- /dev/null +++ b/tests/integration/scenarios_declarative/test_data/workflows/test_workflow_with_structured_input.yaml @@ -0,0 +1,81 @@ +test_case_id: "workflow_structured_input_001" +description: "Tests workflow with structured input schema validation (multiple fields)" +tags: ["all", "workflows", "validation"] +skip_intermediate_events: true +expected_completion_timeout_seconds: 20 + +test_runner_config_overrides: + agent_config: + artifact_scope: "namespace" + +setup_artifacts: + - filename: "workflow_input.json" + content: '{"customer_name": "John Doe", "order_id": "ORD-12345", "amount": 150}' + mime_type: "application/json" + +gateway_input: + target_agent_name: "StructuredTestWorkflow" + user_identity: "workflow_test_user@example.com" + external_context: + a2a_session_id: "workflow_test_session" + prompt: + parts: + - text: "Process the order data" + invoked_with_artifacts: + - filename: "workflow_input.json" + version: 0 + +llm_interactions: + # Step 1: TestPeerAgentA receives structured input with schema validation + - static_response: + choices: + - message: + role: "assistant" + content: | + «««save_artifact: filename="step1_output.json" mime_type="application/json" description="Validated order data" + {"customer_name": "John Doe", "order_id": "ORD-12345", "amount": 150, "status": "validated"} + »»» + expected_request: + tools_present: ["load_artifact"] + + # Step 2: System processes the fenced block and injects _notify_artifact_save + - static_response: + choices: + - message: + role: "assistant" + content: "Order validated successfully. «result:artifact=step1_output.json:v0 status=success»" + expected_request: + expected_tool_responses_in_llm_messages: + - tool_name: "_notify_artifact_save" + response_json_matches: + filename: "step1_output.json" + status: "success" + + # Step 3: TestPeerAgentB processes the validated data + - static_response: + choices: + - message: + role: "assistant" + content: | + «««save_artifact: filename="step2_output.json" mime_type="application/json" description="Final processed order" + {"customer_name": "John Doe", "order_id": "ORD-12345", "amount": 150, "status": "validated", "processed": true} + »»» + expected_request: + tools_present: ["load_artifact"] + + # Step 4: System processes the fenced block for step 2 + - static_response: + choices: + - message: + role: "assistant" + content: "Order processed successfully. «result:artifact=step2_output.json:v0 status=success»" + expected_request: + expected_tool_responses_in_llm_messages: + - tool_name: "_notify_artifact_save" + response_json_matches: + filename: "step2_output.json" + status: "success" + +expected_gateway_output: + - type: "final_response" + task_state: "completed" diff --git a/tests/integration/scenarios_programmatic/test_workflow_errors.py b/tests/integration/scenarios_programmatic/test_workflow_errors.py new file mode 100644 index 000000000..7af6c69a0 --- /dev/null +++ b/tests/integration/scenarios_programmatic/test_workflow_errors.py @@ -0,0 +1,1998 @@ +""" +Programmatic integration tests for workflow error handling. 
+ +Tests error scenarios that are awkward to express in declarative YAML tests: +- Invalid input schema rejection +- Node failure handling +- Output schema validation with retry +""" + +import pytest +import json +from sam_test_infrastructure.llm_server.server import ( + TestLLMServer, + ChatCompletionResponse, + Message, + Choice, + Usage, +) +from sam_test_infrastructure.gateway_interface.component import ( + TestGatewayComponent, +) +from sam_test_infrastructure.artifact_service.service import ( + TestInMemoryArtifactService, +) +from a2a.types import Task, JSONRPCError +from a2a.utils.message import get_message_text +from google.genai import types as adk_types + +from .test_helpers import ( + prime_llm_server, + submit_test_input, + get_all_task_events, + extract_outputs_from_event_list, +) + +pytestmark = [ + pytest.mark.all, + pytest.mark.asyncio, + pytest.mark.workflows, + pytest.mark.error, +] + + +def create_workflow_input_with_artifact( + target_workflow: str, + user_identity: str, + artifact_filename: str, + artifact_content: dict, + scenario_id: str, +) -> dict: + """ + Creates gateway input data for a workflow with an artifact. + """ + return { + "target_agent_name": target_workflow, + "user_identity": user_identity, + "a2a_parts": [{"type": "text", "text": f"Process the workflow input"}], + "external_context_override": { + "test_case": scenario_id, + "a2a_session_id": f"session_{scenario_id}", + }, + "artifacts": [ + { + "filename": artifact_filename, + "content": json.dumps(artifact_content), + "mime_type": "application/json", + } + ], + } + + +async def test_workflow_rejects_invalid_input_schema( + test_llm_server: TestLLMServer, + test_gateway_app_instance: TestGatewayComponent, +): + """ + Test that workflows properly reject input that doesn't match the input schema. + + The StructuredTestWorkflow expects: + - customer_name: string (required) + - order_id: string (required) + - amount: integer (required) + + We send input missing required fields to verify validation. 
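+ For example, the payload {"customer_name": "Test Customer"} used below omits the required order_id and amount fields and should be rejected before any agent node runs.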
+ """ + scenario_id = "workflow_invalid_input_001" + print(f"\nRunning programmatic scenario: {scenario_id}") + + # Don't need to prime LLM - workflow should reject before calling any agents + prime_llm_server(test_llm_server, []) + + # Send input missing required fields (missing order_id and amount) + input_data = create_workflow_input_with_artifact( + target_workflow="StructuredTestWorkflow", + user_identity="invalid_input_user@example.com", + artifact_filename="workflow_input.json", + artifact_content={"customer_name": "Test Customer"}, # Missing order_id and amount + scenario_id=scenario_id, + ) + + task_id = await submit_test_input( + test_gateway_app_instance, input_data, scenario_id + ) + + all_events = await get_all_task_events( + test_gateway_app_instance, task_id, overall_timeout=15.0 + ) + + terminal_event, _, _ = extract_outputs_from_event_list(all_events, scenario_id) + + assert terminal_event is not None, f"Scenario {scenario_id}: No terminal event received" + + # The workflow should fail due to schema validation + if isinstance(terminal_event, Task): + print(f"Scenario {scenario_id}: Task state: {terminal_event.status.state}") + # We expect failure due to invalid input + assert terminal_event.status.state == "failed", ( + f"Scenario {scenario_id}: Expected workflow to fail with invalid input, " + f"got state: {terminal_event.status.state}" + ) + elif isinstance(terminal_event, JSONRPCError): + print(f"Scenario {scenario_id}: Received error (expected): {terminal_event.error}") + # Error response is also acceptable for validation failures + + print(f"Scenario {scenario_id}: Workflow properly rejected invalid input.") + + +async def test_workflow_rejects_wrong_type_input( + test_llm_server: TestLLMServer, + test_gateway_app_instance: TestGatewayComponent, +): + """ + Test that workflows reject input with wrong types. + + The StructuredTestWorkflow expects amount to be an integer. + We send a string to verify type validation. 
+ """ + scenario_id = "workflow_wrong_type_input_001" + print(f"\nRunning programmatic scenario: {scenario_id}") + + # Don't need to prime LLM - workflow should reject before calling any agents + prime_llm_server(test_llm_server, []) + + # Send input with wrong type (amount should be integer, not string) + input_data = create_workflow_input_with_artifact( + target_workflow="StructuredTestWorkflow", + user_identity="wrong_type_user@example.com", + artifact_filename="workflow_input.json", + artifact_content={ + "customer_name": "Test Customer", + "order_id": "ORD-123", + "amount": "not_an_integer", # Should be integer + }, + scenario_id=scenario_id, + ) + + task_id = await submit_test_input( + test_gateway_app_instance, input_data, scenario_id + ) + + all_events = await get_all_task_events( + test_gateway_app_instance, task_id, overall_timeout=15.0 + ) + + terminal_event, _, _ = extract_outputs_from_event_list(all_events, scenario_id) + + assert terminal_event is not None, f"Scenario {scenario_id}: No terminal event received" + + # The workflow should fail due to type validation + if isinstance(terminal_event, Task): + print(f"Scenario {scenario_id}: Task state: {terminal_event.status.state}") + assert terminal_event.status.state == "failed", ( + f"Scenario {scenario_id}: Expected workflow to fail with wrong type input, " + f"got state: {terminal_event.status.state}" + ) + elif isinstance(terminal_event, JSONRPCError): + print(f"Scenario {scenario_id}: Received error (expected): {terminal_event.error}") + + print(f"Scenario {scenario_id}: Workflow properly rejected wrong type input.") + + +async def test_workflow_node_failure_propagates( + test_llm_server: TestLLMServer, + test_gateway_app_instance: TestGatewayComponent, +): + """ + Test that when an agent node returns a failure status, the workflow fails properly. + + This tests the error handling path where: + 1. Workflow starts execution + 2. First agent node returns status=failure + 3. Workflow should fail with appropriate error information + """ + scenario_id = "workflow_node_failure_001" + print(f"\nRunning programmatic scenario: {scenario_id}") + + # Prime the LLM to simulate an agent that fails + # First call: agent saves artifact + llm_response_1 = ChatCompletionResponse( + id="chatcmpl-failure-1", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="""I encountered an error processing this request. +«««save_artifact: filename="error_output.json" mime_type="application/json" description="Error details" +{"error": "Processing failed", "reason": "Invalid data format"} +»»»""", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=20, total_tokens=30), + ).model_dump(exclude_none=True) + + # Second call: agent returns failure status + llm_response_2 = ChatCompletionResponse( + id="chatcmpl-failure-2", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="Failed to process the request. 
«result:artifact=error_output.json:0 status=failure»", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=10, total_tokens=20), + ).model_dump(exclude_none=True) + + prime_llm_server(test_llm_server, [llm_response_1, llm_response_2]) + + # Submit to the simple workflow - it will fail at step_1 + input_data = create_workflow_input_with_artifact( + target_workflow="SimpleTestWorkflow", + user_identity="error_test_user@example.com", + artifact_filename="workflow_input.json", + artifact_content={"input_text": "Test data that will fail"}, + scenario_id=scenario_id, + ) + + task_id = await submit_test_input( + test_gateway_app_instance, input_data, scenario_id + ) + + # Get all events with a longer timeout for error propagation + all_events = await get_all_task_events( + test_gateway_app_instance, task_id, overall_timeout=15.0 + ) + + terminal_event, _, _ = extract_outputs_from_event_list(all_events, scenario_id) + + # The workflow should complete (potentially with failure state) + # or return an error + assert terminal_event is not None, f"Scenario {scenario_id}: No terminal event received" + + # Check that we got either a failed task or an error + if isinstance(terminal_event, Task): + # Task completed - check if it's in failed state + assert terminal_event.status is not None + print(f"Scenario {scenario_id}: Task completed with state: {terminal_event.status.state}") + # The workflow should fail when a node fails + assert terminal_event.status.state == "failed", ( + f"Scenario {scenario_id}: Expected task to fail when node fails, " + f"got state: {terminal_event.status.state}" + ) + elif isinstance(terminal_event, JSONRPCError): + # Got an error response - this is also acceptable for failure scenarios + print(f"Scenario {scenario_id}: Received error: {terminal_event.error}") + + print(f"Scenario {scenario_id}: Completed - workflow properly handled node failure.") + + +async def test_workflow_completes_successfully_with_valid_input( + test_llm_server: TestLLMServer, + test_gateway_app_instance: TestGatewayComponent, +): + """ + Baseline test: verify workflow completes successfully with valid input. + This serves as a control test for the error scenarios. + """ + scenario_id = "workflow_success_baseline_001" + print(f"\nRunning programmatic scenario: {scenario_id}") + + # Prime LLM for successful two-node workflow + # Step 1: First agent + llm_response_1 = ChatCompletionResponse( + id="chatcmpl-success-1a", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="""Processing step 1. +«««save_artifact: filename="step1_output.json" mime_type="application/json" description="Step 1 result" +{"processed": "Step 1 done", "data": "intermediate_data"} +»»»""", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=20, total_tokens=30), + ).model_dump(exclude_none=True) + + llm_response_1b = ChatCompletionResponse( + id="chatcmpl-success-1b", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="Step 1 complete. «result:artifact=step1_output.json:0 status=success»", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=10, total_tokens=20), + ).model_dump(exclude_none=True) + + # Step 2: Second agent + llm_response_2a = ChatCompletionResponse( + id="chatcmpl-success-2a", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="""Processing step 2. 
+«««save_artifact: filename="step2_output.json" mime_type="application/json" description="Step 2 result" +{"final_result": "Workflow completed successfully"} +»»»""", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=20, total_tokens=30), + ).model_dump(exclude_none=True) + + llm_response_2b = ChatCompletionResponse( + id="chatcmpl-success-2b", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="Step 2 complete. «result:artifact=step2_output.json:0 status=success»", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=10, total_tokens=20), + ).model_dump(exclude_none=True) + + prime_llm_server( + test_llm_server, + [llm_response_1, llm_response_1b, llm_response_2a, llm_response_2b], + ) + + input_data = create_workflow_input_with_artifact( + target_workflow="SimpleTestWorkflow", + user_identity="success_test_user@example.com", + artifact_filename="workflow_input.json", + artifact_content={"input_text": "Valid test data"}, + scenario_id=scenario_id, + ) + + task_id = await submit_test_input( + test_gateway_app_instance, input_data, scenario_id + ) + + all_events = await get_all_task_events( + test_gateway_app_instance, task_id, overall_timeout=20.0 + ) + + terminal_event, _, _ = extract_outputs_from_event_list(all_events, scenario_id) + + assert terminal_event is not None, f"Scenario {scenario_id}: No terminal event received" + assert isinstance(terminal_event, Task), ( + f"Scenario {scenario_id}: Expected Task, got {type(terminal_event)}" + ) + assert terminal_event.status.state == "completed", ( + f"Scenario {scenario_id}: Expected completed state, got {terminal_event.status.state}" + ) + + print(f"Scenario {scenario_id}: Workflow completed successfully as expected.") + + +async def test_workflow_handles_empty_agent_response( + test_llm_server: TestLLMServer, + test_gateway_app_instance: TestGatewayComponent, +): + """ + Test workflow behavior when an agent returns without the expected result embed. + + This tests edge case handling where the agent doesn't properly signal completion. + """ + scenario_id = "workflow_empty_response_001" + print(f"\nRunning programmatic scenario: {scenario_id}") + + # Prime LLM to return a response without result embed + # This simulates an agent that doesn't follow the structured invocation protocol + llm_response_1 = ChatCompletionResponse( + id="chatcmpl-empty-1", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="I processed the request but forgot to save the output properly.", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=15, total_tokens=25), + ).model_dump(exclude_none=True) + + # Second attempt - agent tries again but still no result embed + llm_response_2 = ChatCompletionResponse( + id="chatcmpl-empty-2", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="Still processing...", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=5, total_tokens=15), + ).model_dump(exclude_none=True) + + # Eventually give up or provide proper response + llm_response_3 = ChatCompletionResponse( + id="chatcmpl-empty-3", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="""Let me save the output properly. 
+«««save_artifact: filename="step1_output.json" mime_type="application/json" description="Output" +{"processed": "Finally done"} +»»»""", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=20, total_tokens=30), + ).model_dump(exclude_none=True) + + llm_response_4 = ChatCompletionResponse( + id="chatcmpl-empty-4", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="Output saved. «result:artifact=step1_output.json:0 status=success»", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=10, total_tokens=20), + ).model_dump(exclude_none=True) + + # Responses for step 2 + llm_response_5 = ChatCompletionResponse( + id="chatcmpl-empty-5", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="""Step 2 processing. +«««save_artifact: filename="step2_output.json" mime_type="application/json" description="Final output" +{"final_result": "Completed after retry"} +»»»""", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=20, total_tokens=30), + ).model_dump(exclude_none=True) + + llm_response_6 = ChatCompletionResponse( + id="chatcmpl-empty-6", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="Done. «result:artifact=step2_output.json:0 status=success»", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=10, total_tokens=20), + ).model_dump(exclude_none=True) + + prime_llm_server( + test_llm_server, + [ + llm_response_1, + llm_response_2, + llm_response_3, + llm_response_4, + llm_response_5, + llm_response_6, + ], + ) + + input_data = create_workflow_input_with_artifact( + target_workflow="SimpleTestWorkflow", + user_identity="edge_case_user@example.com", + artifact_filename="workflow_input.json", + artifact_content={"input_text": "Test edge case"}, + scenario_id=scenario_id, + ) + + task_id = await submit_test_input( + test_gateway_app_instance, input_data, scenario_id + ) + + # Longer timeout since agent might retry + all_events = await get_all_task_events( + test_gateway_app_instance, task_id, overall_timeout=30.0 + ) + + terminal_event, _, _ = extract_outputs_from_event_list(all_events, scenario_id) + + assert terminal_event is not None, f"Scenario {scenario_id}: No terminal event received" + + # We expect either success (after retries) or failure + if isinstance(terminal_event, Task): + print( + f"Scenario {scenario_id}: Task ended with state: {terminal_event.status.state}" + ) + # Either state is acceptable - we're testing that the workflow handles this gracefully + assert terminal_event.status.state in ["completed", "failed"], ( + f"Scenario {scenario_id}: Unexpected state: {terminal_event.status.state}" + ) + + print(f"Scenario {scenario_id}: Workflow handled edge case gracefully.") + + +async def test_workflow_output_schema_validation_triggers_retry( + test_llm_server: TestLLMServer, + test_gateway_app_instance: TestGatewayComponent, + test_artifact_service_instance: TestInMemoryArtifactService, +): + """ + Test that when an agent returns output that doesn't match the output schema, + the workflow retries the agent with validation feedback. 
+ + The StructuredTestWorkflow's validate_order node expects output with: + - customer_name: string (required) + - order_id: string (required) + - amount: integer (required) + - status: string (required) + + We simulate the agent first returning invalid output (missing 'status'), + then returning valid output after receiving retry feedback. + """ + scenario_id = "workflow_output_schema_retry_001" + print(f"\nRunning programmatic scenario: {scenario_id}") + + # Setup: Pre-save the input artifact to the artifact service + user_identity = "schema_retry_user@example.com" + session_id = f"session_{scenario_id}" + artifact_content = { + "customer_name": "Test Customer", + "order_id": "ORD-123", + "amount": 100, + } + artifact_filename = "workflow_input.json" + + # Save artifact like the declarative test runner does + artifact_part = adk_types.Part( + inline_data=adk_types.Blob( + mime_type="application/json", + data=json.dumps(artifact_content).encode("utf-8"), + ) + ) + await test_artifact_service_instance.save_artifact( + app_name="test_namespace", + user_id=user_identity, + session_id=session_id, + filename=artifact_filename, + artifact=artifact_part, + ) + print(f"Scenario {scenario_id}: Setup artifact '{artifact_filename}' created.") + + # First response: Agent saves artifact MISSING the required 'status' field + llm_response_1 = ChatCompletionResponse( + id="chatcmpl-schema-retry-1", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="""Processing the order validation. +«««save_artifact: filename="validate_output.json" mime_type="application/json" description="Validation result" +{"customer_name": "Test Customer", "order_id": "ORD-123", "amount": 100} +»»»""", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=20, total_tokens=30), + ).model_dump(exclude_none=True) + + # First result embed - artifact doesn't match schema (missing 'status') + llm_response_2 = ChatCompletionResponse( + id="chatcmpl-schema-retry-2", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="Validation complete. «result:artifact=validate_output.json:0 status=success»", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=10, total_tokens=20), + ).model_dump(exclude_none=True) + + # Retry response: Agent saves corrected artifact WITH 'status' field + llm_response_3 = ChatCompletionResponse( + id="chatcmpl-schema-retry-3", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="""I'll correct the output to include the status field. +«««save_artifact: filename="validate_output_corrected.json" mime_type="application/json" description="Corrected validation result" +{"customer_name": "Test Customer", "order_id": "ORD-123", "amount": 100, "status": "validated"} +»»»""", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=50, completion_tokens=30, total_tokens=80), + ).model_dump(exclude_none=True) + + # Retry result embed - now with valid artifact + llm_response_4 = ChatCompletionResponse( + id="chatcmpl-schema-retry-4", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="Corrected output saved. 
«result:artifact=validate_output_corrected.json:0 status=success»", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=10, total_tokens=20), + ).model_dump(exclude_none=True) + + # Second node (process_order): First response + llm_response_5 = ChatCompletionResponse( + id="chatcmpl-schema-retry-5", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="""Processing the order. +«««save_artifact: filename="process_output.json" mime_type="application/json" description="Process result" +{"customer_name": "Test Customer", "order_id": "ORD-123", "amount": 100, "status": "processed", "processed": true} +»»»""", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=25, total_tokens=35), + ).model_dump(exclude_none=True) + + # Second node result embed + llm_response_6 = ChatCompletionResponse( + id="chatcmpl-schema-retry-6", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="Order processed. «result:artifact=process_output.json:0 status=success»", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=10, total_tokens=20), + ).model_dump(exclude_none=True) + + prime_llm_server( + test_llm_server, + [ + llm_response_1, + llm_response_2, + llm_response_3, + llm_response_4, + llm_response_5, + llm_response_6, + ], + ) + + # Submit valid input to the structured workflow (using invoked_with_artifacts pattern) + # Note: Use "external_context" (not "external_context_override") - this is what the test gateway reads + input_data = { + "target_agent_name": "StructuredTestWorkflow", + "user_identity": user_identity, + "a2a_parts": [{"type": "text", "text": "Process the order data"}], + "external_context": { + "test_case": scenario_id, + "a2a_session_id": session_id, + }, + "invoked_with_artifacts": [ + {"filename": artifact_filename, "version": 0} + ], + } + + task_id = await submit_test_input( + test_gateway_app_instance, input_data, scenario_id + ) + + # Allow extra time for retry loop + all_events = await get_all_task_events( + test_gateway_app_instance, task_id, overall_timeout=45.0 + ) + + terminal_event, _, _ = extract_outputs_from_event_list(all_events, scenario_id) + + assert terminal_event is not None, f"Scenario {scenario_id}: No terminal event received" + + # The workflow should complete successfully after the retry + if isinstance(terminal_event, Task): + print(f"Scenario {scenario_id}: Task state: {terminal_event.status.state}") + assert terminal_event.status.state == "completed", ( + f"Scenario {scenario_id}: Expected workflow to complete after retry, " + f"got state: {terminal_event.status.state}" + ) + elif isinstance(terminal_event, JSONRPCError): + pytest.fail( + f"Scenario {scenario_id}: Workflow failed with error: {terminal_event.error}" + ) + + # Verify the LLM was called at least 4 times (2 initial + retry + continue) + # This confirms the retry actually happened + captured_requests = test_llm_server.get_captured_requests() + call_count = len(captured_requests) + print(f"Scenario {scenario_id}: LLM was called {call_count} times") + assert call_count >= 4, ( + f"Scenario {scenario_id}: Expected at least 4 LLM calls (indicating retry), " + f"but only got {call_count}" + ) + + print(f"Scenario {scenario_id}: Workflow successfully retried after output schema validation failure.") + + +async def test_workflow_output_schema_multiple_retries( + test_llm_server: TestLLMServer, + test_gateway_app_instance: 
TestGatewayComponent, + test_artifact_service_instance: TestInMemoryArtifactService, +): + """ + Test that output schema validation can retry multiple times before succeeding. + + This tests the retry loop more thoroughly by having the agent: + 1. First attempt: Missing required field 'status' + 2. Second attempt (retry 1): Wrong type for 'amount' field + 3. Third attempt (retry 2): Valid output + + The workflow should complete successfully after multiple retries. + """ + scenario_id = "workflow_multiple_retries_001" + print(f"\nRunning programmatic scenario: {scenario_id}") + + # Setup: Pre-save the input artifact + user_identity = "multi_retry_user@example.com" + session_id = f"session_{scenario_id}" + artifact_content = { + "customer_name": "Test Customer", + "order_id": "ORD-456", + "amount": 200, + } + artifact_filename = "workflow_input.json" + + artifact_part = adk_types.Part( + inline_data=adk_types.Blob( + mime_type="application/json", + data=json.dumps(artifact_content).encode("utf-8"), + ) + ) + await test_artifact_service_instance.save_artifact( + app_name="test_namespace", + user_id=user_identity, + session_id=session_id, + filename=artifact_filename, + artifact=artifact_part, + ) + + # First attempt: Save artifact MISSING 'status' field + llm_response_1 = ChatCompletionResponse( + id="chatcmpl-multi-retry-1", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="""I'll validate this order. +«««save_artifact: filename="validate_attempt1.json" mime_type="application/json" description="First attempt" +{"customer_name": "Test Customer", "order_id": "ORD-456", "amount": 200} +»»»""", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=20, total_tokens=30), + ).model_dump(exclude_none=True) + + llm_response_2 = ChatCompletionResponse( + id="chatcmpl-multi-retry-2", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="Done. «result:artifact=validate_attempt1.json:0 status=success»", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=10, total_tokens=20), + ).model_dump(exclude_none=True) + + # Second attempt (retry 1): Has status but wrong type for amount (string instead of int) + llm_response_3 = ChatCompletionResponse( + id="chatcmpl-multi-retry-3", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="""Let me fix that - I was missing the status field. +«««save_artifact: filename="validate_attempt2.json" mime_type="application/json" description="Second attempt" +{"customer_name": "Test Customer", "order_id": "ORD-456", "amount": "two hundred", "status": "validated"} +»»»""", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=50, completion_tokens=30, total_tokens=80), + ).model_dump(exclude_none=True) + + llm_response_4 = ChatCompletionResponse( + id="chatcmpl-multi-retry-4", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="Fixed. «result:artifact=validate_attempt2.json:0 status=success»", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=10, total_tokens=20), + ).model_dump(exclude_none=True) + + # Third attempt (retry 2): All fields valid + llm_response_5 = ChatCompletionResponse( + id="chatcmpl-multi-retry-5", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="""I apologize - amount should be a number. Here's the corrected version. 
+«««save_artifact: filename="validate_attempt3.json" mime_type="application/json" description="Third attempt - correct" +{"customer_name": "Test Customer", "order_id": "ORD-456", "amount": 200, "status": "validated"} +»»»""", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=80, completion_tokens=35, total_tokens=115), + ).model_dump(exclude_none=True) + + llm_response_6 = ChatCompletionResponse( + id="chatcmpl-multi-retry-6", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="Validation complete. «result:artifact=validate_attempt3.json:0 status=success»", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=10, total_tokens=20), + ).model_dump(exclude_none=True) + + # Second node (process_order) - should run after successful validation + llm_response_7 = ChatCompletionResponse( + id="chatcmpl-multi-retry-7", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="""Processing the validated order. +«««save_artifact: filename="process_output.json" mime_type="application/json" description="Process result" +{"customer_name": "Test Customer", "order_id": "ORD-456", "amount": 200, "status": "processed", "processed": true} +»»»""", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=25, total_tokens=35), + ).model_dump(exclude_none=True) + + llm_response_8 = ChatCompletionResponse( + id="chatcmpl-multi-retry-8", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="Order processed. «result:artifact=process_output.json:0 status=success»", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=10, total_tokens=20), + ).model_dump(exclude_none=True) + + prime_llm_server( + test_llm_server, + [ + llm_response_1, llm_response_2, # First attempt (fails validation) + llm_response_3, llm_response_4, # Retry 1 (fails validation again) + llm_response_5, llm_response_6, # Retry 2 (succeeds) + llm_response_7, llm_response_8, # Second node + ], + ) + + input_data = { + "target_agent_name": "StructuredTestWorkflow", + "user_identity": user_identity, + "a2a_parts": [{"type": "text", "text": "Validate and process the order"}], + "external_context": { + "test_case": scenario_id, + "a2a_session_id": session_id, + }, + "invoked_with_artifacts": [ + {"filename": artifact_filename, "version": 0} + ], + } + + task_id = await submit_test_input( + test_gateway_app_instance, input_data, scenario_id + ) + + # Allow extra time for multiple retries + all_events = await get_all_task_events( + test_gateway_app_instance, task_id, overall_timeout=60.0 + ) + + terminal_event, _, _ = extract_outputs_from_event_list(all_events, scenario_id) + + assert terminal_event is not None, f"Scenario {scenario_id}: No terminal event" + + if isinstance(terminal_event, Task): + print(f"Scenario {scenario_id}: Task state: {terminal_event.status.state}") + assert terminal_event.status.state == "completed", ( + f"Scenario {scenario_id}: Expected completed after retries, " + f"got: {terminal_event.status.state}" + ) + elif isinstance(terminal_event, JSONRPCError): + pytest.fail(f"Scenario {scenario_id}: Workflow failed: {terminal_event.error}") + + # Verify multiple LLM calls happened (indicating retries) + captured_requests = test_llm_server.get_captured_requests() + call_count = len(captured_requests) + print(f"Scenario {scenario_id}: LLM was called {call_count} times") + + # Should have at least 6 calls: 2 
per attempt × 3 attempts for first node + assert call_count >= 6, ( + f"Scenario {scenario_id}: Expected at least 6 LLM calls for multiple retries, " + f"but only got {call_count}" + ) + + print(f"Scenario {scenario_id}: Workflow succeeded after multiple output validation retries.") + + +async def test_workflow_missing_result_embed_triggers_retry( + test_llm_server: TestLLMServer, + test_gateway_app_instance: TestGatewayComponent, + test_artifact_service_instance: TestInMemoryArtifactService, +): + """ + Test that a missing result embed triggers a retry. + + When an agent in a structured workflow saves an artifact but fails to output + the mandatory result embed «result:artifact=... status=success», the system + should provide feedback and retry. + + This tests the retry path in handler.py lines 687-706. + """ + scenario_id = "workflow_missing_result_embed_001" + print(f"\nRunning programmatic scenario: {scenario_id}") + + # Setup: Pre-save the input artifact + user_identity = "missing_embed_user@example.com" + session_id = f"session_{scenario_id}" + artifact_content = { + "customer_name": "Test Customer", + "order_id": "ORD-789", + "amount": 150, + } + artifact_filename = "workflow_input.json" + + artifact_part = adk_types.Part( + inline_data=adk_types.Blob( + mime_type="application/json", + data=json.dumps(artifact_content).encode("utf-8"), + ) + ) + await test_artifact_service_instance.save_artifact( + app_name="test_namespace", + user_id=user_identity, + session_id=session_id, + filename=artifact_filename, + artifact=artifact_part, + ) + + # First attempt: Agent saves artifact but DOES NOT include result embed + llm_response_1 = ChatCompletionResponse( + id="chatcmpl-missing-embed-1", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="""I'll validate this order. +«««save_artifact: filename="validate_output.json" mime_type="application/json" description="Validation result" +{"customer_name": "Test Customer", "order_id": "ORD-789", "amount": 150, "status": "validated"} +»»»""", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=20, total_tokens=30), + ).model_dump(exclude_none=True) + + # System notification after save - agent still doesn't provide result embed + llm_response_2 = ChatCompletionResponse( + id="chatcmpl-missing-embed-2", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="I have saved the validation result.", # Missing result embed! + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=10, total_tokens=20), + ).model_dump(exclude_none=True) + + # Retry attempt: Agent now provides the result embed correctly + llm_response_3 = ChatCompletionResponse( + id="chatcmpl-missing-embed-3", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="""I apologize for the missing embed. Here is the corrected response. +«««save_artifact: filename="validate_output_fixed.json" mime_type="application/json" description="Fixed validation" +{"customer_name": "Test Customer", "order_id": "ORD-789", "amount": 150, "status": "validated"} +»»»""", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=50, completion_tokens=25, total_tokens=75), + ).model_dump(exclude_none=True) + + llm_response_4 = ChatCompletionResponse( + id="chatcmpl-missing-embed-4", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="Validation complete. 
«result:artifact=validate_output_fixed.json:0 status=success»", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=10, total_tokens=20), + ).model_dump(exclude_none=True) + + # Second node (process_order) + llm_response_5 = ChatCompletionResponse( + id="chatcmpl-missing-embed-5", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="""Processing the order. +«««save_artifact: filename="process_output.json" mime_type="application/json" description="Process result" +{"customer_name": "Test Customer", "order_id": "ORD-789", "amount": 150, "status": "processed", "processed": true} +»»»""", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=25, total_tokens=35), + ).model_dump(exclude_none=True) + + llm_response_6 = ChatCompletionResponse( + id="chatcmpl-missing-embed-6", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="Order processed. «result:artifact=process_output.json:0 status=success»", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=10, total_tokens=20), + ).model_dump(exclude_none=True) + + prime_llm_server( + test_llm_server, + [ + llm_response_1, llm_response_2, # First attempt (missing embed) + llm_response_3, llm_response_4, # Retry (correct) + llm_response_5, llm_response_6, # Second node + ], + ) + + input_data = { + "target_agent_name": "StructuredTestWorkflow", + "user_identity": user_identity, + "a2a_parts": [{"type": "text", "text": "Validate and process the order"}], + "external_context": { + "test_case": scenario_id, + "a2a_session_id": session_id, + }, + "invoked_with_artifacts": [ + {"filename": artifact_filename, "version": 0} + ], + } + + task_id = await submit_test_input( + test_gateway_app_instance, input_data, scenario_id + ) + + all_events = await get_all_task_events( + test_gateway_app_instance, task_id, overall_timeout=30.0 + ) + + terminal_event, _, _ = extract_outputs_from_event_list(all_events, scenario_id) + + assert terminal_event is not None, f"Scenario {scenario_id}: No terminal event" + + if isinstance(terminal_event, Task): + print(f"Scenario {scenario_id}: Task state: {terminal_event.status.state}") + assert terminal_event.status.state == "completed", ( + f"Scenario {scenario_id}: Expected completed after retry, " + f"got: {terminal_event.status.state}" + ) + elif isinstance(terminal_event, JSONRPCError): + pytest.fail(f"Scenario {scenario_id}: Workflow failed: {terminal_event.error}") + + # Verify retry happened - should have more than 4 LLM calls + captured_requests = test_llm_server.get_captured_requests() + call_count = len(captured_requests) + print(f"Scenario {scenario_id}: LLM was called {call_count} times") + + # Should have at least 4 calls: 2 for first attempt, 2 for retry + assert call_count >= 4, ( + f"Scenario {scenario_id}: Expected at least 4 LLM calls for retry, " + f"but only got {call_count}" + ) + + print(f"Scenario {scenario_id}: Workflow successfully retried after missing result embed.") + + +async def test_workflow_missing_result_embed_max_retries_exceeded( + test_llm_server: TestLLMServer, + test_gateway_app_instance: TestGatewayComponent, + test_artifact_service_instance: TestInMemoryArtifactService, +): + """ + Test that workflow fails when agent repeatedly misses result embed and exhausts retries. 
+ + When an agent fails to include the mandatory result embed after all retry attempts + (default is 2 retries = 3 total attempts), the workflow should fail. + + This tests the max retries exceeded path in handler.py line 708. + """ + scenario_id = "workflow_missing_embed_max_retries_001" + print(f"\nRunning programmatic scenario: {scenario_id}") + + # Setup: Pre-save the input artifact + user_identity = "max_retries_user@example.com" + session_id = f"session_{scenario_id}" + artifact_content = { + "customer_name": "Test Customer", + "order_id": "ORD-MAX", + "amount": 100, + } + artifact_filename = "workflow_input.json" + + artifact_part = adk_types.Part( + inline_data=adk_types.Blob( + mime_type="application/json", + data=json.dumps(artifact_content).encode("utf-8"), + ) + ) + await test_artifact_service_instance.save_artifact( + app_name="test_namespace", + user_id=user_identity, + session_id=session_id, + filename=artifact_filename, + artifact=artifact_part, + ) + + # All attempts will save artifacts but NEVER include the result embed + # Attempt 1: Initial try + llm_response_1 = ChatCompletionResponse( + id="chatcmpl-max-retry-1", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="""Processing the order. +«««save_artifact: filename="attempt1.json" mime_type="application/json" description="First attempt" +{"customer_name": "Test Customer", "order_id": "ORD-MAX", "amount": 100, "status": "validated"} +»»»""", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=20, total_tokens=30), + ).model_dump(exclude_none=True) + + llm_response_2 = ChatCompletionResponse( + id="chatcmpl-max-retry-2", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="Done with first attempt.", # Missing result embed + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=10, total_tokens=20), + ).model_dump(exclude_none=True) + + # Attempt 2: First retry - still no result embed + llm_response_3 = ChatCompletionResponse( + id="chatcmpl-max-retry-3", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="""Let me try again. +«««save_artifact: filename="attempt2.json" mime_type="application/json" description="Second attempt" +{"customer_name": "Test Customer", "order_id": "ORD-MAX", "amount": 100, "status": "validated"} +»»»""", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=50, completion_tokens=20, total_tokens=70), + ).model_dump(exclude_none=True) + + llm_response_4 = ChatCompletionResponse( + id="chatcmpl-max-retry-4", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="Second attempt complete.", # Still missing result embed + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=10, total_tokens=20), + ).model_dump(exclude_none=True) + + # Attempt 3: Second retry - still no result embed (this exhausts retries) + llm_response_5 = ChatCompletionResponse( + id="chatcmpl-max-retry-5", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="""One more try. 
+«««save_artifact: filename="attempt3.json" mime_type="application/json" description="Third attempt" +{"customer_name": "Test Customer", "order_id": "ORD-MAX", "amount": 100, "status": "validated"} +»»»""", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=80, completion_tokens=20, total_tokens=100), + ).model_dump(exclude_none=True) + + llm_response_6 = ChatCompletionResponse( + id="chatcmpl-max-retry-6", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="Third attempt done.", # Still missing - will fail now + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=10, total_tokens=20), + ).model_dump(exclude_none=True) + + prime_llm_server( + test_llm_server, + [ + llm_response_1, llm_response_2, # Initial attempt + llm_response_3, llm_response_4, # Retry 1 + llm_response_5, llm_response_6, # Retry 2 (final) + ], + ) + + input_data = { + "target_agent_name": "StructuredTestWorkflow", + "user_identity": user_identity, + "a2a_parts": [{"type": "text", "text": "Validate the order"}], + "external_context": { + "test_case": scenario_id, + "a2a_session_id": session_id, + }, + "invoked_with_artifacts": [ + {"filename": artifact_filename, "version": 0} + ], + } + + task_id = await submit_test_input( + test_gateway_app_instance, input_data, scenario_id + ) + + # Allow time for multiple retry attempts + all_events = await get_all_task_events( + test_gateway_app_instance, task_id, overall_timeout=60.0 + ) + + terminal_event, _, _ = extract_outputs_from_event_list(all_events, scenario_id) + + assert terminal_event is not None, f"Scenario {scenario_id}: No terminal event" + + # The workflow should fail after exhausting retries + if isinstance(terminal_event, Task): + print(f"Scenario {scenario_id}: Task state: {terminal_event.status.state}") + assert terminal_event.status.state == "failed", ( + f"Scenario {scenario_id}: Expected failed state after max retries, " + f"got: {terminal_event.status.state}" + ) + # Check for appropriate error message + if terminal_event.status.message: + error_text = get_message_text(terminal_event.status.message, delimiter="") + print(f"Scenario {scenario_id}: Error message: {error_text}") + assert "result embed" in error_text.lower(), ( + f"Scenario {scenario_id}: Expected error about result embed, " + f"got: {error_text}" + ) + elif isinstance(terminal_event, JSONRPCError): + # This is also acceptable - workflow failed + print(f"Scenario {scenario_id}: Workflow failed with error: {terminal_event.error}") + + # Verify all retry attempts were made + captured_requests = test_llm_server.get_captured_requests() + call_count = len(captured_requests) + print(f"Scenario {scenario_id}: LLM was called {call_count} times") + + # Should have at least 6 calls: 2 per attempt × 3 attempts + assert call_count >= 6, ( + f"Scenario {scenario_id}: Expected at least 6 LLM calls for 3 attempts, " + f"but only got {call_count}" + ) + + print(f"Scenario {scenario_id}: Workflow correctly failed after exhausting retries for missing result embed.") + + +async def test_workflow_output_schema_max_retries_exceeded( + test_llm_server: TestLLMServer, + test_gateway_app_instance: TestGatewayComponent, + test_artifact_service_instance: TestInMemoryArtifactService, +): + """ + Test that workflow fails when output schema validation repeatedly fails after all retries. + + Unlike the missing result embed test, the agent DOES include the result embed, + but the artifact content fails schema validation every time. 
+ + This tests the max retries exceeded path in handler.py line 813. + """ + scenario_id = "workflow_output_schema_max_retries_001" + print(f"\nRunning programmatic scenario: {scenario_id}") + + # Setup: Pre-save the input artifact + user_identity = "schema_max_retries_user@example.com" + session_id = f"session_{scenario_id}" + artifact_content = { + "customer_name": "Test Customer", + "order_id": "ORD-SCHEMA", + "amount": 100, + } + artifact_filename = "workflow_input.json" + + artifact_part = adk_types.Part( + inline_data=adk_types.Blob( + mime_type="application/json", + data=json.dumps(artifact_content).encode("utf-8"), + ) + ) + await test_artifact_service_instance.save_artifact( + app_name="test_namespace", + user_id=user_identity, + session_id=session_id, + filename=artifact_filename, + artifact=artifact_part, + ) + + # All attempts will include result embed but fail schema validation + # The output_schema_override requires: customer_name, order_id, amount, status + # We'll always omit 'status' to cause validation failure + + # Attempt 1 + llm_response_1 = ChatCompletionResponse( + id="chatcmpl-schema-max-1", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="""Validating the order. +«««save_artifact: filename="output1.json" mime_type="application/json" description="Attempt 1" +{"customer_name": "Test Customer", "order_id": "ORD-SCHEMA", "amount": 100} +»»»""", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=20, total_tokens=30), + ).model_dump(exclude_none=True) + + llm_response_2 = ChatCompletionResponse( + id="chatcmpl-schema-max-2", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="Done. «result:artifact=output1.json:0 status=success»", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=10, total_tokens=20), + ).model_dump(exclude_none=True) + + # Attempt 2 (retry 1) - still missing 'status' + llm_response_3 = ChatCompletionResponse( + id="chatcmpl-schema-max-3", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="""Let me try again. +«««save_artifact: filename="output2.json" mime_type="application/json" description="Attempt 2" +{"customer_name": "Test Customer", "order_id": "ORD-SCHEMA", "amount": 100} +»»»""", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=50, completion_tokens=20, total_tokens=70), + ).model_dump(exclude_none=True) + + llm_response_4 = ChatCompletionResponse( + id="chatcmpl-schema-max-4", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="Fixed. «result:artifact=output2.json:0 status=success»", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=10, total_tokens=20), + ).model_dump(exclude_none=True) + + # Attempt 3 (retry 2) - still missing 'status' - exhausts retries + llm_response_5 = ChatCompletionResponse( + id="chatcmpl-schema-max-5", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="""One more try. 
+«««save_artifact: filename="output3.json" mime_type="application/json" description="Attempt 3" +{"customer_name": "Test Customer", "order_id": "ORD-SCHEMA", "amount": 100} +»»»""", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=80, completion_tokens=20, total_tokens=100), + ).model_dump(exclude_none=True) + + llm_response_6 = ChatCompletionResponse( + id="chatcmpl-schema-max-6", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="Done. «result:artifact=output3.json:0 status=success»", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=10, total_tokens=20), + ).model_dump(exclude_none=True) + + prime_llm_server( + test_llm_server, + [ + llm_response_1, llm_response_2, # Initial attempt + llm_response_3, llm_response_4, # Retry 1 + llm_response_5, llm_response_6, # Retry 2 (final) + ], + ) + + input_data = { + "target_agent_name": "StructuredTestWorkflow", + "user_identity": user_identity, + "a2a_parts": [{"type": "text", "text": "Validate the order"}], + "external_context": { + "test_case": scenario_id, + "a2a_session_id": session_id, + }, + "invoked_with_artifacts": [ + {"filename": artifact_filename, "version": 0} + ], + } + + task_id = await submit_test_input( + test_gateway_app_instance, input_data, scenario_id + ) + + all_events = await get_all_task_events( + test_gateway_app_instance, task_id, overall_timeout=60.0 + ) + + terminal_event, _, _ = extract_outputs_from_event_list(all_events, scenario_id) + + assert terminal_event is not None, f"Scenario {scenario_id}: No terminal event" + + if isinstance(terminal_event, Task): + print(f"Scenario {scenario_id}: Task state: {terminal_event.status.state}") + assert terminal_event.status.state == "failed", ( + f"Scenario {scenario_id}: Expected failed state after max retries, " + f"got: {terminal_event.status.state}" + ) + if terminal_event.status.message: + error_text = get_message_text(terminal_event.status.message, delimiter="") + print(f"Scenario {scenario_id}: Error message: {error_text}") + # Should mention validation failure + assert "validation" in error_text.lower() or "failed" in error_text.lower(), ( + f"Scenario {scenario_id}: Expected error about validation, got: {error_text}" + ) + elif isinstance(terminal_event, JSONRPCError): + print(f"Scenario {scenario_id}: Workflow failed with error: {terminal_event.error}") + + print(f"Scenario {scenario_id}: Workflow correctly failed after exhausting retries for output schema validation.") + + +async def test_workflow_artifact_not_found( + test_llm_server: TestLLMServer, + test_gateway_app_instance: TestGatewayComponent, + test_artifact_service_instance: TestInMemoryArtifactService, +): + """ + Test that workflow fails when agent references an artifact that doesn't exist. + + The agent includes a result embed referencing an artifact name that was never saved. + + This tests the artifact not found path in handler.py lines 748-756. 
+ """ + scenario_id = "workflow_artifact_not_found_001" + print(f"\nRunning programmatic scenario: {scenario_id}") + + # Setup: Pre-save the input artifact + user_identity = "artifact_not_found_user@example.com" + session_id = f"session_{scenario_id}" + artifact_content = { + "customer_name": "Test Customer", + "order_id": "ORD-NOTFOUND", + "amount": 100, + } + artifact_filename = "workflow_input.json" + + artifact_part = adk_types.Part( + inline_data=adk_types.Blob( + mime_type="application/json", + data=json.dumps(artifact_content).encode("utf-8"), + ) + ) + await test_artifact_service_instance.save_artifact( + app_name="test_namespace", + user_id=user_identity, + session_id=session_id, + filename=artifact_filename, + artifact=artifact_part, + ) + + # Agent responds with result embed referencing a NON-EXISTENT artifact + # Note: Agent does NOT save any artifact, just references one that doesn't exist + llm_response_1 = ChatCompletionResponse( + id="chatcmpl-notfound-1", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="I have completed the validation.", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=10, total_tokens=20), + ).model_dump(exclude_none=True) + + # Agent claims success with non-existent artifact (no version specified triggers lookup) + llm_response_2 = ChatCompletionResponse( + id="chatcmpl-notfound-2", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="Done. «result:artifact=nonexistent_artifact.json status=success»", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=10, total_tokens=20), + ).model_dump(exclude_none=True) + + prime_llm_server( + test_llm_server, + [llm_response_1, llm_response_2], + ) + + input_data = { + "target_agent_name": "StructuredTestWorkflow", + "user_identity": user_identity, + "a2a_parts": [{"type": "text", "text": "Validate the order"}], + "external_context": { + "test_case": scenario_id, + "a2a_session_id": session_id, + }, + "invoked_with_artifacts": [ + {"filename": artifact_filename, "version": 0} + ], + } + + task_id = await submit_test_input( + test_gateway_app_instance, input_data, scenario_id + ) + + all_events = await get_all_task_events( + test_gateway_app_instance, task_id, overall_timeout=30.0 + ) + + terminal_event, _, _ = extract_outputs_from_event_list(all_events, scenario_id) + + assert terminal_event is not None, f"Scenario {scenario_id}: No terminal event" + + if isinstance(terminal_event, Task): + print(f"Scenario {scenario_id}: Task state: {terminal_event.status.state}") + assert terminal_event.status.state == "failed", ( + f"Scenario {scenario_id}: Expected failed state for missing artifact, " + f"got: {terminal_event.status.state}" + ) + if terminal_event.status.message: + error_text = get_message_text(terminal_event.status.message, delimiter="") + print(f"Scenario {scenario_id}: Error message: {error_text}") + # Should mention artifact not found + assert "not found" in error_text.lower() or "artifact" in error_text.lower(), ( + f"Scenario {scenario_id}: Expected error about artifact not found, got: {error_text}" + ) + elif isinstance(terminal_event, JSONRPCError): + print(f"Scenario {scenario_id}: Workflow failed with error: {terminal_event.error}") + + print(f"Scenario {scenario_id}: Workflow correctly failed when artifact not found.") + + +async def test_workflow_input_validation_failure( + test_llm_server: TestLLMServer, + test_gateway_app_instance: 
TestGatewayComponent, + test_artifact_service_instance: TestInMemoryArtifactService, +): + """ + Test that workflow fails when input doesn't match the input schema. + + The workflow has an input_schema requiring customer_name, order_id, and amount. + We provide input that's missing required fields. + + This tests the input validation path in handler.py lines 203-211. + """ + scenario_id = "workflow_input_validation_001" + print(f"\nRunning programmatic scenario: {scenario_id}") + + # Setup: Pre-save an input artifact that's MISSING required fields + user_identity = "input_validation_user@example.com" + session_id = f"session_{scenario_id}" + + # Missing 'amount' which is required by the input schema + invalid_artifact_content = { + "customer_name": "Test Customer", + "order_id": "ORD-INVALID", + # "amount" is missing - required field + } + artifact_filename = "workflow_input.json" + + artifact_part = adk_types.Part( + inline_data=adk_types.Blob( + mime_type="application/json", + data=json.dumps(invalid_artifact_content).encode("utf-8"), + ) + ) + await test_artifact_service_instance.save_artifact( + app_name="test_namespace", + user_id=user_identity, + session_id=session_id, + filename=artifact_filename, + artifact=artifact_part, + ) + + # We don't expect any LLM calls since input validation should fail immediately + # But prime with empty responses just in case + prime_llm_server(test_llm_server, []) + + input_data = { + "target_agent_name": "StructuredTestWorkflow", + "user_identity": user_identity, + "a2a_parts": [{"type": "text", "text": "Validate the order"}], + "external_context": { + "test_case": scenario_id, + "a2a_session_id": session_id, + }, + "invoked_with_artifacts": [ + {"filename": artifact_filename, "version": 0} + ], + } + + task_id = await submit_test_input( + test_gateway_app_instance, input_data, scenario_id + ) + + all_events = await get_all_task_events( + test_gateway_app_instance, task_id, overall_timeout=30.0 + ) + + terminal_event, _, _ = extract_outputs_from_event_list(all_events, scenario_id) + + assert terminal_event is not None, f"Scenario {scenario_id}: No terminal event" + + if isinstance(terminal_event, Task): + print(f"Scenario {scenario_id}: Task state: {terminal_event.status.state}") + assert terminal_event.status.state == "failed", ( + f"Scenario {scenario_id}: Expected failed state for input validation failure, " + f"got: {terminal_event.status.state}" + ) + if terminal_event.status.message: + error_text = get_message_text(terminal_event.status.message, delimiter="") + print(f"Scenario {scenario_id}: Error message: {error_text}") + elif isinstance(terminal_event, JSONRPCError): + print(f"Scenario {scenario_id}: Workflow failed with error: {terminal_event.error}") + + # Verify no LLM calls were made (input validation should fail before agent execution) + captured_requests = test_llm_server.get_captured_requests() + call_count = len(captured_requests) + print(f"Scenario {scenario_id}: LLM was called {call_count} times") + + # Input validation happens at workflow level, so agent might still be called + # But the workflow should ultimately fail + + print(f"Scenario {scenario_id}: Workflow correctly failed due to input validation failure.") + + +async def test_workflow_cancellation( + test_llm_server: TestLLMServer, + test_gateway_app_instance: TestGatewayComponent, + test_artifact_service_instance: TestInMemoryArtifactService, +): + """ + Test that a workflow can be cancelled while running. 
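+
+    The test submits the task, waits briefly, then sends a cancel request. Because the
+    timing is non-deterministic, either a 'canceled' or a 'completed' terminal state is
+    accepted; a 'failed' state is not.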
+ + This tests the cancellation logic in: + - event_handlers.py handle_cancel_request() + - component.py finalize_workflow_cancelled() + - dag_executor.py cancellation checks + """ + import asyncio + scenario_id = "workflow_cancellation_001" + print(f"\nRunning programmatic scenario: {scenario_id}") + + # Setup: Pre-save the input artifact + user_identity = "cancellation_test_user@example.com" + session_id = f"session_{scenario_id}" + artifact_content = { + "customer_name": "Test Customer", + "order_id": "ORD-CANCEL", + "amount": 100, + } + artifact_filename = "workflow_input.json" + + artifact_part = adk_types.Part( + inline_data=adk_types.Blob( + mime_type="application/json", + data=json.dumps(artifact_content).encode("utf-8"), + ) + ) + await test_artifact_service_instance.save_artifact( + app_name="test_namespace", + user_id=user_identity, + session_id=session_id, + filename=artifact_filename, + artifact=artifact_part, + ) + + # Prime LLM with responses for first node - make it slow by including save_artifact + # The first node will save an artifact, giving us time to send cancellation + llm_response_1 = ChatCompletionResponse( + id="chatcmpl-cancel-1", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="""Starting validation. +«««save_artifact: filename="validate_output.json" mime_type="application/json" description="Validation result" +{"customer_name": "Test Customer", "order_id": "ORD-CANCEL", "amount": 100, "status": "validated"} +»»»""", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=20, total_tokens=30), + ).model_dump(exclude_none=True) + + llm_response_2 = ChatCompletionResponse( + id="chatcmpl-cancel-2", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="Validation complete. «result:artifact=validate_output.json:0 status=success»", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=10, total_tokens=20), + ).model_dump(exclude_none=True) + + # Second node responses (in case cancellation doesn't work) + llm_response_3 = ChatCompletionResponse( + id="chatcmpl-cancel-3", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="""Processing order. +«««save_artifact: filename="process_output.json" mime_type="application/json" description="Process result" +{"status": "processed"} +»»»""", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=20, total_tokens=30), + ).model_dump(exclude_none=True) + + llm_response_4 = ChatCompletionResponse( + id="chatcmpl-cancel-4", + model="test-model", + choices=[ + Choice( + message=Message( + role="assistant", + content="Process complete. 
«result:artifact=process_output.json:0 status=success»", + ), + finish_reason="stop", + ) + ], + usage=Usage(prompt_tokens=10, completion_tokens=10, total_tokens=20), + ).model_dump(exclude_none=True) + + prime_llm_server( + test_llm_server, + [llm_response_1, llm_response_2, llm_response_3, llm_response_4], + ) + + input_data = { + "target_agent_name": "StructuredTestWorkflow", + "user_identity": user_identity, + "a2a_parts": [{"type": "text", "text": "Process the order"}], + "external_context": { + "test_case": scenario_id, + "a2a_session_id": session_id, + }, + "invoked_with_artifacts": [ + {"filename": artifact_filename, "version": 0} + ], + } + + # Submit the workflow task + task_id = await submit_test_input( + test_gateway_app_instance, input_data, scenario_id + ) + print(f"Scenario {scenario_id}: Submitted workflow task: {task_id}") + + # Wait briefly for the workflow to start, then cancel it + await asyncio.sleep(0.5) + + # Send cancellation request + print(f"Scenario {scenario_id}: Sending cancellation request") + await test_gateway_app_instance.cancel_task( + agent_name="StructuredTestWorkflow", + task_id=task_id, + ) + + # Wait for the workflow to complete (should be cancelled) + all_events = await get_all_task_events( + test_gateway_app_instance, task_id, overall_timeout=30.0 + ) + + terminal_event, _, _ = extract_outputs_from_event_list(all_events, scenario_id) + + assert terminal_event is not None, f"Scenario {scenario_id}: No terminal event" + + if isinstance(terminal_event, Task): + print(f"Scenario {scenario_id}: Task state: {terminal_event.status.state}") + # The workflow could be cancelled or completed depending on timing + # Both are acceptable outcomes for this test + if terminal_event.status.state == "canceled": + print(f"Scenario {scenario_id}: Workflow was successfully cancelled") + elif terminal_event.status.state == "completed": + print(f"Scenario {scenario_id}: Workflow completed before cancellation took effect (timing issue, acceptable)") + else: + # Failed state should not happen + assert terminal_event.status.state in ("canceled", "completed"), ( + f"Scenario {scenario_id}: Unexpected state: {terminal_event.status.state}" + ) + elif isinstance(terminal_event, JSONRPCError): + pytest.fail(f"Scenario {scenario_id}: Unexpected error: {terminal_event.error}") + + print(f"Scenario {scenario_id}: Workflow cancellation test completed.") diff --git a/tests/unit/workflow/__init__.py b/tests/unit/workflow/__init__.py new file mode 100644 index 000000000..512ba5629 --- /dev/null +++ b/tests/unit/workflow/__init__.py @@ -0,0 +1 @@ +# Unit tests for workflow components diff --git a/tests/unit/workflow/test_agent_caller.py b/tests/unit/workflow/test_agent_caller.py new file mode 100644 index 000000000..8a50e9c5f --- /dev/null +++ b/tests/unit/workflow/test_agent_caller.py @@ -0,0 +1,308 @@ +""" +Unit tests for workflow agent caller input resolution. 
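+
+All tests exercise AgentCaller._resolve_node_input directly against a mocked host
+component and DAG executor, so no broker, LLM, or artifact service is required.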
+ +Tests the _resolve_node_input method which handles: +- Explicit input mapping with template resolution +- Implicit input inference for nodes without explicit input +""" + +import pytest +from datetime import datetime +from unittest.mock import Mock, MagicMock +from solace_agent_mesh.workflow.agent_caller import AgentCaller +from solace_agent_mesh.workflow.app import AgentNode, ConditionalNode +from solace_agent_mesh.workflow.workflow_execution_context import ( + WorkflowExecutionState, +) + + +def create_mock_host(): + """Create a mock host component with required attributes.""" + host = Mock() + host.log_identifier = "[Test]" + host.dag_executor = Mock() + host.dag_executor.nodes = {} + host.dag_executor.resolve_value = Mock(side_effect=lambda v, s: v) + return host + + +def create_workflow_state( + node_outputs: dict = None, + completed_nodes: dict = None, +) -> WorkflowExecutionState: + """Create a workflow state with given outputs.""" + state = WorkflowExecutionState( + execution_id="test-exec-001", + workflow_name="TestWorkflow", + start_time=datetime.now(), + ) + if node_outputs: + state.node_outputs = node_outputs + if completed_nodes: + state.completed_nodes = completed_nodes + return state + + +class TestResolveNodeInputExplicit: + """Tests for explicit input mapping resolution.""" + + @pytest.mark.asyncio + async def test_explicit_input_resolved_via_dag_executor(self): + """When node has explicit input, each value is resolved via DAGExecutor.""" + host = create_mock_host() + # Mock resolve_value to transform templates + host.dag_executor.resolve_value = Mock( + side_effect=lambda v, s: f"resolved_{v}" if isinstance(v, str) else v + ) + + caller = AgentCaller(host) + + node = AgentNode( + id="test_node", + type="agent", + agent_name="TestAgent", + input={"field1": "{{workflow.input.x}}", "field2": "literal"}, + ) + + state = create_workflow_state( + node_outputs={"workflow_input": {"output": {"x": 42}}} + ) + + result = await caller._resolve_node_input(node, state) + + # Each value should be passed through resolve_value + assert host.dag_executor.resolve_value.call_count == 2 + assert result == { + "field1": "resolved_{{workflow.input.x}}", + "field2": "resolved_literal", + } + + @pytest.mark.asyncio + async def test_explicit_input_empty_dict_returns_empty(self): + """When node has empty input dict, return empty dict.""" + host = create_mock_host() + caller = AgentCaller(host) + + node = AgentNode( + id="test_node", + type="agent", + agent_name="TestAgent", + input={}, + ) + + state = create_workflow_state() + result = await caller._resolve_node_input(node, state) + + assert result == {} + + +class TestResolveNodeInputImplicitInitialNode: + """Tests for implicit input inference - initial nodes (no dependencies).""" + + @pytest.mark.asyncio + async def test_initial_node_uses_workflow_input(self): + """Node with no dependencies uses workflow input.""" + host = create_mock_host() + caller = AgentCaller(host) + + node = AgentNode( + id="first_node", + type="agent", + agent_name="TestAgent", + # No input mapping, no depends_on + ) + + workflow_input_data = {"customer": "Alice", "amount": 100} + state = create_workflow_state( + node_outputs={"workflow_input": {"output": workflow_input_data}} + ) + + result = await caller._resolve_node_input(node, state) + + assert result == workflow_input_data + + @pytest.mark.asyncio + async def test_initial_node_raises_when_workflow_input_missing(self): + """Node with no dependencies raises if workflow_input not initialized.""" + host = 
create_mock_host() + caller = AgentCaller(host) + + node = AgentNode( + id="first_node", + type="agent", + agent_name="TestAgent", + ) + + # No workflow_input in state + state = create_workflow_state(node_outputs={}) + + with pytest.raises(ValueError) as exc_info: + await caller._resolve_node_input(node, state) + + assert "Workflow input has not been initialized" in str(exc_info.value) + + +class TestResolveNodeInputImplicitSingleDependency: + """Tests for implicit input inference - single dependency.""" + + @pytest.mark.asyncio + async def test_single_dependency_uses_dependency_output(self): + """Node with one non-conditional dependency uses that dependency's output.""" + host = create_mock_host() + # Register the dependency as a regular agent node + host.dag_executor.nodes = { + "step_1": AgentNode(id="step_1", type="agent", agent_name="Agent1") + } + caller = AgentCaller(host) + + node = AgentNode( + id="step_2", + type="agent", + agent_name="TestAgent", + depends_on=["step_1"], + # No explicit input + ) + + step1_output = {"processed": True, "data": "result"} + state = create_workflow_state( + node_outputs={ + "workflow_input": {"output": {"original": "input"}}, + "step_1": {"output": step1_output}, + } + ) + + result = await caller._resolve_node_input(node, state) + + assert result == step1_output + + @pytest.mark.asyncio + async def test_single_dependency_raises_when_dependency_not_complete(self): + """Node raises if its single dependency hasn't completed.""" + host = create_mock_host() + host.dag_executor.nodes = { + "step_1": AgentNode(id="step_1", type="agent", agent_name="Agent1") + } + caller = AgentCaller(host) + + node = AgentNode( + id="step_2", + type="agent", + agent_name="TestAgent", + depends_on=["step_1"], + ) + + # step_1 not in node_outputs (hasn't completed) + state = create_workflow_state( + node_outputs={"workflow_input": {"output": {}}} + ) + + with pytest.raises(ValueError) as exc_info: + await caller._resolve_node_input(node, state) + + assert "Dependency 'step_1' has not completed" in str(exc_info.value) + + @pytest.mark.asyncio + async def test_conditional_dependency_uses_workflow_input(self): + """Node depending on conditional uses workflow input, not conditional output.""" + host = create_mock_host() + # Register the dependency as a conditional node + host.dag_executor.nodes = { + "branch": ConditionalNode( + id="branch", + type="conditional", + condition="{{step_1.output.status}} == 'success'", + true_branch="success_path", + ) + } + caller = AgentCaller(host) + + node = AgentNode( + id="success_path", + type="agent", + agent_name="TestAgent", + depends_on=["branch"], + # No explicit input + ) + + workflow_input_data = {"original": "workflow input"} + state = create_workflow_state( + node_outputs={ + "workflow_input": {"output": workflow_input_data}, + "branch": {"output": {"selected_branch": "success_path"}}, + } + ) + + result = await caller._resolve_node_input(node, state) + + # Should use workflow input, not branch output + assert result == workflow_input_data + + @pytest.mark.asyncio + async def test_conditional_dependency_raises_when_workflow_input_missing(self): + """Node depending on conditional raises if workflow_input missing.""" + host = create_mock_host() + host.dag_executor.nodes = { + "branch": ConditionalNode( + id="branch", + type="conditional", + condition="true", + true_branch="next", + ) + } + caller = AgentCaller(host) + + node = AgentNode( + id="next", + type="agent", + agent_name="TestAgent", + depends_on=["branch"], + ) + + # No 
workflow_input + state = create_workflow_state( + node_outputs={"branch": {"output": {}}} + ) + + with pytest.raises(ValueError) as exc_info: + await caller._resolve_node_input(node, state) + + assert "Workflow input has not been initialized" in str(exc_info.value) + + +class TestResolveNodeInputImplicitMultipleDependencies: + """Tests for implicit input inference - multiple dependencies (error case).""" + + @pytest.mark.asyncio + async def test_multiple_dependencies_without_explicit_input_raises(self): + """Node with multiple dependencies but no explicit input raises error.""" + host = create_mock_host() + host.dag_executor.nodes = { + "step_1": AgentNode(id="step_1", type="agent", agent_name="Agent1"), + "step_2": AgentNode(id="step_2", type="agent", agent_name="Agent2"), + } + caller = AgentCaller(host) + + node = AgentNode( + id="merge_node", + type="agent", + agent_name="MergeAgent", + depends_on=["step_1", "step_2"], + # No explicit input - ambiguous which dependency to use + ) + + state = create_workflow_state( + node_outputs={ + "workflow_input": {"output": {}}, + "step_1": {"output": {"data": "from_1"}}, + "step_2": {"output": {"data": "from_2"}}, + } + ) + + with pytest.raises(ValueError) as exc_info: + await caller._resolve_node_input(node, state) + + error_msg = str(exc_info.value) + assert "multiple dependencies" in error_msg.lower() + assert "step_1" in error_msg + assert "step_2" in error_msg + assert "explicit 'input' mapping" in error_msg diff --git a/tests/unit/workflow/test_conditional_evaluation.py b/tests/unit/workflow/test_conditional_evaluation.py new file mode 100644 index 000000000..31a6d3f0b --- /dev/null +++ b/tests/unit/workflow/test_conditional_evaluation.py @@ -0,0 +1,305 @@ +""" +Unit tests for conditional expression evaluation in workflows. + +Tests the evaluate_condition() function which is a pure function that +evaluates condition expressions against workflow state. 
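+
+Conditions are boolean expressions whose {{...}} templates are substituted
+from node outputs and workflow input before evaluation, for example
+"'{{step1.output.status}}' == 'success'".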
+""" + +import pytest +from datetime import datetime, timezone + +from solace_agent_mesh.workflow.flow_control.conditional import ( + evaluate_condition, + ConditionalEvaluationError, + _apply_template_aliases, +) +from solace_agent_mesh.workflow.workflow_execution_context import WorkflowExecutionState + + +def create_workflow_state(node_outputs: dict) -> WorkflowExecutionState: + """Create a WorkflowExecutionState with given node outputs.""" + return WorkflowExecutionState( + workflow_name="test_workflow", + execution_id="test_exec_001", + start_time=datetime.now(timezone.utc), + node_outputs=node_outputs, + ) + + +class TestStringComparison: + """Tests for string comparison conditions.""" + + def test_string_equality_true(self): + """String equality returns True when strings match.""" + state = create_workflow_state({ + "step1": {"output": {"status": "success"}} + }) + + result = evaluate_condition("'{{step1.output.status}}' == 'success'", state) + assert result is True + + def test_string_equality_false(self): + """String equality returns False when strings don't match.""" + state = create_workflow_state({ + "step1": {"output": {"status": "failure"}} + }) + + result = evaluate_condition("'{{step1.output.status}}' == 'success'", state) + assert result is False + + def test_string_inequality(self): + """String inequality comparison works.""" + state = create_workflow_state({ + "step1": {"output": {"status": "pending"}} + }) + + result = evaluate_condition("'{{step1.output.status}}' != 'success'", state) + assert result is True + + +class TestNumericComparison: + """Tests for numeric comparison conditions.""" + + def test_greater_than_true(self): + """Greater than comparison returns True when condition is met.""" + state = create_workflow_state({ + "step1": {"output": {"count": 15}} + }) + + result = evaluate_condition("{{step1.output.count}} > 10", state) + assert result is True + + def test_greater_than_false(self): + """Greater than comparison returns False when condition is not met.""" + state = create_workflow_state({ + "step1": {"output": {"count": 5}} + }) + + result = evaluate_condition("{{step1.output.count}} > 10", state) + assert result is False + + def test_less_than(self): + """Less than comparison works.""" + state = create_workflow_state({ + "step1": {"output": {"value": 3}} + }) + + result = evaluate_condition("{{step1.output.value}} < 5", state) + assert result is True + + def test_greater_than_or_equal(self): + """Greater than or equal comparison works.""" + state = create_workflow_state({ + "step1": {"output": {"count": 10}} + }) + + result = evaluate_condition("{{step1.output.count}} >= 10", state) + assert result is True + + def test_less_than_or_equal(self): + """Less than or equal comparison works.""" + state = create_workflow_state({ + "step1": {"output": {"count": 10}} + }) + + result = evaluate_condition("{{step1.output.count}} <= 10", state) + assert result is True + + +class TestStringContains: + """Tests for string contains/in conditions.""" + + def test_string_contains_true(self): + """'in' operator returns True when substring is found.""" + state = create_workflow_state({ + "step1": {"output": {"message": "Error: something went wrong"}} + }) + + result = evaluate_condition("'Error' in '{{step1.output.message}}'", state) + assert result is True + + def test_string_contains_false(self): + """'in' operator returns False when substring is not found.""" + state = create_workflow_state({ + "step1": {"output": {"message": "Everything is fine"}} + }) + + result = 
evaluate_condition("'Error' in '{{step1.output.message}}'", state) + assert result is False + + def test_string_not_in(self): + """'not in' operator works correctly.""" + state = create_workflow_state({ + "step1": {"output": {"message": "Everything is fine"}} + }) + + result = evaluate_condition("'Error' not in '{{step1.output.message}}'", state) + assert result is True + + +class TestWorkflowInputReference: + """Tests for referencing workflow input in conditions.""" + + def test_workflow_input_comparison(self): + """Workflow input can be referenced in conditions.""" + state = create_workflow_state({ + "workflow_input": {"output": {"mode": "production"}} + }) + + result = evaluate_condition("'{{workflow.input.mode}}' == 'production'", state) + assert result is True + + def test_workflow_input_nested(self): + """Nested workflow input fields can be referenced.""" + state = create_workflow_state({ + "workflow_input": {"output": {"config": {"enabled": "true"}}} + }) + + result = evaluate_condition("'{{workflow.input.config.enabled}}' == 'true'", state) + assert result is True + + +class TestArgoAliases: + """Tests for Argo-compatible template aliases.""" + + def test_item_alias_in_condition(self): + """{{item}} is aliased to {{_map_item}} in conditions. + + Note: Unlike DAGExecutor.resolve_value(), the conditional evaluator + doesn't auto-unwrap the 'output' key for _map_item, so we need to + use {{item.output}} or structure the state without the output wrapper. + """ + state = create_workflow_state({ + "_map_item": {"output": "current_value"} + }) + + # Need to use item.output since conditional evaluator doesn't unwrap + result = evaluate_condition("'{{item.output}}' == 'current_value'", state) + assert result is True + + def test_item_field_alias_in_condition(self): + """{{item.field}} is aliased to {{_map_item.field}}.""" + state = create_workflow_state({ + "_map_item": {"output": {"status": "ready"}} + }) + + # Need to include 'output' in path for conditional evaluator + result = evaluate_condition("'{{item.output.status}}' == 'ready'", state) + assert result is True + + def test_workflow_parameters_alias(self): + """{{workflow.parameters.x}} is aliased to {{workflow.input.x}}.""" + state = create_workflow_state({ + "workflow_input": {"output": {"threshold": 50}} + }) + + result = evaluate_condition("{{workflow.parameters.threshold}} > 25", state) + assert result is True + + +class TestApplyTemplateAliases: + """Tests for the _apply_template_aliases helper function.""" + + def test_item_alias(self): + """{{item}} is replaced with {{_map_item}}.""" + result = _apply_template_aliases("{{item}}") + assert result == "{{_map_item}}" + + def test_item_field_alias(self): + """{{item.field}} is replaced with {{_map_item.field}}.""" + result = _apply_template_aliases("{{item.name}}") + assert result == "{{_map_item.name}}" + + def test_workflow_parameters_alias(self): + """workflow.parameters is replaced with workflow.input.""" + result = _apply_template_aliases("{{workflow.parameters.x}}") + assert result == "{{workflow.input.x}}" + + def test_no_change_for_non_aliases(self): + """Non-alias templates are unchanged.""" + result = _apply_template_aliases("{{step1.output.value}}") + assert result == "{{step1.output.value}}" + + +class TestErrorHandling: + """Tests for error handling in condition evaluation.""" + + def test_missing_node_raises_error(self): + """Referencing a non-existent node raises ConditionalEvaluationError.""" + state = create_workflow_state({ + "existing_node": {"output": {"x": 
1}} + }) + + with pytest.raises(ConditionalEvaluationError, match="has not completed"): + evaluate_condition("{{nonexistent.output.x}} > 0", state) + + def test_missing_workflow_input_raises_error(self): + """Referencing workflow input before initialization raises error.""" + state = create_workflow_state({}) # No workflow_input + + with pytest.raises(ConditionalEvaluationError, match="has not been initialized"): + evaluate_condition("'{{workflow.input.x}}' == 'y'", state) + + def test_invalid_expression_raises_error(self): + """Invalid expression syntax raises ConditionalEvaluationError.""" + state = create_workflow_state({ + "step1": {"output": {"value": 1}} + }) + + with pytest.raises(ConditionalEvaluationError): + evaluate_condition("{{step1.output.value}} >>> 5", state) + + +class TestBooleanLogic: + """Tests for boolean logic in conditions.""" + + def test_and_condition(self): + """AND logic works in conditions.""" + state = create_workflow_state({ + "step1": {"output": {"a": 5, "b": 10}} + }) + + result = evaluate_condition( + "{{step1.output.a}} > 0 and {{step1.output.b}} > 0", + state + ) + assert result is True + + def test_or_condition(self): + """OR logic works in conditions.""" + state = create_workflow_state({ + "step1": {"output": {"status": "error"}} + }) + + result = evaluate_condition( + "'{{step1.output.status}}' == 'success' or '{{step1.output.status}}' == 'error'", + state + ) + assert result is True + + def test_not_condition(self): + """NOT logic works in conditions.""" + state = create_workflow_state({ + "step1": {"output": {"enabled": False}} + }) + + # When comparing to Python's False, use lowercase + result = evaluate_condition( + "not {{step1.output.enabled}}", + state + ) + assert result is True + + +class TestNullHandling: + """Tests for handling None/null values.""" + + def test_none_value_in_path(self): + """None values in path are handled gracefully.""" + state = create_workflow_state({ + "step1": {"output": {"value": None}} + }) + + # None is converted to string "None" for comparison + result = evaluate_condition("'{{step1.output.value}}' == 'None'", state) + assert result is True diff --git a/tests/unit/workflow/test_dag_logic.py b/tests/unit/workflow/test_dag_logic.py new file mode 100644 index 000000000..02b231ad9 --- /dev/null +++ b/tests/unit/workflow/test_dag_logic.py @@ -0,0 +1,314 @@ +""" +Unit tests for DAG (Directed Acyclic Graph) logic in workflow executor. + +Tests the DAG traversal logic including initial node detection, dependency +checking, cycle detection, and skip propagation. 
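+
+A DAG is defined as a flat node list in which each node's depends_on entries
+name its upstream nodes, e.g. a diamond shape: step1 -> (step2, step3) -> step4.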
+""" + +import pytest +from datetime import datetime, timezone +from unittest.mock import Mock + +from solace_agent_mesh.workflow.dag_executor import DAGExecutor +from solace_agent_mesh.workflow.workflow_execution_context import WorkflowExecutionState +from solace_agent_mesh.workflow.app import ( + WorkflowDefinition, + AgentNode, + ConditionalNode, + MapNode, +) + + +def create_dag_executor(nodes: list) -> DAGExecutor: + """Create a DAGExecutor with given nodes.""" + workflow_def = WorkflowDefinition( + description="Test workflow", + nodes=nodes, + output_mapping={"result": "dummy"}, + ) + mock_host = Mock() + return DAGExecutor(workflow_def, mock_host) + + +def create_workflow_state( + completed_nodes: dict = None, + pending_nodes: list = None, + skipped_nodes: dict = None, +) -> WorkflowExecutionState: + """Create a WorkflowExecutionState for testing.""" + return WorkflowExecutionState( + workflow_name="test_workflow", + execution_id="test_exec_001", + start_time=datetime.now(timezone.utc), + completed_nodes=completed_nodes or {}, + pending_nodes=pending_nodes or [], + skipped_nodes=skipped_nodes or {}, + ) + + +class TestGetInitialNodes: + """Tests for get_initial_nodes() - finding nodes with no dependencies.""" + + def test_single_node_no_deps_is_initial(self): + """A single node with no dependencies is returned as initial.""" + executor = create_dag_executor([ + AgentNode(id="step1", type="agent", agent_name="Agent1") + ]) + + initial = executor.get_initial_nodes() + assert initial == ["step1"] + + def test_multiple_initial_nodes(self): + """Multiple nodes without dependencies are all returned.""" + executor = create_dag_executor([ + AgentNode(id="step1", type="agent", agent_name="Agent1"), + AgentNode(id="step2", type="agent", agent_name="Agent2"), + AgentNode(id="step3", type="agent", agent_name="Agent3"), + ]) + + initial = executor.get_initial_nodes() + assert set(initial) == {"step1", "step2", "step3"} + + def test_node_with_deps_not_initial(self): + """Nodes with dependencies are not returned as initial.""" + executor = create_dag_executor([ + AgentNode(id="step1", type="agent", agent_name="Agent1"), + AgentNode(id="step2", type="agent", agent_name="Agent2", depends_on=["step1"]), + ]) + + initial = executor.get_initial_nodes() + assert initial == ["step1"] + + def test_complex_dag_initial_nodes(self): + """In a complex DAG, only true entry points are initial.""" + # DAG shape: + # step1 --> step3 + # step2 --> step3 + executor = create_dag_executor([ + AgentNode(id="step1", type="agent", agent_name="Agent1"), + AgentNode(id="step2", type="agent", agent_name="Agent2"), + AgentNode(id="step3", type="agent", agent_name="Agent3", depends_on=["step1", "step2"]), + ]) + + initial = executor.get_initial_nodes() + assert set(initial) == {"step1", "step2"} + + def test_map_inner_node_not_initial(self): + """Inner nodes of MapNode are not returned as initial even if they have no deps.""" + executor = create_dag_executor([ + AgentNode(id="prepare", type="agent", agent_name="Agent1"), + MapNode( + id="map_node", + type="map", + node="process_item", + items="{{prepare.output.items}}", + depends_on=["prepare"], + ), + AgentNode(id="process_item", type="agent", agent_name="Agent2"), + ]) + + initial = executor.get_initial_nodes() + # process_item is an inner node of map_node, so it shouldn't be initial + assert "process_item" not in initial + assert "prepare" in initial + + +class TestGetNextNodes: + """Tests for get_next_nodes() - finding nodes ready to execute.""" + + def 
test_node_ready_when_all_deps_complete(self): + """Node becomes ready when all its dependencies are complete.""" + executor = create_dag_executor([ + AgentNode(id="step1", type="agent", agent_name="Agent1"), + AgentNode(id="step2", type="agent", agent_name="Agent2", depends_on=["step1"]), + ]) + + state = create_workflow_state( + completed_nodes={"step1": "artifact1"} + ) + + next_nodes = executor.get_next_nodes(state) + assert next_nodes == ["step2"] + + def test_node_not_ready_when_dep_incomplete(self): + """Node is not ready if any dependency is incomplete.""" + executor = create_dag_executor([ + AgentNode(id="step1", type="agent", agent_name="Agent1"), + AgentNode(id="step2", type="agent", agent_name="Agent2"), + AgentNode(id="step3", type="agent", agent_name="Agent3", depends_on=["step1", "step2"]), + ]) + + # Only step1 is complete + state = create_workflow_state( + completed_nodes={"step1": "artifact1"} + ) + + next_nodes = executor.get_next_nodes(state) + # step2 should be ready (no deps), step3 should not (missing step2) + assert "step2" in next_nodes + assert "step3" not in next_nodes + + def test_node_ready_when_all_multiple_deps_complete(self): + """Node with multiple dependencies is ready when all are complete.""" + executor = create_dag_executor([ + AgentNode(id="step1", type="agent", agent_name="Agent1"), + AgentNode(id="step2", type="agent", agent_name="Agent2"), + AgentNode(id="step3", type="agent", agent_name="Agent3", depends_on=["step1", "step2"]), + ]) + + state = create_workflow_state( + completed_nodes={"step1": "artifact1", "step2": "artifact2"} + ) + + next_nodes = executor.get_next_nodes(state) + assert "step3" in next_nodes + + def test_completed_node_not_returned(self): + """Already completed nodes are not returned as next.""" + executor = create_dag_executor([ + AgentNode(id="step1", type="agent", agent_name="Agent1"), + AgentNode(id="step2", type="agent", agent_name="Agent2", depends_on=["step1"]), + ]) + + state = create_workflow_state( + completed_nodes={"step1": "artifact1", "step2": "artifact2"} + ) + + next_nodes = executor.get_next_nodes(state) + assert next_nodes == [] + + def test_pending_node_not_returned(self): + """Nodes already pending execution are not returned again.""" + executor = create_dag_executor([ + AgentNode(id="step1", type="agent", agent_name="Agent1"), + AgentNode(id="step2", type="agent", agent_name="Agent2", depends_on=["step1"]), + ]) + + state = create_workflow_state( + completed_nodes={"step1": "artifact1"}, + pending_nodes=["step2"], + ) + + next_nodes = executor.get_next_nodes(state) + assert "step2" not in next_nodes + + def test_map_inner_node_not_returned(self): + """Inner nodes of MapNode are not returned by get_next_nodes.""" + executor = create_dag_executor([ + AgentNode(id="prepare", type="agent", agent_name="Agent1"), + MapNode( + id="map_node", + type="map", + node="process_item", + items="{{prepare.output.items}}", + depends_on=["prepare"], + ), + AgentNode(id="process_item", type="agent", agent_name="Agent2"), + ]) + + state = create_workflow_state( + completed_nodes={"prepare": "artifact1"} + ) + + next_nodes = executor.get_next_nodes(state) + # map_node should be ready, but process_item should not be returned directly + assert "map_node" in next_nodes + assert "process_item" not in next_nodes + + +class TestValidateDAG: + """Tests for validate_dag() - DAG structure validation.""" + + def test_valid_linear_dag(self): + """A valid linear DAG passes validation.""" + executor = create_dag_executor([ + 
AgentNode(id="step1", type="agent", agent_name="Agent1"), + AgentNode(id="step2", type="agent", agent_name="Agent2", depends_on=["step1"]), + AgentNode(id="step3", type="agent", agent_name="Agent3", depends_on=["step2"]), + ]) + + errors = executor.validate_dag() + assert errors == [] + + def test_valid_diamond_dag(self): + """A valid diamond-shaped DAG passes validation.""" + # DAG shape: + # step1 + # / \ + # step2 step3 + # \ / + # step4 + executor = create_dag_executor([ + AgentNode(id="step1", type="agent", agent_name="Agent1"), + AgentNode(id="step2", type="agent", agent_name="Agent2", depends_on=["step1"]), + AgentNode(id="step3", type="agent", agent_name="Agent3", depends_on=["step1"]), + AgentNode(id="step4", type="agent", agent_name="Agent4", depends_on=["step2", "step3"]), + ]) + + errors = executor.validate_dag() + assert errors == [] + + def test_invalid_dependency_reference_rejected_at_model_level(self): + """Invalid dependency reference is rejected at WorkflowDefinition level. + + Note: This validation happens in Pydantic model validation, not in + DAGExecutor.validate_dag(). This is correct behavior - fail fast. + """ + from pydantic import ValidationError + + with pytest.raises(ValidationError, match="non-existent node"): + create_dag_executor([ + AgentNode(id="step1", type="agent", agent_name="Agent1"), + AgentNode(id="step2", type="agent", agent_name="Agent2", depends_on=["nonexistent"]), + ]) + + +class TestDependencyGraph: + """Tests for dependency graph building.""" + + def test_dependency_graph_structure(self): + """Dependency graph is correctly built.""" + executor = create_dag_executor([ + AgentNode(id="step1", type="agent", agent_name="Agent1"), + AgentNode(id="step2", type="agent", agent_name="Agent2", depends_on=["step1"]), + AgentNode(id="step3", type="agent", agent_name="Agent3", depends_on=["step1", "step2"]), + ]) + + assert executor.dependencies["step1"] == [] + assert executor.dependencies["step2"] == ["step1"] + assert set(executor.dependencies["step3"]) == {"step1", "step2"} + + def test_reverse_dependency_graph(self): + """Reverse dependency graph is correctly built.""" + executor = create_dag_executor([ + AgentNode(id="step1", type="agent", agent_name="Agent1"), + AgentNode(id="step2", type="agent", agent_name="Agent2", depends_on=["step1"]), + AgentNode(id="step3", type="agent", agent_name="Agent3", depends_on=["step1"]), + ]) + + # step1 has step2 and step3 depending on it + assert set(executor.reverse_dependencies["step1"]) == {"step2", "step3"} + assert executor.reverse_dependencies["step2"] == [] + assert executor.reverse_dependencies["step3"] == [] + + +class TestInnerNodeTracking: + """Tests for inner node (map/loop target) tracking.""" + + def test_map_node_target_tracked_as_inner(self): + """MapNode's target node is tracked as an inner node.""" + executor = create_dag_executor([ + AgentNode(id="prepare", type="agent", agent_name="Agent1"), + MapNode( + id="map_node", + type="map", + node="process_item", + items="{{prepare.output.items}}", + depends_on=["prepare"], + ), + AgentNode(id="process_item", type="agent", agent_name="Agent2"), + ]) + + assert "process_item" in executor.inner_nodes + assert "map_node" not in executor.inner_nodes + assert "prepare" not in executor.inner_nodes diff --git a/tests/unit/workflow/test_template_resolution.py b/tests/unit/workflow/test_template_resolution.py new file mode 100644 index 000000000..eca3189b0 --- /dev/null +++ b/tests/unit/workflow/test_template_resolution.py @@ -0,0 +1,338 @@ +""" +Unit tests for 
template resolution in workflow DAG executor. + +Tests the resolve_value() and _resolve_template() methods which are pure functions +that transform template strings into resolved values based on workflow state. +""" + +import pytest +from datetime import datetime, timezone +from unittest.mock import Mock + +from solace_agent_mesh.workflow.dag_executor import DAGExecutor +from solace_agent_mesh.workflow.workflow_execution_context import WorkflowExecutionState +from solace_agent_mesh.workflow.app import WorkflowDefinition, AgentNode + + +def create_minimal_dag_executor() -> DAGExecutor: + """Create a minimal DAGExecutor for testing resolve_value.""" + # Minimal workflow definition with one node + workflow_def = WorkflowDefinition( + description="Test workflow", + nodes=[ + AgentNode(id="test_node", type="agent", agent_name="TestAgent") + ], + output_mapping={"result": "{{test_node.output}}"}, + ) + # Mock the host component - not used by resolve_value + mock_host = Mock() + return DAGExecutor(workflow_def, mock_host) + + +def create_workflow_state(node_outputs: dict) -> WorkflowExecutionState: + """Create a WorkflowExecutionState with given node outputs.""" + return WorkflowExecutionState( + workflow_name="test_workflow", + execution_id="test_exec_001", + start_time=datetime.now(timezone.utc), + node_outputs=node_outputs, + ) + + +class TestResolveWorkflowInput: + """Tests for resolving {{workflow.input.*}} templates.""" + + def test_resolve_workflow_input_simple(self): + """{{workflow.input.x}} resolves to the value from workflow input.""" + executor = create_minimal_dag_executor() + state = create_workflow_state({ + "workflow_input": {"output": {"x": 42}} + }) + + result = executor.resolve_value("{{workflow.input.x}}", state) + assert result == 42 + + def test_resolve_workflow_input_nested(self): + """{{workflow.input.a.b.c}} resolves nested paths.""" + executor = create_minimal_dag_executor() + state = create_workflow_state({ + "workflow_input": {"output": {"a": {"b": {"c": "deep_value"}}}} + }) + + result = executor.resolve_value("{{workflow.input.a.b.c}}", state) + assert result == "deep_value" + + def test_resolve_workflow_input_missing_field_returns_none(self): + """Missing workflow input field returns None (for coalesce support).""" + executor = create_minimal_dag_executor() + state = create_workflow_state({ + "workflow_input": {"output": {"x": 1}} + }) + + result = executor.resolve_value("{{workflow.input.nonexistent}}", state) + assert result is None + + def test_resolve_workflow_input_not_initialized_raises(self): + """Referencing workflow input before initialization raises ValueError.""" + executor = create_minimal_dag_executor() + state = create_workflow_state({}) # No workflow_input + + with pytest.raises(ValueError, match="Workflow input has not been initialized"): + executor.resolve_value("{{workflow.input.x}}", state) + + +class TestResolveNodeOutput: + """Tests for resolving {{node.output.*}} templates.""" + + def test_resolve_node_output_simple(self): + """{{node.output.field}} resolves to the value from node output.""" + executor = create_minimal_dag_executor() + state = create_workflow_state({ + "step1": {"output": {"result": "success"}} + }) + + result = executor.resolve_value("{{step1.output.result}}", state) + assert result == "success" + + def test_resolve_node_output_nested(self): + """{{node.output.a.b}} resolves nested paths in node output.""" + executor = create_minimal_dag_executor() + state = create_workflow_state({ + "process_node": {"output": {"data": 
{"items": [1, 2, 3]}}} + }) + + result = executor.resolve_value("{{process_node.output.data.items}}", state) + assert result == [1, 2, 3] + + def test_resolve_node_output_missing_node_returns_none(self): + """Referencing non-existent node returns None (for skipped nodes).""" + executor = create_minimal_dag_executor() + state = create_workflow_state({ + "existing_node": {"output": {"x": 1}} + }) + + result = executor.resolve_value("{{nonexistent_node.output.x}}", state) + assert result is None + + def test_resolve_node_output_missing_field_raises(self): + """Missing field in existing node output raises ValueError.""" + executor = create_minimal_dag_executor() + state = create_workflow_state({ + "step1": {"output": {"x": 1}} + }) + + with pytest.raises(ValueError, match="Output field 'nonexistent' not found"): + executor.resolve_value("{{step1.output.nonexistent}}", state) + + +class TestResolveLiteralValues: + """Tests for literal value passthrough.""" + + def test_literal_string_passthrough(self): + """Non-template strings are returned unchanged.""" + executor = create_minimal_dag_executor() + state = create_workflow_state({}) + + result = executor.resolve_value("hello world", state) + assert result == "hello world" + + def test_literal_number_passthrough(self): + """Numbers are returned unchanged.""" + executor = create_minimal_dag_executor() + state = create_workflow_state({}) + + assert executor.resolve_value(42, state) == 42 + assert executor.resolve_value(3.14, state) == 3.14 + + def test_literal_bool_passthrough(self): + """Booleans are returned unchanged.""" + executor = create_minimal_dag_executor() + state = create_workflow_state({}) + + assert executor.resolve_value(True, state) is True + assert executor.resolve_value(False, state) is False + + def test_literal_none_passthrough(self): + """None is returned unchanged.""" + executor = create_minimal_dag_executor() + state = create_workflow_state({}) + + result = executor.resolve_value(None, state) + assert result is None + + def test_literal_dict_passthrough(self): + """Plain dicts (not operators) are returned unchanged.""" + executor = create_minimal_dag_executor() + state = create_workflow_state({}) + + # Dict with multiple keys is not an operator, passed through + input_dict = {"key1": "value1", "key2": "value2"} + result = executor.resolve_value(input_dict, state) + assert result == input_dict + + +class TestCoalesceOperator: + """Tests for the coalesce operator.""" + + def test_coalesce_returns_first_non_null(self): + """Coalesce returns first non-null value.""" + executor = create_minimal_dag_executor() + state = create_workflow_state({ + "workflow_input": {"output": {}} + }) + + result = executor.resolve_value( + {"coalesce": [None, "fallback", "ignored"]}, + state + ) + assert result == "fallback" + + def test_coalesce_with_template_resolution(self): + """Coalesce resolves templates before checking null.""" + executor = create_minimal_dag_executor() + state = create_workflow_state({ + "workflow_input": {"output": {"optional": None, "default": "default_val"}} + }) + + result = executor.resolve_value( + {"coalesce": ["{{workflow.input.optional}}", "{{workflow.input.default}}"]}, + state + ) + assert result == "default_val" + + def test_coalesce_all_null_returns_none(self): + """Coalesce returns None if all values are null.""" + executor = create_minimal_dag_executor() + state = create_workflow_state({ + "workflow_input": {"output": {}} + }) + + result = executor.resolve_value( + {"coalesce": [None, None]}, + state + ) + 
assert result is None + + def test_coalesce_requires_list(self): + """Coalesce operator requires a list argument.""" + executor = create_minimal_dag_executor() + state = create_workflow_state({}) + + with pytest.raises(ValueError, match="'coalesce' operator requires a list"): + executor.resolve_value({"coalesce": "not_a_list"}, state) + + +class TestConcatOperator: + """Tests for the concat operator.""" + + def test_concat_joins_strings(self): + """Concat joins string values.""" + executor = create_minimal_dag_executor() + state = create_workflow_state({}) + + result = executor.resolve_value( + {"concat": ["hello", " ", "world"]}, + state + ) + assert result == "hello world" + + def test_concat_with_template_resolution(self): + """Concat resolves templates before joining.""" + executor = create_minimal_dag_executor() + state = create_workflow_state({ + "workflow_input": {"output": {"name": "Alice"}} + }) + + result = executor.resolve_value( + {"concat": ["Hello, ", "{{workflow.input.name}}", "!"]}, + state + ) + assert result == "Hello, Alice!" + + def test_concat_converts_numbers_to_string(self): + """Concat converts non-string values to strings.""" + executor = create_minimal_dag_executor() + state = create_workflow_state({}) + + result = executor.resolve_value( + {"concat": ["Value: ", 42]}, + state + ) + assert result == "Value: 42" + + def test_concat_skips_none_values(self): + """Concat skips None values in the list.""" + executor = create_minimal_dag_executor() + state = create_workflow_state({}) + + result = executor.resolve_value( + {"concat": ["a", None, "b"]}, + state + ) + assert result == "ab" + + def test_concat_requires_list(self): + """Concat operator requires a list argument.""" + executor = create_minimal_dag_executor() + state = create_workflow_state({}) + + with pytest.raises(ValueError, match="'concat' operator requires a list"): + executor.resolve_value({"concat": "not_a_list"}, state) + + +class TestMapLoopVariables: + """Tests for map/loop special variables.""" + + def test_resolve_map_item(self): + """{{_map_item}} resolves to the current map iteration item.""" + executor = create_minimal_dag_executor() + state = create_workflow_state({ + "_map_item": {"output": {"id": "item_123", "value": 100}} + }) + + result = executor.resolve_value("{{_map_item.id}}", state) + assert result == "item_123" + + def test_resolve_map_index(self): + """{{_map_index}} resolves to the current map iteration index.""" + executor = create_minimal_dag_executor() + state = create_workflow_state({ + "_map_index": {"output": 5} + }) + + result = executor.resolve_value("{{_map_index}}", state) + assert result == 5 + + def test_argo_item_alias(self): + """{{item}} is aliased to {{_map_item}} for Argo compatibility.""" + executor = create_minimal_dag_executor() + state = create_workflow_state({ + "_map_item": {"output": "current_item_value"} + }) + + result = executor.resolve_value("{{item}}", state) + assert result == "current_item_value" + + def test_argo_item_field_alias(self): + """{{item.field}} is aliased to {{_map_item.field}}.""" + executor = create_minimal_dag_executor() + state = create_workflow_state({ + "_map_item": {"output": {"name": "test_name"}} + }) + + result = executor.resolve_value("{{item.name}}", state) + assert result == "test_name" + + +class TestArgoParametersAlias: + """Tests for Argo workflow.parameters alias.""" + + def test_workflow_parameters_alias(self): + """{{workflow.parameters.x}} is aliased to {{workflow.input.x}}.""" + executor = create_minimal_dag_executor() 
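+        # Argo-style {{workflow.parameters.*}} references are expected to
+        # resolve exactly like the equivalent {{workflow.input.*}} templates.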
+ state = create_workflow_state({ + "workflow_input": {"output": {"x": "argo_style_value"}} + }) + + result = executor.resolve_value("{{workflow.parameters.x}}", state) + assert result == "argo_style_value" diff --git a/tests/unit/workflow/test_utils.py b/tests/unit/workflow/test_utils.py new file mode 100644 index 000000000..c9ebf1c7b --- /dev/null +++ b/tests/unit/workflow/test_utils.py @@ -0,0 +1,130 @@ +""" +Unit tests for workflow utility functions. + +Tests the parse_duration function for converting duration strings to seconds. +""" + +import pytest +from solace_agent_mesh.workflow.utils import parse_duration + + +class TestParseDuration: + """Tests for parse_duration function.""" + + # --- Numeric input tests --- + + def test_integer_input_treated_as_seconds(self): + """Integer input should be treated as seconds.""" + assert parse_duration(30) == 30.0 + + def test_float_input_treated_as_seconds(self): + """Float input should be treated as seconds.""" + assert parse_duration(30.5) == 30.5 + + def test_zero_returns_zero(self): + """Zero should return zero seconds.""" + assert parse_duration(0) == 0.0 + + # --- String with seconds suffix --- + + def test_seconds_suffix_lowercase(self): + """'30s' should return 30 seconds.""" + assert parse_duration("30s") == 30.0 + + def test_seconds_suffix_with_decimal(self): + """'1.5s' should return 1.5 seconds.""" + assert parse_duration("1.5s") == 1.5 + + def test_seconds_suffix_uppercase(self): + """'30S' should return 30 seconds (case insensitive).""" + assert parse_duration("30S") == 30.0 + + # --- String with minutes suffix --- + + def test_minutes_suffix(self): + """'5m' should return 300 seconds (5 * 60).""" + assert parse_duration("5m") == 300.0 + + def test_minutes_suffix_with_decimal(self): + """'1.5m' should return 90 seconds.""" + assert parse_duration("1.5m") == 90.0 + + def test_one_minute(self): + """'1m' should return 60 seconds.""" + assert parse_duration("1m") == 60.0 + + # --- String with hours suffix --- + + def test_hours_suffix(self): + """'2h' should return 7200 seconds (2 * 3600).""" + assert parse_duration("2h") == 7200.0 + + def test_one_hour(self): + """'1h' should return 3600 seconds.""" + assert parse_duration("1h") == 3600.0 + + def test_hours_with_decimal(self): + """'0.5h' should return 1800 seconds (half hour).""" + assert parse_duration("0.5h") == 1800.0 + + # --- String with days suffix --- + + def test_days_suffix(self): + """'1d' should return 86400 seconds.""" + assert parse_duration("1d") == 86400.0 + + def test_days_with_decimal(self): + """'0.5d' should return 43200 seconds (half day).""" + assert parse_duration("0.5d") == 43200.0 + + # --- String without suffix (defaults to seconds) --- + + def test_string_number_without_suffix(self): + """'30' (string without suffix) should default to seconds.""" + assert parse_duration("30") == 30.0 + + def test_string_decimal_without_suffix(self): + """'30.5' (string without suffix) should default to seconds.""" + assert parse_duration("30.5") == 30.5 + + # --- Whitespace handling --- + + def test_whitespace_trimmed(self): + """Leading/trailing whitespace should be trimmed.""" + assert parse_duration(" 30s ") == 30.0 + + def test_whitespace_between_number_and_suffix(self): + """Whitespace between number and suffix should work.""" + assert parse_duration("30 s") == 30.0 + + # --- Invalid input tests --- + + def test_invalid_format_raises_value_error(self): + """Invalid format should raise ValueError.""" + with pytest.raises(ValueError) as exc_info: + 
parse_duration("invalid") + assert "Invalid duration format" in str(exc_info.value) + + def test_negative_number_raises_value_error(self): + """Negative numbers should raise ValueError (regex doesn't match).""" + with pytest.raises(ValueError) as exc_info: + parse_duration("-30s") + assert "Invalid duration format" in str(exc_info.value) + + def test_invalid_suffix_raises_value_error(self): + """Invalid suffix should raise ValueError.""" + with pytest.raises(ValueError) as exc_info: + parse_duration("30x") + assert "Invalid duration format" in str(exc_info.value) + + def test_empty_string_raises_value_error(self): + """Empty string should raise ValueError.""" + with pytest.raises(ValueError) as exc_info: + parse_duration("") + assert "Invalid duration format" in str(exc_info.value) + + def test_only_suffix_raises_value_error(self): + """Just a suffix without number should raise ValueError.""" + with pytest.raises(ValueError) as exc_info: + parse_duration("s") + assert "Invalid duration format" in str(exc_info.value) diff --git a/tests/unit/workflow/test_workflow_models.py b/tests/unit/workflow/test_workflow_models.py new file mode 100644 index 000000000..e9922f365 --- /dev/null +++ b/tests/unit/workflow/test_workflow_models.py @@ -0,0 +1,378 @@ +""" +Unit tests for workflow definition Pydantic model validation. + +Tests that WorkflowDefinition and node types validate correctly, +catching invalid configurations at construction time. +""" + +import pytest +from pydantic import ValidationError + +from solace_agent_mesh.workflow.app import ( + WorkflowDefinition, + AgentNode, + ConditionalNode, + SwitchNode, + SwitchCase, + LoopNode, + MapNode, +) + + +class TestValidWorkflowParsing: + """Tests for valid workflow definitions.""" + + def test_simple_linear_workflow(self): + """A simple linear workflow parses correctly.""" + workflow = WorkflowDefinition( + description="Simple workflow", + nodes=[ + AgentNode(id="step1", type="agent", agent_name="Agent1"), + AgentNode(id="step2", type="agent", agent_name="Agent2", depends_on=["step1"]), + ], + output_mapping={"result": "{{step2.output}}"}, + ) + + assert workflow.description == "Simple workflow" + assert len(workflow.nodes) == 2 + assert workflow.nodes[0].id == "step1" + assert workflow.nodes[1].depends_on == ["step1"] + + def test_workflow_with_schemas(self): + """Workflow with input/output schemas parses correctly.""" + workflow = WorkflowDefinition( + description="Workflow with schemas", + nodes=[ + AgentNode(id="process", type="agent", agent_name="Agent1"), + ], + output_mapping={"result": "{{process.output}}"}, + input_schema={ + "type": "object", + "properties": {"name": {"type": "string"}}, + "required": ["name"], + }, + output_schema={ + "type": "object", + "properties": {"processed": {"type": "boolean"}}, + }, + ) + + assert workflow.input_schema is not None + assert workflow.output_schema is not None + + def test_workflow_with_conditional_node(self): + """Workflow with properly configured conditional node parses.""" + workflow = WorkflowDefinition( + description="Conditional workflow", + nodes=[ + AgentNode(id="step1", type="agent", agent_name="Agent1"), + ConditionalNode( + id="branch", + type="conditional", + condition="'{{step1.output.status}}' == 'success'", + true_branch="success_path", + false_branch="failure_path", + depends_on=["step1"], + ), + AgentNode(id="success_path", type="agent", agent_name="Agent2", depends_on=["branch"]), + AgentNode(id="failure_path", type="agent", agent_name="Agent3", depends_on=["branch"]), + ], + 
output_mapping={"result": "done"}, + ) + + assert workflow.nodes[1].type == "conditional" + assert workflow.nodes[1].true_branch == "success_path" + + def test_workflow_with_map_node(self): + """Workflow with map node parses correctly.""" + workflow = WorkflowDefinition( + description="Map workflow", + nodes=[ + AgentNode(id="prepare", type="agent", agent_name="Agent1"), + MapNode( + id="process_all", + type="map", + node="process_item", + items="{{prepare.output.items}}", + depends_on=["prepare"], + ), + AgentNode(id="process_item", type="agent", agent_name="Agent2"), + ], + output_mapping={"results": "{{process_all.output}}"}, + ) + + assert workflow.nodes[1].type == "map" + assert workflow.nodes[1].items == "{{prepare.output.items}}" + + +class TestInvalidDependencyReference: + """Tests for invalid dependency references.""" + + def test_depends_on_nonexistent_node_rejected(self): + """Depending on a non-existent node raises ValidationError.""" + with pytest.raises(ValidationError, match="non-existent node"): + WorkflowDefinition( + description="Invalid workflow", + nodes=[ + AgentNode(id="step1", type="agent", agent_name="Agent1"), + AgentNode(id="step2", type="agent", agent_name="Agent2", depends_on=["missing"]), + ], + output_mapping={"result": "{{step2.output}}"}, + ) + + +class TestConditionalNodeValidation: + """Tests for ConditionalNode validation.""" + + def test_conditional_requires_true_branch(self): + """ConditionalNode without true_branch raises ValidationError.""" + with pytest.raises(ValidationError, match="true_branch|trueBranch"): + ConditionalNode( + id="branch", + type="conditional", + condition="true", + # Missing true_branch + ) + + def test_conditional_requires_condition(self): + """ConditionalNode without condition raises ValidationError.""" + with pytest.raises(ValidationError, match="condition"): + ConditionalNode( + id="branch", + type="conditional", + true_branch="next", + # Missing condition + ) + + def test_conditional_true_branch_must_depend_on_conditional(self): + """true_branch target must depend on the conditional node.""" + with pytest.raises(ValidationError, match="does not list.*depends_on"): + WorkflowDefinition( + description="Invalid conditional", + nodes=[ + AgentNode(id="step1", type="agent", agent_name="Agent1"), + ConditionalNode( + id="branch", + type="conditional", + condition="true", + true_branch="step2", + depends_on=["step1"], + ), + # step2 doesn't list 'branch' in depends_on - this is an error + AgentNode(id="step2", type="agent", agent_name="Agent2", depends_on=["step1"]), + ], + output_mapping={"result": "{{step2.output}}"}, + ) + + def test_conditional_false_branch_must_depend_on_conditional(self): + """false_branch target (if provided) must depend on the conditional node.""" + with pytest.raises(ValidationError, match="does not list.*depends_on"): + WorkflowDefinition( + description="Invalid conditional", + nodes=[ + AgentNode(id="step1", type="agent", agent_name="Agent1"), + ConditionalNode( + id="branch", + type="conditional", + condition="true", + true_branch="success", + false_branch="failure", + depends_on=["step1"], + ), + AgentNode(id="success", type="agent", agent_name="Agent2", depends_on=["branch"]), + # failure doesn't depend on branch - error + AgentNode(id="failure", type="agent", agent_name="Agent3", depends_on=["step1"]), + ], + output_mapping={"result": "done"}, + ) + + def test_conditional_nonexistent_true_branch_rejected(self): + """ConditionalNode referencing non-existent true_branch node is rejected.""" + with 
pytest.raises(ValidationError, match="non-existent.*true_branch"): + WorkflowDefinition( + description="Invalid conditional", + nodes=[ + AgentNode(id="step1", type="agent", agent_name="Agent1"), + ConditionalNode( + id="branch", + type="conditional", + condition="true", + true_branch="nonexistent", + depends_on=["step1"], + ), + ], + output_mapping={"result": "done"}, + ) + + +class TestMapNodeValidation: + """Tests for MapNode validation.""" + + def test_map_node_requires_items_source(self): + """MapNode without any items source raises ValidationError.""" + with pytest.raises(ValidationError, match="requires one of"): + MapNode( + id="map1", + type="map", + node="inner", + # Missing items, with_param, or with_items + ) + + def test_map_node_rejects_multiple_items_sources(self): + """MapNode with multiple items sources raises ValidationError.""" + with pytest.raises(ValidationError, match="only one of"): + MapNode( + id="map1", + type="map", + node="inner", + items="{{step1.output.list1}}", + with_items=["a", "b", "c"], # Can't have both! + ) + + def test_map_node_nonexistent_target_rejected(self): + """MapNode referencing non-existent target node is rejected.""" + with pytest.raises(ValidationError, match="non-existent node"): + WorkflowDefinition( + description="Invalid map", + nodes=[ + AgentNode(id="prepare", type="agent", agent_name="Agent1"), + MapNode( + id="map_node", + type="map", + node="nonexistent_inner", # This node doesn't exist + items="{{prepare.output.items}}", + depends_on=["prepare"], + ), + ], + output_mapping={"result": "{{map_node.output}}"}, + ) + + def test_map_node_with_items_literal(self): + """MapNode with literal withItems array parses correctly.""" + workflow = WorkflowDefinition( + description="Map with literal items", + nodes=[ + MapNode( + id="map_node", + type="map", + node="process", + with_items=["item1", "item2", "item3"], + ), + AgentNode(id="process", type="agent", agent_name="Agent1"), + ], + output_mapping={"result": "{{map_node.output}}"}, + ) + + assert workflow.nodes[0].with_items == ["item1", "item2", "item3"] + + +class TestLoopNodeValidation: + """Tests for LoopNode validation.""" + + def test_loop_node_requires_target(self): + """LoopNode without target node raises ValidationError.""" + with pytest.raises(ValidationError, match="node"): + LoopNode( + id="loop1", + type="loop", + condition="true", + # Missing node + ) + + def test_loop_node_requires_condition(self): + """LoopNode without condition raises ValidationError.""" + with pytest.raises(ValidationError, match="condition"): + LoopNode( + id="loop1", + type="loop", + node="inner", + # Missing condition + ) + + def test_loop_node_nonexistent_target_rejected(self): + """LoopNode referencing non-existent target node is rejected.""" + with pytest.raises(ValidationError, match="non-existent node"): + WorkflowDefinition( + description="Invalid loop", + nodes=[ + LoopNode( + id="loop_node", + type="loop", + node="nonexistent_inner", + condition="{{loop_inner.output.continue}}", + ), + ], + output_mapping={"result": "done"}, + ) + + +class TestSwitchNodeValidation: + """Tests for SwitchNode validation.""" + + def test_switch_node_requires_cases(self): + """SwitchNode without cases raises ValidationError.""" + with pytest.raises(ValidationError, match="cases"): + SwitchNode( + id="switch1", + type="switch", + # Missing cases + ) + + def test_switch_node_case_requires_condition_and_node(self): + """SwitchCase requires both condition and node.""" + with pytest.raises(ValidationError): + SwitchCase( 
+ condition="true", + # Missing node + ) + + def test_switch_case_target_must_depend_on_switch(self): + """Switch case target must depend on the switch node.""" + with pytest.raises(ValidationError, match="does not list.*depends_on"): + WorkflowDefinition( + description="Invalid switch", + nodes=[ + AgentNode(id="step1", type="agent", agent_name="Agent1"), + SwitchNode( + id="router", + type="switch", + cases=[ + SwitchCase(condition="true", node="path_a"), + ], + depends_on=["step1"], + ), + # path_a doesn't depend on router - error + AgentNode(id="path_a", type="agent", agent_name="Agent2", depends_on=["step1"]), + ], + output_mapping={"result": "done"}, + ) + + +class TestArgoAliases: + """Tests for Argo-compatible field aliases.""" + + def test_dependencies_alias(self): + """'dependencies' alias for 'depends_on' works.""" + node = AgentNode( + id="step1", + type="agent", + agent_name="Agent1", + dependencies=["prev"], # Argo-style + ) + assert node.depends_on == ["prev"] + + def test_camel_case_aliases(self): + """CamelCase aliases work for various fields.""" + workflow = WorkflowDefinition( + description="Test", + nodes=[ + AgentNode(id="step1", type="agent", agent_name="Agent1"), + ], + outputMapping={"result": "{{step1.output}}"}, # CamelCase + inputSchema={"type": "object"}, # CamelCase + failFast=False, # CamelCase + ) + + assert workflow.output_mapping == {"result": "{{step1.output}}"} + assert workflow.input_schema == {"type": "object"} + assert workflow.fail_fast is False
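
The Argo alias tests above assume the workflow models accept both snake_case names and their Argo-style or camelCase equivalents. As a point of reference only, here is a minimal Pydantic v2 sketch of how such dual naming is commonly declared; the model and field set are hypothetical and this is not the project's actual implementation.

from pydantic import AliasChoices, BaseModel, ConfigDict, Field


class ExampleNode(BaseModel):
    """Illustrative only: accepts 'depends_on', 'dependencies', or 'dependsOn'."""

    # populate_by_name allows construction by field name as well as by alias.
    model_config = ConfigDict(populate_by_name=True)

    id: str
    # AliasChoices lists every accepted input name for this one field.
    depends_on: list[str] = Field(
        default_factory=list,
        validation_alias=AliasChoices("depends_on", "dependencies", "dependsOn"),
    )


# Both spellings populate the same field.
assert ExampleNode(id="step2", dependencies=["step1"]).depends_on == ["step1"]
assert ExampleNode(id="step2", depends_on=["step1"]).depends_on == ["step1"]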