|
26 | 26 | from empathy_os.pattern_library import PatternLibrary |
27 | 27 | from empathy_os.persistence import MetricsCollector, PatternPersistence, StateManager |
28 | 28 | from empathy_os.templates import cmd_new |
29 | | -from empathy_os.workflows import cmd_fix_all, cmd_learn, cmd_morning, cmd_ship |
| 29 | +from empathy_os.workflows import ( |
| 30 | + WorkflowConfig, |
| 31 | + cmd_fix_all, |
| 32 | + cmd_learn, |
| 33 | + cmd_morning, |
| 34 | + cmd_ship, |
| 35 | + create_example_config, |
| 36 | + get_workflow, |
| 37 | +) |
| 38 | +from empathy_os.workflows import ( |
| 39 | + list_workflows as get_workflow_list, |
| 40 | +) |
30 | 41 |
|
31 | 42 | logger = get_logger(__name__) |
32 | 43 |
|
|
70 | 81 | ("empathy costs", "View API cost tracking"), |
71 | 82 | ("empathy dashboard", "Launch visual dashboard"), |
72 | 83 | ("empathy frameworks", "List agent frameworks"), |
| 84 | + ("empathy workflow list", "List multi-model workflows"), |
73 | 85 | ("empathy new <template>", "Create project from template"), |
74 | 86 | ], |
75 | 87 | } |
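The new imports expose the workflow registry to callers outside the CLI as well. A minimal sketch of using them directly, assuming `list_workflows()` returns dicts with the `name`, `description`, `stages`, and `tier_map` keys that `cmd_workflow` below reads; the workflow name passed to `get_workflow()` is only a placeholder:

    from empathy_os.workflows import get_workflow, list_workflows

    # Enumerate registered workflows and their stage/tier layout.
    for wf in list_workflows():
        stages = " → ".join(f"{s}({wf['tier_map'][s]})" for s in wf["stages"])
        print(f"{wf['name']:15} {wf['description']}")
        print(f"  Stages: {stages}")

    # Look up a workflow class by name; unknown names raise KeyError,
    # which the CLI below catches. "code-review" is illustrative only.
    workflow_cls = get_workflow("code-review")
    workflow = workflow_cls(provider=None)  # None defers to the workflow config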
@@ -1617,6 +1629,235 @@ def _generate_claude_rule(category: str, patterns: list) -> str: |
1617 | 1629 | return "\n".join(lines) |
1618 | 1630 |
|
1619 | 1631 |
|
| 1632 | +def _extract_workflow_content(final_output): |
| 1633 | + """ |
| 1634 | + Extract readable content from workflow final_output. |
| 1635 | +
|
| 1636 | +    Workflows return their results in various formats; this extracts
| 1637 | +    the actual content users want to see.
| 1638 | + """ |
| 1639 | + if final_output is None: |
| 1640 | + return None |
| 1641 | + |
| 1642 | + # If it's already a string, return it |
| 1643 | + if isinstance(final_output, str): |
| 1644 | + return final_output |
| 1645 | + |
| 1646 | + # If it's a dict, try to extract meaningful content |
| 1647 | + if isinstance(final_output, dict): |
| 1648 | + # Common keys that contain the main output |
| 1649 | + content_keys = [ |
| 1650 | + "answer", |
| 1651 | + "synthesis", |
| 1652 | + "result", |
| 1653 | + "output", |
| 1654 | + "content", |
| 1655 | + "report", |
| 1656 | + "summary", |
| 1657 | + "analysis", |
| 1658 | + "review", |
| 1659 | + "documentation", |
| 1660 | + "response", |
| 1661 | + "recommendations", |
| 1662 | + "findings", |
| 1663 | + "tests", |
| 1664 | + "plan", |
| 1665 | + ] |
| 1666 | + for key in content_keys: |
| 1667 | + if key in final_output and final_output[key]: |
| 1668 | + val = final_output[key] |
| 1669 | + if isinstance(val, str): |
| 1670 | + return val |
| 1671 | + elif isinstance(val, dict): |
| 1672 | + # Recursively extract |
| 1673 | + return _extract_workflow_content(val) |
| 1674 | + |
| 1675 | + # If no common key found, try to format the dict nicely |
| 1676 | + # Look for any string value that's substantial |
| 1677 | + for _key, val in final_output.items(): |
| 1678 | + if isinstance(val, str) and len(val) > 100: |
| 1679 | + return val |
| 1680 | + |
| 1681 | + # Last resort: return a formatted version |
| 1682 | + import json |
| 1683 | + |
| 1684 | + return json.dumps(final_output, indent=2) |
| 1685 | + |
| 1686 | + # For lists or other types, convert to string |
| 1687 | + return str(final_output) |
| 1688 | + |
| 1689 | + |
| 1690 | +def cmd_workflow(args): |
| 1691 | + """Multi-model workflow management and execution.""" |
| 1692 | + import asyncio |
| 1693 | + import json as json_mod |
| 1694 | + |
| 1695 | + action = args.action |
| 1696 | + |
| 1697 | + if action == "list": |
| 1698 | + # List available workflows |
| 1699 | + workflows = get_workflow_list() |
| 1700 | + |
| 1701 | + if args.json: |
| 1702 | + print(json_mod.dumps(workflows, indent=2)) |
| 1703 | + else: |
| 1704 | + print("\n" + "=" * 60) |
| 1705 | + print(" MULTI-MODEL WORKFLOWS") |
| 1706 | + print("=" * 60 + "\n") |
| 1707 | + |
| 1708 | + for wf in workflows: |
| 1709 | + print(f" {wf['name']:15} {wf['description']}") |
| 1710 | + stages = " → ".join(f"{s}({wf['tier_map'][s]})" for s in wf["stages"]) |
| 1711 | + print(f" Stages: {stages}") |
| 1712 | + print() |
| 1713 | + |
| 1714 | + print("-" * 60) |
| 1715 | + print(" Use: empathy workflow describe <name>") |
| 1716 | + print(" Use: empathy workflow run <name> [--input JSON]") |
| 1717 | + print("=" * 60 + "\n") |
| 1718 | + |
| 1719 | + elif action == "describe": |
| 1720 | + # Describe a specific workflow |
| 1721 | + name = args.name |
| 1722 | + if not name: |
| 1723 | + print("Error: workflow name required") |
| 1724 | + print("Usage: empathy workflow describe <name>") |
| 1725 | + return 1 |
| 1726 | + |
| 1727 | + try: |
| 1728 | + workflow_cls = get_workflow(name) |
| 1729 | + provider = getattr(args, "provider", None) |
| 1730 | + workflow = workflow_cls(provider=provider) |
| 1731 | + |
| 1732 | + # Get actual provider from workflow (may come from config) |
| 1733 | + actual_provider = getattr(workflow, "_provider_str", provider or "anthropic") |
| 1734 | + |
| 1735 | + if args.json: |
| 1736 | + info = { |
| 1737 | + "name": workflow.name, |
| 1738 | + "description": workflow.description, |
| 1739 | + "provider": actual_provider, |
| 1740 | + "stages": workflow.stages, |
| 1741 | + "tier_map": {k: v.value for k, v in workflow.tier_map.items()}, |
| 1742 | + "models": { |
| 1743 | + stage: workflow.get_model_for_tier(workflow.tier_map[stage]) |
| 1744 | + for stage in workflow.stages |
| 1745 | + }, |
| 1746 | + } |
| 1747 | + print(json_mod.dumps(info, indent=2)) |
| 1748 | + else: |
| 1749 | + print(f"Provider: {actual_provider}") |
| 1750 | + print(workflow.describe()) |
| 1751 | + |
| 1752 | + except KeyError as e: |
| 1753 | + print(f"Error: {e}") |
| 1754 | + return 1 |
| 1755 | + |
| 1756 | + elif action == "run": |
| 1757 | + # Run a workflow |
| 1758 | + name = args.name |
| 1759 | + if not name: |
| 1760 | + print("Error: workflow name required") |
| 1761 | + print('Usage: empathy workflow run <name> --input \'{"key": "value"}\'') |
| 1762 | + return 1 |
| 1763 | + |
| 1764 | + try: |
| 1765 | + workflow_cls = get_workflow(name) |
| 1766 | + |
| 1767 | +            # Provider from the CLI; None lets the workflow fall back to its config
| 1768 | +            provider = getattr(args, "provider", None)
| 1769 | + workflow = workflow_cls(provider=provider) |
| 1770 | + |
| 1771 | + # Parse input |
| 1772 | + input_data = {} |
| 1773 | + if args.input: |
| 1774 | + input_data = json_mod.loads(args.input) |
| 1775 | + |
| 1776 | +            print(f"\n Running workflow: {name} (provider: {provider or 'config default'})")
| 1777 | + print("=" * 50) |
| 1778 | + |
| 1779 | + # Execute workflow |
| 1780 | + result = asyncio.run(workflow.execute(**input_data)) |
| 1781 | + |
| 1782 | + # Extract the actual content from final_output |
| 1783 | + output_content = _extract_workflow_content(result.final_output) |
| 1784 | + |
| 1785 | + if args.json: |
| 1786 | + # JSON output includes both content and metadata |
| 1787 | + output = { |
| 1788 | + "success": result.success, |
| 1789 | + "output": output_content, |
| 1790 | + "cost": result.cost_report.total_cost, |
| 1791 | + "savings": result.cost_report.savings, |
| 1792 | + "duration_ms": result.total_duration_ms, |
| 1793 | + "error": result.error, |
| 1794 | + } |
| 1795 | + print(json_mod.dumps(output, indent=2)) |
| 1796 | + else: |
| 1797 | + # Display the actual results - this is what users want to see |
| 1798 | + if result.success: |
| 1799 | + if output_content: |
| 1800 | + print(f"\n{output_content}\n") |
| 1801 | + else: |
| 1802 | + print("\n✓ Workflow completed successfully.\n") |
| 1803 | + |
| 1804 | + # Brief footer with timing (detailed costs available via 'empathy costs') |
| 1805 | + print("-" * 50) |
| 1806 | + print( |
| 1807 | + f"Completed in {result.total_duration_ms}ms | Cost: ${result.cost_report.total_cost:.4f} (saved ${result.cost_report.savings:.4f})" |
| 1808 | + ) |
| 1809 | + else: |
| 1810 | + print(f"\n✗ Workflow failed: {result.error}\n") |
| 1811 | + |
| 1812 | + except KeyError as e: |
| 1813 | + print(f"Error: {e}") |
| 1814 | + return 1 |
| 1815 | + except json_mod.JSONDecodeError as e: |
| 1816 | + print(f"Error parsing input JSON: {e}") |
| 1817 | + return 1 |
| 1818 | + |
| 1819 | + elif action == "config": |
| 1820 | + # Generate or show workflow configuration |
| 1821 | + from pathlib import Path |
| 1822 | + |
| 1823 | + config_path = Path(".empathy/workflows.yaml") |
| 1824 | + |
| 1825 | + if config_path.exists() and not getattr(args, "force", False): |
| 1826 | + print(f"Config already exists: {config_path}") |
| 1827 | + print("Use --force to overwrite") |
| 1828 | + print("\nCurrent configuration:") |
| 1829 | + print("-" * 40) |
| 1830 | + config = WorkflowConfig.load() |
| 1831 | + print(f" Default provider: {config.default_provider}") |
| 1832 | + if config.workflow_providers: |
| 1833 | + print(" Workflow providers:") |
| 1834 | + for wf, prov in config.workflow_providers.items(): |
| 1835 | + print(f" {wf}: {prov}") |
| 1836 | + if config.custom_models: |
| 1837 | + print(" Custom models configured") |
| 1838 | + return 0 |
| 1839 | + |
| 1840 | + # Create config directory and file |
| 1841 | + config_path.parent.mkdir(parents=True, exist_ok=True) |
| 1842 | + config_path.write_text(create_example_config()) |
| 1843 | + print(f"✓ Created workflow config: {config_path}") |
| 1844 | + print("\nEdit this file to customize:") |
| 1845 | + print(" - Default provider (anthropic, openai, ollama)") |
| 1846 | + print(" - Per-workflow provider overrides") |
| 1847 | + print(" - Custom model mappings") |
| 1848 | + print(" - Model pricing") |
| 1849 | + print("\nOr use environment variables:") |
| 1850 | + print(" EMPATHY_WORKFLOW_PROVIDER=openai") |
| 1851 | + print(" EMPATHY_MODEL_PREMIUM=gpt-5.2") |
| 1852 | + |
| 1853 | + else: |
| 1854 | + print(f"Unknown action: {action}") |
| 1855 | + print("Available: list, describe, run, config") |
| 1856 | + return 1 |
| 1857 | + |
| 1858 | + return 0 |
| 1859 | + |
| 1860 | + |
1620 | 1861 | def cmd_frameworks(args): |
1621 | 1862 | """List and manage agent frameworks.""" |
1622 | 1863 | import json as json_mod |
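For reference, a self-contained sketch of the extraction rule that `_extract_workflow_content` applies (preferred keys first, recursion into nested dicts, JSON as the last resort); the sample payload is invented for illustration:

    import json

    PREFERRED_KEYS = ("answer", "synthesis", "result", "output", "content",
                      "report", "summary", "analysis", "review")

    def extract(value):
        # Trimmed mirror of the helper's priority order above.
        if value is None or isinstance(value, str):
            return value
        if isinstance(value, dict):
            for key in PREFERRED_KEYS:
                inner = value.get(key)
                if isinstance(inner, str) and inner:
                    return inner
                if isinstance(inner, dict) and inner:
                    return extract(inner)
            return json.dumps(value, indent=2)
        return str(value)

    print(extract({"report": {"summary": "All stages passed."}}))
    # -> All stages passed.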
@@ -2028,6 +2269,41 @@ def main(): |
2028 | 2269 | parser_frameworks.add_argument("--json", action="store_true", help="Output as JSON") |
2029 | 2270 | parser_frameworks.set_defaults(func=cmd_frameworks) |
2030 | 2271 |
|
| 2272 | + # Workflow command (multi-model workflow management) |
| 2273 | + parser_workflow = subparsers.add_parser( |
| 2274 | + "workflow", |
| 2275 | + help="Multi-model workflows for cost-optimized task pipelines", |
| 2276 | + ) |
| 2277 | + parser_workflow.add_argument( |
| 2278 | + "action", |
| 2279 | + choices=["list", "describe", "run", "config"], |
| 2280 | + help="Action: list, describe, run, or config", |
| 2281 | + ) |
| 2282 | + parser_workflow.add_argument( |
| 2283 | + "name", |
| 2284 | + nargs="?", |
| 2285 | + help="Workflow name (for describe/run)", |
| 2286 | + ) |
| 2287 | + parser_workflow.add_argument( |
| 2288 | + "--input", |
| 2289 | + "-i", |
| 2290 | + help="JSON input data for workflow execution", |
| 2291 | + ) |
| 2292 | + parser_workflow.add_argument( |
| 2293 | + "--provider", |
| 2294 | + "-p", |
| 2295 | + choices=["anthropic", "openai", "ollama", "hybrid"], |
| 2296 | + default=None, # None means use config |
| 2297 | + help="Model provider: anthropic, openai, ollama, or hybrid (mix of best models)", |
| 2298 | + ) |
| 2299 | + parser_workflow.add_argument( |
| 2300 | + "--force", |
| 2301 | + action="store_true", |
| 2302 | + help="Force overwrite existing config file", |
| 2303 | + ) |
| 2304 | + parser_workflow.add_argument("--json", action="store_true", help="Output as JSON") |
| 2305 | + parser_workflow.set_defaults(func=cmd_workflow) |
| 2306 | + |
2031 | 2307 | # Sync-claude command (sync patterns to Claude Code) |
2032 | 2308 | parser_sync_claude = subparsers.add_parser( |
2033 | 2309 | "sync-claude", help="Sync learned patterns to Claude Code rules" |
|
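To make the accepted flags easy to scan, here is the same argument surface mirrored in a standalone snippet; the workflow name and JSON payload in the example invocation are placeholders, not names shipped by the library:

    import argparse

    parser = argparse.ArgumentParser(prog="empathy")
    sub = parser.add_subparsers(dest="command")
    wf = sub.add_parser("workflow")
    wf.add_argument("action", choices=["list", "describe", "run", "config"])
    wf.add_argument("name", nargs="?")
    wf.add_argument("--input", "-i")
    wf.add_argument("--provider", "-p",
                    choices=["anthropic", "openai", "ollama", "hybrid"], default=None)
    wf.add_argument("--force", action="store_true")
    wf.add_argument("--json", action="store_true")

    args = parser.parse_args(
        ["workflow", "run", "code-review", "-i", '{"code": "print(1)"}', "-p", "hybrid"]
    )
    print(args.action, args.name, args.provider)  # run code-review hybrid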