Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 18 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -107,7 +107,25 @@ For more advanced examples, including [Triton](/examples/triton/README.md), [CUD
|---------|-------------|-------------|
| `weco run [options]` | Direct optimization execution | **For advanced users** - When you know exactly what to optimize and how |
| `weco resume <run-id>` | Resume an interrupted run | Continue from the last completed step |
| `weco login` | Authenticate with Weco | First-time setup or switching accounts |
| `weco logout` | Clear authentication credentials | To switch accounts or troubleshoot authentication issues |
| `weco credits balance` | Check your current credit balance | Monitor usage |
| `weco credits topup [amount]` | Purchase additional credits | When you need more credits (default: 10) |
| `weco credits autotopup` | Configure automatic top-up | Set up automatic credit replenishment |

### Setup Commands (Experimental)

| Command | Description |
|---------|-------------|
| `weco setup claude-code` | Set up Weco skill for Claude Code |

The `setup` command installs Weco skills for AI coding assistants. Currently supports Claude Code:

```bash
weco setup claude-code
```

This clones the Weco skill to `~/.claude/skills/weco/` and optionally updates your `~/.claude/CLAUDE.md` to enable automatic skill discovery.

### Model Selection

Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ name = "weco"
authors = [{ name = "Weco AI Team", email = "contact@weco.ai" }]
description = "Documentation for `weco`, a CLI for using Weco AI's code optimizer."
readme = "README.md"
version = "0.3.8"
version = "0.3.9"
license = { file = "LICENSE" }
requires-python = ">=3.8"
dependencies = [
Expand Down
34 changes: 33 additions & 1 deletion weco/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -137,6 +137,13 @@ def configure_run_parser(run_parser: argparse.ArgumentParser) -> None:
{default_models_for_providers}
""",
)
run_parser.add_argument(
"--output",
type=str,
choices=["rich", "plain"],
default="rich",
help="Output mode: 'rich' for interactive terminal UI (default), 'plain' for machine-readable text output suitable for LLM agents.",
)


def configure_credits_parser(credits_parser: argparse.ArgumentParser) -> None:
Expand Down Expand Up @@ -178,6 +185,12 @@ def _parse_credit_amount(value: str) -> float:
)


def configure_setup_parser(setup_parser: argparse.ArgumentParser) -> None:
    """Configure the `weco setup` command parser and its subcommands.

    Args:
        setup_parser: The ``setup`` subparser created by the top-level CLI
            parser; each supported AI tool is registered as a subcommand on it.
    """
    # Require a tool name so `weco setup` with no arguments fails with a clear
    # usage error instead of dispatching with args.tool == None.
    setup_subparsers = setup_parser.add_subparsers(dest="tool", required=True, help="AI tool to set up")
    # Currently the only supported tool; additional assistants get their own subparser here.
    setup_subparsers.add_parser("claude-code", help="Set up Weco skill for Claude Code")


def configure_resume_parser(resume_parser: argparse.ArgumentParser) -> None:
"""Configure arguments for the resume command."""
resume_parser.add_argument(
Expand Down Expand Up @@ -208,6 +221,13 @@ def configure_resume_parser(resume_parser: argparse.ArgumentParser) -> None:
Supported provider names: {supported_providers}.
""",
)
resume_parser.add_argument(
"--output",
type=str,
choices=["rich", "plain"],
default="rich",
help="Output mode: 'rich' for interactive terminal UI (default), 'plain' for machine-readable text output suitable for LLM agents.",
)


def execute_run_command(args: argparse.Namespace) -> None:
Expand Down Expand Up @@ -253,6 +273,7 @@ def execute_run_command(args: argparse.Namespace) -> None:
api_keys=api_keys,
apply_change=args.apply_change,
require_review=args.require_review,
output_mode=args.output,
)

exit_code = 0 if success else 1
Expand All @@ -269,7 +290,9 @@ def execute_resume_command(args: argparse.Namespace) -> None:
console.print(f"[bold red]Error parsing API keys: {e}[/]")
sys.exit(1)

success = resume_optimization(run_id=args.run_id, api_keys=api_keys, apply_change=args.apply_change)
success = resume_optimization(
run_id=args.run_id, api_keys=api_keys, apply_change=args.apply_change, output_mode=args.output
)

sys.exit(0 if success else 1)

Expand Down Expand Up @@ -322,6 +345,10 @@ def _main() -> None:
)
configure_resume_parser(resume_parser)

# --- Setup Command Parser Setup ---
setup_parser = subparsers.add_parser("setup", help="Set up Weco for use with AI tools")
configure_setup_parser(setup_parser)

args = parser.parse_args()

if args.command == "login":
Expand Down Expand Up @@ -349,6 +376,11 @@ def _main() -> None:
sys.exit(0)
elif args.command == "resume":
execute_resume_command(args)
elif args.command == "setup":
from .setup import handle_setup_command

handle_setup_command(args, console)
sys.exit(0)
else:
# This case should be hit if 'weco' is run alone and chatbot logic didn't catch it,
# or if an invalid command is provided.
Expand Down
47 changes: 37 additions & 10 deletions weco/optimizer.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@
)
from .auth import handle_authentication
from .browser import open_browser
from .ui import OptimizationUI, LiveOptimizationUI
from .ui import OptimizationUI, LiveOptimizationUI, PlainOptimizationUI
from .utils import read_additional_instructions, read_from_path, write_to_path, run_evaluation_with_file_swap


Expand Down Expand Up @@ -324,7 +324,11 @@ def _offer_apply_best_solution(


def resume_optimization(
run_id: str, api_keys: Optional[dict] = None, poll_interval: float = 2.0, apply_change: bool = False
run_id: str,
api_keys: Optional[dict] = None,
poll_interval: float = 2.0,
apply_change: bool = False,
output_mode: str = "rich",
) -> bool:
"""
Resume an interrupted run using the queue-based optimization loop.
Expand All @@ -337,11 +341,12 @@ def resume_optimization(
api_keys: Optional API keys for LLM providers.
poll_interval: Seconds between polling attempts.
apply_change: If True, automatically apply best solution; if False, prompt user.
output_mode: "rich" for interactive terminal UI, "plain" for machine-readable output.

Returns:
True if optimization completed successfully, False otherwise.
"""
console = Console()
console = Console(force_terminal=output_mode == "rich")

# Authenticate
weco_api_key, auth_headers = handle_authentication(console)
Expand Down Expand Up @@ -431,9 +436,17 @@ def resume_optimization(

result: Optional[OptimizationResult] = None
try:
with LiveOptimizationUI(
console, run_id, run_name, total_steps, dashboard_url, model=model_name, metric_name=metric_name
) as ui:
# Select UI implementation based on output mode
if output_mode == "plain":
ui_instance = PlainOptimizationUI(
run_id, run_name, total_steps, dashboard_url, model=model_name, metric_name=metric_name
)
else:
ui_instance = LiveOptimizationUI(
console, run_id, run_name, total_steps, dashboard_url, model=model_name, metric_name=metric_name
)

with ui_instance as ui:
# Populate UI with best solution from previous run if available
if best_metric_value is not None and best_step is not None:
ui.on_metric(best_step, best_metric_value)
Expand All @@ -459,7 +472,10 @@ def resume_optimization(

# Show resume message if interrupted
if result.status == "terminated":
console.print(f"\n[cyan]To resume this run, use:[/] [bold]weco resume {run_id}[/]\n")
if output_mode == "plain":
print(f"\nTo resume this run, use: weco resume {run_id}\n", flush=True)
else:
console.print(f"\n[cyan]To resume this run, use:[/] [bold]weco resume {run_id}[/]\n")

# Offer to apply best solution
_offer_apply_best_solution(
Expand Down Expand Up @@ -507,6 +523,7 @@ def optimize(
poll_interval: float = 2.0,
apply_change: bool = False,
require_review: bool = False,
output_mode: str = "rich",
) -> bool:
"""
Simplified queue-based optimization loop.
Expand All @@ -528,11 +545,12 @@ def optimize(
api_keys: Optional API keys for LLM providers.
poll_interval: Seconds between polling attempts.
apply_change: If True, automatically apply best solution; if False, prompt user.
output_mode: "rich" for interactive terminal UI, "plain" for machine-readable output.

Returns:
True if optimization completed successfully, False otherwise.
"""
console = Console()
console = Console(force_terminal=output_mode == "rich")

# Authenticate
weco_api_key, auth_headers = handle_authentication(console)
Expand Down Expand Up @@ -596,7 +614,13 @@ def optimize(

result: Optional[OptimizationResult] = None
try:
with LiveOptimizationUI(console, run_id, run_name, steps, dashboard_url, model=model, metric_name=metric) as ui:
# Select UI implementation based on output mode
if output_mode == "plain":
ui_instance = PlainOptimizationUI(run_id, run_name, steps, dashboard_url, model=model, metric_name=metric)
else:
ui_instance = LiveOptimizationUI(console, run_id, run_name, steps, dashboard_url, model=model, metric_name=metric)

with ui_instance as ui:
result = _run_optimization_loop(
ui=ui,
run_id=run_id,
Expand All @@ -618,7 +642,10 @@ def optimize(

# Show resume message if interrupted
if result.status == "terminated":
console.print(f"\n[cyan]To resume this run, use:[/] [bold]weco resume {run_id}[/]\n")
if output_mode == "plain":
print(f"\nTo resume this run, use: weco resume {run_id}\n", flush=True)
else:
console.print(f"\n[cyan]To resume this run, use:[/] [bold]weco resume {run_id}[/]\n")

# Offer to apply best solution
_offer_apply_best_solution(
Expand Down
Loading