diff --git a/clai/README.md b/clai/README.md
index 01a7e2b8d8..1831b32f04 100644
--- a/clai/README.md
+++ b/clai/README.md
@@ -54,28 +54,27 @@ Either way, running `clai` will start an interactive session where you can chat
 ## Help
 
 ```
-usage: clai [-h] [-m [MODEL]] [-a AGENT] [-l] [-t [CODE_THEME]] [--no-stream] [--version] [prompt]
-
-Pydantic AI CLI v...
-
-Special prompts:
-* `/exit` - exit the interactive mode (ctrl-c and ctrl-d also work)
-* `/markdown` - show the last markdown output of the last question
-* `/multiline` - toggle multiline mode
-* `/cp` - copy the last response to clipboard
-
-positional arguments:
-  prompt                AI Prompt, if omitted fall into interactive mode
-
-options:
-  -h, --help            show this help message and exit
-  -m [MODEL], --model [MODEL]
-                        Model to use, in format ":" e.g. "openai:gpt-4.1" or "anthropic:claude-sonnet-4-0". Defaults to "openai:gpt-4.1".
-  -a AGENT, --agent AGENT
-                        Custom Agent to use, in format "module:variable", e.g. "mymodule.submodule:my_agent"
-  -l, --list-models     List all available models and exit
-  -t [CODE_THEME], --code-theme [CODE_THEME]
-                        Which colors to use for code, can be "dark", "light" or any theme from pygments.org/styles/. Defaults to "dark" which works well on dark terminals.
-  --no-stream           Disable streaming from the model
-  --version             Show version and exit
+Usage: clai [OPTIONS] [PROMPT]
+
+  Pydantic AI CLI v...
+
+  Special prompts: * `/exit` - exit the interactive mode (ctrl-c and ctrl-d
+  also work) * `/markdown` - show the last markdown output of the last
+  question * `/multiline` - toggle multiline mode * `/cp` - copy the last
+  response to clipboard
+
+Options:
+  -m, --model MODEL        Model to use, in format ":" e.g.
+                           "openai:gpt-4.1" or "anthropic:claude-sonnet-4-0".
+                           Defaults to "openai:gpt-4.1".
+  -a, --agent MODULE:VAR   Custom Agent to use, in format "module:variable",
+                           e.g. "mymodule.submodule:my_agent"
+  -l, --list-models        List all available models and exit
+  -t, --code-theme THEME   Which colors to use for code, can be "dark", "light"
+                           or any theme from pygments.org/styles/. Defaults to
+                           "dark" which works well on dark terminals.
+                           [default: dark]
+  --no-stream              Disable streaming from the model
+  --version                Show version and exit
+  -h, --help               Show this message and exit.
 ```
diff --git a/pydantic_ai_slim/pydantic_ai/_cli.py b/pydantic_ai_slim/pydantic_ai/_cli.py
index 95fcf8b520..6c260357f8 100644
--- a/pydantic_ai_slim/pydantic_ai/_cli.py
+++ b/pydantic_ai_slim/pydantic_ai/_cli.py
@@ -1,6 +1,5 @@
 from __future__ import annotations as _annotations
 
-import argparse
 import asyncio
 import importlib
 import os
@@ -10,8 +9,9 @@
 from contextlib import ExitStack
 from datetime import datetime, timezone
 from pathlib import Path
-from typing import Any, cast
+from typing import Any
 
+import click
 from typing_inspection.introspection import get_literal_values
 
 from . import __version__
@@ -23,7 +23,6 @@
 from .output import OutputDataT
 
 try:
-    import argcomplete
     import pyperclip
     from prompt_toolkit import PromptSession
     from prompt_toolkit.auto_suggest import AutoSuggestFromHistory, Suggestion
@@ -39,7 +38,7 @@
     from rich.text import Text
 except ImportError as _import_error:
     raise ImportError(
-        'Please install `rich`, `prompt-toolkit`, `pyperclip` and `argcomplete` to use the Pydantic AI CLI, '
+        'Please install `rich`, `prompt-toolkit`, and `pyperclip` to use the Pydantic AI CLI, '
         'you can use the `cli` optional group — `pip install "pydantic-ai-slim[cli]"`'
     ) from _import_error
 
@@ -105,119 +104,152 @@ def cli_exit(prog_name: str = 'pai'):  # pragma: no cover
 def cli(  # noqa: C901
     args_list: Sequence[str] | None = None, *, prog_name: str = 'pai', default_model: str = 'openai:gpt-4.1'
 ) -> int:
-    """Run the CLI and return the exit code for the process."""
-    parser = argparse.ArgumentParser(
-        prog=prog_name,
-        description=f"""\
-Pydantic AI CLI v{__version__}\n\n
-
-Special prompts:
-* `/exit` - exit the interactive mode (ctrl-c and ctrl-d also work)
-* `/markdown` - show the last markdown output of the last question
-* `/multiline` - toggle multiline mode
-* `/cp` - copy the last response to clipboard
-""",
-        formatter_class=argparse.RawTextHelpFormatter,
+    """Run the CLI and return the exit code for the process.
+
+    Uses Click for parsing, while preserving the previous API:
+    - Raises SystemExit on `--help` to satisfy the README hook test.
+    - Returns an int exit code for other invocations.
+    """
+
+    @click.command(
+        context_settings={
+            'help_option_names': ['-h', '--help'],
+        },
+        help=(
+            f'Pydantic AI CLI v{__version__}\n\n'
+            'Special prompts:\n'
+            '* `/exit` - exit the interactive mode (ctrl-c and ctrl-d also work)\n'
+            '* `/markdown` - show the last markdown output of the last question\n'
+            '* `/multiline` - toggle multiline mode\n'
+            '* `/cp` - copy the last response to clipboard\n'
+        ),
     )
-    parser.add_argument('prompt', nargs='?', help='AI Prompt, if omitted fall into interactive mode')
-    arg = parser.add_argument(
+    @click.argument('prompt', required=False)
+    @click.option(
         '-m',
         '--model',
-        nargs='?',
-        help=f'Model to use, in format ":" e.g. "openai:gpt-4.1" or "anthropic:claude-sonnet-4-0". Defaults to "{default_model}".',
+        metavar='MODEL',
+        help=(
+            f'Model to use, in format ":" e.g. "openai:gpt-4.1" or '
+            f'"anthropic:claude-sonnet-4-0". Defaults to "{default_model}".'
+        ),
     )
-    # we don't want to autocomplete or list models that don't include the provider,
-    # e.g. we want to show `openai:gpt-4o` but not `gpt-4o`
-    qualified_model_names = [n for n in get_literal_values(KnownModelName.__value__) if ':' in n]
-    arg.completer = argcomplete.ChoicesCompleter(qualified_model_names)  # type: ignore[reportPrivateUsage]
-    parser.add_argument(
+    @click.option(
         '-a',
         '--agent',
-        help='Custom Agent to use, in format "module:variable", e.g. "mymodule.submodule:my_agent"',
-    )
-    parser.add_argument(
-        '-l',
-        '--list-models',
-        action='store_true',
-        help='List all available models and exit',
+        metavar='MODULE:VAR',
+        help=('Custom Agent to use, in format "module:variable", e.g. "mymodule.submodule:my_agent"'),
     )
-    parser.add_argument(
+    @click.option('-l', '--list-models', is_flag=True, help='List all available models and exit')
+    @click.option(
         '-t',
         '--code-theme',
-        nargs='?',
-        help='Which colors to use for code, can be "dark", "light" or any theme from pygments.org/styles/. Defaults to "dark" which works well on dark terminals.',
         default='dark',
+        metavar='THEME',
+        help=(
+            'Which colors to use for code, can be "dark", "light" or any theme from '
+            'pygments.org/styles/. Defaults to "dark" which works well on dark terminals.'
+        ),
+        show_default=True,
     )
-    parser.add_argument('--no-stream', action='store_true', help='Disable streaming from the model')
-    parser.add_argument('--version', action='store_true', help='Show version and exit')
-
-    argcomplete.autocomplete(parser)
-    args = parser.parse_args(args_list)
-
-    console = Console()
-    name_version = f'[green]{prog_name} - Pydantic AI CLI v{__version__}[/green]'
-    if args.version:
-        console.print(name_version, highlight=False)
-        return 0
-    if args.list_models:
-        console.print(f'{name_version}\n\n[green]Available models:[/green]')
-        for model in qualified_model_names:
-            console.print(f'  {model}', highlight=False)
-        return 0
+    @click.option('--no-stream', is_flag=True, help='Disable streaming from the model')
+    @click.option('--version', is_flag=True, help='Show version and exit')
+    def _click_main(  # noqa: C901
+        prompt: str | None,
+        model: str | None,
+        agent: str | None,
+        list_models: bool,
+        code_theme: str,
+        no_stream: bool,
+        version: bool,
+    ) -> int | None:
+        """Command body (invoked by Click)."""
+        # we don't want to autocomplete or list models that don't include the provider,
+        # e.g. we want to show `openai:gpt-4o` but not `gpt-4o`
+        qualified_model_names = [n for n in get_literal_values(KnownModelName.__value__) if ':' in n]
+
+        console = Console()
+        name_version = f'[green]{prog_name} - Pydantic AI CLI v{__version__}[/green]'
+        if version:
+            console.print(name_version, highlight=False)
+            return 0
+        if list_models:
+            console.print(f'{name_version}\n\n[green]Available models:[/green]')
+            for m in qualified_model_names:
+                console.print(f'  {m}', highlight=False)
+            return 0
 
-    agent: Agent[None, str] = cli_agent
-    if args.agent:
-        sys.path.append(os.getcwd())
-        try:
-            module_path, variable_name = args.agent.split(':')
-        except ValueError:
-            console.print('[red]Error: Agent must be specified in "module:variable" format[/red]')
-            return 1
-
-        module = importlib.import_module(module_path)
-        agent = getattr(module, variable_name)
-        if not isinstance(agent, Agent):
-            console.print(f'[red]Error: {args.agent} is not an Agent instance[/red]')
-            return 1
-
-    model_arg_set = args.model is not None
-    if agent.model is None or model_arg_set:
-        try:
-            agent.model = infer_model(args.model or default_model)
-        except UserError as e:
-            console.print(f'Error initializing [magenta]{args.model}[/magenta]:\n[red]{e}[/red]')
-            return 1
-
-    model_name = agent.model if isinstance(agent.model, str) else f'{agent.model.system}:{agent.model.model_name}'
-    if args.agent and model_arg_set:
-        console.print(
-            f'{name_version} using custom agent [magenta]{args.agent}[/magenta] with [magenta]{model_name}[/magenta]',
-            highlight=False,
+        agent_obj: Agent[None, str] = cli_agent
+        if agent:
+            sys.path.append(os.getcwd())
+            try:
+                module_path, variable_name = agent.split(':')
+            except ValueError:
+                console.print('[red]Error: Agent must be specified in "module:variable" format[/red]')
+                raise click.exceptions.Exit(1)
+
+            module = importlib.import_module(module_path)
+            agent_obj = getattr(module, variable_name)
+            if not isinstance(agent_obj, Agent):
+                console.print(f'[red]Error: {agent} is not an Agent instance[/red]')
+                raise click.exceptions.Exit(1)
+
+        model_arg_set = model is not None
+        if agent_obj.model is None or model_arg_set:
+            try:
+                agent_obj.model = infer_model(model or default_model)
+            except UserError as e:
+                console.print(f'Error initializing [magenta]{model}[/magenta]:\n[red]{e}[/red]')
+                raise click.exceptions.Exit(1)
+
+        model_name = (
+            agent_obj.model
+            if isinstance(agent_obj.model, str)
+            else f'{agent_obj.model.system}:{agent_obj.model.model_name}'
         )
-    elif args.agent:
-        console.print(f'{name_version} using custom agent [magenta]{args.agent}[/magenta]', highlight=False)
-    else:
-        console.print(f'{name_version} with [magenta]{model_name}[/magenta]', highlight=False)
+        if agent and model_arg_set:
+            console.print(
+                f'{name_version} using custom agent [magenta]{agent}[/magenta] with [magenta]{model_name}[/magenta]',
+                highlight=False,
+            )
+        elif agent:
+            console.print(f'{name_version} using custom agent [magenta]{agent}[/magenta]', highlight=False)
+        else:
+            console.print(f'{name_version} with [magenta]{model_name}[/magenta]', highlight=False)
 
-    stream = not args.no_stream
-    if args.code_theme == 'light':
-        code_theme = 'default'
-    elif args.code_theme == 'dark':
-        code_theme = 'monokai'
-    else:
-        code_theme = args.code_theme  # pragma: no cover
+        stream = not no_stream
+        if code_theme == 'light':
+            code_theme_name = 'default'
+        elif code_theme == 'dark':
+            code_theme_name = 'monokai'
+        else:
+            code_theme_name = code_theme  # pragma: no cover
+
+        if prompt:
+            try:
+                asyncio.run(ask_agent(agent_obj, prompt, stream, console, code_theme_name))
+            except KeyboardInterrupt:
+                pass
+            return 0
 
-    if prompt := cast(str, args.prompt):
         try:
-            asyncio.run(ask_agent(agent, prompt, stream, console, code_theme))
-        except KeyboardInterrupt:
-            pass
+            return asyncio.run(run_chat(stream, agent_obj, console, code_theme_name, prog_name))
+        except KeyboardInterrupt:  # pragma: no cover
+            return 0
+
+    args = list(args_list or [])
+    if any(a in ('-h', '--help') for a in args):  # pragma: no cover - exercised via hook
+        _click_main.main(args=args, prog_name=prog_name, standalone_mode=True)
+        # should not get here
        return 0
 
     try:
-        return asyncio.run(run_chat(stream, agent, console, code_theme, prog_name))
-    except KeyboardInterrupt:  # pragma: no cover
-        return 0
+        _click_main.main(args=args, prog_name=prog_name, standalone_mode=True)
+    except SystemExit as e:
+        code = e.code
+        if isinstance(code, int):
+            return code
+        return 0 if code is None else 1  # pragma: no cover
 
 
 async def run_chat(
diff --git a/pydantic_ai_slim/pyproject.toml b/pydantic_ai_slim/pyproject.toml
index de6e164d4b..37d3122817 100644
--- a/pydantic_ai_slim/pyproject.toml
+++ b/pydantic_ai_slim/pyproject.toml
@@ -85,6 +85,7 @@ cli = [
     "prompt-toolkit>=3",
     "argcomplete>=3.5.0",
     "pyperclip>=1.9.0",
+    "click>=8.3.0",
 ]
 # MCP
 mcp = ["mcp>=1.12.3"]
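
Below is a minimal, self-contained sketch (separate from the patch; the command and function names are illustrative) of the exit-code bridging pattern the new `cli()` wrapper relies on: invoke a Click command with `standalone_mode=True` so Click renders `--help` and usage errors itself, then catch the resulting `SystemExit` and map its `code` attribute back to the `int` return value the caller expects.

```python
import click


@click.command()
@click.option('--fail', is_flag=True, help='Exit with status 1 instead of 0.')
def _demo(fail: bool) -> None:
    # Hypothetical command used only to illustrate the pattern.
    click.echo('running')
    if fail:
        # In standalone mode the callback's return value is ignored, so a
        # non-zero status has to be signalled explicitly.
        raise click.exceptions.Exit(1)


def run(argv: list[str]) -> int:
    """Invoke the command and return an exit code instead of terminating the process."""
    try:
        _demo.main(args=argv, prog_name='demo', standalone_mode=True)
    except SystemExit as e:
        # Click always ends a standalone invocation with SystemExit; its code may
        # be an int, None (success), or a string (treated here as failure).
        code = e.code
        if isinstance(code, int):
            return code
        return 0 if code is None else 1
    return 0  # not normally reached: standalone_mode always raises SystemExit


if __name__ == '__main__':
    print(run(['--fail']))  # prints "running", then 1
    print(run(['--help']))  # prints the help text, then 0
```

Because standalone mode discards the callback's return value, error paths have to raise (here `click.exceptions.Exit(1)`), which mirrors why the patched `_click_main` raises `click.exceptions.Exit(1)` where the argparse version returned 1.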