Skip to content

Use click in CLI #2421

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Draft
wants to merge 10 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
37 changes: 17 additions & 20 deletions clai/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -53,27 +53,24 @@ Either way, running `clai` will start an interactive session where you can chat
## Help

```
usage: clai [-h] [-m [MODEL]] [-a AGENT] [-l] [-t [CODE_THEME]] [--no-stream] [--version] [prompt]
Usage: clai [-h] [-m [MODEL]] [-a AGENT] [-l] [-t [CODE_THEME]] [--no-stream]
[--version] [PROMPT]

Pydantic AI CLI v...

Special prompts:
* `/exit` - exit the interactive mode (ctrl-c and ctrl-d also work)
* `/markdown` - show the last markdown output of the last question
* `/multiline` - toggle multiline mode

positional arguments:
prompt AI Prompt, if omitted fall into interactive mode

options:
-h, --help show this help message and exit
-m [MODEL], --model [MODEL]
Model to use, in format "<provider>:<model>" e.g. "openai:gpt-4.1" or "anthropic:claude-sonnet-4-0". Defaults to "openai:gpt-4.1".
-a AGENT, --agent AGENT
Custom Agent to use, in format "module:variable", e.g. "mymodule.submodule:my_agent"
-l, --list-models List all available models and exit
-t [CODE_THEME], --code-theme [CODE_THEME]
Which colors to use for code, can be "dark", "light" or any theme from pygments.org/styles/. Defaults to "dark" which works well on dark terminals.
--no-stream Disable streaming from the model
--version Show version and exit
prompt AI Prompt, if omitted fall into interactive mode

Options:
-m, --model TEXT Model to use, in format "<provider>:<model>" e.g.
"openai:gpt-4.1" or "anthropic:claude-sonnet-4-0".
Defaults to "openai:gpt-4.1".
-a, --agent TEXT Custom Agent to use, in format "module:variable",
e.g. "mymodule.submodule:my_agent"
-l, --list-models List all available models and exit
-t, --code-theme TEXT Which colors to use for code, can be "dark", "light"
or any theme from pygments.org/styles/. Defaults to
"dark" which works well on dark terminals.
--no-stream Disable streaming from the model
--version Show version and exit
-h, --help Show this message and exit.
```
3 changes: 3 additions & 0 deletions clai/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,9 @@ requires-python = ">=3.9"
[tool.hatch.metadata.hooks.uv-dynamic-versioning]
dependencies = [
"pydantic-ai=={{ version }}",
"rich>=13",
"prompt-toolkit>=3",
"click>=8.0.0",
]

[tool.hatch.metadata]
Expand Down
6 changes: 6 additions & 0 deletions package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

211 changes: 147 additions & 64 deletions pydantic_ai_slim/pydantic_ai/_cli.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
from __future__ import annotations as _annotations

import argparse
import asyncio
import importlib
import os
Expand All @@ -10,7 +9,7 @@
from contextlib import ExitStack
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, cast
from typing import Any

from typing_inspection.introspection import get_literal_values

Expand All @@ -23,7 +22,8 @@
from .output import OutputDataT

try:
import argcomplete
import click
from click import HelpFormatter
from prompt_toolkit import PromptSession
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory, Suggestion
from prompt_toolkit.buffer import Buffer
Expand All @@ -38,7 +38,7 @@
from rich.text import Text
except ImportError as _import_error:
raise ImportError(
'Please install `rich`, `prompt-toolkit` and `argcomplete` to use the Pydantic AI CLI, '
'Please install `rich`, `prompt-toolkit` and `click` to use the Pydantic AI CLI, '
'you can use the `cli` optional group — `pip install "pydantic-ai-slim[cli]"`'
) from _import_error

Expand All @@ -64,7 +64,13 @@ class SimpleCodeBlock(CodeBlock):
def __rich_console__(self, console: Console, options: ConsoleOptions) -> RenderResult:
code = str(self.text).rstrip()
yield Text(self.lexer_name, style='dim')
yield Syntax(code, self.lexer_name, theme=self.theme, background_color='default', word_wrap=True)
yield Syntax(
code,
self.lexer_name,
theme=self.theme,
background_color='default',
word_wrap=True,
)
yield Text(f'/{self.lexer_name}', style='dim')


Expand All @@ -82,6 +88,25 @@ def __rich_console__(self, console: Console, options: ConsoleOptions) -> RenderR
)


class ArgparseStyleCommand(click.Command):
    """click.Command subclass whose usage/help output mimics the old argparse formatting."""

    # Fixed usage synopsis, matching what argparse previously printed for this CLI.
    _USAGE_SYNOPSIS = '[-h] [-m [MODEL]] [-a AGENT] [-l] [-t [CODE_THEME]] [--no-stream] [--version] [PROMPT]'

    def get_usage(self, ctx: click.Context) -> str:
        # NOTE(review): unlike click's default, this returns only the argument synopsis —
        # format_usage below prepends the command path when writing the "Usage:" line.
        return self._USAGE_SYNOPSIS

    def format_usage(self, ctx: click.Context, formatter: HelpFormatter) -> None:
        # Emit "Usage: <command path> <synopsis>" exactly as argparse would.
        formatter.write_usage(ctx.command_path, self.get_usage(ctx))

    def format_help(self, ctx: click.Context, formatter: HelpFormatter) -> None:
        # Layout mirrors argparse: usage line, blank line, positional arguments
        # section, then the options section rendered by click itself.
        self.format_usage(ctx, formatter)
        formatter.write_paragraph()

        with formatter.section('positional arguments'):
            formatter.write_dl([('prompt', 'AI Prompt, if omitted fall into interactive mode')])

        self.format_options(ctx, formatter)


cli_agent = Agent()


Expand All @@ -101,119 +126,173 @@ def cli_exit(prog_name: str = 'pai'): # pragma: no cover
sys.exit(cli(prog_name=prog_name))


def cli( # noqa: C901
args_list: Sequence[str] | None = None, *, prog_name: str = 'pai', default_model: str = 'openai:gpt-4.1'
def cli(
args_list: Sequence[str] | None = None,
*,
prog_name: str = 'pai',
default_model: str = 'openai:gpt-4.1',
) -> int:
"""Run the CLI and return the exit code for the process."""
parser = argparse.ArgumentParser(
prog=prog_name,
description=f"""\
Pydantic AI CLI v{__version__}\n\n

Special prompts:
* `/exit` - exit the interactive mode (ctrl-c and ctrl-d also work)
* `/markdown` - show the last markdown output of the last question
* `/multiline` - toggle multiline mode
""",
formatter_class=argparse.RawTextHelpFormatter,

# Create click command for parsing
@click.command(
cls=ArgparseStyleCommand,
context_settings={'help_option_names': ['-h', '--help']},
)
parser.add_argument('prompt', nargs='?', help='AI Prompt, if omitted fall into interactive mode')
arg = parser.add_argument(
@click.argument('prompt', required=False)
@click.option(
'-m',
'--model',
nargs='?',
shell_complete=get_literal_values(KnownModelName.__value__),
help=f'Model to use, in format "<provider>:<model>" e.g. "openai:gpt-4.1" or "anthropic:claude-sonnet-4-0". Defaults to "{default_model}".',
)
# we don't want to autocomplete or list models that don't include the provider,
# e.g. we want to show `openai:gpt-4o` but not `gpt-4o`
qualified_model_names = [n for n in get_literal_values(KnownModelName.__value__) if ':' in n]
arg.completer = argcomplete.ChoicesCompleter(qualified_model_names) # type: ignore[reportPrivateUsage]
parser.add_argument(
@click.option(
'-a',
'--agent',
help='Custom Agent to use, in format "module:variable", e.g. "mymodule.submodule:my_agent"',
)
parser.add_argument(
@click.option(
'-l',
'--list-models',
action='store_true',
is_flag=True,
help='List all available models and exit',
)
parser.add_argument(
@click.option(
'-t',
'--code-theme',
nargs='?',
help='Which colors to use for code, can be "dark", "light" or any theme from pygments.org/styles/. Defaults to "dark" which works well on dark terminals.',
default='dark',
help='Which colors to use for code, can be "dark", "light" or any theme from pygments.org/styles/. Defaults to "dark" which works well on dark terminals.',
)
parser.add_argument('--no-stream', action='store_true', help='Disable streaming from the model')
parser.add_argument('--version', action='store_true', help='Show version and exit')
@click.option('--no-stream', is_flag=True, help='Disable streaming from the model')
@click.option('--version', is_flag=True, help='Show version and exit')
def click_cli(
prompt: str | None,
model: str | None,
agent: str | None,
list_models: bool,
code_theme: str,
no_stream: bool,
version: bool,
) -> int:
f"""Pydantic AI CLI v{__version__}

Special prompts:
* /exit - exit the interactive mode (ctrl-c and ctrl-d also work)
* /markdown - show the last markdown output of the last question
* /multiline - toggle multiline mode
"""
return _cli_impl(
prompt=prompt,
model=model,
agent=agent,
list_models=list_models,
code_theme=code_theme,
no_stream=no_stream,
version=version,
prog_name=prog_name,
default_model=default_model,
)

argcomplete.autocomplete(parser)
args = parser.parse_args(args_list)
# Detect if the user explicitly asked for help (should mimic argparse behaviour)
help_requested = args_list and any(arg in ('--help', '-h') for arg in args_list)

try:
result = click_cli.main(args_list, standalone_mode=False, prog_name=prog_name)

if help_requested:
raise SystemExit(0)

return result if result is not None else 0
except click.ClickException as e:
e.show() # pragma: no cover
return 1 # pragma: no cover


def _cli_impl( # noqa: C901
prompt: str | None,
model: str | None,
agent: str | None,
list_models: bool,
code_theme: str,
no_stream: bool,
version: bool,
prog_name: str,
default_model: str,
) -> int:
"""Implementation of CLI logic, separated from click decorators."""
# we don't want to autocomplete or list models that don't include the provider,
# e.g. we want to show `openai:gpt-4o` but not `gpt-4o`
qualified_model_names = [n for n in get_literal_values(KnownModelName.__value__) if ':' in n]

console = Console()
name_version = f'[green]{prog_name} - Pydantic AI CLI v{__version__}[/green]'
if args.version:
if version:
console.print(name_version, highlight=False)
return 0
if args.list_models:
if list_models:
console.print(f'{name_version}\n\n[green]Available models:[/green]')
for model in qualified_model_names:
console.print(f' {model}', highlight=False)
for model_name in qualified_model_names:
console.print(f' {model_name}', highlight=False)
return 0

agent: Agent[None, str] = cli_agent
if args.agent:
agent_instance: Agent[None, str] = cli_agent
if agent:
sys.path.append(os.getcwd())
try:
module_path, variable_name = args.agent.split(':')
module_path, variable_name = agent.split(':')
except ValueError:
console.print('[red]Error: Agent must be specified in "module:variable" format[/red]')
return 1

module = importlib.import_module(module_path)
agent = getattr(module, variable_name)
if not isinstance(agent, Agent):
console.print(f'[red]Error: {args.agent} is not an Agent instance[/red]')
agent_instance = getattr(module, variable_name)
if not isinstance(agent_instance, Agent):
console.print(f'[red]Error: {agent} is not an Agent instance[/red]')
return 1

model_arg_set = args.model is not None
if agent.model is None or model_arg_set:
model_arg_set = model is not None
if agent_instance.model is None or model_arg_set:
try:
agent.model = infer_model(args.model or default_model)
agent_instance.model = infer_model(model or default_model)
except UserError as e:
console.print(f'Error initializing [magenta]{args.model}[/magenta]:\n[red]{e}[/red]')
console.print(f'Error initializing [magenta]{model}[/magenta]:\n[red]{e}[/red]')
return 1

model_name = agent.model if isinstance(agent.model, str) else f'{agent.model.system}:{agent.model.model_name}'
if args.agent and model_arg_set:
model_name = (
agent_instance.model
if isinstance(agent_instance.model, str)
else f'{agent_instance.model.system}:{agent_instance.model.model_name}'
)
if agent and model_arg_set:
console.print(
f'{name_version} using custom agent [magenta]{agent}[/magenta] with [magenta]{model_name}[/magenta]',
highlight=False,
)
elif agent:
console.print(
f'{name_version} using custom agent [magenta]{args.agent}[/magenta] with [magenta]{model_name}[/magenta]',
f'{name_version} using custom agent [magenta]{agent}[/magenta]',
highlight=False,
)
elif args.agent:
console.print(f'{name_version} using custom agent [magenta]{args.agent}[/magenta]', highlight=False)
else:
console.print(f'{name_version} with [magenta]{model_name}[/magenta]', highlight=False)

stream = not args.no_stream
if args.code_theme == 'light':
code_theme = 'default'
elif args.code_theme == 'dark':
code_theme = 'monokai'
stream = not no_stream
if code_theme == 'light':
theme = 'default'
elif code_theme == 'dark':
theme = 'monokai'
else:
code_theme = args.code_theme # pragma: no cover
theme = code_theme # pragma: no cover

if prompt := cast(str, args.prompt):
if prompt:
try:
asyncio.run(ask_agent(agent, prompt, stream, console, code_theme))
asyncio.run(ask_agent(agent_instance, prompt, stream, console, theme))
except KeyboardInterrupt:
pass
return 0

try:
return asyncio.run(run_chat(stream, agent, console, code_theme, prog_name))
return asyncio.run(run_chat(stream, agent_instance, console, theme, prog_name))
except KeyboardInterrupt: # pragma: no cover
return 0

Expand Down Expand Up @@ -314,7 +393,11 @@ def get_suggestion(self, buffer: Buffer, document: Document) -> Suggestion | Non


def handle_slash_command(
ident_prompt: str, messages: list[ModelMessage], multiline: bool, console: Console, code_theme: str
ident_prompt: str,
messages: list[ModelMessage],
multiline: bool,
console: Console,
code_theme: str,
) -> tuple[int | None, bool]:
if ident_prompt == '/markdown':
try:
Expand Down
2 changes: 1 addition & 1 deletion pydantic_ai_slim/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ huggingface = ["huggingface-hub[inference]>=0.33.5"]
duckduckgo = ["ddgs>=9.0.0"]
tavily = ["tavily-python>=0.5.0"]
# CLI
cli = ["rich>=13", "prompt-toolkit>=3", "argcomplete>=3.5.0"]
cli = ["rich>=13", "prompt-toolkit>=3", "click>=8.0.0"]
# MCP
mcp = ["mcp>=1.10.0; python_version >= '3.10'"]
# Evals
Expand Down
Loading