Skip to content

Commit 428a11b

Browse files
committed
Backwards compatible refactor + make sure tests pass
1 parent 4e60e9d commit 428a11b

File tree

6 files changed

+2867
-2294
lines changed

6 files changed

+2867
-2294
lines changed

clai/README.md

Lines changed: 15 additions & 24 deletions
Original file line number | Diff line number | Diff line change
@@ -54,28 +54,19 @@ Either way, running `clai` will start an interactive session where you can chat
5454
## Help
5555

5656
```
57-
usage: clai [-h] [-m [MODEL]] [-a AGENT] [-l] [-t [CODE_THEME]] [--no-stream] [--version] [prompt]
58-
59-
Pydantic AI CLI v...
60-
61-
Special prompts:
62-
* `/exit` - exit the interactive mode (ctrl-c and ctrl-d also work)
63-
* `/markdown` - show the last markdown output of the last question
64-
* `/multiline` - toggle multiline mode
65-
* `/cp` - copy the last response to clipboard
66-
67-
positional arguments:
68-
prompt AI Prompt, if omitted fall into interactive mode
69-
70-
options:
71-
-h, --help show this help message and exit
72-
-m [MODEL], --model [MODEL]
73-
Model to use, in format "<provider>:<model>" e.g. "openai:gpt-4.1" or "anthropic:claude-sonnet-4-0". Defaults to "openai:gpt-4.1".
74-
-a AGENT, --agent AGENT
75-
Custom Agent to use, in format "module:variable", e.g. "mymodule.submodule:my_agent"
76-
-l, --list-models List all available models and exit
77-
-t [CODE_THEME], --code-theme [CODE_THEME]
78-
Which colors to use for code, can be "dark", "light" or any theme from pygments.org/styles/. Defaults to "dark" which works well on dark terminals.
79-
--no-stream Disable streaming from the model
80-
--version Show version and exit
57+
Usage: clai [OPTIONS] [PROMPT]
58+
59+
Options:
60+
-m, --model TEXT Model to use, in format "<provider>:<model>" e.g.
61+
"openai:gpt-4.1" or "anthropic:claude-sonnet-4-0".
62+
Defaults to "openai:gpt-4.1".
63+
-a, --agent TEXT Custom Agent to use, in format "module:variable",
64+
e.g. "mymodule.submodule:my_agent"
65+
-l, --list-models List all available models and exit
66+
-t, --code-theme TEXT Which colors to use for code, can be "dark", "light"
67+
or any theme from pygments.org/styles/. Defaults to
68+
"dark" which works well on dark terminals.
69+
--no-stream Disable streaming from the model
70+
--version Show version and exit
71+
-h, --help Show this message and exit.
8172
```

clai/pyproject.toml

Lines changed: 3 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -48,6 +48,9 @@ requires-python = ">=3.10"
4848
[tool.hatch.metadata.hooks.uv-dynamic-versioning]
4949
dependencies = [
5050
"pydantic-ai=={{ version }}",
51+
"rich>=13",
52+
"prompt-toolkit>=3",
53+
"click>=8.0.0",
5154
]
5255

5356
[tool.hatch.metadata]

package-lock.json

Lines changed: 6 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

pydantic_ai_slim/pydantic_ai/_cli.py

Lines changed: 126 additions & 67 deletions
Original file line number | Diff line number | Diff line change
@@ -1,6 +1,5 @@
11
from __future__ import annotations as _annotations
22

3-
import argparse
43
import asyncio
54
import importlib
65
import os
@@ -10,7 +9,7 @@
109
from contextlib import ExitStack
1110
from datetime import datetime, timezone
1211
from pathlib import Path
13-
from typing import Any, cast
12+
from typing import Any
1413

1514
from typing_inspection.introspection import get_literal_values
1615

@@ -23,8 +22,7 @@
2322
from .output import OutputDataT
2423

2524
try:
26-
import argcomplete
27-
import pyperclip
25+
import click
2826
from prompt_toolkit import PromptSession
2927
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory, Suggestion
3028
from prompt_toolkit.buffer import Buffer
@@ -39,7 +37,7 @@
3937
from rich.text import Text
4038
except ImportError as _import_error:
4139
raise ImportError(
42-
'Please install `rich`, `prompt-toolkit`, `pyperclip` and `argcomplete` to use the Pydantic AI CLI, '
40+
'Please install `rich`, `prompt-toolkit` and `click` to use the Pydantic AI CLI, '
4341
'you can use the `cli` optional group — `pip install "pydantic-ai-slim[cli]"`'
4442
) from _import_error
4543

@@ -65,7 +63,13 @@ class SimpleCodeBlock(CodeBlock):
6563
def __rich_console__(self, console: Console, options: ConsoleOptions) -> RenderResult:
6664
code = str(self.text).rstrip()
6765
yield Text(self.lexer_name, style='dim')
68-
yield Syntax(code, self.lexer_name, theme=self.theme, background_color='default', word_wrap=True)
66+
yield Syntax(
67+
code,
68+
self.lexer_name,
69+
theme=self.theme,
70+
background_color='default',
71+
word_wrap=True,
72+
)
6973
yield Text(f'/{self.lexer_name}', style='dim')
7074

7175

@@ -102,120 +106,171 @@ def cli_exit(prog_name: str = 'pai'): # pragma: no cover
102106
sys.exit(cli(prog_name=prog_name))
103107

104108

105-
def cli( # noqa: C901
106-
args_list: Sequence[str] | None = None, *, prog_name: str = 'pai', default_model: str = 'openai:gpt-4.1'
109+
def cli(
110+
args_list: Sequence[str] | None = None,
111+
*,
112+
prog_name: str = 'pai',
113+
default_model: str = 'openai:gpt-4.1',
107114
) -> int:
108115
"""Run the CLI and return the exit code for the process."""
109-
parser = argparse.ArgumentParser(
110-
prog=prog_name,
111-
description=f"""\
112-
Pydantic AI CLI v{__version__}\n\n
113-
114-
Special prompts:
115-
* `/exit` - exit the interactive mode (ctrl-c and ctrl-d also work)
116-
* `/markdown` - show the last markdown output of the last question
117-
* `/multiline` - toggle multiline mode
118-
* `/cp` - copy the last response to clipboard
119-
""",
120-
formatter_class=argparse.RawTextHelpFormatter,
121-
)
122-
parser.add_argument('prompt', nargs='?', help='AI Prompt, if omitted fall into interactive mode')
123-
arg = parser.add_argument(
116+
117+
# Create click command for parsing
118+
@click.command(context_settings={'help_option_names': ['-h', '--help']})
119+
@click.argument('prompt', required=False)
120+
@click.option(
124121
'-m',
125122
'--model',
126-
nargs='?',
127123
help=f'Model to use, in format "<provider>:<model>" e.g. "openai:gpt-4.1" or "anthropic:claude-sonnet-4-0". Defaults to "{default_model}".',
128124
)
129-
# we don't want to autocomplete or list models that don't include the provider,
130-
# e.g. we want to show `openai:gpt-4o` but not `gpt-4o`
131-
qualified_model_names = [n for n in get_literal_values(KnownModelName.__value__) if ':' in n]
132-
arg.completer = argcomplete.ChoicesCompleter(qualified_model_names) # type: ignore[reportPrivateUsage]
133-
parser.add_argument(
125+
@click.option(
134126
'-a',
135127
'--agent',
136128
help='Custom Agent to use, in format "module:variable", e.g. "mymodule.submodule:my_agent"',
137129
)
138-
parser.add_argument(
130+
@click.option(
139131
'-l',
140132
'--list-models',
141-
action='store_true',
133+
is_flag=True,
142134
help='List all available models and exit',
143135
)
144-
parser.add_argument(
136+
@click.option(
145137
'-t',
146138
'--code-theme',
147-
nargs='?',
148-
help='Which colors to use for code, can be "dark", "light" or any theme from pygments.org/styles/. Defaults to "dark" which works well on dark terminals.',
149139
default='dark',
140+
help='Which colors to use for code, can be "dark", "light" or any theme from pygments.org/styles/. Defaults to "dark" which works well on dark terminals.',
150141
)
151-
parser.add_argument('--no-stream', action='store_true', help='Disable streaming from the model')
152-
parser.add_argument('--version', action='store_true', help='Show version and exit')
142+
@click.option('--no-stream', is_flag=True, help='Disable streaming from the model')
143+
@click.option('--version', is_flag=True, help='Show version and exit')
144+
def click_cli(
145+
prompt: str | None,
146+
model: str | None,
147+
agent: str | None,
148+
list_models: bool,
149+
code_theme: str,
150+
no_stream: bool,
151+
version: bool,
152+
) -> int:
153+
f"""Pydantic AI CLI v{__version__}
154+
155+
Special prompts:
156+
* /exit - exit the interactive mode (ctrl-c and ctrl-d also work)
157+
* /markdown - show the last markdown output of the last question
158+
* /multiline - toggle multiline mode
159+
"""
160+
return _cli_impl(
161+
prompt=prompt,
162+
model=model,
163+
agent=agent,
164+
list_models=list_models,
165+
code_theme=code_theme,
166+
no_stream=no_stream,
167+
version=version,
168+
prog_name=prog_name,
169+
default_model=default_model,
170+
)
153171

154-
argcomplete.autocomplete(parser)
155-
args = parser.parse_args(args_list)
172+
# Check if this is a help or version request that should raise SystemExit
173+
should_exit = args_list and any(arg in ['--help', '-h', '--version'] for arg in args_list)
174+
175+
# Invoke click command with appropriate mode
176+
try:
177+
if should_exit:
178+
# Use standalone_mode=True for --help/--version to get SystemExit behavior
179+
click_cli.main(args_list, standalone_mode=True, prog_name=prog_name)
180+
else:
181+
# Use standalone_mode=False for normal operations
182+
result = click_cli.main(args_list, standalone_mode=False, prog_name=prog_name)
183+
return result if result is not None else 0
184+
except click.ClickException as e:
185+
e.show()
186+
return 1
187+
188+
189+
def _cli_impl( # noqa: C901
190+
prompt: str | None,
191+
model: str | None,
192+
agent: str | None,
193+
list_models: bool,
194+
code_theme: str,
195+
no_stream: bool,
196+
version: bool,
197+
prog_name: str,
198+
default_model: str,
199+
) -> int:
200+
"""Implementation of CLI logic, separated from click decorators."""
201+
# we don't want to autocomplete or list models that don't include the provider,
202+
# e.g. we want to show `openai:gpt-4o` but not `gpt-4o`
203+
qualified_model_names = [n for n in get_literal_values(KnownModelName.__value__) if ':' in n]
156204

157205
console = Console()
158206
name_version = f'[green]{prog_name} - Pydantic AI CLI v{__version__}[/green]'
159-
if args.version:
207+
if version:
160208
console.print(name_version, highlight=False)
161209
return 0
162-
if args.list_models:
210+
if list_models:
163211
console.print(f'{name_version}\n\n[green]Available models:[/green]')
164-
for model in qualified_model_names:
165-
console.print(f' {model}', highlight=False)
212+
for model_name in qualified_model_names:
213+
console.print(f' {model_name}', highlight=False)
166214
return 0
167215

168-
agent: Agent[None, str] = cli_agent
169-
if args.agent:
216+
agent_instance: Agent[None, str] = cli_agent
217+
if agent:
170218
sys.path.append(os.getcwd())
171219
try:
172-
module_path, variable_name = args.agent.split(':')
220+
module_path, variable_name = agent.split(':')
173221
except ValueError:
174222
console.print('[red]Error: Agent must be specified in "module:variable" format[/red]')
175223
return 1
176224

177225
module = importlib.import_module(module_path)
178-
agent = getattr(module, variable_name)
179-
if not isinstance(agent, Agent):
180-
console.print(f'[red]Error: {args.agent} is not an Agent instance[/red]')
226+
agent_instance = getattr(module, variable_name)
227+
if not isinstance(agent_instance, Agent):
228+
console.print(f'[red]Error: {agent} is not an Agent instance[/red]')
181229
return 1
182230

183-
model_arg_set = args.model is not None
184-
if agent.model is None or model_arg_set:
231+
model_arg_set = model is not None
232+
if agent_instance.model is None or model_arg_set:
185233
try:
186-
agent.model = infer_model(args.model or default_model)
234+
agent_instance.model = infer_model(model or default_model)
187235
except UserError as e:
188-
console.print(f'Error initializing [magenta]{args.model}[/magenta]:\n[red]{e}[/red]')
236+
console.print(f'Error initializing [magenta]{model}[/magenta]:\n[red]{e}[/red]')
189237
return 1
190238

191-
model_name = agent.model if isinstance(agent.model, str) else f'{agent.model.system}:{agent.model.model_name}'
192-
if args.agent and model_arg_set:
239+
model_name = (
240+
agent_instance.model
241+
if isinstance(agent_instance.model, str)
242+
else f'{agent_instance.model.system}:{agent_instance.model.model_name}'
243+
)
244+
if agent and model_arg_set:
245+
console.print(
246+
f'{name_version} using custom agent [magenta]{agent}[/magenta] with [magenta]{model_name}[/magenta]',
247+
highlight=False,
248+
)
249+
elif agent:
193250
console.print(
194-
f'{name_version} using custom agent [magenta]{args.agent}[/magenta] with [magenta]{model_name}[/magenta]',
251+
f'{name_version} using custom agent [magenta]{agent}[/magenta]',
195252
highlight=False,
196253
)
197-
elif args.agent:
198-
console.print(f'{name_version} using custom agent [magenta]{args.agent}[/magenta]', highlight=False)
199254
else:
200255
console.print(f'{name_version} with [magenta]{model_name}[/magenta]', highlight=False)
201256

202-
stream = not args.no_stream
203-
if args.code_theme == 'light':
204-
code_theme = 'default'
205-
elif args.code_theme == 'dark':
206-
code_theme = 'monokai'
257+
stream = not no_stream
258+
if code_theme == 'light':
259+
theme = 'default'
260+
elif code_theme == 'dark':
261+
theme = 'monokai'
207262
else:
208-
code_theme = args.code_theme # pragma: no cover
263+
theme = code_theme # pragma: no cover
209264

210-
if prompt := cast(str, args.prompt):
265+
if prompt:
211266
try:
212-
asyncio.run(ask_agent(agent, prompt, stream, console, code_theme))
267+
asyncio.run(ask_agent(agent_instance, prompt, stream, console, theme))
213268
except KeyboardInterrupt:
214269
pass
215270
return 0
216271

217272
try:
218-
return asyncio.run(run_chat(stream, agent, console, code_theme, prog_name))
273+
return asyncio.run(run_chat(stream, agent_instance, console, theme, prog_name))
219274
except KeyboardInterrupt: # pragma: no cover
220275
return 0
221276

@@ -317,7 +372,11 @@ def get_suggestion(self, buffer: Buffer, document: Document) -> Suggestion | Non
317372

318373

319374
def handle_slash_command(
320-
ident_prompt: str, messages: list[ModelMessage], multiline: bool, console: Console, code_theme: str
375+
ident_prompt: str,
376+
messages: list[ModelMessage],
377+
multiline: bool,
378+
console: Console,
379+
code_theme: str,
321380
) -> tuple[int | None, bool]:
322381
if ident_prompt == '/markdown':
323382
try:

pydantic_ai_slim/pyproject.toml

Lines changed: 1 addition & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -80,12 +80,7 @@ huggingface = ["huggingface-hub[inference]>=0.33.5"]
8080
duckduckgo = ["ddgs>=9.0.0"]
8181
tavily = ["tavily-python>=0.5.0"]
8282
# CLI
83-
cli = [
84-
"rich>=13",
85-
"prompt-toolkit>=3",
86-
"argcomplete>=3.5.0",
87-
"pyperclip>=1.9.0",
88-
]
83+
cli = ["rich>=13", "prompt-toolkit>=3", "click>=8.0.0"]
8984
# MCP
9085
mcp = ["mcp>=1.12.3"]
9186
# Evals

0 commit comments

Comments (0)