17 changes: 15 additions & 2 deletions pydantic_ai_slim/pydantic_ai/_cli.py
@@ -18,7 +18,7 @@
from ._run_context import AgentDepsT
from .agent import AbstractAgent, Agent
from .exceptions import UserError
from .messages import ModelMessage, ModelResponse
from .messages import FunctionToolCallEvent, FunctionToolResultEvent, ModelMessage, ModelResponse
from .models import KnownModelName, infer_model
from .output import OutputDataT

@@ -229,6 +229,7 @@ async def run_chat(
config_dir: Path | None = None,
deps: AgentDepsT = None,
message_history: list[ModelMessage] | None = None,
show_tool_calls: bool = False,
Collaborator:

Agreed with @Kludex -- we can always show tool calls, and remove the flag.

Author:

OK, nice. I just wasn't sure whether some people might not want them. I removed the option.

) -> int:
prompt_history_path = (config_dir or PYDANTIC_AI_HOME) / PROMPT_HISTORY_FILENAME
prompt_history_path.parent.mkdir(parents=True, exist_ok=True)
@@ -255,7 +256,7 @@
return exit_value
else:
try:
messages = await ask_agent(agent, text, stream, console, code_theme, deps, messages)
messages = await ask_agent(agent, text, stream, console, code_theme, deps, messages, show_tool_calls)
except CancelledError: # pragma: no cover
console.print('[dim]Interrupted[/dim]')
except Exception as e: # pragma: no cover
@@ -273,6 +274,7 @@ async def ask_agent(
code_theme: str,
deps: AgentDepsT = None,
messages: list[ModelMessage] | None = None,
show_tool_calls: bool = False,
) -> list[ModelMessage]:
status = Status('[dim]Working on it…[/dim]', console=console)

@@ -294,6 +296,17 @@

async for content in handle_stream.stream_output(debounce_by=None):
live.update(Markdown(str(content), code_theme=code_theme))
elif show_tool_calls and Agent.is_call_tools_node(node):
async with node.stream(agent_run.ctx) as handle_stream:
async for event in handle_stream:
if isinstance(event, FunctionToolCallEvent):
console.print(
Markdown(f'[Tool] {event.part.tool_name!r} called with args={event.part.args}')
)
elif isinstance(event, FunctionToolResultEvent):
console.print(
Markdown(f'[Tool] {event.result.tool_name!r} returned => {event.result.content}')
Collaborator:

Can you show a screenshot of what this looks like please?

I wonder if we can get some prettier styling from Rich as well.

Author:

Agree, we probably can. I updated it a bit today so the status update works correctly and shows when a model is called and when tools are executed. Currently it looks like this:

[Screenshot: pydantic_ai_model_call]

I could add colors or Rich Panels. If you have any preference, please tell me :)

I wasn't sure how to manage long tool results (e.g. whole document parts from retrieval tools) or long tool calls. I just truncate them for now, because it feels cluttered otherwise. On the other hand, this feature (.to_cli()) feels more like a debugging tool for devs, who might prefer to have all the output instead of nice visuals (assuming it's still readable).
I think other CLIs manage this by having a keybind for collapsing and expanding long content sections, so that might be an option at some point, although it's more complex.

Collaborator:

Yeah, it looks a bit too much like logging now, rather than a UI. People definitely use this as a full-blown LLM chat client, not just a debugging tool. So having this optional is good, but I'd also want a way to have it display more like chat apps do, showing that the LLM is doing work rather than dumping the entire payload. Perhaps we can start with a verbosity flag, and then add keybindings at some future point?

)

assert agent_run.result is not None
return agent_run.result.all_messages()
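Following up on the styling discussion in the thread above, here is a rough, hypothetical sketch of what Panel-based rendering with truncation could look like. It is not part of this PR: `render_tool_event`, `MAX_PAYLOAD_LEN`, and the sample payload are made-up names for illustration; only `Console` and `Panel` are real Rich APIs.

```python
# Hypothetical sketch, not part of this PR: render tool call/result events
# with Rich Panels and truncate long payloads instead of printing them raw.
from rich.console import Console
from rich.panel import Panel

MAX_PAYLOAD_LEN = 200  # assumed limit before long tool payloads are truncated


def render_tool_event(console: Console, tool_name: str, payload: str) -> None:
    """Render a tool call or result in a bordered panel, truncating long payloads."""
    if len(payload) > MAX_PAYLOAD_LEN:
        payload = payload[:MAX_PAYLOAD_LEN] + ' …'
    console.print(Panel(payload, title=f'Tool: {tool_name}', border_style='dim'))


if __name__ == '__main__':
    console = Console()
    render_tool_event(console, 'search_docs', '{"query": "pydantic validators"}')
```

A verbosity flag could then decide whether these panels are printed at all or replaced by a one-line status update.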
9 changes: 8 additions & 1 deletion pydantic_ai_slim/pydantic_ai/agent/abstract.py
@@ -1113,13 +1113,15 @@ async def to_cli(
deps: AgentDepsT = None,
prog_name: str = 'pydantic-ai',
message_history: list[_messages.ModelMessage] | None = None,
show_tool_calls: bool = False,
) -> None:
"""Run the agent in a CLI chat interface.

Args:
deps: The dependencies to pass to the agent.
prog_name: The name of the program to use for the CLI. Defaults to 'pydantic-ai'.
message_history: History of the conversation so far.
show_tool_calls: Whether to show tool calls in the CLI.

Example:
```python {title="agent_to_cli.py" test="skip"}
@@ -1143,20 +1145,23 @@ async def main():
code_theme='monokai',
prog_name=prog_name,
message_history=message_history,
show_tool_calls=show_tool_calls,
)

def to_cli_sync(
self: Self,
deps: AgentDepsT = None,
prog_name: str = 'pydantic-ai',
message_history: list[_messages.ModelMessage] | None = None,
show_tool_calls: bool = False,
) -> None:
"""Run the agent in a CLI chat interface with the non-async interface.

Args:
deps: The dependencies to pass to the agent.
prog_name: The name of the program to use for the CLI. Defaults to 'pydantic-ai'.
message_history: History of the conversation so far.
show_tool_calls: Whether to show tool calls in the CLI.

```python {title="agent_to_cli_sync.py" test="skip"}
from pydantic_ai import Agent
@@ -1167,5 +1172,7 @@ def to_cli_sync(
```
"""
return get_event_loop().run_until_complete(
self.to_cli(deps=deps, prog_name=prog_name, message_history=message_history)
self.to_cli(
deps=deps, prog_name=prog_name, message_history=message_history, show_tool_calls=show_tool_calls
)
)
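For reference, a minimal usage sketch of the parameter as it appears in this commit; the model string is a placeholder, while `Agent`, `to_cli_sync`, and `show_tool_calls` come straight from this diff (later threads discuss always showing tool calls and dropping the flag).

```python
from pydantic_ai import Agent

# Placeholder model name; show_tool_calls=True enables the tool call/result
# display added in this commit.
agent = Agent('openai:gpt-4o')
agent.to_cli_sync(show_tool_calls=True)
```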
4 changes: 4 additions & 0 deletions tests/test_cli.py
@@ -293,6 +293,7 @@ def test_agent_to_cli_sync(mocker: MockerFixture, env: TestEnv):
prog_name='pydantic-ai',
deps=None,
message_history=None,
show_tool_calls=False,
)


@@ -309,6 +310,7 @@ async def test_agent_to_cli_async(mocker: MockerFixture, env: TestEnv):
prog_name='pydantic-ai',
deps=None,
message_history=None,
show_tool_calls=False,
)


@@ -329,6 +331,7 @@ async def test_agent_to_cli_with_message_history(mocker: MockerFixture, env: TestEnv):
prog_name='pydantic-ai',
deps=None,
message_history=test_messages,
show_tool_calls=False,
)


@@ -348,4 +351,5 @@ def test_agent_to_cli_sync_with_message_history(mocker: MockerFixture, env: TestEnv):
prog_name='pydantic-ai',
deps=None,
message_history=test_messages,
show_tool_calls=False,
)