Commit 6db9293

Support custom conversation load/store paths and optional --continue

1 parent 6dbe144 commit 6db9293

2 files changed: +52, -35 lines

clai/README.md

Lines changed: 4 additions & 2 deletions
@@ -54,7 +54,7 @@ Either way, running `clai` will start an interactive session where you can chat
 ## Help
 
 ```
-usage: clai [-h] [-m [MODEL]] [-a AGENT] [-l] [-t [CODE_THEME]] [-c] [--no-stream] [--version] [prompt]
+usage: clai [-h] [-m [MODEL]] [-a AGENT] [-l] [-t [CODE_THEME]] [-c [CONTINUE_]] [--store STORE] [--no-stream] [--version] [prompt]
 
 Pydantic AI CLI v...
 
@@ -76,7 +76,9 @@ options:
   -l, --list-models     List all available models and exit
   -t [CODE_THEME], --code-theme [CODE_THEME]
                         Which colors to use for code, can be "dark", "light" or any theme from pygments.org/styles/. Defaults to "dark" which works well on dark terminals.
-  -c, --continue        Continue last conversation, if any, instead of starting a new one.
+  -c [CONTINUE_], --continue [CONTINUE_]
+                        Continue last conversation, if any, instead of starting a new one.
+  --store STORE         Store the last conversation to the specified path instead of the default location.
   --no-stream           Disable streaming from the model
   --version             Show version and exit
 ```
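The new usage line relies on argparse's optional-value flags: with `nargs='?'`, `-c` by itself resolves to `const` (the default conversation path), `-c PATH` uses the given path, and omitting the flag leaves the attribute at `default` (`None`). A minimal standalone sketch of that pattern; the default path below is illustrative, not the CLI's actual location:

```python
import argparse
from pathlib import Path

# Illustrative stand-in for PYDANTIC_AI_HOME / LAST_CONVERSATION_FILENAME.
DEFAULT_PATH = Path.home() / '.clai-demo' / 'last-conversation.json'

parser = argparse.ArgumentParser(prog='clai-sketch')
parser.add_argument(
    '-c',
    '--continue',
    nargs='?',                # the value after the flag is optional
    dest='continue_',         # 'continue' is a Python keyword, so store it as 'continue_'
    const=str(DEFAULT_PATH),  # used when '-c' is passed without a value
    default=None,             # used when '-c' is absent
)
parser.add_argument('--store', default=None)

print(parser.parse_args([]).continue_)                   # None -> start a new conversation
print(parser.parse_args(['-c']).continue_)               # DEFAULT_PATH -> continue the last one
print(parser.parse_args(['-c', 'chat.json']).continue_)  # 'chat.json' -> continue from a custom file
```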

pydantic_ai_slim/pydantic_ai/_cli.py

Lines changed: 48 additions & 33 deletions
@@ -153,10 +153,17 @@ def cli( # noqa: C901
     parser.add_argument(
         '-c',
         '--continue',
+        nargs='?',
         dest='continue_',
-        action='store_true',
+        const=str(PYDANTIC_AI_HOME / LAST_CONVERSATION_FILENAME),
+        default=None,
         help='Continue last conversation, if any, instead of starting a new one.',
     )
+    parser.add_argument(
+        '--store',
+        help='Store the last conversation to the specified path instead of the default location.',
+        default=None,
+    )
     parser.add_argument('--no-stream', action='store_true', help='Disable streaming from the model')
     parser.add_argument('--version', action='store_true', help='Show version and exit')
 
@@ -216,40 +223,47 @@ def cli( # noqa: C901
     else:
         code_theme = args.code_theme  # pragma: no cover
 
+    load_path: Path | None = None
+    if args.continue_:
+        load_path = Path(args.continue_)
+
+    store_path: Path = PYDANTIC_AI_HOME / LAST_CONVERSATION_FILENAME
+    if args.store:
+        store_path = Path(args.store)
+
     try:
-        history = load_last_conversation() if args.continue_ else None
+        history = load_conversation(load_path) if load_path else None
     except ValidationError:
         console.print(
-            '[red]Error loading last conversation, it is corrupted or invalid.\nStarting a new conversation.[/red]'
+            '[red]Error loading conversation, it is corrupted or invalid.\nStarting a new conversation.[/red]'
         )
         history = None
 
     if prompt := cast(str, args.prompt):
         try:
-            asyncio.run(ask_agent(agent, prompt, stream, console, code_theme, messages=history))
+            asyncio.run(ask_agent(agent, prompt, stream, console, code_theme, messages=history, store_path=store_path))
         except KeyboardInterrupt:
             pass
         return 0
 
     try:
-        return asyncio.run(run_chat(stream, agent, console, code_theme, prog_name, message_history=history))
+        return asyncio.run(
+            run_chat(stream, agent, console, code_theme, prog_name, message_history=history, store_path=store_path)
+        )
     except KeyboardInterrupt:  # pragma: no cover
         return 0
 
 
-def store_last_conversation(messages: list[ModelMessage], config_dir: Path | None = None) -> None:
-    last_conversation_path = (config_dir or PYDANTIC_AI_HOME) / LAST_CONVERSATION_FILENAME
-    last_conversation_path.parent.mkdir(parents=True, exist_ok=True)
-    last_conversation_path.write_bytes(ModelMessagesTypeAdapter.dump_json(messages))
-
+def store_conversation(messages: list[ModelMessage], store_path: Path) -> None:
+    store_path.parent.mkdir(parents=True, exist_ok=True)
+    store_path.write_bytes(ModelMessagesTypeAdapter.dump_json(messages))
 
-def load_last_conversation(config_dir: Path | None = None) -> list[ModelMessage] | None:
-    last_conversation_path = (config_dir or PYDANTIC_AI_HOME) / LAST_CONVERSATION_FILENAME
 
-    if not last_conversation_path.exists():
+def load_conversation(load_path: Path) -> list[ModelMessage] | None:
+    if not load_path.exists():
         return None
 
-    return ModelMessagesTypeAdapter.validate_json(last_conversation_path.read_bytes())
+    return ModelMessagesTypeAdapter.validate_json(load_path.read_bytes())
 
 
 async def run_chat(
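The renamed helpers in this hunk are a thin persistence layer over a pydantic `TypeAdapter`: dump the message list to JSON bytes on store, validate the bytes back into models on load. A hypothetical round-trip under the same pattern; `Message` is a stand-in for pydantic_ai's `ModelMessage` so the sketch runs without the real package:

```python
from pathlib import Path
from pydantic import BaseModel, TypeAdapter

class Message(BaseModel):
    role: str
    content: str

# Plays the role of ModelMessagesTypeAdapter.
MessagesAdapter = TypeAdapter(list[Message])

def store_conversation(messages: list[Message], store_path: Path) -> None:
    store_path.parent.mkdir(parents=True, exist_ok=True)       # create the directory on demand
    store_path.write_bytes(MessagesAdapter.dump_json(messages))

def load_conversation(load_path: Path) -> list[Message] | None:
    if not load_path.exists():                                 # no saved conversation yet
        return None
    return MessagesAdapter.validate_json(load_path.read_bytes())

path = Path('/tmp/clai-demo/conversation.json')
store_conversation([Message(role='user', content='hi')], path)
print(load_conversation(path))  # [Message(role='user', content='hi')]
```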
@@ -261,6 +275,7 @@ async def run_chat(
     config_dir: Path | None = None,
     deps: AgentDepsT = None,
     message_history: Sequence[ModelMessage] | None = None,
+    store_path: Path | None = None,
 ) -> int:
     prompt_history_path = (config_dir or PYDANTIC_AI_HOME) / PROMPT_HISTORY_FILENAME
     prompt_history_path.parent.mkdir(parents=True, exist_ok=True)
@@ -287,7 +302,7 @@ async def run_chat(
                 return exit_value
             else:
                 try:
-                    messages = await ask_agent(agent, text, stream, console, code_theme, deps, messages, config_dir)
+                    messages = await ask_agent(agent, text, stream, console, code_theme, deps, messages, store_path)
                 except CancelledError:  # pragma: no cover
                     console.print('[dim]Interrupted[/dim]')
                 except Exception as e:  # pragma: no cover
@@ -305,7 +320,7 @@ async def ask_agent(
     code_theme: str,
     deps: AgentDepsT = None,
     messages: Sequence[ModelMessage] | None = None,
-    config_dir: Path | None = None,
+    store_path: Path | None = None,
 ) -> list[ModelMessage]:
     status = Status('[dim]Working on it…[/dim]', console=console)
 
@@ -314,28 +329,28 @@
         result = await agent.run(prompt, message_history=messages, deps=deps)
         content = str(result.output)
         console.print(Markdown(content, code_theme=code_theme))
-        result_messages = result.all_messages()
-        store_last_conversation(result_messages, config_dir)
+    else:
+        with status, ExitStack() as stack:
+            async with agent.iter(prompt, message_history=messages, deps=deps) as agent_run:
+                live = Live('', refresh_per_second=15, console=console, vertical_overflow='ellipsis')
+                async for node in agent_run:
+                    if Agent.is_model_request_node(node):
+                        async with node.stream(agent_run.ctx) as handle_stream:
+                            status.stop()  # stopping multiple times is idempotent
+                            stack.enter_context(live)  # entering multiple times is idempotent
 
-        return result_messages
+                            async for content in handle_stream.stream_output(debounce_by=None):
+                                live.update(Markdown(str(content), code_theme=code_theme))
 
-    with status, ExitStack() as stack:
-        async with agent.iter(prompt, message_history=messages, deps=deps) as agent_run:
-            live = Live('', refresh_per_second=15, console=console, vertical_overflow='ellipsis')
-            async for node in agent_run:
-                if Agent.is_model_request_node(node):
-                    async with node.stream(agent_run.ctx) as handle_stream:
-                        status.stop()  # stopping multiple times is idempotent
-                        stack.enter_context(live)  # entering multiple times is idempotent
+        assert agent_run.result is not None
+        result = agent_run.result
 
-                        async for content in handle_stream.stream_output(debounce_by=None):
-                            live.update(Markdown(str(content), code_theme=code_theme))
+    result_messages = result.all_messages()
 
-                        assert agent_run.result is not None
-                        result_messages = agent_run.result.all_messages()
-                        store_last_conversation(result_messages, config_dir)
+    if store_path:
+        store_conversation(result_messages, store_path)
 
-                        return result_messages
+    return result_messages
 
 
 class CustomAutoSuggest(AutoSuggestFromHistory):
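The last hunk also reshapes `ask_agent`: the streaming path moves into an `else:` branch, both branches converge on a single `result`, and writing to disk happens once at the end, only when a `store_path` was supplied. A toy sketch of that converge-then-persist flow, with stand-ins for the agent calls:

```python
from pathlib import Path

def ask(prompt: str, stream: bool, store_path: Path | None = None) -> list[str]:
    if not stream:
        result = [f'plain: {prompt}']     # stands in for agent.run(...)
    else:
        result = [f'streamed: {prompt}']  # stands in for the agent.iter(...) streaming loop
    messages = result                     # single convergence point, like result.all_messages()
    if store_path:                        # persistence is now opt-in
        store_path.parent.mkdir(parents=True, exist_ok=True)
        store_path.write_text('\n'.join(messages))
    return messages

print(ask('hello', stream=True))  # nothing written to disk
print(ask('hello', stream=False, store_path=Path('/tmp/clai-demo/last.txt')))
```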
