diff --git a/clai/README.md b/clai/README.md
index 01a7e2b8d8..dfd46e76ff 100644
--- a/clai/README.md
+++ b/clai/README.md
@@ -54,7 +54,7 @@ Either way, running `clai` will start an interactive session where you can chat
 ## Help
 
 ```
-usage: clai [-h] [-m [MODEL]] [-a AGENT] [-l] [-t [CODE_THEME]] [--no-stream] [--version] [prompt]
+usage: clai [-h] [-m [MODEL]] [-a AGENT] [-l] [-t [CODE_THEME]] [--no-stream] [--tui] [--version] [prompt]
 
 Pydantic AI CLI v...
 
@@ -77,5 +77,6 @@ options:
   -t [CODE_THEME], --code-theme [CODE_THEME]
                         Which colors to use for code, can be "dark", "light" or any theme from pygments.org/styles/. Defaults to "dark" which works well on dark terminals.
   --no-stream           Disable streaming from the model
+  --tui                 Launch clai as a TUI application
   --version             Show version and exit
 ```
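For orientation, the new flag amounts to constructing and running the Textual app added below. A minimal sketch, not part of the diff; the history-file path is illustrative (the real `cli()` passes `PYDANTIC_AI_HOME / PROMPT_HISTORY_FILENAME`):

```python
from pathlib import Path

from pydantic_ai import Agent
from pydantic_ai._cli.tui import CLAIApp

# Roughly what `clai --tui` does, per the cli() change below.
# The history path here is an assumption, not the real default.
app = CLAIApp(
    Agent('openai:gpt-4.1'),
    Path.home() / '.clai-prompt-history.txt',
    title='clai - Pydantic AI CLI',
)
app.run()
```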
diff --git a/pydantic_ai_slim/pydantic_ai/_cli.py b/pydantic_ai_slim/pydantic_ai/_cli/__init__.py
similarity index 86%
rename from pydantic_ai_slim/pydantic_ai/_cli.py
rename to pydantic_ai_slim/pydantic_ai/_cli/__init__.py
index bc3f2a271b..809e403513 100644
--- a/pydantic_ai_slim/pydantic_ai/_cli.py
+++ b/pydantic_ai_slim/pydantic_ai/_cli/__init__.py
@@ -14,13 +14,14 @@
 
 from typing_inspection.introspection import get_literal_values
 
-from . import __version__
-from ._run_context import AgentDepsT
-from .agent import AbstractAgent, Agent
-from .exceptions import UserError
-from .messages import ModelMessage, TextPart
-from .models import KnownModelName, infer_model
-from .output import OutputDataT
+from .. import __version__
+from .._run_context import AgentDepsT
+from ..agent import AbstractAgent, Agent
+from ..exceptions import UserError
+from ..messages import ModelMessage, TextPart
+from ..models import KnownModelName, infer_model
+from ..output import OutputDataT
+from .tui import CLAIApp
 
 try:
     import argcomplete
@@ -102,7 +103,7 @@ def cli_exit(prog_name: str = 'pai'):  # pragma: no cover
     sys.exit(cli(prog_name=prog_name))
 
 
-def cli(  # noqa: C901
+def cli(
     args_list: Sequence[str] | None = None, *, prog_name: str = 'pai', default_model: str = 'openai:gpt-4.1'
 ) -> int:
     """Run the CLI and return the exit code for the process."""
@@ -149,18 +150,19 @@ def cli(  # noqa: C901
         default='dark',
     )
     parser.add_argument('--no-stream', action='store_true', help='Disable streaming from the model')
+    parser.add_argument('--tui', action='store_true', help='Launch clai as a TUI application')
     parser.add_argument('--version', action='store_true', help='Show version and exit')
 
     argcomplete.autocomplete(parser)
     args = parser.parse_args(args_list)
 
     console = Console()
-    name_version = f'[green]{prog_name} - Pydantic AI CLI v{__version__}[/green]'
+    name_version = f'{prog_name} - Pydantic AI CLI v{__version__}'
     if args.version:
-        console.print(name_version, highlight=False)
+        console.print(wrap_color(name_version, 'green'), highlight=False)
         return 0
     if args.list_models:
-        console.print(f'{name_version}\n\n[green]Available models:[/green]')
+        console.print(wrap_color(f'{name_version}\n\nAvailable models:', 'green'))
         for model in qualified_model_names:
             console.print(f'  {model}', highlight=False)
         return 0
@@ -185,19 +187,22 @@ def cli(  # noqa: C901
     try:
         agent.model = infer_model(args.model or default_model)
     except UserError as e:
-        console.print(f'Error initializing [magenta]{args.model}[/magenta]:\n[red]{e}[/red]')
+        console.print(f'Error initializing {wrap_color(args.model, "magenta")}:\n{wrap_color(e, "red")}')
         return 1
 
     model_name = agent.model if isinstance(agent.model, str) else f'{agent.model.system}:{agent.model.model_name}'
-    if args.agent and model_arg_set:
-        console.print(
-            f'{name_version} using custom agent [magenta]{args.agent}[/magenta] with [magenta]{model_name}[/magenta]',
-            highlight=False,
+
+    if args.tui:
+        app = CLAIApp(
+            agent,
+            PYDANTIC_AI_HOME / PROMPT_HISTORY_FILENAME,
+            prompt=args.prompt,
+            title=title(name_version, args.agent, model_name, tui=args.tui),
         )
-    elif args.agent:
-        console.print(f'{name_version} using custom agent [magenta]{args.agent}[/magenta]', highlight=False)
-    else:
-        console.print(f'{name_version} with [magenta]{model_name}[/magenta]', highlight=False)
+        app.run()
+        return 0
+
+    console.print(title(name_version, args.agent, model_name, tui=args.tui), highlight=False)
 
     stream = not args.no_stream
     if args.code_theme == 'light':
@@ -366,3 +371,24 @@ def handle_slash_command(
     else:
         console.print(f'[red]Unknown command[/red] [magenta]`{ident_prompt}`[/magenta]')
     return None, multiline
+
+
+def wrap_color(obj: Any, color: str) -> str:
+    return f'[{color}]{obj}[/{color}]'
+
+
+def title(name_version: str, agent: str | None = None, model: str | None = None, tui: bool = False) -> str:
+    if tui:
+        if agent and model:
+            return f'{name_version} using custom agent **{agent}** with `{model}`'
+        elif agent:
+            return f'{name_version} using custom agent **{agent}**'
+        else:
+            return f'{name_version} with `{model}`'
+    else:
+        if agent and model:
+            return f'{wrap_color(name_version, "green")} using custom agent {wrap_color(agent, "magenta")} with {wrap_color(model, "magenta")}'
+        elif agent:
+            return f'{wrap_color(name_version, "green")} using custom agent {wrap_color(agent, "magenta")}'
+        else:
+            return f'{wrap_color(name_version, "green")} with {wrap_color(model, "magenta")}'
diff --git a/pydantic_ai_slim/pydantic_ai/_cli/clai.tcss b/pydantic_ai_slim/pydantic_ai/_cli/clai.tcss
new file mode 100644
index 0000000000..8e204e7b66
--- /dev/null
+++ b/pydantic_ai_slim/pydantic_ai/_cli/clai.tcss
@@ -0,0 +1,77 @@
+Conversation {
+    height: auto;
+    max-height: 1fr;
+
+    #contents {
+        layout: stream;
+        height: 1fr;
+    }
+
+    #contents > * {
+        margin-bottom: 1;
+    }
+
+    Prompt {
+        height: auto;
+        padding: 0 0 0 1;
+        #prompt {
+            padding-left: 0;
+            color: $text-primary;
+            text-style: bold;
+        }
+        Input {
+            background: transparent;
+            padding: 0 1;
+            border: none;
+            height: 1;
+        }
+        TextArea {
+            background: transparent;
+            height: auto;
+            min-height: 3;
+        }
+    }
+
+    UserText {
+        background: black 10%;
+        padding: 1 0;
+        border-left: wide $success;
+        #prompt {
+            color: $text-muted;
+        }
+        #message {
+            color: $text-muted;
+            padding: 0 1;
+        }
+    }
+
+    Response {
+        padding: 0 1 0 1;
+        & > MarkdownBlock {
+            padding: 0;
+            &:last-child {
+                margin-bottom: 0;
+            }
+        }
+    }
+
+    ErrorMessage {
+        background: $error 10%;
+        color: $text-error;
+    }
+}
+
+Footer {
+    background: black 10%;
+    .footer-key--key {
+        color: $text;
+        background: transparent;
+        text-style: bold;
+        padding: 0 1;
+    }
+    .footer-key--description {
+        padding: 0 1 0 0;
+        color: $text-muted;
+        background: $footer-description-background;
+    }
+}
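A quick sketch (not part of the diff) of how the two `title()` branches above render the same inputs: Rich console markup for the plain CLI, Markdown for the TUI. The version string is illustrative:

```python
name_version = 'clai - Pydantic AI CLI v1.0.0'  # illustrative version string

print(title(name_version, model='openai:gpt-4.1', tui=False))
# [green]clai - Pydantic AI CLI v1.0.0[/green] with [magenta]openai:gpt-4.1[/magenta]

print(title(name_version, model='openai:gpt-4.1', tui=True))
# clai - Pydantic AI CLI v1.0.0 with `openai:gpt-4.1`
```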
diff --git a/pydantic_ai_slim/pydantic_ai/_cli/tui.py b/pydantic_ai_slim/pydantic_ai/_cli/tui.py
new file mode 100644
index 0000000000..7158735c9a
--- /dev/null
+++ b/pydantic_ai_slim/pydantic_ai/_cli/tui.py
@@ -0,0 +1,383 @@
+from __future__ import annotations
+
+from asyncio import Queue
+from dataclasses import dataclass
+from pathlib import Path
+from string import Template
+
+from prompt_toolkit.history import FileHistory
+from textual import containers, getters, on, work
+from textual.app import App, ComposeResult
+from textual.binding import Binding
+from textual.geometry import clamp
+from textual.message import Message
+from textual.reactive import reactive, var
+from textual.screen import Screen
+from textual.suggester import SuggestFromList
+from textual.widget import Widget
+from textual.widgets import Footer, Input, Label, Markdown, Static, TextArea
+from textual.widgets.input import Selection
+
+from pydantic_ai._run_context import AgentDepsT
+from pydantic_ai.agent import Agent
+from pydantic_ai.messages import ModelMessage
+from pydantic_ai.output import OutputDataT
+
+DEFAULT_THEME = 'nord'
+
+
+class CLAIApp(App[None]):
+    """The CLAI TUI app."""
+
+    BINDING_GROUP_TITLE = 'App'
+    CSS_PATH = 'clai.tcss'
+
+    BINDINGS = [Binding('ctrl+c', 'app.quit', 'Exit', priority=True)]
+
+    def __init__(
+        self,
+        agent: Agent[AgentDepsT, OutputDataT],
+        history_path: Path,
+        prompt: str | None = None,
+        title: str | None = None,
+        *,
+        # TODO(Marcelo): We need to expose a way to create the deps object.
+        _deps: AgentDepsT = None,
+    ):
+        super().__init__()
+        self._agent = agent
+        self.history_path = history_path
+        self.title = title or 'Pydantic AI CLI'
+        self._prompt = prompt
+        self._deps = _deps
+
+    def on_load(self) -> None:
+        """Called before entering application mode."""
+        # Set the default theme here to avoid a flash of a different theme
+        self.theme = DEFAULT_THEME
+
+    def get_default_screen(self) -> MainScreen:
+        return MainScreen(self._agent, self.history_path, self.title, prompt=self._prompt, deps=self._deps)
+
+
+HELP = Template("""\
+## $title
+
+- **Powered by Pydantic AI**
+
+  The Python agent framework designed to make it less painful to build production grade applications with Generative AI.
+
+| Command | Purpose |
+| --- | --- |
+| `/markdown` | Show markdown output of last question. |
+| `/multiline` | Enable multiline mode. |
+| `/exit` | Exit CLAI. |
+""")
+
+
+class ErrorMessage(Static):
+    """An error message for the user."""
+
+
+class Response(Markdown):
+    """Response from the agent."""
+
+
+class UserText(containers.HorizontalGroup):
+    """Copy of what the user prompted."""
+
+    def __init__(self, prompt: str) -> None:
+        self._prompt = prompt
+        super().__init__()
+
+    def compose(self) -> ComposeResult:
+        yield Label('clai ➤', id='prompt')
+        yield Label(self._prompt, id='message')
+
+
+class PromptInput(Input):
+    """Custom prompt to disable maximize."""
+
+    BINDING_GROUP_TITLE = 'Prompt'
+    ALLOW_MAXIMIZE = False
+
+
+class PromptTextArea(TextArea):
+    """A custom textarea."""
+
+    BINDING_GROUP_TITLE = 'Prompt'
+
+
+class Prompt(containers.HorizontalGroup, can_focus=False):
+    """Takes input from the user."""
+
+    BINDINGS = [
+        Binding('shift+up', 'history(-1)', 'History up', priority=True),
+        Binding('shift+down', 'history(+1)', 'History down', priority=True),
+        Binding('ctrl+j', 'submit', 'Submit prompt', key_display='shift+⏎', priority=True),
+        Binding('escape', 'escape', 'Exit multiline'),
+    ]
+
+    history_position = var(0, bindings=True)
+    multiline = reactive(False)
+
+    input = getters.query_one('#prompt-input', Input)
+    text_area = getters.query_one('#prompt-textarea', TextArea)
+
+    @dataclass
+    class Submitted(Message):
+        """Prompt text was submitted."""
+
+        value: str
+
+    def __init__(self, history: FileHistory, id: str | None = None) -> None:
+        self.history = history
+        self.history_strings: list[str] = []
+        self.edit_prompt = ''
+        super().__init__(id=id)
+
+    def compose(self) -> ComposeResult:
+        yield Label('clai ➤', id='prompt')
+        yield PromptInput(
+            id='prompt-input',
+            placeholder='Ask me anything',
+            suggester=SuggestFromList(
+                [
+                    '/markdown',
+                    '/multiline',
+                    '/exit',
+                ]
+            ),
+        )
+        yield PromptTextArea(
+            id='prompt-textarea',
+            language='markdown',
+            highlight_cursor_line=False,
+        )
+
+    def watch_multiline(self, multiline: bool) -> None:
+        if multiline:
+            self.input.display = False
+            self.text_area.display = True
+            self.text_area.load_text(self.input.value)
+            self.text_area.focus()
+        else:
+            self.input.display = True
+            self.text_area.display = False
+            self.input.value = self.text_area.text.partition('\n')[0]
+            self.input.focus()
+
+    @property
+    def value(self) -> str:
+        """Value of prompt."""
+        if self.multiline:
+            return self.text_area.text
+        else:
+            return self.input.value
+
+    @value.setter
+    def value(self, value: str) -> None:
+        multiline = '\n' in value
+        self.multiline = multiline
+        if multiline:
+            self.text_area.load_text(value)
+        else:
+            self.input.value = value
+            self.input.selection = Selection.cursor(len(value))
+
+    def clear(self) -> None:
+        with self.prevent(Input.Changed):
+            self.input.clear()
+        with self.prevent(TextArea.Changed):
+            self.text_area.load_text('')
+
+    async def action_history(self, direction: int) -> None:
+        if self.history_position == 0:
+            self.history_strings.clear()
+            async for prompt in self.history.load():
+                if prompt.strip():
+                    self.history_strings.append(prompt)
+            self.history_strings.reverse()
+        self.history_position = self.history_position + direction
+
+    def action_submit(self) -> None:
+        self.post_message(self.Submitted(self.text_area.text))
+        self.clear()
+        self.action_escape()
+        self.history_position = 0
+
+    def action_escape(self) -> None:
+        self.history_position = 0
+        self.multiline = False
+
+    def check_action(self, action: str, parameters: tuple[object, ...]) -> bool | None:
+        if action == 'history':
+            if parameters[0] == +1 and self.history_position == 0:
+                return None
+            if parameters[0] == -1 and self.history_strings and self.history_position == -len(self.history_strings):
+                return None
+        if action in ('submit', 'escape'):
+            return self.multiline
+        return True
+
+    def validate_history_position(self, history_position: int) -> int:
+        return clamp(history_position, -len(self.history_strings), 0)
+
+    async def watch_history_position(self, previous_position: int, position: int) -> None:
+        if previous_position == 0:
+            self.edit_prompt = self.value
+        if position == 0:
+            self.value = self.edit_prompt
+        elif position < 0:
+            self.value = self.history_strings[position]
+
+    @on(Input.Submitted)
+    def on_input_submitted(self, event: Input.Submitted) -> None:
+        self.post_message(self.Submitted(event.value))
+        self.clear()
+        self.history_position = 0
+
+
+class Contents(containers.VerticalScroll):
+    """The conversation contents."""
+
+    BINDING_GROUP_TITLE = 'Conversation'
+    BINDINGS = [Binding('tab', 'screen.focus-next', 'Focus prompt')]
+
+
+class Conversation(containers.Vertical):
+    """The conversation with the AI."""
+
+    contents = getters.query_one('#contents', containers.VerticalScroll)
+    prompt = getters.query_one(Prompt)
+
+    def __init__(self, history: FileHistory, title: str) -> None:
+        self.history = history
+        self.title = title
+        super().__init__()
+
+    def compose(self) -> ComposeResult:
+        yield Contents(id='contents')
+        yield Prompt(self.history, id='prompt')
+
+    def get_last_markdown_source(self) -> str | None:
+        """Get the source of the last markdown response, or `None` if there is no markdown response."""
+        for child in reversed(self.contents.children):
+            if isinstance(child, Markdown):
+                return child.source
+        return None
+
+    async def on_mount(self) -> None:
+        await self.post(Response(HELP.safe_substitute(title=self.title)))
+
+    async def post(self, widget: Widget) -> None:
+        await self.contents.mount(widget)
+        self.contents.anchor()
+
+    async def post_prompt(self, prompt: str) -> None:
+        await self.post(UserText(prompt))
+
+
+class MainScreen(Screen[None]):
+    """Main screen containing the conversation."""
+
+    app: CLAIApp
+
+    BINDING_GROUP_TITLE = 'Screen'
+    AUTO_FOCUS = 'Conversation Prompt Input'
+
+    conversation = getters.query_one(Conversation)
+
+    def __init__(
+        self,
+        agent: Agent[AgentDepsT, OutputDataT],
+        history_path: Path,
+        title: str,
+        *,
+        prompt: str | None = None,
+        deps: AgentDepsT = None,
+    ):
+        self.agent = agent
+        self.prompt = prompt
+        self.messages: list[ModelMessage] = []
+        self.history = FileHistory(history_path)
+        self.deps = deps
+        super().__init__()
+        self.title = title
+
+    def compose(self) -> ComposeResult:
+        yield Conversation(self.history, self.title or 'Pydantic AI CLI')
+        yield Footer()
+
+    async def on_mount(self) -> None:
+        """Runs when the widget is mounted."""
+        # Initialize the prompt queue
+        self.prompt_queue: Queue[str | None] = Queue(maxsize=10)
+        self.run_response_queue()
+        if self.prompt:
+            # Send the initial prompt
+            await self.conversation.post_prompt(self.prompt)
+            await self.ask_agent(self.prompt)
+
+    async def on_unmount(self) -> None:
+        """Called when the app exits."""
+        # Tell the response queue task to finish up
+        await self.prompt_queue.put(None)
+
+    @on(Prompt.Submitted)
+    async def on_conversation_prompt(self, event: Prompt.Submitted) -> None:
+        """Called when the user submits a prompt."""
+        prompt = event.value.strip()
+        if not prompt:
+            self.app.bell()
+            return
+        self.history.append_string(prompt)
+        if prompt.startswith('/'):
+            await self.process_slash(prompt)
+        else:
+            await self.conversation.post_prompt(prompt)
+            await self.ask_agent(prompt)
+
+    async def process_slash(self, prompt: str) -> None:
+        prompt = prompt.strip()
+        if prompt == '/markdown':
+            markdown = self.conversation.get_last_markdown_source()
+            if not markdown:
+                await self.conversation.post(ErrorMessage('No markdown to display'))
+            else:
+                await self.conversation.post(Static(markdown))
+        elif prompt == '/multiline':
+            self.conversation.prompt.multiline = not self.conversation.prompt.multiline
+        elif prompt == '/exit':
+            self.app.exit()
+        else:
+            await self.conversation.post(ErrorMessage(f'Unknown command: {prompt!r}'))
+
+    async def ask_agent(self, prompt: str) -> None:
+        """Send the prompt to the agent."""
+        await self.prompt_queue.put(prompt)
+
+    async def post_response(self) -> Response:
+        """Post an (initially hidden) response widget and return it."""
+        response = Response()
+        response.display = False
+        await self.conversation.post(response)
+        return response
+
+    @work
+    async def run_response_queue(self) -> None:
+        """Listen to the prompt queue, post prompts, and stream the responses."""
+        while (prompt := await self.prompt_queue.get()) is not None:
+            response = await self.post_response()
+            markdown_stream = Markdown.get_stream(response)
+            try:
+                async with self.agent.iter(prompt, message_history=self.messages, deps=self.deps) as agent_run:
+                    async for node in agent_run:
+                        if Agent.is_model_request_node(node):
+                            async with node.stream(agent_run.ctx) as handle_stream:
+                                async for fragment in handle_stream.stream_text(delta=True, debounce_by=None):
+                                    await markdown_stream.write(fragment)
+                response.display = True
+                assert agent_run.result is not None
+                self.messages[:] = agent_run.result.all_messages()
+            finally:
+                await markdown_stream.stop()
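The core of `run_response_queue` above is the agent's node-streaming API. A standalone sketch (not part of the diff) of the same loop without Textual, printing text deltas instead of feeding a Markdown stream; the model name is illustrative:

```python
import asyncio

from pydantic_ai import Agent

agent = Agent('openai:gpt-4.1')  # model name is illustrative


async def main() -> None:
    # Same pattern as run_response_queue: walk the run's nodes and, on each
    # model request, stream text deltas as they arrive.
    async with agent.iter('Hello!') as agent_run:
        async for node in agent_run:
            if Agent.is_model_request_node(node):
                async with node.stream(agent_run.ctx) as handle_stream:
                    async for fragment in handle_stream.stream_text(delta=True, debounce_by=None):
                        print(fragment, end='', flush=True)


asyncio.run(main())
```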
"sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1", size = 87528, upload-time = "2023-06-03T06:41:11.019Z" }, ] +[package.optional-dependencies] +linkify = [ + { name = "linkify-it-py" }, +] +plugins = [ + { name = "mdit-py-plugins" }, +] + [[package]] name = "markdownify" version = "0.14.1" @@ -1937,6 +1957,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/f2/6f/94a7344f6d634fe3563bea8b33bccedee37f2726f7807e9a58440dc91627/mdformat-0.7.22-py3-none-any.whl", hash = "sha256:61122637c9e1d9be1329054f3fa216559f0d1f722b7919b060a8c2a4ae1850e5", size = 34447, upload-time = "2025-01-30T18:00:48.708Z" }, ] +[[package]] +name = "mdit-py-plugins" +version = "0.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown-it-py" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b2/fd/a756d36c0bfba5f6e39a1cdbdbfdd448dc02692467d83816dff4592a1ebc/mdit_py_plugins-0.5.0.tar.gz", hash = "sha256:f4918cb50119f50446560513a8e311d574ff6aaed72606ddae6d35716fe809c6", size = 44655, upload-time = "2025-08-11T07:25:49.083Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fb/86/dd6e5db36df29e76c7a7699123569a4a18c1623ce68d826ed96c62643cae/mdit_py_plugins-0.5.0-py3-none-any.whl", hash = "sha256:07a08422fc1936a5d26d146759e9155ea466e842f5ab2f7d2266dd084c8dab1f", size = 57205, upload-time = "2025-08-11T07:25:47.597Z" }, +] + [[package]] name = "mdurl" version = "0.1.2" @@ -3297,6 +3329,7 @@ cli = [ { name = "prompt-toolkit" }, { name = "pyperclip" }, { name = "rich" }, + { name = "textual" }, ] cohere = [ { name = "cohere", marker = "sys_platform != 'emscripten'" }, @@ -3379,6 +3412,7 @@ requires-dist = [ { name = "tavily-python", marker = "extra == 'tavily'", specifier = ">=0.5.0" }, { name = "temporalio", marker = "extra == 'temporal'", specifier = "==1.18.0" }, { name = "tenacity", marker = "extra == 'retries'", specifier = ">=8.2.3" }, + { name = "textual", marker = "extra == 'cli'", specifier = ">=5.3.0" }, { name = "typing-inspection", specifier = ">=0.4.0" }, ] provides-extras = ["a2a", "ag-ui", "anthropic", "bedrock", "cli", "cohere", "dbos", "duckduckgo", "evals", "google", "groq", "huggingface", "logfire", "mcp", "mistral", "openai", "retries", "tavily", "temporal", "vertexai"] @@ -3541,11 +3575,11 @@ wheels = [ [[package]] name = "pygments" -version = "2.19.1" +version = "2.19.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/7c/2d/c3338d48ea6cc0feb8446d8e6937e1408088a72a39937982cc6111d17f84/pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f", size = 4968581, upload-time = "2025-01-06T17:26:30.443Z" } +sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/8a/0b/9fcc47d19c48b59121088dd6da2488a49d5f72dacf8262e2790a1d2c7d15/pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c", size = 1225293, upload-time = "2025-01-06T17:26:25.553Z" }, + { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = 
"sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, ] [[package]] @@ -4270,6 +4304,22 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d2/3f/8ba87d9e287b9d385a02a7114ddcef61b26f86411e121c9003eb509a1773/tenacity-8.5.0-py3-none-any.whl", hash = "sha256:b594c2a5945830c267ce6b79a166228323ed52718f30302c1359836112346687", size = 28165, upload-time = "2024-07-05T07:25:29.591Z" }, ] +[[package]] +name = "textual" +version = "6.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown-it-py", extra = ["linkify", "plugins"] }, + { name = "platformdirs" }, + { name = "pygments" }, + { name = "rich" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/18/e6/5b3aa6cc30a40246af587f3802f219f326981491a1a6b0623f07f0c4029f/textual-6.2.0.tar.gz", hash = "sha256:1561aeea3a6e9d8fa7bc3e2fcb2bb1d0c7a04e2661c0bb9fa5021cc9f1e905da", size = 1570354, upload-time = "2025-09-30T14:23:01.793Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ca/97/1e6fad473147f825b1d3bec807cbf34c0c24aa6b61478df8d630cc664a44/textual-6.2.0-py3-none-any.whl", hash = "sha256:d9bb3b997a8a37687faeb0201978e7b7baf0e4644f39468f34a7bbad659bbbae", size = 710628, upload-time = "2025-09-30T14:23:00.056Z" }, +] + [[package]] name = "tiktoken" version = "0.9.0" @@ -4527,6 +4577,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/0f/dd/84f10e23edd882c6f968c21c2434fe67bd4a528967067515feca9e611e5e/tzdata-2025.1-py2.py3-none-any.whl", hash = "sha256:7e127113816800496f027041c570f50bcd464a020098a3b6b199517772303639", size = 346762, upload-time = "2025-01-21T19:49:37.187Z" }, ] +[[package]] +name = "uc-micro-py" +version = "1.0.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/91/7a/146a99696aee0609e3712f2b44c6274566bc368dfe8375191278045186b8/uc-micro-py-1.0.3.tar.gz", hash = "sha256:d321b92cff673ec58027c04015fcaa8bb1e005478643ff4a500882eaab88c48a", size = 6043, upload-time = "2024-02-09T16:52:01.654Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/37/87/1f677586e8ac487e29672e4b17455758fce261de06a0d086167bb760361a/uc_micro_py-1.0.3-py3-none-any.whl", hash = "sha256:db1dffff340817673d7b466ec86114a9dc0e9d4d9b5ba229d9d60e5c12600cd5", size = 6229, upload-time = "2024-02-09T16:52:00.371Z" }, +] + [[package]] name = "urllib3" version = "2.3.0"