Commit 24b0070

feat(beeai-sdk): add CLI example (#943)
* WIP: llm service extension
  Signed-off-by: Jan Pokorný <[email protected]>

* feat(beeai-sdk): add CLI example
  Signed-off-by: Jan Pokorný <[email protected]>

---------

Signed-off-by: Jan Pokorný <[email protected]>
1 parent dae5f14 commit 24b0070

File tree

6 files changed: +275 -22 lines changed


apps/beeai-sdk/README.md

Lines changed: 3 additions & 3 deletions
@@ -7,13 +7,13 @@ The examples connect to the BeeAI Platform for LLM inference.
 Run using:
 
 ```bash
-uv run examples/agents/pure-a2a.py
+uv run examples/agent.py
 ```
 
-Connect to the agent using the official [CLI sample](https://github.com/a2aproject/a2a-samples/tree/main/samples/python/hosts/cli):
+Connect to the agent using the CLI:
 
 ```bash
-uv run . --agent http://localhost:8000
+uv run examples/cli.py
 ```
 
 ## Plan

apps/beeai-sdk/examples/agents/pure-a2a.py renamed to apps/beeai-sdk/examples/agent.py

Lines changed: 12 additions & 13 deletions
@@ -73,13 +73,12 @@ async def execute(
 
                     if data.update.key == "final_answer":
                         final_answer += update
-
-                    await updater.update_status(
-                        state=a2a.types.TaskState.working,
-                        message=updater.new_agent_message(
-                            parts=[a2a.types.Part(root=a2a.types.TextPart(text=update))]
-                        ),
-                    )
+                        await updater.update_status(
+                            state=a2a.types.TaskState.working,
+                            message=updater.new_agent_message(
+                                parts=[a2a.types.Part(root=a2a.types.TextPart(text=update))]
+                            ),
+                        )
             await self.context_memory[context.context_id].add(beeai_framework.backend.AssistantMessage(final_answer))
             await updater.complete()
         except BaseException as e:
@@ -91,7 +90,7 @@ async def execute(
 
 async def serve():
     host = os.getenv("HOST", "127.0.0.1")
-    port = int(os.getenv("PORT", "8000"))
+    port = int(os.getenv("PORT", "10000"))
     await uvicorn.Server(
         uvicorn.Config(
             app=a2a.server.apps.A2AStarletteApplication(
@@ -120,15 +119,15 @@ async def serve():
                         - **Customizable Configuration** - Users can enable or disable specific tools for enhanced responses.
                         """
                     ),
-                    documentationUrl="https://github.com/i-am-bee/beeai-platform/blob/main/agents/official/beeai-framework/chat",
+                    documentation_url="https://github.com/i-am-bee/beeai-platform/blob/main/agents/official/beeai-framework/chat",
                     url=f"http://{host}:{port}/",
                     version="1.0.0",
-                    defaultInputModes=["text", "text/plain"],
-                    defaultOutputModes=["text", "text/plain"],
+                    default_input_modes=["text", "text/plain"],
+                    default_output_modes=["text", "text/plain"],
                     capabilities=a2a.types.AgentCapabilities(
                         streaming=True,
-                        pushNotifications=False,
-                        stateTransitionHistory=False,
+                        push_notifications=False,
+                        state_transition_history=False,
                         extensions=[],
                     ),
                     skills=[
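
For reference, a minimal sketch of an agent card built with the snake_case field names this commit switches to. It assumes the `a2a.types` Pydantic models used in the diff above; the name, description, and skill values are placeholders, not taken from `agent.py`:

```python
# Hypothetical values for illustration only; the real card lives in
# apps/beeai-sdk/examples/agent.py. Only the field names mirror the diff above.
import a2a.types

card = a2a.types.AgentCard(
    name="example-agent",  # placeholder
    description="Chat agent built on the BeeAI framework.",  # placeholder
    documentation_url="https://github.com/i-am-bee/beeai-platform/blob/main/agents/official/beeai-framework/chat",
    url="http://127.0.0.1:10000/",
    version="1.0.0",
    default_input_modes=["text", "text/plain"],
    default_output_modes=["text", "text/plain"],
    capabilities=a2a.types.AgentCapabilities(
        streaming=True,
        push_notifications=False,
        state_transition_history=False,
        extensions=[],
    ),
    skills=[
        a2a.types.AgentSkill(  # placeholder skill
            id="chat",
            name="Chat",
            description="General conversation",
            tags=["chat"],
        )
    ],
)
```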

apps/beeai-sdk/examples/cli.py

Lines changed: 231 additions & 0 deletions
@@ -0,0 +1,231 @@
# Copyright 2025 © BeeAI a Series of LF Projects, LLC
# SPDX-License-Identifier: Apache-2.0

# Based on: https://github.com/a2aproject/a2a-samples/tree/main/samples/python/hosts/cli

import asyncio
import base64
import os
from uuid import uuid4

import a2a
import a2a.client
import a2a.types
import anyio
import asyncclick
import asyncclick.exceptions
import httpx
import yaml


@asyncclick.command()
@asyncclick.option("--agent", default="http://127.0.0.1:10000")
@asyncclick.option("--session", default=0)
@asyncclick.option("--history", default=False)
async def cli(
    agent,
    session,
    history,
):
    async with httpx.AsyncClient(timeout=30) as httpx_client:
        card_resolver = a2a.client.A2ACardResolver(httpx_client, agent)
        card = await card_resolver.get_agent_card()

        print("======= Agent Card ========")
        print(yaml.dump(card.model_dump(mode="json", exclude_none=True)))

        client = a2a.client.A2AClient(httpx_client, agent_card=card)

        continue_loop = True
        streaming = card.capabilities.streaming
        context_id = session if session > 0 else uuid4().hex

        while continue_loop:
            print("\n\n========= starting a new task ======== ")
            continue_loop, _, task_id = await complete_task(
                client=client,
                streaming=streaming,
                task_id=None,
                context_id=context_id,
            )

            if history and continue_loop:
                print("========= history ======== ")
                task_response = await client.get_task(
                    a2a.types.GetTaskRequest(
                        id=str(uuid4()), params=a2a.types.TaskQueryParams(id=task_id, history_length=10)
                    )
                )
                print(task_response.model_dump_json(include={"result": {"history": True}}))


async def complete_task(
    client: a2a.client.A2AClient,
    streaming,
    task_id,
    context_id,
):
    try:
        prompt = asyncclick.prompt("\n👤 User (CTRL-D to cancel)")
    except asyncclick.exceptions.Abort:
        print("Exiting...")
        return False, context_id, task_id

    message = a2a.types.Message(
        message_id=str(uuid4()),
        role=a2a.types.Role.user,
        parts=[a2a.types.Part(root=a2a.types.TextPart(text=prompt))],
        task_id=task_id,
        context_id=context_id,
    )

    try:
        file_path = asyncclick.prompt(
            "Select a file path to attach? (press enter to skip)",
            default="",
            show_default=False,
        )
    except asyncclick.exceptions.Abort:
        print("Exiting...")
        return False, context_id, task_id

    print("🤖 Agent: ")

    if file_path and file_path.strip() != "":
        message.parts.append(
            a2a.types.Part(
                root=a2a.types.FilePart(
                    file=a2a.types.FileWithBytes(
                        name=os.path.basename(file_path),
                        bytes=base64.b64encode(await anyio.Path(file_path).read_bytes()).decode("utf-8"),
                    )
                )
            )
        )

    payload = a2a.types.MessageSendParams(
        message=message,
        configuration=a2a.types.MessageSendConfiguration(
            accepted_output_modes=["text"],
        ),
    )

    task_result = None
    message = None
    task_completed = False
    if streaming:
        response_stream = client.send_message_streaming(
            a2a.types.SendStreamingMessageRequest(
                id=str(uuid4()),
                params=payload,
            )
        )
        printing_streaming_tokens = False
        async for result in response_stream:
            if isinstance(result.root, a2a.types.JSONRPCErrorResponse):
                if printing_streaming_tokens:
                    print()
                    printing_streaming_tokens = False
                print(f"Error: {result.root.error}, context_id: {context_id}, task_id: {task_id}")
                return False, context_id, task_id
            event = result.root.result
            context_id = event.context_id
            if isinstance(event, a2a.types.Task):
                task_id = event.id
                if printing_streaming_tokens:
                    print()
                    printing_streaming_tokens = False
                print(f"TASK => {event.model_dump_json(exclude_none=True)}")
            elif isinstance(event, a2a.types.TaskArtifactUpdateEvent):
                task_id = event.task_id
                if printing_streaming_tokens:
                    print()
                    printing_streaming_tokens = False
                print(f"ARTIFACT => {event.model_dump_json(exclude_none=True)}")
            elif isinstance(event, a2a.types.TaskStatusUpdateEvent):
                task_id = event.task_id
                if event.status.message:
                    if not printing_streaming_tokens:
                        print()
                        printing_streaming_tokens = True
                    for part in event.status.message.parts:
                        if isinstance(part.root, a2a.types.TextPart):
                            print(part.root.text, end="", flush=True)
                if event.status.state == "completed":
                    task_completed = True
            elif isinstance(event, a2a.types.Message):
                message = event

        # Upon completion of the stream. Retrieve the full task if one was made.
        if task_id and not task_completed:
            task_result_response = await client.get_task(
                a2a.types.GetTaskRequest(
                    id=str(uuid4()),
                    params=a2a.types.TaskQueryParams(id=task_id),
                )
            )
            if isinstance(task_result_response.root, a2a.types.JSONRPCErrorResponse):
                print(f"Error: {task_result_response.root.error}, context_id: {context_id}, task_id: {task_id}")
                return False, context_id, task_id
            task_result = task_result_response.root.result
    else:
        try:
            # For non-streaming, assume the response is a task or message.
            event = (
                await client.send_message(
                    a2a.types.SendMessageRequest(
                        id=str(uuid4()),
                        params=payload,
                    )
                )
            ).root.result
            if not context_id and event:
                context_id = event.context_id
            if isinstance(event, a2a.types.Task):
                if not task_id:
                    task_id = event.id
                task_result = event
            elif isinstance(event, a2a.types.Message):
                message = event
        except Exception as e:
            print("Failed to complete the call", e)

    if message:
        print(f"\n{message.model_dump_json(exclude_none=True)}")
        return True, context_id, task_id
    if task_result:
        # Don't print the contents of a file.
        task_content = task_result.model_dump_json(
            exclude={
                "history": {
                    "__all__": {
                        "parts": {
                            "__all__": {"file"},
                        },
                    },
                },
            },
            exclude_none=True,
        )
        print(f"\n{task_content}")
        ## if the result is that more input is required, loop again.
        state = a2a.types.TaskState(task_result.status.state)
        if state.name == a2a.types.TaskState.input_required.name:
            return (
                await complete_task(
                    client,
                    streaming,
                    task_id,
                    context_id,
                ),
                context_id,
                task_id,
            )
        ## task is complete
        return True, context_id, task_id
    ## Failure case, shouldn't reach
    return True, context_id, task_id


if __name__ == "__main__":
    asyncio.run(cli())
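
For a quick smoke test without the interactive prompt, a minimal non-streaming variant of the same flow might look like the sketch below. It reuses the `a2a.client` calls from `examples/cli.py` above and assumes the agent from `examples/agent.py` is listening on `http://127.0.0.1:10000`; error handling is omitted.

```python
# Minimal, non-interactive sketch of the CLI's request flow (assumes the agent
# from examples/agent.py is running on http://127.0.0.1:10000).
import asyncio
from uuid import uuid4

import a2a.client
import a2a.types
import httpx


async def ask(prompt: str) -> None:
    async with httpx.AsyncClient(timeout=30) as httpx_client:
        # Resolve the agent card, then send one non-streaming message.
        resolver = a2a.client.A2ACardResolver(httpx_client, "http://127.0.0.1:10000")
        card = await resolver.get_agent_card()
        client = a2a.client.A2AClient(httpx_client, agent_card=card)
        response = await client.send_message(
            a2a.types.SendMessageRequest(
                id=str(uuid4()),
                params=a2a.types.MessageSendParams(
                    message=a2a.types.Message(
                        message_id=str(uuid4()),
                        role=a2a.types.Role.user,
                        parts=[a2a.types.Part(root=a2a.types.TextPart(text=prompt))],
                    )
                ),
            )
        )
        # The result is a Task or a Message; print whichever came back.
        print(response.root.result.model_dump_json(exclude_none=True))


if __name__ == "__main__":
    asyncio.run(ask("Hello, agent!"))
```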

apps/beeai-sdk/pyproject.toml

Lines changed: 4 additions & 0 deletions
@@ -10,6 +10,10 @@ dependencies = [
     "beeai-framework[duckduckgo,wikipedia]>=0.1.31",
     "objprint>=0.3.0",
     "uvicorn>=0.35.0",
+    "asyncclick>=8.1.8",
+    "sse-starlette>=2.2.1",
+    "starlette>=0.47.2",
+    "anyio>=4.9.0",
 ]
 
 [build-system]
Lines changed: 0 additions & 3 deletions
@@ -1,6 +1,3 @@
 # Copyright 2025 © BeeAI a Series of LF Projects, LLC
 # SPDX-License-Identifier: Apache-2.0
 
-
-def hello() -> str:
-    return "Hello from beeai-sdk!"

apps/beeai-sdk/uv.lock

Lines changed: 25 additions & 3 deletions
Some generated files are not rendered by default.
