@@ -68,6 +68,11 @@ def __init__(
6868 self .agents_registry : dict [str , AgentCard ] = {}
6969
7070 async def get_agents (self ) -> tuple [dict [str , AgentCard ], str ]:
71+ """Retrieve agent cards from all agent URLs and render the agent prompt.
72+
73+ Returns:
74+ tuple[dict[str, AgentCard], str]: A dictionary mapping agent names to AgentCard objects, and the rendered agent prompt string.
75+ """ # noqa: E501
7176 async with httpx .AsyncClient () as httpx_client :
7277 card_resolvers = [
7378 A2ACardResolver (httpx_client , url ) for url in self .agent_urls
@@ -85,6 +90,14 @@ async def get_agents(self) -> tuple[dict[str, AgentCard], str]:
8590 return agents_registry , agent_prompt
8691
8792 def call_llm (self , prompt : str ) -> str :
93+ """Call the LLM with the given prompt and return the response as a string or generator.
94+
95+ Args:
96+ prompt (str): The prompt to send to the LLM.
97+
98+ Returns:
99+ str or Generator[str]: The LLM response as a string or generator, depending on mode.
100+ """ # noqa: E501
88101 if self .mode == 'complete' :
89102 return stream_llm (prompt )
90103
@@ -94,10 +107,31 @@ def call_llm(self, prompt: str) -> str:
94107 return result
95108
96109 async def decide (
97- self , question : str , agents_prompt : str
110+ self ,
111+ question : str ,
112+ agents_prompt : str ,
113+ called_agents : list [dict ] | None = None ,
98114 ) -> Generator [str , None ]:
115+ """Decide which agent(s) to use to answer the question.
116+
117+ Args:
118+ question (str): The question to answer.
119+ agents_prompt (str): The prompt describing available agents.
120+ called_agents (list[dict] | None): Previously called agents and their answers.
121+
122+ Returns:
123+ Generator[str, None]: The LLM's response as a generator of strings.
124+ """ # noqa: E501
125+ if called_agents :
126+ call_agent_prompt = agent_answer_template .render (
127+ called_agents = called_agents
128+ )
129+ else :
130+ call_agent_prompt = ''
99131 prompt = decide_template .render (
100- question = question , agent_prompt = agents_prompt
132+ question = question ,
133+ agent_prompt = agents_prompt ,
134+ call_agent_prompt = call_agent_prompt ,
101135 )
102136 return self .call_llm (prompt )
103137
@@ -116,6 +150,15 @@ def extract_agents(self, response: str) -> list[dict]:
116150 async def send_message_to_an_agent (
117151 self , agent_card : AgentCard , message : str
118152 ):
153+ """Send a message to a specific agent and yield the streaming response.
154+
155+ Args:
156+ agent_card (AgentCard): The agent to send the message to.
157+ message (str): The message to send.
158+
159+ Yields:
160+ str: The streaming response from the agent.
161+ """
119162 async with httpx .AsyncClient () as httpx_client :
120163 client = A2AClient (httpx_client , agent_card = agent_card )
121164 message = MessageSendParams (
@@ -137,65 +180,73 @@ async def send_message_to_an_agent(
137180 yield message .parts [0 ].root .text
138181
139182 async def stream (self , question : str ):
140- agents_registry , agent_prompt = await self .get_agents ()
141- response = ''
142- for chunk in await self .decide (question , agent_prompt ):
143- response += chunk
144- if self .token_stream_callback :
145- self .token_stream_callback (chunk )
146- yield chunk
147-
148- agents = self .extract_agents (response )
183+ """Stream the process of answering a question, possibly involving multiple agents.
184+
185+ Args:
186+ question (str): The question to answer.
187+
188+ Yields:
189+ str: Streaming output, including agent responses and intermediate steps.
190+ """ # noqa: E501
149191 agent_answers : list [dict ] = []
150- for agent in agents :
151- agent_response = ''
152- agent_card = agents_registry [agent ['name' ]]
153- yield f'<Agent name="{ agent ["name" ]} ">\n '
154- async for chunk in self .send_message_to_an_agent (
155- agent_card , agent ['prompt' ]
192+ for _ in range (3 ):
193+ agents_registry , agent_prompt = await self .get_agents ()
194+ response = ''
195+ for chunk in await self .decide (
196+ question , agent_prompt , agent_answers
156197 ):
157- agent_response += chunk
198+ response += chunk
158199 if self .token_stream_callback :
159200 self .token_stream_callback (chunk )
160201 yield chunk
161- yield '</Agent>\n '
162- match = re .search (
163- r'<Answer>(.*?)</Answer>' , agent_response , re .DOTALL
164- )
165- answer = match .group (1 ).strip () if match else agent_response
166- if answer :
167- agent_answers .append (
168- {
169- 'name' : agent ['name' ],
170- 'prompt' : agent ['prompt' ],
171- 'answer' : answer ,
172- }
173- )
202+
203+ agents = self .extract_agents (response )
204+ if agents :
205+ for agent in agents :
206+ agent_response = ''
207+ agent_card = agents_registry [agent ['name' ]]
208+ yield f'<Agent name="{ agent ["name" ]} ">\n '
209+ async for chunk in self .send_message_to_an_agent (
210+ agent_card , agent ['prompt' ]
211+ ):
212+ agent_response += chunk
213+ if self .token_stream_callback :
214+ self .token_stream_callback (chunk )
215+ yield chunk
216+ yield '</Agent>\n '
217+ match = re .search (
218+ r'<Answer>(.*?)</Answer>' , agent_response , re .DOTALL
219+ )
220+ answer = match .group (1 ).strip () if match else agent_response
221+ agent_answers .append (
222+ {
223+ 'name' : agent ['name' ],
224+ 'prompt' : agent ['prompt' ],
225+ 'answer' : answer ,
226+ }
227+ )
174228 else :
175- print ('<Answer> tag not found' )
176- print (agent_answers )
229+ return
177230
178231
if __name__ == '__main__':
    import asyncio
    import colorama

    async def main():
        """Run the orchestrating Agent against a local A2A server and print its stream."""
        agent = Agent(
            mode='stream',
            token_stream_callback=None,
            agent_urls=['http://localhost:9999/'],
        )

        async for chunk in agent.stream('What is A2A protocol?'):
            # Color the agent-delimiting tags so they stand out; everything
            # else is printed with the current console color.
            if chunk.startswith('<Agent name="'):
                prefix = colorama.Fore.CYAN
            elif chunk.startswith('</Agent>'):
                prefix = colorama.Fore.RESET
            else:
                prefix = ''
            print(prefix + chunk, end='', flush=True)

    asyncio.run(main())
0 commit comments