
Commit 125a06a

update from scratch agent examples (#17843)

1 parent: cb803c1

File tree

2 files changed: +269 -54 lines changed

docs/docs/examples/workflow/function_calling_agent.ipynb

Lines changed: 131 additions & 19 deletions
@@ -75,8 +75,9 @@
 "\n",
 "To handle these steps, we need to define a few events:\n",
 "1. An event to handle new messages and prepare the chat history\n",
-"2. An event to trigger tool calls\n",
-"3. An event to handle the results of tool calls\n",
+"2. An event to handle streaming responses\n",
+"3. An event to trigger tool calls\n",
+"4. An event to handle the results of tool calls\n",
 "\n",
 "The other steps will use the built-in `StartEvent` and `StopEvent` events."
 ]
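
For reference, here is what the full set of event definitions converges on after this change. This is a sketch assembled from the hunks below; the name and shape of the tool-result event (`FunctionOutputEvent`) come from the unchanged parts of the notebook rather than from this diff, so treat them as assumptions:

```python
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import ToolOutput, ToolSelection
from llama_index.core.workflow import Event


class InputEvent(Event):
    """Carries the prepared chat history into the LLM step."""

    input: list[ChatMessage]


class StreamEvent(Event):
    """A single streamed token delta, written to the event stream."""

    delta: str


class ToolCallEvent(Event):
    """Requests execution of the tool calls the LLM selected."""

    tool_calls: list[ToolSelection]


class FunctionOutputEvent(Event):
    """Holds one tool's result (name and shape assumed from the notebook)."""

    output: ToolOutput
```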
@@ -96,6 +97,10 @@
 "    input: list[ChatMessage]\n",
 "\n",
 "\n",
+"class StreamEvent(Event):\n",
+"    delta: str\n",
+"\n",
+"\n",
 "class ToolCallEvent(Event):\n",
 "    tool_calls: list[ToolSelection]\n",
 "\n",
@@ -126,7 +131,14 @@
 "from llama_index.core.llms.function_calling import FunctionCallingLLM\n",
 "from llama_index.core.memory import ChatMemoryBuffer\n",
 "from llama_index.core.tools.types import BaseTool\n",
-"from llama_index.core.workflow import Workflow, StartEvent, StopEvent, step\n",
+"from llama_index.core.workflow import (\n",
+"    Context,\n",
+"    Workflow,\n",
+"    StartEvent,\n",
+"    StopEvent,\n",
+"    step,\n",
+")\n",
+"from llama_index.llms.openai import OpenAI\n",
 "\n",
 "\n",
 "class FuncationCallingAgent(Workflow):\n",
@@ -143,51 +155,71 @@
 "        self.llm = llm or OpenAI()\n",
 "        assert self.llm.metadata.is_function_calling_model\n",
 "\n",
-"        self.memory = ChatMemoryBuffer.from_defaults(llm=llm)\n",
-"        self.sources = []\n",
-"\n",
 "    @step\n",
-"    async def prepare_chat_history(self, ev: StartEvent) -> InputEvent:\n",
+"    async def prepare_chat_history(\n",
+"        self, ctx: Context, ev: StartEvent\n",
+"    ) -> InputEvent:\n",
 "        # clear sources\n",
-"        self.sources = []\n",
+"        await ctx.set(\"sources\", [])\n",
+"\n",
+"        # check if memory is setup\n",
+"        memory = await ctx.get(\"memory\", default=None)\n",
+"        if not memory:\n",
+"            memory = ChatMemoryBuffer.from_defaults(llm=self.llm)\n",
 "\n",
 "        # get user input\n",
 "        user_input = ev.input\n",
 "        user_msg = ChatMessage(role=\"user\", content=user_input)\n",
-"        self.memory.put(user_msg)\n",
+"        memory.put(user_msg)\n",
 "\n",
 "        # get chat history\n",
-"        chat_history = self.memory.get()\n",
+"        chat_history = memory.get()\n",
+"\n",
+"        # update context\n",
+"        await ctx.set(\"memory\", memory)\n",
+"\n",
 "        return InputEvent(input=chat_history)\n",
 "\n",
 "    @step\n",
 "    async def handle_llm_input(\n",
-"        self, ev: InputEvent\n",
+"        self, ctx: Context, ev: InputEvent\n",
 "    ) -> ToolCallEvent | StopEvent:\n",
 "        chat_history = ev.input\n",
 "\n",
-"        response = await self.llm.achat_with_tools(\n",
+"        # stream the response\n",
+"        response_stream = await self.llm.astream_chat_with_tools(\n",
 "            self.tools, chat_history=chat_history\n",
 "        )\n",
-"        self.memory.put(response.message)\n",
+"        async for response in response_stream:\n",
+"            ctx.write_event_to_stream(StreamEvent(delta=response.delta or \"\"))\n",
+"\n",
+"        # save the final response, which should have all content\n",
+"        memory = await ctx.get(\"memory\")\n",
+"        memory.put(response.message)\n",
+"        await ctx.set(\"memory\", memory)\n",
 "\n",
+"        # get tool calls\n",
 "        tool_calls = self.llm.get_tool_calls_from_response(\n",
 "            response, error_on_no_tool_call=False\n",
 "        )\n",
 "\n",
 "        if not tool_calls:\n",
+"            sources = await ctx.get(\"sources\", default=[])\n",
 "            return StopEvent(\n",
-"                result={\"response\": response, \"sources\": [*self.sources]}\n",
+"                result={\"response\": response, \"sources\": [*sources]}\n",
 "            )\n",
 "        else:\n",
 "            return ToolCallEvent(tool_calls=tool_calls)\n",
 "\n",
 "    @step\n",
-"    async def handle_tool_calls(self, ev: ToolCallEvent) -> InputEvent:\n",
+"    async def handle_tool_calls(\n",
+"        self, ctx: Context, ev: ToolCallEvent\n",
+"    ) -> InputEvent:\n",
 "        tool_calls = ev.tool_calls\n",
 "        tools_by_name = {tool.metadata.get_name(): tool for tool in self.tools}\n",
 "\n",
 "        tool_msgs = []\n",
+"        sources = await ctx.get(\"sources\", default=[])\n",
 "\n",
 "        # call tools -- safely!\n",
 "        for tool_call in tool_calls:\n",
@@ -208,7 +240,7 @@
 "\n",
 "            try:\n",
 "                tool_output = tool(**tool_call.tool_kwargs)\n",
-"                self.sources.append(tool_output)\n",
+"                sources.append(tool_output)\n",
 "                tool_msgs.append(\n",
 "                    ChatMessage(\n",
 "                        role=\"tool\",\n",
@@ -225,10 +257,15 @@
 "                    )\n",
 "                )\n",
 "\n",
+"        # update memory\n",
+"        memory = await ctx.get(\"memory\")\n",
 "        for msg in tool_msgs:\n",
-"            self.memory.put(msg)\n",
+"            memory.put(msg)\n",
 "\n",
-"        chat_history = self.memory.get()\n",
+"        await ctx.set(\"sources\", sources)\n",
+"        await ctx.set(\"memory\", memory)\n",
+"\n",
+"        chat_history = memory.get()\n",
 "        return InputEvent(input=chat_history)"
 ]
 },
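
Since the final `StopEvent` now packs both the response and the collected tool outputs into one result dict, a caller can read them back as follows. A minimal sketch using only keys from the diff above; `ToolOutput` objects expose their text via `.content`:

```python
# A sketch: run the agent and unpack the result dict built in the StopEvent.
ret = await agent.run(input="What is (2123 + 2321) * 312?")

print(ret["response"])  # the final chat response

for tool_output in ret["sources"]:  # ToolOutput objects gathered during the run
    print(tool_output.content)
```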
@@ -345,6 +382,15 @@
 "ret = await agent.run(input=\"What is (2123 + 2321) * 312?\")"
 ]
 },
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"## Chat History\n",
+"\n",
+"By default, the workflow creates a fresh `Context` for each run. This means that the chat history is not preserved between runs. However, we can pass our own `Context` to the workflow to preserve chat history."
+]
+},
 {
 "cell_type": "code",
 "execution_count": null,
@@ -354,13 +400,79 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"assistant: The result of \\((2123 + 2321) \\times 312\\) is \\(1,386,528\\).\n"
+"Running step prepare_chat_history\n",
+"Step prepare_chat_history produced event InputEvent\n",
+"Running step handle_llm_input\n",
+"Step handle_llm_input produced event StopEvent\n",
+"assistant: Hello, Logan! How can I assist you today?\n",
+"Running step prepare_chat_history\n",
+"Step prepare_chat_history produced event InputEvent\n",
+"Running step handle_llm_input\n",
+"Step handle_llm_input produced event StopEvent\n",
+"assistant: Your name is Logan.\n"
 ]
 }
 ],
 "source": [
+"from llama_index.core.workflow import Context\n",
+"\n",
+"ctx = Context(agent)\n",
+"\n",
+"ret = await agent.run(input=\"Hello! My name is Logan.\", ctx=ctx)\n",
+"print(ret[\"response\"])\n",
+"\n",
+"ret = await agent.run(input=\"What is my name?\", ctx=ctx)\n",
 "print(ret[\"response\"])"
 ]
+},
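
Because the workflow now keeps its state in the `Context` rather than on the workflow instance, the accumulated history can also be inspected after a run. A minimal sketch, assuming the `ctx` from the cell above and that `ChatMemoryBuffer.get_all()` returns every stored message:

```python
# A sketch: pull the memory the workflow stored under the "memory" key
# and print the accumulated chat history (run in an async/notebook context).
memory = await ctx.get("memory")
for msg in memory.get_all():
    print(f"{msg.role}: {msg.content}")
```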
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"## Streaming\n",
+"\n",
+"Using the `handler` returned from the `.run()` method, we can also access the streaming events."
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"Once upon a time in a quaint little village, there lived a curious cat named Whiskers. Whiskers was no ordinary cat; he had a beautiful coat of orange and white fur that shimmered in the sunlight, and his emerald green eyes sparkled with mischief.\n",
+"\n",
+"Every day, Whiskers would explore the village, visiting the bakery for a whiff of freshly baked bread and the flower shop to sniff the colorful blooms. The villagers adored him, often leaving out little treats for their favorite feline.\n",
+"\n",
+"One sunny afternoon, while wandering near the edge of the village, Whiskers stumbled upon a hidden path that led into the woods. His curiosity piqued, he decided to follow the path, which was lined with tall trees and vibrant wildflowers. As he ventured deeper, he heard a soft, melodic sound that seemed to beckon him.\n",
+"\n",
+"Following the enchanting music, Whiskers soon found himself in a clearing where a group of woodland creatures had gathered. They were having a grand celebration, complete with dancing, singing, and a feast of berries and nuts. The animals welcomed Whiskers with open paws, inviting him to join their festivities.\n",
+"\n",
+"Whiskers, delighted by the warmth and joy of his new friends, danced and played until the sun began to set. As the sky turned shades of pink and orange, he realized it was time to return home. The woodland creatures gifted him a small, sparkling acorn as a token of their friendship.\n",
+"\n",
+"From that day on, Whiskers would often visit the clearing, sharing stories of the village and enjoying the company of his woodland friends. He learned that adventure and friendship could be found in the most unexpected places, and he cherished every moment spent in the magical woods.\n",
+"\n",
+"And so, Whiskers continued to live his life filled with curiosity, laughter, and the warmth of friendship, reminding everyone that sometimes, the best adventures are just a whisker away."
+]
+}
+],
+"source": [
+"agent = FuncationCallingAgent(\n",
+"    llm=OpenAI(model=\"gpt-4o-mini\"), tools=tools, timeout=120, verbose=False\n",
+")\n",
+"\n",
+"handler = agent.run(input=\"Hello! Write me a short story about a cat.\")\n",
+"\n",
+"async for event in handler.stream_events():\n",
+"    if isinstance(event, StreamEvent):\n",
+"        print(event.delta, end=\"\", flush=True)\n",
+"\n",
+"response = await handler\n",
+"# print(response[\"response\"])"
+]
 }
 ],
 "metadata": {
