|
1 | 1 | import os |
2 | 2 |
|
3 | 3 | import azure.identity |
| 4 | +import rich |
4 | 5 | from dotenv import load_dotenv |
5 | 6 | from langchain_mcp_adapters.client import MultiServerMCPClient |
6 | 7 | from langchain_openai import AzureChatOpenAI, ChatOpenAI |
7 | 8 | from langgraph.prebuilt import create_react_agent |
8 | | -from rich import print |
9 | 9 |
|
# Set up the client to use either Azure OpenAI or GitHub Models.
# override=True: values in the .env file win over any already-exported env vars.
load_dotenv(override=True)
@@ -41,8 +41,28 @@ async def setup_agent(): |
41 | 41 | stale_prompt_path = os.path.join(os.path.dirname(__file__), "staleprompt.md") |
42 | 42 | with open(stale_prompt_path) as f: |
43 | 43 | stale_prompt = f.read() |
| 44 | + final_text = "" |
44 | 45 | async for event in agent.astream_events({"messages": stale_prompt + " Find one issue from Azure-samples azure-search-openai-demo that is potentially closeable."}, version="v2"): |
45 | | - print(event) |
| 46 | + kind = event["event"] |
| 47 | + if kind == "on_chat_model_stream": |
| 48 | + # The event corresponding to a stream of new content (tokens or chunks of text) |
| 49 | + if chunk := event.get("data", {}).get("chunk"): |
| 50 | + final_text += chunk.content # Append the new content to the accumulated text |
| 51 | + |
| 52 | + elif kind == "on_tool_start": |
| 53 | + # The event signals that a tool is about to be called |
| 54 | + rich.print("Called ", event["name"]) # Show which tool is being called |
| 55 | + rich.print("Tool input: ") |
| 56 | + rich.print(event["data"].get("input")) # Display the input data sent to the tool |
| 57 | + |
| 58 | + elif kind == "on_tool_end": |
| 59 | + if output := event["data"].get("output"): |
| 60 | + # The event signals that a tool has finished executing |
| 61 | + rich.print("Tool output: ") |
| 62 | + rich.print(output.content) |
| 63 | + |
| 64 | + rich.print("Final response:") |
| 65 | + rich.print(final_text) |
46 | 66 |
|
47 | 67 |
|
48 | 68 | if __name__ == "__main__": |
|
0 commit comments