From 9e38c5cd5b0d5136e1549ea2b1ce2d43f094590b Mon Sep 17 00:00:00 2001
From: ks6088ts
Date: Sun, 3 Aug 2025 05:49:49 +0900
Subject: [PATCH 1/3] rename from basic workflow to chat with tools

---
 docs/index.md                                         |  6 +++---
 langgraph.json                                        |  2 +-
 .../__init__.py                                       |  0
 .../agent.py                                          | 10 +++++-----
 .../models.py                                         |  0
 .../tasks/draw_basic_workflow_agent_mermaid_png.py    | 11 -----------
 .../tasks/draw_chat_with_tools_agent_mermaid_png.py   | 11 +++++++++++
 ...workflow_agent.py => run_chat_with_tools_agent.py} |  6 +++---
 8 files changed, 23 insertions(+), 23 deletions(-)
 rename template_langgraph/agents/{basic_workflow_agent => chat_with_tools_agent}/__init__.py (100%)
 rename template_langgraph/agents/{basic_workflow_agent => chat_with_tools_agent}/agent.py (94%)
 rename template_langgraph/agents/{basic_workflow_agent => chat_with_tools_agent}/models.py (100%)
 delete mode 100644 template_langgraph/tasks/draw_basic_workflow_agent_mermaid_png.py
 create mode 100644 template_langgraph/tasks/draw_chat_with_tools_agent_mermaid_png.py
 rename template_langgraph/tasks/{run_basic_workflow_agent.py => run_chat_with_tools_agent.py} (73%)

diff --git a/docs/index.md b/docs/index.md
index d361604..bf33f8d 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -25,9 +25,9 @@
 uv run python -m template_langgraph.tasks.search_documents_on_elasticsearch
 uv run python -m template_langgraph.tasks.run_kabuto_helpdesk_agent "KABUTOの起動時に、画面全体が紫色に点滅し、システムがフリーズします。"
 uv run python -m template_langgraph.tasks.run_kabuto_helpdesk_agent "KABUTOのマニュアルから禅モードに関する情報を教えて下さい"
 
-# BasicWorkflowAgent
-uv run python -m template_langgraph.tasks.draw_basic_workflow_agent_mermaid_png "data/basic_workflow_agent.png"
-uv run python -m template_langgraph.tasks.run_basic_workflow_agent
+# ChatWithToolsAgent
+uv run python -m template_langgraph.tasks.draw_chat_with_tools_agent_mermaid_png "data/chat_with_tools_agent.png"
+uv run python -m template_langgraph.tasks.run_chat_with_tools_agent
 # KABUTOの起動時に、画面全体が紫色に点滅し、システムがフリーズします。KABUTO のマニュアルから、関連する情報を取得したり過去のシステムのトラブルシュート事例が蓄積されたデータベースから、関連する情報を取得して質問に答えてください
 # 天狗のいたずら という現象について KABUTO のマニュアルから、関連する情報を取得したり過去のシステムのトラブルシュート事例が蓄積されたデータベースから、関連する情報を取得して質問に答えてください
diff --git a/langgraph.json b/langgraph.json
index 3721d5c..487ad14 100644
--- a/langgraph.json
+++ b/langgraph.json
@@ -3,7 +3,7 @@
 {
   "dependencies": [
     "."
   ],
   "graphs": {
-    "basic_workflow_agent": "template_langgraph.agents.basic_workflow_agent.agent:graph",
+    "chat_with_tools_agent": "template_langgraph.agents.chat_with_tools_agent.agent:graph",
     "kabuto_helpdesk_agent": "template_langgraph.agents.kabuto_helpdesk_agent:graph",
     "issue_formatter_agent": "template_langgraph.agents.issue_formatter_agent.agent:graph"
   },
   "env": ".env"
 }
diff --git a/template_langgraph/agents/basic_workflow_agent/__init__.py b/template_langgraph/agents/chat_with_tools_agent/__init__.py
similarity index 100%
rename from template_langgraph/agents/basic_workflow_agent/__init__.py
rename to template_langgraph/agents/chat_with_tools_agent/__init__.py
diff --git a/template_langgraph/agents/basic_workflow_agent/agent.py b/template_langgraph/agents/chat_with_tools_agent/agent.py
similarity index 94%
rename from template_langgraph/agents/basic_workflow_agent/agent.py
rename to template_langgraph/agents/chat_with_tools_agent/agent.py
index b55777c..c11fa0c 100644
--- a/template_langgraph/agents/basic_workflow_agent/agent.py
+++ b/template_langgraph/agents/chat_with_tools_agent/agent.py
@@ -3,7 +3,7 @@
 from langchain_core.messages import ToolMessage
 from langgraph.graph import END, StateGraph
 
-from template_langgraph.agents.basic_workflow_agent.models import AgentState
+from template_langgraph.agents.chat_with_tools_agent.models import AgentState
 from template_langgraph.llms.azure_openais import AzureOpenAiWrapper
 from template_langgraph.loggers import get_logger
 from template_langgraph.tools.elasticsearch_tool import search_elasticsearch
@@ -36,7 +36,7 @@ def __call__(self, inputs: dict):
         return {"messages": outputs}
 
 
-class BasicWorkflowAgent:
+class ChatWithToolsAgent:
     def __init__(self):
         self.llm = AzureOpenAiWrapper().chat_model
 
@@ -80,7 +80,7 @@ def create_graph(self):
 
     def initialize(self, state: AgentState) -> AgentState:
         """Initialize the agent with the given state."""
-        logger.info(f"Initializing BasicWorkflowAgent with state: {state}")
+        logger.info(f"Initializing ChatWithToolsAgent with state: {state}")
         # Here you can add any initialization logic if needed
         return state
 
@@ -119,7 +119,7 @@ def route_tools(
 
     def finalize(self, state: AgentState) -> AgentState:
         """Finalize the agent's work and prepare the output."""
-        logger.info(f"Finalizing BasicWorkflowAgent with state: {state}")
+        logger.info(f"Finalizing ChatWithToolsAgent with state: {state}")
         # Here you can add any finalization logic if needed
         return state
 
@@ -128,4 +128,4 @@ def draw_mermaid_png(self) -> bytes:
         return self.create_graph().get_graph().draw_mermaid_png()
 
 
-graph = BasicWorkflowAgent().create_graph()
+graph = ChatWithToolsAgent().create_graph()
diff --git a/template_langgraph/agents/basic_workflow_agent/models.py b/template_langgraph/agents/chat_with_tools_agent/models.py
similarity index 100%
rename from template_langgraph/agents/basic_workflow_agent/models.py
rename to template_langgraph/agents/chat_with_tools_agent/models.py
diff --git a/template_langgraph/tasks/draw_basic_workflow_agent_mermaid_png.py b/template_langgraph/tasks/draw_basic_workflow_agent_mermaid_png.py
deleted file mode 100644
index 0bb2fc0..0000000
--- a/template_langgraph/tasks/draw_basic_workflow_agent_mermaid_png.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import sys
-
-from template_langgraph.agents.basic_workflow_agent.agent import BasicWorkflowAgent
-
-if __name__ == "__main__":
-    png_path = "data/basic_workflow_agent.png"
-    if len(sys.argv) > 1:
-        png_path = sys.argv[1]
-
-    with open(png_path, "wb") as f:
-        f.write(BasicWorkflowAgent().draw_mermaid_png())
diff --git a/template_langgraph/tasks/draw_chat_with_tools_agent_mermaid_png.py b/template_langgraph/tasks/draw_chat_with_tools_agent_mermaid_png.py
new file mode 100644
index 0000000..9590a7e
--- /dev/null
+++ b/template_langgraph/tasks/draw_chat_with_tools_agent_mermaid_png.py
@@ -0,0 +1,11 @@
+import sys
+
+from template_langgraph.agents.chat_with_tools_agent.agent import ChatWithToolsAgent
+
+if __name__ == "__main__":
+    png_path = "data/chat_with_tools_agent.png"
+    if len(sys.argv) > 1:
+        png_path = sys.argv[1]
+
+    with open(png_path, "wb") as f:
+        f.write(ChatWithToolsAgent().draw_mermaid_png())
diff --git a/template_langgraph/tasks/run_basic_workflow_agent.py b/template_langgraph/tasks/run_chat_with_tools_agent.py
similarity index 73%
rename from template_langgraph/tasks/run_basic_workflow_agent.py
rename to template_langgraph/tasks/run_chat_with_tools_agent.py
index b59e760..0c39c28 100644
--- a/template_langgraph/tasks/run_basic_workflow_agent.py
+++ b/template_langgraph/tasks/run_chat_with_tools_agent.py
@@ -1,7 +1,7 @@
 import logging
 
-from template_langgraph.agents.basic_workflow_agent.agent import AgentState
-from template_langgraph.agents.basic_workflow_agent.agent import graph as basic_workflow_agent_graph
+from template_langgraph.agents.chat_with_tools_agent.agent import AgentState
+from template_langgraph.agents.chat_with_tools_agent.agent import graph as chat_with_tools_agent_graph
 from template_langgraph.loggers import get_logger
 
 logger = get_logger(__name__)
@@ -11,7 +11,7 @@ def stream_graph_updates(
     state: AgentState,
 ) -> dict:
-    for event in basic_workflow_agent_graph.stream(input=state):
+    for event in chat_with_tools_agent_graph.stream(input=state):
         logger.info("-" * 20)
         logger.info(f"Event: {event}")
     return event
 

From 441b2f4e0a129c4b32c2a61c35f413f33b4765d0 Mon Sep 17 00:00:00 2001
From: ks6088ts
Date: Sun, 3 Aug 2025 06:03:39 +0900
Subject: [PATCH 2/3] add chatbot skeleton code

---
 langgraph.json                               |  3 +-
 .../agents/task_decomposer_agent/__init__.py |  0
 .../agents/task_decomposer_agent/agent.py    | 43 +++++++++++++++++++
 .../agents/task_decomposer_agent/models.py   | 32 ++++++++++++++
 4 files changed, 77 insertions(+), 1 deletion(-)
 create mode 100644 template_langgraph/agents/task_decomposer_agent/__init__.py
 create mode 100644 template_langgraph/agents/task_decomposer_agent/agent.py
 create mode 100644 template_langgraph/agents/task_decomposer_agent/models.py

diff --git a/langgraph.json b/langgraph.json
index 487ad14..f1a5bad 100644
--- a/langgraph.json
+++ b/langgraph.json
@@ -5,7 +5,8 @@
   "graphs": {
     "chat_with_tools_agent": "template_langgraph.agents.chat_with_tools_agent.agent:graph",
     "kabuto_helpdesk_agent": "template_langgraph.agents.kabuto_helpdesk_agent:graph",
-    "issue_formatter_agent": "template_langgraph.agents.issue_formatter_agent.agent:graph"
+    "issue_formatter_agent": "template_langgraph.agents.issue_formatter_agent.agent:graph",
+    "task_decomposer_agent": "template_langgraph.agents.task_decomposer_agent.agent:graph"
   },
   "env": ".env"
 }
diff --git a/template_langgraph/agents/task_decomposer_agent/__init__.py b/template_langgraph/agents/task_decomposer_agent/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/template_langgraph/agents/task_decomposer_agent/agent.py b/template_langgraph/agents/task_decomposer_agent/agent.py
new file mode 100644
index 0000000..8031526
--- /dev/null
+++ b/template_langgraph/agents/task_decomposer_agent/agent.py
@@ -0,0 +1,43 @@
+from langgraph.graph import END, StateGraph
+
+from template_langgraph.agents.chat_with_tools_agent.models import AgentState
+from template_langgraph.llms.azure_openais import AzureOpenAiWrapper
+from template_langgraph.loggers import get_logger
+
+logger = get_logger(__name__)
+
+
+class TaskDecomposerAgent:
+    def __init__(self):
+        self.llm = AzureOpenAiWrapper().chat_model
+
+    def create_graph(self):
+        """Create the main graph for the agent."""
+        # Create the workflow state graph
+        workflow = StateGraph(AgentState)
+
+        # Create nodes
+        workflow.add_node("chat", self.chat)
+
+        # Create edges
+        workflow.set_entry_point("chat")
+        workflow.add_edge("chat", END)
+
+        # Compile the graph
+        return workflow.compile()
+
+    def chat(self, state: AgentState) -> AgentState:
+        """Chat with tools using the state."""
+        logger.info(f"Chatting with tools using state: {state}")
+        return {
+            "messages": [
+                self.llm.invoke(state["messages"]),
+            ]
+        }
+
+    def draw_mermaid_png(self) -> bytes:
+        """Draw the graph in Mermaid format."""
+        return self.create_graph().get_graph().draw_mermaid_png()
+
+
+graph = TaskDecomposerAgent().create_graph()
diff --git a/template_langgraph/agents/task_decomposer_agent/models.py b/template_langgraph/agents/task_decomposer_agent/models.py
new file mode 100644
index 0000000..4319041
--- /dev/null
+++ b/template_langgraph/agents/task_decomposer_agent/models.py
@@ -0,0 +1,32 @@
+from collections.abc import Sequence
+from typing import (
+    Annotated,
+    TypedDict,
+)
+
+from langchain_core.messages import (
+    BaseMessage,
+)
+from langgraph.graph.message import add_messages
+from pydantic import BaseModel, Field
+
+
+class Task(BaseModel):
+    title: str = Field(..., description="Title of the task")
+    description: str = Field(..., description="Description of the task")
+    priority: int = Field(..., description="Priority of the task (1-5)")
+    due_date: str | None = Field(None, description="Due date of the task (YYYY-MM-DD format)")
+    assigned_to: str | None = Field(None, description="Name of the agent assigned to the task")
+
+
+class AgentInput(BaseModel):
+    request: str = Field(..., description="Request from the user")
+
+
+class AgentOutput(BaseModel):
+    response: str = Field(..., description="Response from the agent")
+
+
+class AgentState(TypedDict):
+    messages: Annotated[Sequence[BaseMessage], add_messages]
+    decomposed_tasks: Sequence[Task]
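
A minimal sketch (not part of the patch) of how the skeleton graph from PATCH 2/3 could be exercised directly, assuming the package is installed and the Azure OpenAI settings read by AzureOpenAiWrapper are configured in .env; the prompt string is illustrative only.

    # Hypothetical smoke test for the PATCH 2/3 skeleton; not included in the repository.
    from langchain_core.messages import HumanMessage

    from template_langgraph.agents.task_decomposer_agent.agent import graph

    if __name__ == "__main__":
        # The single "chat" node simply forwards the conversation to the LLM,
        # so the result is a plain chat completion appended to "messages".
        result = graph.invoke({"messages": [HumanMessage("Break down the work needed to ship the KABUTO helpdesk agent.")]})
        print(result["messages"][-1].content)

At this stage there is no structured output or human feedback yet; both arrive in PATCH 3/3 below.
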

From 3bc235e7165b9475a621e682d77dd2cb09719c55 Mon Sep 17 00:00:00 2001
From: ks6088ts
Date: Sun, 3 Aug 2025 06:21:12 +0900
Subject: [PATCH 3/3] implement task decomposer agent

---
 Makefile                                   |  2 +-
 docs/index.md                              |  1 +
 .../agents/task_decomposer_agent/agent.py  | 55 ++++++++++++++++---
 .../agents/task_decomposer_agent/models.py |  6 +-
 4 files changed, 53 insertions(+), 11 deletions(-)

diff --git a/Makefile b/Makefile
index e118fdb..6a3e94f 100644
--- a/Makefile
+++ b/Makefile
@@ -114,4 +114,4 @@ ci-test-docs: install-deps-docs docs ## run CI test for documentation
 
 .PHONY: langgraph-studio
 langgraph-studio: ## run LangGraph Studio
-	uv run langgraph dev --no-reload
+	uv run langgraph dev
diff --git a/docs/index.md b/docs/index.md
index bf33f8d..6104ee3 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -42,6 +42,7 @@ uv run python -m template_langgraph.tasks.run_issue_formatter_agent
 
 ### LangGraph
 
 - [Build a custom workflow](https://langchain-ai.github.io/langgraph/concepts/why-langgraph/)
+- [LangGraphの(LLMなし)Human-in-the-loopを試してみた](https://qiita.com/te_yama/items/db38201af60dec76384d)
 
 ### Sample Codes
diff --git a/template_langgraph/agents/task_decomposer_agent/agent.py b/template_langgraph/agents/task_decomposer_agent/agent.py
index 8031526..942d842 100644
--- a/template_langgraph/agents/task_decomposer_agent/agent.py
+++ b/template_langgraph/agents/task_decomposer_agent/agent.py
@@ -1,6 +1,7 @@
 from langgraph.graph import END, StateGraph
+from langgraph.types import interrupt
 
-from template_langgraph.agents.chat_with_tools_agent.models import AgentState
+from template_langgraph.agents.task_decomposer_agent.models import AgentState, TaskList
 from template_langgraph.llms.azure_openais import AzureOpenAiWrapper
 from template_langgraph.loggers import get_logger
 
@@ -18,22 +19,58 @@ def create_graph(self):
 
         # Create nodes
         workflow.add_node("chat", self.chat)
+        workflow.add_node("human_feedback", self.human_feedback)
 
         # Create edges
         workflow.set_entry_point("chat")
-        workflow.add_edge("chat", END)
-
-        # Compile the graph
+        workflow.add_edge("chat", "human_feedback")
+        workflow.add_conditional_edges(
+            source="human_feedback",
+            path=self.route_human_feedback,
+            path_map={
+                "loopback": "chat",
+                "end": END,
+            },
+        )
         return workflow.compile()
 
     def chat(self, state: AgentState) -> AgentState:
         """Chat with tools using the state."""
         logger.info(f"Chatting with tools using state: {state}")
-        return {
-            "messages": [
-                self.llm.invoke(state["messages"]),
-            ]
-        }
+
+        task_list = self.llm.with_structured_output(TaskList).invoke(
+            input=state["messages"],
+        )
+        state["task_list"] = task_list
+        logger.info(f"Decomposed tasks: {task_list}")
+        return state
+
+    def human_feedback(self, state: AgentState) -> AgentState:
+        """Handle human feedback."""
+        logger.info(f"Handling human feedback with state: {state}")
+        feedback = interrupt("Type your feedback. If you want to end the conversation, type 'end'.")
+        state["messages"].append(
+            {
+                "content": feedback,
+                "role": "user",
+            }
+        )
+        return state
+
+    def route_human_feedback(
+        self,
+        state: AgentState,
+    ):
+        """
+        Use in the conditional_edge to route to the HumanFeedbackNode if the last message
+        has human feedback. Otherwise, route to the end.
+        """
+        human_feedback = state["messages"][-1].content.strip().lower()
+        if human_feedback == "end":
+            logger.info("Ending the conversation as per user request.")
+            return "end"
+        logger.info("Looping back to chat for further processing.")
+        return "loopback"
 
     def draw_mermaid_png(self) -> bytes:
         """Draw the graph in Mermaid format."""
diff --git a/template_langgraph/agents/task_decomposer_agent/models.py b/template_langgraph/agents/task_decomposer_agent/models.py
index 4319041..72972f2 100644
--- a/template_langgraph/agents/task_decomposer_agent/models.py
+++ b/template_langgraph/agents/task_decomposer_agent/models.py
@@ -19,6 +19,10 @@ class Task(BaseModel):
     assigned_to: str | None = Field(None, description="Name of the agent assigned to the task")
 
 
+class TaskList(BaseModel):
+    tasks: Sequence[Task] = Field(..., description="List of tasks to be decomposed")
+
+
 class AgentInput(BaseModel):
     request: str = Field(..., description="Request from the user")
 
@@ -29,4 +33,4 @@ class AgentOutput(BaseModel):
 
 class AgentState(TypedDict):
     messages: Annotated[Sequence[BaseMessage], add_messages]
-    decomposed_tasks: Sequence[Task]
+    task_list: TaskList
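
The interrupt() introduced in PATCH 3/3 only pauses and resumes cleanly when the graph is compiled with a checkpointer. The langgraph dev server started by the Makefile target manages persistence on its own, but the module-level graph is compiled without one, so a plain script needs its own. The sketch below is not part of the patch: it rebuilds the same topology from TaskDecomposerAgent with an in-memory checkpointer so the human-in-the-loop round trip can be tried locally. The thread_id and prompt text are arbitrary, and Azure OpenAI settings are assumed to be configured in .env.

    # Hypothetical local driver for the PATCH 3/3 agent; not included in the repository.
    from langchain_core.messages import HumanMessage
    from langgraph.checkpoint.memory import MemorySaver
    from langgraph.graph import END, StateGraph
    from langgraph.types import Command

    from template_langgraph.agents.task_decomposer_agent.agent import TaskDecomposerAgent
    from template_langgraph.agents.task_decomposer_agent.models import AgentState

    agent = TaskDecomposerAgent()

    # Same nodes and edges as create_graph(), but compiled with a checkpointer so the
    # run can pause at the interrupt() inside human_feedback and be resumed later.
    workflow = StateGraph(AgentState)
    workflow.add_node("chat", agent.chat)
    workflow.add_node("human_feedback", agent.human_feedback)
    workflow.set_entry_point("chat")
    workflow.add_edge("chat", "human_feedback")
    workflow.add_conditional_edges(
        source="human_feedback",
        path=agent.route_human_feedback,
        path_map={"loopback": "chat", "end": END},
    )
    graph = workflow.compile(checkpointer=MemorySaver())

    config = {"configurable": {"thread_id": "demo"}}

    # First pass: "chat" decomposes the request, then the run pauses at human_feedback.
    state = graph.invoke({"messages": [HumanMessage("Plan the rollout of the KABUTO helpdesk agent")]}, config)
    print(state.get("__interrupt__"))  # pending interrupt prompt (recent langgraph versions surface it here)

    # Resume with feedback: anything other than "end" loops back to "chat" for another
    # decomposition pass; "end" routes to END and the final state carries task_list.
    state = graph.invoke(Command(resume="end"), config)
    print(state["task_list"])

Resuming with an actual revision request instead of "end" is what drives the loopback edge, mirroring the Human-in-the-loop article linked in docs/index.md by this commit.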