Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -114,4 +114,4 @@ ci-test-docs: install-deps-docs docs ## run CI test for documentation

.PHONY: langgraph-studio
langgraph-studio: ## run LangGraph Studio
uv run langgraph dev --no-reload
uv run langgraph dev
7 changes: 4 additions & 3 deletions docs/index.md
Original file line number Diff line number Diff line change
Expand Up @@ -25,9 +25,9 @@ uv run python -m template_langgraph.tasks.search_documents_on_elasticsearch
uv run python -m template_langgraph.tasks.run_kabuto_helpdesk_agent "KABUTOの起動時に、画面全体が紫色に点滅し、システムがフリーズします。"
uv run python -m template_langgraph.tasks.run_kabuto_helpdesk_agent "KABUTOのマニュアルから禅モードに関する情報を教えて下さい"

# BasicWorkflowAgent
uv run python -m template_langgraph.tasks.draw_basic_workflow_agent_mermaid_png "data/basic_workflow_agent.png"
uv run python -m template_langgraph.tasks.run_basic_workflow_agent
# ChatWithToolsAgent
uv run python -m template_langgraph.tasks.draw_chat_with_tools_agent_mermaid_png "data/chat_with_tools_agent.png"
uv run python -m template_langgraph.tasks.run_chat_with_tools_agent
# KABUTOの起動時に、画面全体が紫色に点滅し、システムがフリーズします。KABUTO のマニュアルから、関連する情報を取得したり過去のシステムのトラブルシュート事例が蓄積されたデータベースから、関連する情報を取得して質問に答えてください
# 天狗のいたずら という現象について KABUTO のマニュアルから、関連する情報を取得したり過去のシステムのトラブルシュート事例が蓄積されたデータベースから、関連する情報を取得して質問に答えてください

Expand All @@ -42,6 +42,7 @@ uv run python -m template_langgraph.tasks.run_issue_formatter_agent
### LangGraph

- [Build a custom workflow](https://langchain-ai.github.io/langgraph/concepts/why-langgraph/)
- [LangGraphの(LLMなし)Human-in-the-loopを試してみた](https://qiita.com/te_yama/items/db38201af60dec76384d)

### Sample Codes

Expand Down
5 changes: 3 additions & 2 deletions langgraph.json
Original file line number Diff line number Diff line change
Expand Up @@ -3,9 +3,10 @@
"."
],
"graphs": {
"basic_workflow_agent": "template_langgraph.agents.basic_workflow_agent.agent:graph",
"chat_with_tools_agent": "template_langgraph.agents.chat_with_tools_agent.agent:graph",
"kabuto_helpdesk_agent": "template_langgraph.agents.kabuto_helpdesk_agent:graph",
"issue_formatter_agent": "template_langgraph.agents.issue_formatter_agent.agent:graph"
"issue_formatter_agent": "template_langgraph.agents.issue_formatter_agent.agent:graph",
"task_decomposer_agent": "template_langgraph.agents.task_decomposer_agent.agent:graph"
},
"env": ".env"
}
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
from langchain_core.messages import ToolMessage
from langgraph.graph import END, StateGraph

from template_langgraph.agents.basic_workflow_agent.models import AgentState
from template_langgraph.agents.chat_with_tools_agent.models import AgentState
from template_langgraph.llms.azure_openais import AzureOpenAiWrapper
from template_langgraph.loggers import get_logger
from template_langgraph.tools.elasticsearch_tool import search_elasticsearch
Expand Down Expand Up @@ -36,7 +36,7 @@ def __call__(self, inputs: dict):
return {"messages": outputs}


class BasicWorkflowAgent:
class ChatWithToolsAgent:
def __init__(self):
self.llm = AzureOpenAiWrapper().chat_model

Expand Down Expand Up @@ -80,7 +80,7 @@ def create_graph(self):

def initialize(self, state: AgentState) -> AgentState:
"""Initialize the agent with the given state."""
logger.info(f"Initializing BasicWorkflowAgent with state: {state}")
logger.info(f"Initializing ChatWithToolsAgent with state: {state}")
# Here you can add any initialization logic if needed
return state

Expand Down Expand Up @@ -119,7 +119,7 @@ def route_tools(

def finalize(self, state: AgentState) -> AgentState:
"""Finalize the agent's work and prepare the output."""
logger.info(f"Finalizing BasicWorkflowAgent with state: {state}")
logger.info(f"Finalizing ChatWithToolsAgent with state: {state}")
# Here you can add any finalization logic if needed
return state

Expand All @@ -128,4 +128,4 @@ def draw_mermaid_png(self) -> bytes:
return self.create_graph().get_graph().draw_mermaid_png()


graph = BasicWorkflowAgent().create_graph()
graph = ChatWithToolsAgent().create_graph()
Empty file.
80 changes: 80 additions & 0 deletions template_langgraph/agents/task_decomposer_agent/agent.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
from langgraph.graph import END, StateGraph
from langgraph.types import interrupt

from template_langgraph.agents.task_decomposer_agent.models import AgentState, TaskList
from template_langgraph.llms.azure_openais import AzureOpenAiWrapper
from template_langgraph.loggers import get_logger

logger = get_logger(__name__)


class TaskDecomposerAgent:
    """LangGraph agent that decomposes a user request into a structured task list.

    The compiled graph alternates between a "chat" node (the LLM produces a
    ``TaskList`` from the conversation) and a "human_feedback" node (the graph
    pauses via ``interrupt`` to collect feedback), looping until the user
    types "end".
    """

    def __init__(self):
        # Azure OpenAI chat model used for structured task decomposition.
        self.llm = AzureOpenAiWrapper().chat_model

    def create_graph(self):
        """Create and compile the main graph for the agent.

        Returns:
            The compiled graph: chat -> human_feedback, then conditionally
            back to chat ("loopback") or to END ("end").
        """
        # Create the workflow state graph
        workflow = StateGraph(AgentState)

        # Create nodes
        workflow.add_node("chat", self.chat)
        workflow.add_node("human_feedback", self.human_feedback)

        # Create edges
        workflow.set_entry_point("chat")
        workflow.add_edge("chat", "human_feedback")
        workflow.add_conditional_edges(
            source="human_feedback",
            path=self.route_human_feedback,
            path_map={
                "loopback": "chat",
                "end": END,
            },
        )
        return workflow.compile()

    def chat(self, state: AgentState) -> AgentState:
        """Decompose the conversation so far into a structured ``TaskList``.

        Stores the result in ``state["task_list"]`` and returns the state.
        """
        # NOTE: log message corrected — previous text ("Chatting with tools")
        # was copy-pasted from ChatWithToolsAgent and misdescribed this node.
        logger.info(f"Decomposing tasks using state: {state}")

        task_list = self.llm.with_structured_output(TaskList).invoke(
            input=state["messages"],
        )
        state["task_list"] = task_list
        logger.info(f"Decomposed tasks: {task_list}")
        return state

    def human_feedback(self, state: AgentState) -> AgentState:
        """Pause the graph and collect human feedback.

        ``interrupt`` suspends execution until the caller resumes the graph;
        the resume value is appended to the conversation as a user message.
        """
        logger.info(f"Handling human feedback with state: {state}")
        feedback = interrupt("Type your feedback. If you want to end the conversation, type 'end'.")
        state["messages"].append(
            {
                "content": feedback,
                "role": "user",
            }
        )
        return state

    def route_human_feedback(
        self,
        state: AgentState,
    ) -> str:
        """Conditional-edge router after the human_feedback node.

        Returns "end" when the latest message is the literal "end"
        (case-insensitive, whitespace-stripped); otherwise returns
        "loopback" to run another decomposition round.
        """
        # The reducer has already normalized the appended dict into a message
        # object, so .content is available here.
        human_feedback = state["messages"][-1].content.strip().lower()
        if human_feedback == "end":
            logger.info("Ending the conversation as per user request.")
            return "end"
        logger.info("Looping back to chat for further processing.")
        return "loopback"

    def draw_mermaid_png(self) -> bytes:
        """Render the compiled graph as a Mermaid diagram, returned as PNG bytes."""
        return self.create_graph().get_graph().draw_mermaid_png()


graph = TaskDecomposerAgent().create_graph()
36 changes: 36 additions & 0 deletions template_langgraph/agents/task_decomposer_agent/models.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
from collections.abc import Sequence
from typing import (
Annotated,
TypedDict,
)

from langchain_core.messages import (
BaseMessage,
)
from langgraph.graph.message import add_messages
from pydantic import BaseModel, Field


class Task(BaseModel):
    """A single actionable task produced by decomposing a user request."""

    title: str = Field(..., description="Title of the task")
    description: str = Field(..., description="Description of the task")
    # Lower/higher meaning of the 1-5 scale is not defined here — see prompt/LLM usage.
    priority: int = Field(..., description="Priority of the task (1-5)")
    due_date: str | None = Field(None, description="Due date of the task (YYYY-MM-DD format)")
    assigned_to: str | None = Field(None, description="Name of the agent assigned to the task")


class TaskList(BaseModel):
    """Structured-output container: the full set of tasks decomposed from one request."""

    tasks: Sequence[Task] = Field(..., description="List of tasks to be decomposed")


class AgentInput(BaseModel):
    """Input contract for the agent.

    NOTE(review): not referenced by the agent graph code in this change —
    confirm whether it is used by an external caller or scaffolding.
    """

    request: str = Field(..., description="Request from the user")


class AgentOutput(BaseModel):
    """Output contract for the agent.

    NOTE(review): not referenced by the agent graph code in this change —
    confirm whether it is used by an external caller or scaffolding.
    """

    response: str = Field(..., description="Response from the agent")


class AgentState(TypedDict):
    """Mutable graph state passed between TaskDecomposerAgent nodes."""

    # Conversation history; the add_messages reducer merges node updates
    # (including plain role/content dicts) instead of overwriting the list.
    messages: Annotated[Sequence[BaseMessage], add_messages]
    # Most recent structured decomposition produced by the chat node.
    task_list: TaskList
11 changes: 0 additions & 11 deletions template_langgraph/tasks/draw_basic_workflow_agent_mermaid_png.py

This file was deleted.

11 changes: 11 additions & 0 deletions template_langgraph/tasks/draw_chat_with_tools_agent_mermaid_png.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
import sys

from template_langgraph.agents.chat_with_tools_agent.agent import ChatWithToolsAgent

if __name__ == "__main__":
    # Render the agent graph as a Mermaid PNG; an optional CLI argument
    # overrides the default output location.
    output_path = sys.argv[1] if len(sys.argv) > 1 else "data/chat_with_tools_agent.png"

    diagram = ChatWithToolsAgent().draw_mermaid_png()
    with open(output_path, "wb") as f:
        f.write(diagram)
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import logging

from template_langgraph.agents.basic_workflow_agent.agent import AgentState
from template_langgraph.agents.basic_workflow_agent.agent import graph as basic_workflow_agent_graph
from template_langgraph.agents.chat_with_tools_agent.agent import AgentState
from template_langgraph.agents.chat_with_tools_agent.agent import graph as chat_with_tools_agent_graph
from template_langgraph.loggers import get_logger

logger = get_logger(__name__)
Expand All @@ -11,7 +11,7 @@
def stream_graph_updates(
state: AgentState,
) -> dict:
for event in basic_workflow_agent_graph.stream(input=state):
for event in chat_with_tools_agent_graph.stream(input=state):
logger.info("-" * 20)
logger.info(f"Event: {event}")
return event
Expand Down