class BasicWorkflowAgent:
    """Minimal three-step LangGraph workflow: initialize -> do_something -> finalize.

    ``do_something`` sends the accumulated conversation to the Azure OpenAI
    chat model and appends the assistant's reply to the message history via
    the ``add_messages`` reducer declared on ``AgentState.messages``.
    """

    def __init__(self):
        # Resolve the chat model once at construction time.
        self.llm = AzureOpenAiWrapper().chat_model

    def create_graph(self):
        """Build and compile the linear workflow graph."""
        workflow = StateGraph(AgentState)

        # Nodes
        workflow.add_node("initialize", self.initialize)
        workflow.add_node("do_something", self.do_something)
        workflow.add_node("finalize", self.finalize)

        # Linear path: START -> initialize -> do_something -> finalize -> END
        workflow.add_edge(START, "initialize")
        workflow.add_edge("initialize", "do_something")
        workflow.add_edge("do_something", "finalize")
        workflow.add_edge("finalize", END)

        return workflow.compile()

    def initialize(self, state: AgentState) -> AgentState:
        """Pre-processing hook; currently a logging pass-through."""
        logger.info(f"Initializing BasicWorkflowAgent with state: {state}")
        return state

    def do_something(self, state: AgentState) -> AgentState:
        """Invoke the chat model on the conversation so far.

        Returns only the new assistant message as a partial state update;
        the ``add_messages`` reducer merges it into ``messages``. This avoids
        mutating the incoming state in place and re-submitting the full
        history, which relied on message-ID matching to avoid duplication.
        """
        logger.info(f"Doing something with state: {state}")

        response: AIMessage = self.llm.invoke(
            input=state["messages"],
        )
        logger.info(f"Response after doing something: {response}")

        # Delta update: the reducer appends this message to state["messages"].
        return {"messages": [response]}

    def finalize(self, state: AgentState) -> AgentState:
        """Post-processing hook; currently a logging pass-through."""
        logger.info(f"Finalizing BasicWorkflowAgent with state: {state}")
        return state

    def run_agent(self, input: AgentInput) -> AgentOutput:
        """Run the compiled workflow for a single request and return the reply.

        Args:
            input: Validated request payload (``AgentInput.request``).

        Returns:
            AgentOutput whose ``response`` is the last assistant message.
        """
        logger.info(f"Running BasicWorkflowAgent with question: {input.model_dump_json(indent=2)}")
        app = self.create_graph()
        initial_state: AgentState = {
            "messages": [
                {
                    "role": "user",
                    "content": input.request,
                }
            ],
        }
        final_state = app.invoke(initial_state)
        logger.info(f"Final state after running agent: {final_state}")
        # add_messages normalizes entries to message objects, so .content is safe.
        return AgentOutput(response=final_state["messages"][-1].content)

    def draw_mermaid_png(self) -> bytes:
        """Render the compiled graph as a Mermaid-diagram PNG and return the raw bytes."""
        return self.create_graph().get_graph().draw_mermaid_png()
# --- template_langgraph/agents/basic_workflow_agent/models.py ---
from typing import Annotated, TypedDict

from langgraph.graph.message import add_messages
from pydantic import BaseModel, Field


class AgentInput(BaseModel):
    """Validated input payload for BasicWorkflowAgent."""

    request: str = Field(..., description="ユーザーからのリクエスト")


class AgentOutput(BaseModel):
    """Validated output payload for BasicWorkflowAgent."""

    response: str = Field(..., description="エージェントの応答")


class AgentState(TypedDict):
    """Workflow graph state: message history merged via the add_messages reducer."""

    messages: Annotated[list, add_messages]


# --- template_langgraph/tasks/draw_basic_workflow_agent_mermaid_png.py ---
import sys

from template_langgraph.agents.basic_workflow_agent.agent import BasicWorkflowAgent

if __name__ == "__main__":
    # First CLI argument overrides the default output path.
    png_path = sys.argv[1] if len(sys.argv) > 1 else "data/basic_workflow_agent.png"

    with open(png_path, "wb") as f:
        f.write(BasicWorkflowAgent().draw_mermaid_png())


# --- template_langgraph/tasks/run_basic_workflow_agent.py ---
import logging
import sys

from template_langgraph.agents.basic_workflow_agent.agent import AgentInput, BasicWorkflowAgent
from template_langgraph.loggers import get_logger

logger = get_logger(__name__)
logger.setLevel(logging.INFO)

if __name__ == "__main__":
    # First CLI argument overrides the default demo question.
    question = sys.argv[1] if len(sys.argv) > 1 else "「鬼灯」を実行すると、KABUTOが急に停止します。原因と対策を教えてください。"

    # Instantiate the agent, wrap the question, run, and log the result.
    agent = BasicWorkflowAgent()
    agent_input = AgentInput(request=question)
    logger.info(f"Running BasicWorkflowAgent with input: {agent_input.model_dump_json(indent=2)}")
    agent_output = agent.run_agent(input=agent_input)
    logger.info(f"Agent output: {agent_output.model_dump_json(indent=2)}")