
Commit 4cf9e8f

Copilot and ks6088ts committed
Implement LangGraph-based Agent API with modular architecture
Co-authored-by: ks6088ts <[email protected]>
1 parent 18ed17c commit 4cf9e8f

File tree

15 files changed: +938 -0 lines changed


docs/index.md

Lines changed: 52 additions & 0 deletions

Appends a new subsection after the existing `scripts/agents.py` examples:

### LangGraph Agent

An interactive AI assistant built on the LangGraph-based agent API. It implements a simple agent workflow with tool-calling support.

#### CLI usage examples

```bash
# Show help
python scripts/langgraph_agent.py --help

# Chat with the agent
python scripts/langgraph_agent.py chat "Hello! What time is it right now?"

# Chat with a given thread ID (continues the conversation)
python scripts/langgraph_agent.py chat "Please continue where we left off" --thread-id "12345-67890-abcdef"

# Chat with verbose output
python scripts/langgraph_agent.py chat "Please calculate 2 + 2 × 3" --verbose

# Interactive mode
python scripts/langgraph_agent.py interactive

# List the available tools
python scripts/langgraph_agent.py tools

# Demo mode (runs sample questions)
python scripts/langgraph_agent.py demo
```

#### API endpoints

```bash
# Start the FastAPI server
make dev

# Chat with the LangGraph agent
curl -X POST "http://localhost:8000/agents/langgraph/chat" \
  -H "Content-Type: application/json" \
  -d '{"message": "Hello!"}'

# Streaming chat
curl -X POST "http://localhost:8000/agents/langgraph/chat/stream" \
  -H "Content-Type: application/json" \
  -d '{"message": "Please give me a long answer"}'

# List the available tools
curl -X GET "http://localhost:8000/agents/langgraph/tools"

# Health check
curl -X GET "http://localhost:8000/agents/langgraph/health"
```
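
For reference, the same endpoints can be called from Python. Below is a minimal client sketch using `httpx`; the base URL mirrors the curl examples above, while the helper names and the exact shape of the JSON response and streamed chunks are assumptions, since the router implementation is not shown in this excerpt.

```python
# Hypothetical client sketch; only the endpoint paths are taken from the docs above.
import httpx

BASE_URL = "http://localhost:8000/agents/langgraph"  # matches the curl examples


def chat(message: str) -> dict:
    """POST /agents/langgraph/chat and return the parsed JSON body."""
    response = httpx.post(f"{BASE_URL}/chat", json={"message": message}, timeout=60.0)
    response.raise_for_status()
    return response.json()


def stream_chat(message: str) -> None:
    """POST /agents/langgraph/chat/stream and print chunks as they arrive."""
    with httpx.stream("POST", f"{BASE_URL}/chat/stream", json={"message": message}, timeout=None) as response:
        response.raise_for_status()
        for chunk in response.iter_text():
            # The chunk format is an assumption; adapt parsing to the actual router.
            print(chunk, end="", flush=True)


if __name__ == "__main__":
    print(chat("Hello!"))
    stream_chat("Please give me a long answer")
```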

pyproject.toml

Lines changed: 1 addition & 0 deletions

```diff
 dependencies = [
     "fastapi[standard]>=0.115.12",
     "langchain-community>=0.3.27",
     "langchain-openai>=0.3.27",
+    "langgraph>=0.2.90",
     "msgraph-sdk>=1.37.0",
     "opentelemetry-instrumentation-fastapi>=0.52b1",
     "pydantic-settings>=2.10.1",
```

scripts/langgraph_agent.py

Lines changed: 221 additions & 0 deletions

```python
#!/usr/bin/env python
# filepath: /home/runner/work/template-fastapi/template-fastapi/scripts/langgraph_agent.py

"""LangGraph Agent CLI tool."""

import json
from typing import Optional

import typer
from rich.console import Console
from rich.markdown import Markdown
from rich.panel import Panel

from template_fastapi.core.langgraph.agent import LangGraphAgent
from template_fastapi.core.langgraph.tools import get_tools

app = typer.Typer()
console = Console()


@app.command()
def chat(
    message: str = typer.Argument(..., help="メッセージ"),
    thread_id: Optional[str] = typer.Option(None, "--thread-id", "-t", help="スレッドID(会話の継続用)"),
    verbose: bool = typer.Option(False, "--verbose", "-v", help="詳細な情報を表示"),
):
    """LangGraphエージェントとチャットする"""
    console.print("[bold green]LangGraphエージェントとチャットします[/bold green]")
    console.print(f"メッセージ: {message}")

    if thread_id:
        console.print(f"スレッドID: {thread_id}")
    else:
        console.print("新しいスレッドを作成します")

    try:
        # Initialize the LangGraph agent
        agent = LangGraphAgent()

        # Show loading message
        with console.status("[bold green]エージェントが応答を生成中...", spinner="dots"):
            result = agent.chat(message=message, thread_id=thread_id)

        # Display results
        console.print("\n" + "="*50)
        console.print("[bold blue]チャット結果[/bold blue]")
        console.print("="*50)

        # Display user message
        user_panel = Panel(
            message,
            title="[bold cyan]あなた[/bold cyan]",
            border_style="cyan"
        )
        console.print(user_panel)

        # Display agent response with markdown rendering
        response_content = result["response"]
        if response_content:
            try:
                # Try to render as markdown for better formatting
                markdown = Markdown(response_content)
                agent_panel = Panel(
                    markdown,
                    title="[bold green]LangGraphエージェント[/bold green]",
                    border_style="green"
                )
            except Exception:
                # Fallback to plain text
                agent_panel = Panel(
                    response_content,
                    title="[bold green]LangGraphエージェント[/bold green]",
                    border_style="green"
                )
            console.print(agent_panel)

        # Display metadata
        if verbose:
            console.print("\n[bold yellow]メタデータ[/bold yellow]:")
            console.print(f"スレッドID: {result['thread_id']}")
            console.print(f"作成日時: {result['created_at']}")
            console.print(f"ステップ数: {result.get('step_count', 0)}")

            if result.get("tools_used"):
                console.print(f"使用ツール: {', '.join(result['tools_used'])}")
            else:
                console.print("使用ツール: なし")
        else:
            console.print(f"\n[dim]スレッドID: {result['thread_id']}[/dim]")
            if result.get("tools_used"):
                console.print(f"[dim]使用ツール: {', '.join(result['tools_used'])}[/dim]")

    except Exception as e:
        console.print(f"❌ [bold red]エラー[/bold red]: {str(e)}")


@app.command()
def interactive():
    """対話モードでLangGraphエージェントとチャットする"""
    console.print("[bold green]LangGraphエージェント対話モード[/bold green]")
    console.print("終了するには 'exit', 'quit', または 'bye' と入力してください\n")

    agent = LangGraphAgent()
    thread_id = None

    while True:
        try:
            # Get user input
            user_input = typer.prompt("あなた")

            # Check for exit commands
            if user_input.lower() in ['exit', 'quit', 'bye', '終了']:
                console.print("[yellow]対話を終了します。ありがとうございました![/yellow]")
                break

            # Process the message
            with console.status("[bold green]応答を生成中...", spinner="dots"):
                result = agent.chat(message=user_input, thread_id=thread_id)

            # Update thread_id for conversation continuity
            thread_id = result["thread_id"]

            # Display agent response
            response_panel = Panel(
                Markdown(result["response"]) if result["response"] else "応答がありません",
                title="[bold green]エージェント[/bold green]",
                border_style="green"
            )
            console.print(response_panel)

            # Show tools used if any
            if result.get("tools_used"):
                console.print(f"[dim]使用ツール: {', '.join(result['tools_used'])}[/dim]")

            console.print()  # Add spacing

        except KeyboardInterrupt:
            console.print("\n[yellow]対話を終了します[/yellow]")
            break
        except Exception as e:
            console.print(f"❌ [bold red]エラー[/bold red]: {str(e)}")


@app.command()
def tools():
    """利用可能なツールの一覧を表示する"""
    console.print("[bold green]利用可能なツール一覧[/bold green]")

    try:
        available_tools = get_tools()

        if not available_tools:
            console.print("[yellow]利用可能なツールがありません[/yellow]")
            return

        console.print(f"\n[bold blue]合計 {len(available_tools)} 個のツールが利用可能です[/bold blue]\n")

        for i, tool in enumerate(available_tools, 1):
            tool_info = f"""
**名前:** {tool.name}
**説明:** {tool.description}
"""
            if hasattr(tool, 'args_schema') and tool.args_schema:
                try:
                    schema = tool.args_schema.model_json_schema()
                    if 'properties' in schema:
                        tool_info += f"**パラメータ:** {', '.join(schema['properties'].keys())}"
                except Exception:
                    pass

            panel = Panel(
                Markdown(tool_info),
                title=f"[bold cyan]ツール {i}[/bold cyan]",
                border_style="cyan"
            )
            console.print(panel)

    except Exception as e:
        console.print(f"❌ [bold red]エラー[/bold red]: {str(e)}")


@app.command()
def demo():
    """デモンストレーション用のサンプルチャット"""
    console.print("[bold green]LangGraphエージェント デモモード[/bold green]")
    console.print("いくつかのサンプル質問でエージェントをテストします\n")

    sample_queries = [
        "こんにちは!今何時ですか?",
        "2 + 2 × 3 を計算してください",
        "Pythonについて検索してください",
    ]

    agent = LangGraphAgent()

    for i, query in enumerate(sample_queries, 1):
        console.print(f"[bold yellow]サンプル質問 {i}:[/bold yellow] {query}")

        try:
            with console.status(f"[bold green]質問 {i} を処理中...", spinner="dots"):
                result = agent.chat(message=query)

            response_panel = Panel(
                Markdown(result["response"]) if result["response"] else "応答がありません",
                title="[bold green]エージェントの応答[/bold green]",
                border_style="green"
            )
            console.print(response_panel)

            if result.get("tools_used"):
                console.print(f"[dim]使用ツール: {', '.join(result['tools_used'])}[/dim]")

            console.print()  # Add spacing

        except Exception as e:
            console.print(f"❌ [bold red]エラー[/bold red]: {str(e)}")
            console.print()


if __name__ == "__main__":
    app()
```

template_fastapi/core/__init__.py

Lines changed: 1 addition & 0 deletions

```python
"""Core module for template_fastapi."""
```

template_fastapi/core/langgraph/__init__.py

Lines changed: 1 addition & 0 deletions

```python
"""LangGraph components for agent implementation."""
```

template_fastapi/core/langgraph/agent.py

Lines changed: 85 additions & 0 deletions

```python
"""Main LangGraph agent interface."""

import uuid
from datetime import datetime
from typing import Any

from langchain_core.messages import HumanMessage

from .graph import get_compiled_graph
from .state import AgentState


class LangGraphAgent:
    """Main interface for LangGraph agent operations."""

    def __init__(self):
        self.graph = get_compiled_graph()

    def chat(self, message: str, thread_id: str | None = None) -> dict[str, Any]:
        """
        Chat with the LangGraph agent.

        Args:
            message: User message
            thread_id: Optional thread ID for conversation continuity

        Returns:
            Chat response with metadata
        """
        # Generate thread ID if not provided
        if thread_id is None:
            thread_id = str(uuid.uuid4())

        # Create initial state
        initial_state = AgentState(
            messages=[HumanMessage(content=message)],
            thread_id=thread_id,
            tools_used=[],
            step_count=0,
        )

        # Run the graph
        config = {"configurable": {"thread_id": thread_id}}
        result = self.graph.invoke(initial_state, config=config)

        # Extract the final response
        final_message = result["messages"][-1]
        response_content = final_message.content if hasattr(final_message, 'content') else str(final_message)

        return {
            "message": message,
            "response": response_content,
            "thread_id": thread_id,
            "tools_used": result.get("tools_used", []),
            "created_at": datetime.now().isoformat(),
            "step_count": result.get("step_count", 0),
        }

    def stream_chat(self, message: str, thread_id: str | None = None):
        """
        Stream chat with the LangGraph agent.

        Args:
            message: User message
            thread_id: Optional thread ID for conversation continuity

        Yields:
            Streaming responses from the agent
        """
        # Generate thread ID if not provided
        if thread_id is None:
            thread_id = str(uuid.uuid4())

        # Create initial state
        initial_state = AgentState(
            messages=[HumanMessage(content=message)],
            thread_id=thread_id,
            tools_used=[],
            step_count=0,
        )

        # Stream the graph execution
        config = {"configurable": {"thread_id": thread_id}}
        for chunk in self.graph.stream(initial_state, config=config):
            yield chunk
```
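
Both the CLI and this wrapper depend on `get_compiled_graph` and `AgentState` from the sibling `graph` and `state` modules, which are among the 15 changed files but do not appear in this excerpt. The following is a minimal, hypothetical sketch of what that pair could look like, assuming a standard LangGraph `StateGraph` with one tool-calling model node, a prebuilt `ToolNode`, and an in-memory checkpointer for thread-scoped memory; the `AzureChatOpenAI` backend, the deployment name, and all node names are assumptions, not the committed code.

```python
# Hypothetical sketch of the sibling modules referenced above (state.py / graph.py);
# the actual files in this commit are not shown in this excerpt.
from typing import Annotated, TypedDict

from langchain_core.messages import BaseMessage
from langchain_openai import AzureChatOpenAI  # assumed backend; langchain-openai is already a dependency
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import END, StateGraph
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, tools_condition

from .tools import get_tools  # the tool registry used by the CLI's `tools` command


class AgentState(TypedDict):
    """Graph state matching the fields LangGraphAgent reads and writes."""

    messages: Annotated[list[BaseMessage], add_messages]
    thread_id: str
    tools_used: list[str]
    step_count: int


def _agent_node(state: AgentState) -> dict:
    """Call the tool-enabled model and record one reasoning step."""
    llm = AzureChatOpenAI(azure_deployment="gpt-4o")  # deployment name is a placeholder
    response = llm.bind_tools(get_tools()).invoke(state["messages"])
    tool_names = [call["name"] for call in getattr(response, "tool_calls", [])]
    return {
        "messages": [response],
        "tools_used": state["tools_used"] + tool_names,
        "step_count": state["step_count"] + 1,
    }


def get_compiled_graph():
    """Build an agent -> tools loop and compile it with an in-memory checkpointer."""
    builder = StateGraph(AgentState)
    builder.add_node("agent", _agent_node)
    builder.add_node("tools", ToolNode(get_tools()))
    builder.set_entry_point("agent")
    # Route to the tool node when the model requested tools, otherwise finish.
    builder.add_conditional_edges("agent", tools_condition, {"tools": "tools", END: END})
    builder.add_edge("tools", "agent")
    return builder.compile(checkpointer=MemorySaver())
```

With a compiled graph in place, `LangGraphAgent` can also be used directly from Python: `LangGraphAgent().chat("Hello")` returns a dict with `response`, `thread_id`, `tools_used`, `created_at`, and `step_count`, which is exactly what the CLI above renders.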
