diff --git a/docs/index.md b/docs/index.md
index b1e86bf..f60b1d4 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -6,35 +6,34 @@
 # Start Docker containers
 docker compose up -d
 
-# Delete collection from Qdrant
-uv run python -m template_langgraph.tasks.delete_qdrant_collection
-
-# Add documents to Qdrant
-uv run python -m template_langgraph.tasks.add_documents_to_qdrant
-
-# Search Qdrant
-uv run python -m template_langgraph.tasks.search_documents_on_qdrant
-
-# Add documents to Elasticsearch
-uv run python -m template_langgraph.tasks.add_documents_to_elasticsearch
-
-# Search Elasticsearch
-uv run python -m template_langgraph.tasks.search_documents_on_elasticsearch
-
-# Run Kabuto Helpdesk Agent
-uv run python -m template_langgraph.tasks.run_kabuto_helpdesk_agent "KABUTOの起動時に、画面全体が紫色に点滅し、システムがフリーズします。"
-uv run python -m template_langgraph.tasks.run_kabuto_helpdesk_agent "KABUTOのマニュアルから禅モードに関する情報を教えて下さい"
-
-# ChatWithToolsAgent
-uv run python -m template_langgraph.tasks.draw_chat_with_tools_agent_mermaid_png "data/chat_with_tools_agent.png"
-uv run python -m template_langgraph.tasks.run_chat_with_tools_agent
+# Qdrant
+uv run python scripts/qdrant_operator.py --help
+uv run python scripts/qdrant_operator.py delete-collection --collection-name qa_kabuto --verbose
+uv run python scripts/qdrant_operator.py add-documents --collection-name qa_kabuto --verbose
+uv run python scripts/qdrant_operator.py search-documents --collection-name qa_kabuto --question "「鬼灯」を実行すると、KABUTOが急に停止します。原因と対策を教えてください。" --verbose
+
+# Elasticsearch
+uv run python scripts/elasticsearch_operator.py --help
+uv run python scripts/elasticsearch_operator.py add-documents --index-name docs_kabuto --verbose
+uv run python scripts/elasticsearch_operator.py search-documents --index-name docs_kabuto --query "禅モード" --verbose
+
+# Agents
+
+## Draw agent graph
+AGENT_NAME=chat_with_tools_agent
+uv run python scripts/graph_runner.py png --name $AGENT_NAME --verbose --output data/$AGENT_NAME.png
+
+## Run agents
+AGENT_NAME=chat_with_tools_agent
+AGENT_NAME=kabuto_helpdesk_agent
+uv run python scripts/graph_runner.py run --name $AGENT_NAME --verbose --question "KABUTOのマニュアルから禅モードに関する情報を教えて下さい"
 # KABUTOの起動時に、画面全体が紫色に点滅し、システムがフリーズします。KABUTO のマニュアルから、関連する情報を取得したり過去のシステムのトラブルシュート事例が蓄積されたデータベースから、関連する情報を取得して質問に答えてください
 # 天狗のいたずら という現象について KABUTO のマニュアルから、関連する情報を取得したり過去のシステムのトラブルシュート事例が蓄積されたデータベースから、関連する情報を取得して質問に答えてください
+# KABUTOの起動時に、画面全体が紫色に点滅し、システムがフリーズします。
+# KABUTOのマニュアルから禅モードに関する情報を教えて下さい
 
-# IssueFormatterAgent
-uv run python -m template_langgraph.tasks.run_issue_formatter_agent
-# KABUTOにログインできない!パスワードは合ってるはずなのに…若手社員である山田太郎は、Windows 11 を立ち上げ、日課のように自社の業務システムKABUTOのログイン画面を開きます。しかし、そこには、意味をなさない「虚無」という文字だけがただひっそりと表示されていたのです。これは質問でもあり不具合の報告でもあります。岡本太郎さんに本件調査依頼します。
-
+AGENT_NAME=issue_formatter_agent
+uv run python scripts/graph_runner.py run --name $AGENT_NAME --verbose --question "KABUTOにログインできない!パスワードは合ってるはずなのに…若手社員である山田太郎は、Windows 11 を立ち上げ、日課のように自社の業務システムKABUTOのログイン画面を開きます。しかし、そこには、意味をなさない「虚無」という文字だけがただひっそりと表示されていたのです。これは質問でもあり不具合の報告でもあります。岡本太郎さんに本件調査依頼します。"
 ```
 
 ## References
diff --git a/scripts/elasticsearch_operator.py b/scripts/elasticsearch_operator.py
new file mode 100644
index 0000000..2d6a6b5
--- /dev/null
+++ b/scripts/elasticsearch_operator.py
@@ -0,0 +1,109 @@
+import logging
+
+import typer
+from dotenv import load_dotenv
+
+from template_langgraph.loggers import get_logger
+from template_langgraph.tools.elasticsearch_tool import ElasticsearchClientWrapper
+from template_langgraph.tools.pdf_loaders import PdfLoaderWrapper
+
+# Initialize the Typer application
+app = typer.Typer(
+    add_completion=False,
+    help="Elasticsearch operator CLI",
+)
+
+# Set up logging
+logger = get_logger(__name__)
+
+
+@app.command()
+def search_documents(
+    index_name: str = typer.Option(
+        "docs_kabuto",
+        "--index-name",
+        "-i",
+        help="Name of the Elasticsearch index to search documents in",
+    ),
+    query: str = typer.Option(
+        "禅モード",
+        "--query",
+        "-q",
+        help="Query to search in the Elasticsearch index",
+    ),
+    verbose: bool = typer.Option(
+        False,
+        "--verbose",
+        "-v",
+        help="Enable verbose output",
+    ),
+):
+    # Set up logging
+    if verbose:
+        logger.setLevel(logging.DEBUG)
+
+    es = ElasticsearchClientWrapper()
+
+    results = es.search(
+        index_name=index_name,
+        query=query,
+    )
+    logger.info(f"Found {len(results)} results for the question: {query}")
+    for i, result in enumerate(results, start=1):
+        logger.info(f"Result {i}:")
+        logger.info(f"File Name: {result.metadata['source']}")
+        logger.info(f"Content: {result.page_content}")
+        logger.info("-" * 40)
+
+
+@app.command()
+def add_documents(
+    index_name: str = typer.Option(
+        "docs_kabuto",
+        "--index-name",
+        "-i",
+        help="Name of the Elasticsearch index to add documents to",
+    ),
+    verbose: bool = typer.Option(
+        False,
+        "--verbose",
+        "-v",
+        help="Enable verbose output",
+    ),
+):
+    # Set up logging
+    if verbose:
+        logger.setLevel(logging.DEBUG)
+
+    # Create Elasticsearch index
+    es = ElasticsearchClientWrapper()
+    logger.info(f"Creating Elasticsearch index: {index_name}")
+    result = es.create_index(
+        index_name=index_name,
+    )
+    if result:
+        logger.info(f"Created Elasticsearch index: {index_name}")
+    else:
+        logger.warning(f"Index {index_name} already exists.")
+
+    # Load documents from PDF files
+    documents = PdfLoaderWrapper().load_pdf_docs()
+    logger.info(f"Loaded {len(documents)} documents from PDF.")
+
+    # Add documents to Elasticsearch index
+    result = es.add_documents(
+        index_name=index_name,
+        documents=documents,
+    )
+    if result:
+        logger.info(f"Added {len(documents)} documents to Elasticsearch index: {index_name}")
+    else:
+        logger.error(f"Failed to add documents to Elasticsearch index: {index_name}")
+
+
+if __name__ == "__main__":
+    load_dotenv(
+        override=True,
+        verbose=True,
+    )
+    app()
diff --git a/scripts/graph_runner.py b/scripts/graph_runner.py
new file mode 100644
index 0000000..79bbaeb
--- /dev/null
+++ b/scripts/graph_runner.py
@@ -0,0 +1,149 @@
+import logging
+
+import typer
+from dotenv import load_dotenv
+
+from template_langgraph.agents.chat_with_tools_agent.agent import graph as chat_with_tools_agent_graph
+from template_langgraph.agents.issue_formatter_agent.agent import graph as issue_formatter_agent_graph
+from template_langgraph.agents.kabuto_helpdesk_agent import graph as kabuto_helpdesk_agent_graph
+from template_langgraph.agents.task_decomposer_agent.agent import graph as task_decomposer_agent_graph
+from template_langgraph.loggers import get_logger
+
+# Initialize the Typer application
+app = typer.Typer(
+    add_completion=False,
+    help="template-langgraph CLI",
+)
+
+# Set up logging
+logger = get_logger(__name__)
+
+
+def get_agent_graph(name: str):
+    if name == "chat_with_tools_agent":
+        return chat_with_tools_agent_graph
+    elif name == "issue_formatter_agent":
+        return issue_formatter_agent_graph
+    elif name == "task_decomposer_agent":
+        return task_decomposer_agent_graph
+    elif name == "kabuto_helpdesk_agent":
+        return kabuto_helpdesk_agent_graph
+    else:
+        raise ValueError(f"Unknown agent name: {name}")
+
+
+@app.command()
+def png(
+    name: str = typer.Option(
+        "chat_with_tools_agent",
+        "--name",
+        "-n",
+        help="Name of the agent to draw",
+    ),
+    output_file_path: str = typer.Option(
+        "output.png",
+        "--output",
+        "-o",
+        help="Path to the output PNG file",
+    ),
+    verbose: bool = typer.Option(
+        False,
+        "--verbose",
+        "-v",
+        help="Enable verbose output",
+    ),
+):
+    # Set up logging
+    if verbose:
+        logger.setLevel(logging.DEBUG)
+
+    logger.debug(f"This is a debug message with name: {name}")
+    typer.echo(f"Drawing agent: {name}")
+    get_agent_graph(name).get_graph().draw_mermaid_png(
+        output_file_path=output_file_path,
+    )
+    typer.echo(f"Graph saved to {output_file_path}")
+
+
+@app.command()
+def run(
+    name: str = typer.Option(
+        "chat_with_tools_agent",
+        "--name",
+        "-n",
+        help="Name of the agent to run",
+    ),
+    question: str = typer.Option(
+        "「鬼灯」を実行すると、KABUTOが急に停止します。原因と対策を教えてください。",
+        "--question",
+        "-q",
+        help="Question to ask the agent",
+    ),
+    verbose: bool = typer.Option(
+        False,
+        "--verbose",
+        "-v",
+        help="Enable verbose output",
+    ),
+):
+    # Set up logging
+    if verbose:
+        logger.setLevel(logging.DEBUG)
+
+    if name == "chat_with_tools_agent":
+        from template_langgraph.agents.chat_with_tools_agent.agent import (
+            AgentState,
+        )
+
+        for event in chat_with_tools_agent_graph.stream(
+            input=AgentState(
+                messages=[
+                    {
+                        "role": "user",
+                        "content": question,
+                    }
+                ],
+            )
+        ):
+            logger.info("-" * 20)
+            logger.info(f"Event: {event}")
+
+    if name == "issue_formatter_agent":
+        from template_langgraph.agents.issue_formatter_agent.agent import (
+            AgentState,
+        )
+
+        for event in issue_formatter_agent_graph.stream(
+            input=AgentState(
+                messages=[
+                    {
+                        "role": "user",
+                        "content": question,
+                    }
+                ],
+            )
+        ):
+            logger.info("-" * 20)
+            logger.info(f"Event: {event}")
+
+    if name == "kabuto_helpdesk_agent":
+        from template_langgraph.agents.kabuto_helpdesk_agent import KabutoHelpdeskAgent
+
+        agent = KabutoHelpdeskAgent(
+            tools=None,  # Use the default tools rather than a custom tool set
+        )
+        response = agent.run(
+            question=question,
+        )
+        logger.info(f"Agent result: {response}")
+
+        # Display the agent's response
+        logger.info(f"Answer: {response['messages'][-1].content}")
+
+
+if __name__ == "__main__":
+    load_dotenv(
+        override=True,
+        verbose=True,
+    )
+    app()
diff --git a/scripts/qdrant_operator.py b/scripts/qdrant_operator.py
new file mode 100644
index 0000000..d819b4e
--- /dev/null
+++ b/scripts/qdrant_operator.py
@@ -0,0 +1,154 @@
+import logging
+
+import typer
+from dotenv import load_dotenv
+from qdrant_client.models import PointStruct
+
+from template_langgraph.llms.azure_openais import AzureOpenAiWrapper
+from template_langgraph.loggers import get_logger
+from template_langgraph.tools.csv_loaders import CsvLoaderWrapper
+from template_langgraph.tools.qdrants import QdrantClientWrapper
+
+# Initialize the Typer application
+app = typer.Typer(
+    add_completion=False,
+    help="Qdrant operator CLI",
+)
+
+# Set up logging
+logger = get_logger(__name__)
+
+
+@app.command()
+def delete_collection(
+    collection_name: str = typer.Option(
+        "qa_kabuto",
+        "--collection-name",
+        "-c",
+        help="Name of the Qdrant collection to delete",
+    ),
+    verbose: bool = typer.Option(
+        False,
+        "--verbose",
+        "-v",
+        help="Enable verbose output",
+    ),
+):
+    # Set up logging
+    if verbose:
+        logger.setLevel(logging.DEBUG)
+
+    logger.info(f"Deleting Qdrant collection: {collection_name}")
+    result = QdrantClientWrapper().delete_collection(
+        collection_name=collection_name,
+    )
+    if result:
+        logger.info(f"Successfully deleted Qdrant collection: {collection_name}")
+    else:
+        logger.warning(f"Qdrant collection {collection_name} does not exist or could not be deleted.")
+    logger.info("Deletion task completed.")
+
+
+@app.command()
+def add_documents(
+    collection_name: str = typer.Option(
+        "qa_kabuto",
+        "--collection-name",
+        "-c",
+        help="Name of the Qdrant collection to add documents to",
+    ),
+    verbose: bool = typer.Option(
+        False,
+        "--verbose",
+        "-v",
+        help="Enable verbose output",
+    ),
+):
+    # Set up logging
+    if verbose:
+        logger.setLevel(logging.DEBUG)
+
+    # Load documents from CSV files
+    documents = CsvLoaderWrapper().load_csv_docs()
+    logger.info(f"Loaded {len(documents)} documents from CSV.")
+
+    points = []
+    embedding_wrapper = AzureOpenAiWrapper()
+    for i, doc in enumerate(documents):
+        logger.debug(f"Processing document {i}: {doc.metadata.get('source', 'unknown')}")
+        content = doc.page_content.replace("\n", " ")
+        logger.debug(f"Creating embedding for document {i} with content: {content[:50]}...")
+        vector = embedding_wrapper.create_embedding(content)
+        points.append(
+            PointStruct(
+                id=i,
+                vector=vector,
+                payload={
+                    "file_name": doc.metadata.get("source", f"doc_{i}"),
+                    "content": content,
+                },
+            )
+        )
+
+    # Create Qdrant collection and upsert points
+    logger.info(f"Creating Qdrant collection: {collection_name}")
+    qdrant_client = QdrantClientWrapper()
+    qdrant_client.create_collection(
+        collection_name=collection_name,
+        vector_size=len(points[0].vector) if points else 1536,  # default vector size
+    )
+
+    # Upsert points into the Qdrant collection
+    logger.info(f"Upserting points into Qdrant collection: {collection_name}")
+    operation_info = qdrant_client.upsert_points(
+        collection_name=collection_name,
+        points=points,
+    )
+    logger.info(f"Upserted {len(points)} points into Qdrant collection: {collection_name}")
+    logger.info(f"Operation info: {operation_info}")
+
+
+@app.command()
+def search_documents(
+    collection_name: str = typer.Option(
+        "qa_kabuto",
+        "--collection-name",
+        "-c",
+        help="Name of the Qdrant collection to search documents in",
+    ),
+    question: str = typer.Option(
+        "「鬼灯」を実行すると、KABUTOが急に停止します。原因と対策を教えてください。",
+        "--question",
+        "-q",
+        help="Question to search in the Qdrant collection",
+    ),
+    verbose: bool = typer.Option(
+        False,
+        "--verbose",
+        "-v",
+        help="Enable verbose output",
+    ),
+):
+    # Set up logging
+    if verbose:
+        logger.setLevel(logging.DEBUG)
+
+    qdrant_client = QdrantClientWrapper()
+
+    results = qdrant_client.query_points(
+        collection_name=collection_name,
+        query=AzureOpenAiWrapper().create_embedding(question),
+    )
+    logger.info(f"Found {len(results)} results for the question: {question}")
+    for result in results:
+        logger.info(f"File Name: {result.payload['file_name']}")
+        logger.info(f"Content: {result.payload['content']}")
+        logger.info("-" * 40)
+
+
+if __name__ == "__main__":
+    load_dotenv(
+        override=True,
+        verbose=True,
+    )
+    app()
diff --git a/template_langgraph/agents/chat_with_tools_agent/agent.py b/template_langgraph/agents/chat_with_tools_agent/agent.py
index cc60123..18a975e 100644
--- a/template_langgraph/agents/chat_with_tools_agent/agent.py
+++ b/template_langgraph/agents/chat_with_tools_agent/agent.py
@@ -46,7 +46,6 @@ def create_graph(self):
         workflow = StateGraph(AgentState)
 
         # Create nodes
-        workflow.add_node("initialize", self.initialize)
         workflow.add_node("chat_with_tools", self.chat_with_tools)
         workflow.add_node(
             "tools",
@@ -57,35 +56,24 @@ def create_graph(self):
                 ]
             ),
         )
-        workflow.add_node("finalize", self.finalize)
 
         # Create edges
-        workflow.set_entry_point("initialize")
-        workflow.add_edge("initialize", "chat_with_tools")
+        workflow.set_entry_point("chat_with_tools")
         workflow.add_conditional_edges(
-            "chat_with_tools",
-            self.route_tools,
-            # The following dictionary lets you tell the graph to interpret the condition's outputs as a specific node
-            # It defaults to the identity function, but if you
-            # want to use a node named something else apart from "tools",
-            # You can update the value of the dictionary to something else
-            # e.g., "tools": "my_tools"
-            {"tools": "tools", END: "finalize"},
+            source="chat_with_tools",
+            path=self.route_tools,
+            path_map={
+                "tools": "tools",
+                END: END,
+            },
         )
         workflow.add_edge("tools", "chat_with_tools")
-        workflow.add_edge("finalize", END)
 
         # Compile the graph
         return workflow.compile(
             name=ChatWithToolsAgent.__name__,
         )
 
-    def initialize(self, state: AgentState) -> AgentState:
-        """Initialize the agent with the given state."""
-        logger.info(f"Initializing ChatWithToolsAgent with state: {state}")
-        # Here you can add any initialization logic if needed
-        return state
-
     def chat_with_tools(self, state: AgentState) -> AgentState:
         """Chat with tools using the state."""
         logger.info(f"Chatting with tools using state: {state}")
@@ -119,15 +107,5 @@ def route_tools(
             return "tools"
         return END
 
-    def finalize(self, state: AgentState) -> AgentState:
-        """Finalize the agent's work and prepare the output."""
-        logger.info(f"Finalizing ChatWithToolsAgent with state: {state}")
-        # Here you can add any finalization logic if needed
-        return state
-
-    def draw_mermaid_png(self) -> bytes:
-        """Draw the graph in Mermaid format."""
-        return self.create_graph().get_graph().draw_mermaid_png()
-
 
 graph = ChatWithToolsAgent().create_graph()
diff --git a/template_langgraph/agents/chat_with_tools_agent/models.py b/template_langgraph/agents/chat_with_tools_agent/models.py
index 86d2a3c..dfac77a 100644
--- a/template_langgraph/agents/chat_with_tools_agent/models.py
+++ b/template_langgraph/agents/chat_with_tools_agent/models.py
@@ -8,15 +8,6 @@
     BaseMessage,
 )
 from langgraph.graph.message import add_messages
-from pydantic import BaseModel, Field
-
-
-class AgentInput(BaseModel):
-    request: str = Field(..., description="ユーザーからのリクエスト")
-
-
-class AgentOutput(BaseModel):
-    response: str = Field(..., description="エージェントの応答")
 
 
 class AgentState(TypedDict):
diff --git a/template_langgraph/agents/issue_formatter_agent/agent.py b/template_langgraph/agents/issue_formatter_agent/agent.py
index 76d3758..fdfe0dc 100644
--- a/template_langgraph/agents/issue_formatter_agent/agent.py
+++ b/template_langgraph/agents/issue_formatter_agent/agent.py
@@ -1,4 +1,4 @@
-from langgraph.graph import END, StateGraph
+from langgraph.graph import StateGraph
 
 from template_langgraph.agents.issue_formatter_agent.models import AgentState, Issue
 from template_langgraph.llms.azure_openais import AzureOpenAiWrapper
@@ -21,7 +21,7 @@ def create_graph(self):
 
         # Create edges
         workflow.set_entry_point("analyze")
-        workflow.add_edge("analyze", END)
+        workflow.set_finish_point("analyze")
 
         # Compile the graph
         return workflow.compile(
@@ -37,9 +37,5 @@ def analyze(self, state: AgentState) -> AgentState:
         state["issue"] = issue
         return state
 
-    def draw_mermaid_png(self) -> bytes:
-        """Draw the graph in Mermaid format."""
-        return self.create_graph().get_graph().draw_mermaid_png()
-
 
 graph = IssueFormatterAgent().create_graph()
diff --git a/template_langgraph/agents/task_decomposer_agent/agent.py b/template_langgraph/agents/task_decomposer_agent/agent.py
index afcb960..6b0e904 100644
--- a/template_langgraph/agents/task_decomposer_agent/agent.py
+++ b/template_langgraph/agents/task_decomposer_agent/agent.py
@@ -74,9 +74,5 @@ def route_human_feedback(
             logger.info("Looping back to chat for further processing.")
             return "loopback"
 
-    def draw_mermaid_png(self) -> bytes:
-        """Draw the graph in Mermaid format."""
-        return self.create_graph().get_graph().draw_mermaid_png()
-
 
 graph = TaskDecomposerAgent().create_graph()
diff --git a/template_langgraph/tasks/__init__.py b/template_langgraph/tasks/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/template_langgraph/tasks/add_documents_to_elasticsearch.py b/template_langgraph/tasks/add_documents_to_elasticsearch.py
deleted file mode 100644
index 6c762f1..0000000
--- a/template_langgraph/tasks/add_documents_to_elasticsearch.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import logging
-
-from template_langgraph.loggers import get_logger
-from template_langgraph.tools.elasticsearch_tool import ElasticsearchClientWrapper
-from template_langgraph.tools.pdf_loaders import PdfLoaderWrapper
-
-logger = get_logger(__name__)
-logger.setLevel(logging.DEBUG)
-COLLECTION_NAME = "docs_kabuto"
-
-if __name__ == "__main__":
-    # Create Elasticsearch index
-    es = ElasticsearchClientWrapper()
-    logger.info(f"Creating Elasticsearch index: {COLLECTION_NAME}")
-    result = es.create_index(
-        index_name=COLLECTION_NAME,
-    )
-    if result:
-        logger.info(f"Created Elasticsearch index: {COLLECTION_NAME}")
-    else:
-        logger.warning(f"Index {COLLECTION_NAME} already exists.")
-
-    # Load documents from PDF files
-    documents = PdfLoaderWrapper().load_pdf_docs()
-    logger.info(f"Loaded {len(documents)} documents from PDF.")
-
-    # Add documents to Elasticsearch index
-    result = es.add_documents(
-        index_name=COLLECTION_NAME,
-        documents=documents,
-    )
-    if result:
-        logger.info(f"Added {len(documents)} documents to Elasticsearch index: {COLLECTION_NAME}")
-    else:
-        logger.error(f"Failed to add documents to Elasticsearch index: {COLLECTION_NAME}")
diff --git a/template_langgraph/tasks/add_documents_to_qdrant.py b/template_langgraph/tasks/add_documents_to_qdrant.py
deleted file mode 100644
index 6b98285..0000000
--- a/template_langgraph/tasks/add_documents_to_qdrant.py
+++ /dev/null
@@ -1,51 +0,0 @@
-import logging
-
-from qdrant_client.models import PointStruct
-
-from template_langgraph.llms.azure_openais import AzureOpenAiWrapper
-from template_langgraph.loggers import get_logger
-from template_langgraph.tools.csv_loaders import CsvLoaderWrapper
-from template_langgraph.tools.qdrants import QdrantClientWrapper
-
-logger = get_logger(__name__)
-logger.setLevel(logging.DEBUG)
-COLLECTION_NAME = "qa_kabuto"
-
-if __name__ == "__main__":
-    # Load documents from CSV files
-    documents = CsvLoaderWrapper().load_csv_docs()
-    logger.info(f"Loaded {len(documents)} documents from CSV.")
-
-    points = []
-    embedding_wrapper = AzureOpenAiWrapper()
-    for i, doc in enumerate(documents):
-        logger.debug(f"Processing document {i}: {doc.metadata.get('source', 'unknown')}")
-        content = doc.page_content.replace("\n", " ")
-        logger.debug(f"Creating embedding for document {i} with content: {content[:50]}...")
-        vector = embedding_wrapper.create_embedding(content)
-        points.append(
-            PointStruct(
-                id=i,
-                vector=vector,
-                payload={
-                    "file_name": doc.metadata.get("source", f"doc_{i}"),
-                    "content": content,
-                },
-            )
-        )
-
-    # Create Qdrant collection and upsert points
-    logger.info(f"Creating Qdrant collection: {COLLECTION_NAME}")
-    qdrant_client = QdrantClientWrapper()
-    qdrant_client.create_collection(
-        collection_name=COLLECTION_NAME,
-        vector_size=len(points[0].vector) if points else 1536,  # default vector size
-    )
-
-    # Upsert points into the Qdrant collection
-    logger.info(f"Upserting points into Qdrant collection: {COLLECTION_NAME}")
-    operation_info = qdrant_client.upsert_points(
-        collection_name=COLLECTION_NAME,
-        points=points,
-    )
-    logger.info(f"Upserted {len(points)} points into Qdrant collection: {COLLECTION_NAME}")
diff --git a/template_langgraph/tasks/delete_qdrant_collection.py b/template_langgraph/tasks/delete_qdrant_collection.py
deleted file mode 100644
index 4b91510..0000000
--- a/template_langgraph/tasks/delete_qdrant_collection.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import logging
-
-from template_langgraph.loggers import get_logger
-from template_langgraph.tools.qdrants import QdrantClientWrapper
-
-logger = get_logger(__name__)
-logger.setLevel(logging.DEBUG)
-COLLECTION_NAME = "qa_kabuto"
-
-if __name__ == "__main__":
-    logger.info(f"Deleting Qdrant collection: {COLLECTION_NAME}")
-    result = QdrantClientWrapper().delete_collection(
-        collection_name=COLLECTION_NAME,
-    )
-    if result:
-        logger.info(f"Successfully deleted Qdrant collection: {COLLECTION_NAME}")
-    else:
-        logger.warning(f"Qdrant collection {COLLECTION_NAME} does not exist or could not be deleted.")
-    logger.info("Deletion task completed.")
diff --git a/template_langgraph/tasks/draw_chat_with_tools_agent_mermaid_png.py b/template_langgraph/tasks/draw_chat_with_tools_agent_mermaid_png.py
deleted file mode 100644
index 9590a7e..0000000
--- a/template_langgraph/tasks/draw_chat_with_tools_agent_mermaid_png.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import sys
-
-from template_langgraph.agents.chat_with_tools_agent.agent import ChatWithToolsAgent
-
-if __name__ == "__main__":
-    png_path = "data/chat_with_tools_agent.png"
-    if len(sys.argv) > 1:
-        png_path = sys.argv[1]
-
-    with open(png_path, "wb") as f:
-        f.write(ChatWithToolsAgent().draw_mermaid_png())
diff --git a/template_langgraph/tasks/run_chat_with_tools_agent.py b/template_langgraph/tasks/run_chat_with_tools_agent.py
deleted file mode 100644
index 0c39c28..0000000
--- a/template_langgraph/tasks/run_chat_with_tools_agent.py
+++ /dev/null
@@ -1,33 +0,0 @@
-import logging
-
-from template_langgraph.agents.chat_with_tools_agent.agent import AgentState
-from template_langgraph.agents.chat_with_tools_agent.agent import graph as chat_with_tools_agent_graph
-from template_langgraph.loggers import get_logger
-
-logger = get_logger(__name__)
-logger.setLevel(logging.INFO)
-
-
-def stream_graph_updates(
-    state: AgentState,
-) -> dict:
-    for event in chat_with_tools_agent_graph.stream(input=state):
-        logger.info("-" * 20)
-        logger.info(f"Event: {event}")
-    return event
-
-
-if __name__ == "__main__":
-    user_input = input("User: ")
-    state = AgentState(
-        messages=[
-            {
-                "role": "user",
-                "content": user_input,
-            }
-        ],
-        profile=None,
-    )
-    last_event = stream_graph_updates(state)
-    for value in last_event.values():
-        logger.info(f"Final state: {value['messages'][-1].content}")  # noqa: E501
diff --git a/template_langgraph/tasks/run_issue_formatter_agent.py b/template_langgraph/tasks/run_issue_formatter_agent.py
deleted file mode 100644
index 82f546e..0000000
--- a/template_langgraph/tasks/run_issue_formatter_agent.py
+++ /dev/null
@@ -1,32 +0,0 @@
-import logging
-
-from template_langgraph.agents.issue_formatter_agent.agent import AgentState, graph
-from template_langgraph.loggers import get_logger
-
-logger = get_logger(__name__)
-logger.setLevel(logging.INFO)
-
-
-def stream_graph_updates(
-    state: AgentState,
-) -> dict:
-    for event in graph.stream(input=state):
-        logger.info("-" * 20)
-        logger.info(f"Event: {event}")
-    return event
-
-
-if __name__ == "__main__":
-    user_input = input("User: ")
-    state = AgentState(
-        messages=[
-            {
-                "role": "user",
-                "content": user_input,
-            }
-        ],
-        profile=None,
-    )
-    last_event = stream_graph_updates(state)
-    for value in last_event.values():
-        logger.info(f"Formatted issue: {value['issue']}")
diff --git a/template_langgraph/tasks/run_kabuto_helpdesk_agent.py b/template_langgraph/tasks/run_kabuto_helpdesk_agent.py
deleted file mode 100644
index b9c7f92..0000000
--- a/template_langgraph/tasks/run_kabuto_helpdesk_agent.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import logging
-import sys
-
-from template_langgraph.agents.kabuto_helpdesk_agent import KabutoHelpdeskAgent
-from template_langgraph.loggers import get_logger
-
-logger = get_logger(__name__)
-logger.setLevel(logging.INFO)
-
-if __name__ == "__main__":
-    question = "「鬼灯」を実行すると、KABUTOが急に停止します。原因と対策を教えてください。"
-    if len(sys.argv) > 1:
-        # sys.argv[1] が最初の引数
-        question = sys.argv[1]
-
-    logger.info(f"質問: {question}")
-
-    agent = KabutoHelpdeskAgent(
-        tools=None,  # ツールはカスタムせず、デフォルトのツールを使用
-    )
-    response = agent.run(
-        question=question,
-    )
-    logger.info(f"Agent result: {response}")
-
-    # エージェントの応答を表示
-    logger.info(f"Answer: {response['messages'][-1].content}")
diff --git a/template_langgraph/tasks/search_documents_on_elasticsearch.py b/template_langgraph/tasks/search_documents_on_elasticsearch.py
deleted file mode 100644
index 392f166..0000000
--- a/template_langgraph/tasks/search_documents_on_elasticsearch.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import logging
-
-from template_langgraph.loggers import get_logger
-from template_langgraph.tools.elasticsearch_tool import ElasticsearchClientWrapper
-
-logger = get_logger(__name__)
-logger.setLevel(logging.INFO)
-COLLECTION_NAME = "docs_kabuto"
-
-if __name__ == "__main__":
-    query = "禅モード"
-    es = ElasticsearchClientWrapper()
-
-    results = es.search(
-        index_name=COLLECTION_NAME,
-        query=query,
-    )
-    logger.info(f"Found {len(results)} results for the question: {query}")
-    for i, result in enumerate(results, start=1):
-        logger.info(f"Result {i}:")
-        logger.info(f"File Name: {result.metadata['source']}")
-        logger.info(f"Content: {result.page_content}")
-        logger.info("-" * 40)
diff --git a/template_langgraph/tasks/search_documents_on_qdrant.py b/template_langgraph/tasks/search_documents_on_qdrant.py
deleted file mode 100644
index 3f190ee..0000000
--- a/template_langgraph/tasks/search_documents_on_qdrant.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import logging
-
-from template_langgraph.llms.azure_openais import AzureOpenAiWrapper
-from template_langgraph.loggers import get_logger
-from template_langgraph.tools.qdrants import QdrantClientWrapper
-
-logger = get_logger(__name__)
-logger.setLevel(logging.INFO)
-COLLECTION_NAME = "qa_kabuto"
-
-if __name__ == "__main__":
-    question = "「鬼灯」を実行すると、KABUTOが急に停止します。原因と対策を教えてください。"
-    qdrant_client = QdrantClientWrapper()
-
-    results = qdrant_client.query_points(
-        collection_name=COLLECTION_NAME,
-        query=AzureOpenAiWrapper().create_embedding(question),
-    )
-    logger.info(f"Found {len(results)} results for the question: {question}")
-    for result in results:
-        logger.info(f"File Name: {result.payload['file_name']}")
-        logger.info(f"Content: {result.payload['content']}")
-        logger.info("-" * 40)
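
Not part of the diff above: with the old `template_langgraph.tasks.*` entry points removed, one quick way to sanity-check the new Typer CLIs without a running Qdrant or Elasticsearch is Typer's `CliRunner`. The sketch below is an assumption about layout, not something this change ships: it expects to be run from the repository root, puts `scripts/` on `sys.path`, and only exercises `--help`.

```python
# Hypothetical smoke test for the new CLI scripts; not included in the diff.
# Assumes execution from the repository root so that scripts/ can be added to sys.path.
import sys

sys.path.insert(0, "scripts")

from typer.testing import CliRunner  # noqa: E402

from elasticsearch_operator import app as elasticsearch_app  # noqa: E402
from qdrant_operator import app as qdrant_app  # noqa: E402

runner = CliRunner()


def test_qdrant_operator_help():
    # --help should succeed without any external services running
    result = runner.invoke(qdrant_app, ["--help"])
    assert result.exit_code == 0
    assert "delete-collection" in result.output


def test_elasticsearch_operator_help():
    result = runner.invoke(elasticsearch_app, ["--help"])
    assert result.exit_code == 0
    assert "add-documents" in result.output
```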