# Ai agent: a three-stage research pipeline (web search, answer drafting,
# quality control) built with LangChain and LangGraph.
import os
from typing import List, TypedDict

# NOTE: these import paths follow the pre-0.1 LangChain layout; newer releases
# move most of them into langchain_community and langchain_openai.
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.tools.tavily_search import TavilySearchResults
from langchain.vectorstores import FAISS
from langgraph.graph import StateGraph, END
# Setting up API keys (replace with actual keys)
os.environ["TAVILY_API_KEY"] = "your_tavily_api_key"
os.environ["OPENAI_API_KEY"] = "your_openai_api_key"
# Initialize the chat LLM (GPT-4); a single chat model serves all three agents.
chat_llm = ChatOpenAI(model_name="gpt-4")
# Web search tool using Tavily
tavily_tool = TavilySearchResults()
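# TavilySearchResults reads TAVILY_API_KEY from the environment and returns a
# list of result dicts (each with "content" and "url" fields); the standalone
# tavily-python package must be installed for it to work.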
# Vector database for storing research data. FAISS cannot be built from an
# empty document list (it needs at least one embedding to size the index),
# so the store is created lazily on the first ingest in research_agent.
embeddings = OpenAIEmbeddings()
vector_db = None
# Split retrieved pages into overlapping 1,000-character chunks for embedding.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
# Research Agent: searches the web and stores the results in the vector store.
def research_agent(state):
    global vector_db
    query = state["query"]
    print(f"[Research Agent] Searching for: {query}")
    # Tavily returns a list of result dicts; keep the page content of each.
    search_results = tavily_tool.run(query)
    docs = [Document(page_content=res["content"]) for res in search_results]
    chunks = text_splitter.split_documents(docs)
    if vector_db is None:
        vector_db = FAISS.from_documents(chunks, embeddings)
    else:
        vector_db.add_documents(chunks)
    return {"research_data": chunks}
# Answer Drafting Agent: retrieves relevant chunks and drafts an answer.
def answer_drafting_agent(state):
    query = state["query"]
    print(f"[Answer Drafting Agent] Retrieving relevant data for: {query}")
    docs = vector_db.similarity_search(query, k=5)
    content = "\n".join(doc.page_content for doc in docs)
    prompt = f"""Using the information below, answer the query:

{content}

Query: {query}
Answer: """
    response = chat_llm.predict(prompt)
    return {"answer": response}
# Quality Control Agent: refines the final answer for clarity (optional).
def quality_control_agent(state):
    answer = state["answer"]
    prompt = f"Refine this response for clarity:\n\n{answer}"
    refined_answer = chat_llm.predict(prompt)
    return {"answer": refined_answer}
# Shared state passed between nodes; LangGraph requires an explicit schema.
class AgentState(TypedDict, total=False):
    query: str
    research_data: List[Document]
    answer: str

# Defining the workflow as a linear pipeline using LangGraph
graph = StateGraph(AgentState)
graph.add_node("research", research_agent)
graph.add_node("draft_answer", answer_drafting_agent)
graph.add_node("quality_control", quality_control_agent)
graph.set_entry_point("research")
graph.add_edge("research", "draft_answer")
graph.add_edge("draft_answer", "quality_control")
graph.add_edge("quality_control", END)
workflow = graph.compile()
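# compile() returns a runnable app: invoke() executes the nodes in order and
# returns the final state, while stream() yields per-node updates.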
# Running the system with a test query
query = "Latest advancements in quantum computing"
result = workflow.invoke({"query": query})
print("\nFinal Answer:")
print(result["answer"])
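
# Optional: stream intermediate results node by node instead of waiting for
# the final state (uses the same compiled workflow; uncomment to try it).
# for step in workflow.stream({"query": query}):
#     print(step)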