Skip to content

Commit ee451f3

Browse files
authored
0.1.2 (#4)
* Separate endpoints for vector and graph-only options * Vector chain updated to create a vector index if none is already present in the database * Dependencies updated * LLM var moved from chains to a config file * Changelog added * README updated with notes on data requirements for vector support
1 parent 31f11db commit ee451f3

File tree

9 files changed

+666
-463
lines changed

9 files changed

+666
-463
lines changed

CHANGELOG.md

Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,42 @@
1+
# Changelog
2+
3+
All notable changes to this project will be documented in this file.
4+
5+
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
6+
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
7+
8+
## [0.1.2] - 2024-07-27
9+
10+
### Added
11+
12+
- Separate endpoints for vector and graph-only options
13+
14+
### Changed
15+
16+
- Vector chain updated to create a vector index if none is already present in the database
17+
- Mode option removed from POST payload; requests now only require the 'message' key-value
18+
- Dependencies updated
19+
20+
## [0.1.1] - 2024-06-05
21+
22+
### Added
23+
24+
- CORS middleware
25+
- Neo4j exception middleware
26+
27+
### Changed
28+
29+
- Replaced deprecated LLMChain implementation
30+
- Vector chain simplified to use RetrievalQA chain
31+
- Dependencies updated
32+
33+
## [0.1.0] - 2024-04-05
34+
35+
### Added
36+
37+
- Initial release.
38+
- Core functionality implemented, including:
39+
- FastAPI wrapper
40+
- Vector chain example
41+
- Graph chain example
42+
- Simple Agent example that aggregates results of the Vector and Graph retrievers

README.md

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,14 @@ This kit provides a simple [FastAPI](https://fastapi.tiangolo.com/) backend serv
1111
- An [OpenAI API Key](https://openai.com/blog/openai-api)
1212
- A running [local](https://neo4j.com/download/) or [cloud](https://neo4j.com/cloud/platform/aura-graph-database/) Neo4j database
1313

14+
## Assumptions
15+
16+
For the vector portion of this kit to work, it assumes the following about the source data:
17+
18+
- Nodes labeled 'Chunk' already exist in the database. This target label type can be changed in the app/vector_chain.py file (line 49)
19+
- Node records contain a 'text' property with the unstructured data of interest. This can be changed in the app/vector_chain.py file (line 52)
20+
- Node records contain a 'sources' property. This is used by LangChain's [RetrievalQAWithSourcesChain](https://api.python.langchain.com/en/latest/chains/langchain.chains.qa_with_sources.retrieval.RetrievalQAWithSourcesChain.html)
21+
1422
## Usage
1523

1624
Add a .env file to the root folder with the following keys and your own credentials (or these included public access only creds):
@@ -23,7 +31,7 @@ NEO4J_PASSWORD=read_only
2331
OPENAI_API_KEY=<your_openai_key_here>
2432
```
2533

26-
Then run: `poetry run uvicorn app.server:app --reload --port=8000 `
34+
Then run: `poetry run uvicorn app.server:app --reload --port=8000`
2735

2836
Or add env variables at runtime:
2937

app/config.py

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
import os
2+
3+
# Neo4j Credentials
4+
NEO4J_URI = os.getenv("NEO4J_URI")
5+
NEO4J_DATABASE = os.getenv("NEO4J_DATABASE")
6+
NEO4J_USERNAME = os.getenv("NEO4J_USERNAME")
7+
NEO4J_PASSWORD = os.getenv("NEO4J_PASSWORD")
8+
9+
# ==================
10+
# Change models here
11+
# ==================
12+
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
13+
14+
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
15+
LLM = ChatOpenAI(temperature=0, openai_api_key=OPENAI_API_KEY)
16+
EMBEDDINGS = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
17+
# ==================

app/graph_chain.py

Lines changed: 2 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
from langchain_community.graphs import Neo4jGraph
33
from langchain.prompts.prompt import PromptTemplate
44
from langchain.schema.runnable import Runnable
5-
from langchain_openai import ChatOpenAI
5+
from app.config import LLM, NEO4J_DATABASE, NEO4J_PASSWORD, NEO4J_URI, NEO4J_USERNAME
66
import os
77

88
CYPHER_GENERATION_TEMPLATE = """Task:Generate Cypher statement to query a graph database.
@@ -44,14 +44,6 @@
4444

4545
def graph_chain() -> Runnable:
4646

47-
NEO4J_URI = os.getenv("NEO4J_URI")
48-
NEO4J_DATABASE = os.getenv("NEO4J_DATABASE")
49-
NEO4J_USERNAME = os.getenv("NEO4J_USERNAME")
50-
NEO4J_PASSWORD = os.getenv("NEO4J_PASSWORD")
51-
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
52-
53-
LLM = ChatOpenAI(temperature=0, openai_api_key=OPENAI_API_KEY)
54-
5547
graph = Neo4jGraph(
5648
url=NEO4J_URI,
5749
username=NEO4J_USERNAME,
@@ -60,8 +52,6 @@ def graph_chain() -> Runnable:
6052
sanitize=True,
6153
)
6254

63-
graph.refresh_schema()
64-
6555
# Official API doc for GraphCypherQAChain at: https://api.python.langchain.com/en/latest/chains/langchain.chains.graph_qa.base.GraphQAChain.html#
6656
graph_chain = GraphCypherQAChain.from_llm(
6757
cypher_llm=LLM,
@@ -70,7 +60,7 @@ def graph_chain() -> Runnable:
7060
graph=graph,
7161
verbose=True,
7262
return_intermediate_steps=True,
73-
# return_direct = True,
63+
return_direct=True,
7464
)
7565

7666
return graph_chain

app/server.py

Lines changed: 69 additions & 84 deletions
Original file line numberDiff line numberDiff line change
@@ -1,113 +1,98 @@
11
from __future__ import annotations
2+
from typing import Union
23
from app.graph_chain import graph_chain, CYPHER_GENERATION_PROMPT
34
from app.vector_chain import vector_chain, VECTOR_PROMPT
45
from app.simple_agent import simple_agent_chain
5-
from fastapi import FastAPI, Request, Response
6-
from fastapi.middleware.cors import CORSMiddleware
7-
from starlette.middleware.base import BaseHTTPMiddleware
6+
from fastapi import FastAPI
7+
from typing import Union, Optional
88
from pydantic import BaseModel, Field
9-
from neo4j import exceptions
10-
import logging
119

1210

1311
class ApiChatPostRequest(BaseModel):
1412
message: str = Field(..., description="The chat message to send")
15-
mode: str = Field(
16-
"agent",
17-
description='The mode of the chat message. Current options are: "vector", "graph", "agent". Default is "agent"',
18-
)
1913

2014

2115
class ApiChatPostResponse(BaseModel):
22-
response: str
23-
24-
25-
class Neo4jExceptionMiddleware(BaseHTTPMiddleware):
26-
async def dispatch(self, request: Request, call_next):
27-
try:
28-
response = await call_next(request)
29-
return response
30-
except exceptions.AuthError as e:
31-
msg = f"Neo4j Authentication Error: {e}"
32-
logging.warning(msg)
33-
return Response(content=msg, status_code=400, media_type="text/plain")
34-
except exceptions.ServiceUnavailable as e:
35-
msg = f"Neo4j Database Unavailable Error: {e}"
36-
logging.warning(msg)
37-
return Response(content=msg, status_code=400, media_type="text/plain")
38-
except Exception as e:
39-
msg = f"Neo4j Uncaught Exception: {e}"
40-
logging.error(msg)
41-
return Response(content=msg, status_code=400, media_type="text/plain")
42-
43-
44-
# Allowed CORS origins
45-
origins = [
46-
"http://127.0.0.1:8000", # Alternative localhost address
47-
"http://localhost:8000",
48-
]
16+
message: Optional[str] = Field(None, description="The chat message response")
17+
4918

5019
app = FastAPI()
5120

52-
# Add CORS middleware to allow cross-origin requests
53-
app.add_middleware(
54-
CORSMiddleware,
55-
allow_origins=origins,
56-
allow_credentials=True,
57-
allow_methods=["*"],
58-
allow_headers=["*"],
21+
22+
@app.post(
23+
"/api/chat",
24+
response_model=None,
25+
responses={"201": {"model": ApiChatPostResponse}},
26+
tags=["chat"],
27+
description="Endpoint utilizing a simple agent to composite responses from the Vector and Graph chains interfacing with a Neo4j instance.",
5928
)
60-
# Add Neo4j exception handling middleware
61-
app.add_middleware(Neo4jExceptionMiddleware)
29+
def send_chat_message(body: ApiChatPostRequest) -> Union[None, ApiChatPostResponse]:
30+
"""
31+
Send a chat message
32+
"""
33+
34+
question = body.message
35+
36+
v_response = vector_chain().invoke(
37+
{"question": question}, prompt=VECTOR_PROMPT, return_only_outputs=True
38+
)
39+
g_response = graph_chain().invoke(
40+
{"query": question}, prompt=CYPHER_GENERATION_PROMPT, return_only_outputs=True
41+
)
42+
43+
# Return an answer from a chain that composites both the Vector and Graph responses
44+
response = simple_agent_chain().invoke(
45+
{
46+
"question": question,
47+
"vector_result": v_response,
48+
"graph_result": g_response,
49+
}
50+
)
51+
52+
return f"{response}", 200
6253

6354

6455
@app.post(
65-
"/api/chat",
56+
"/api/chat/vector",
57+
response_model=None,
58+
responses={"201": {"model": ApiChatPostResponse}},
59+
tags=["chat"],
60+
description="Endpoint for utilizing only vector index for querying Neo4j instance.",
61+
)
62+
def send_chat_vector_message(
63+
body: ApiChatPostRequest,
64+
) -> Union[None, ApiChatPostResponse]:
65+
"""
66+
Send a chat message
67+
"""
68+
69+
question = body.message
70+
71+
response = vector_chain().invoke(
72+
{"question": question}, prompt=VECTOR_PROMPT, return_only_outputs=True
73+
)
74+
75+
return f"{response}", 200
76+
77+
78+
@app.post(
79+
"/api/chat/graph",
6680
response_model=None,
6781
responses={"201": {"model": ApiChatPostResponse}},
6882
tags=["chat"],
83+
description="Endpoint using only Text2Cypher for querying with Neo4j instance.",
6984
)
70-
async def send_chat_message(body: ApiChatPostRequest):
85+
def send_chat_graph_message(
86+
body: ApiChatPostRequest,
87+
) -> Union[None, ApiChatPostResponse]:
7188
"""
7289
Send a chat message
7390
"""
7491

7592
question = body.message
7693

77-
# Simple exception check. See https://neo4j.com/docs/api/python-driver/current/api.html#errors for full set of driver exceptions
78-
79-
if body.mode == "vector":
80-
# Return only the Vector answer
81-
v_response = vector_chain().invoke(
82-
{"query": question}, prompt=VECTOR_PROMPT, return_only_outputs=True
83-
)
84-
response = v_response
85-
elif body.mode == "graph":
86-
# Return only the Graph (text2Cypher) answer
87-
g_response = graph_chain().invoke(
88-
{"query": question},
89-
prompt=CYPHER_GENERATION_PROMPT,
90-
return_only_outputs=True,
91-
)
92-
response = g_response["result"]
93-
else:
94-
# Return both vector + graph answers
95-
v_response = vector_chain().invoke(
96-
{"query": question}, prompt=VECTOR_PROMPT, return_only_outputs=True
97-
)
98-
g_response = graph_chain().invoke(
99-
{"query": question},
100-
prompt=CYPHER_GENERATION_PROMPT,
101-
return_only_outputs=True,
102-
)["result"]
103-
104-
# Synthesize a composite of both the Vector and Graph responses
105-
response = simple_agent_chain().invoke(
106-
{
107-
"question": question,
108-
"vector_result": v_response,
109-
"graph_result": g_response,
110-
}
111-
)
112-
113-
return response, 200
94+
response = graph_chain().invoke(
95+
{"query": question}, prompt=CYPHER_GENERATION_PROMPT, return_only_outputs=True
96+
)
97+
98+
return f"{response}", 200

app/simple_agent.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,6 @@
22
from langchain.prompts import PromptTemplate
33
from langchain.schema.runnable import Runnable
44
from langchain_openai import ChatOpenAI
5-
from langchain.chains import ConversationChain
65
from langchain_core.prompts import PromptTemplate
76
import os
87

0 commit comments

Comments
 (0)