Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion PathRAG/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@

@dataclass
class QueryParam:
mode: Literal["hybrid"] = "global"
mode: Literal["hybrid"] = "hybrid"
only_need_context: bool = False
only_need_prompt: bool = False
response_type: str = "Multiple Paragraphs"
Expand Down
6 changes: 5 additions & 1 deletion PathRAG/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,11 @@
os.environ["TOKENIZERS_PARALLELISM"] = "false"


class BedrockError(Exception):
    """Raised when a call to the AWS Bedrock API fails.

    Wraps provider-side failures so callers can catch a single,
    library-specific exception type instead of raw client errors.
    """


@retry(
stop=stop_after_attempt(3),
wait=wait_exponential(multiplier=1, min=4, max=10),
Expand All @@ -62,7 +67,6 @@ async def openai_complete_if_cache(
) -> str:
if api_key:
os.environ["OPENAI_API_KEY"] = api_key
time.sleep(2)
openai_async_client = (
AsyncOpenAI() if base_url is None else AsyncOpenAI(base_url=base_url)
)
Expand Down
13 changes: 6 additions & 7 deletions PathRAG/operate.py
Original file line number Diff line number Diff line change
Expand Up @@ -255,7 +255,6 @@ async def extract_entities(
relationships_vdb: BaseVectorStorage,
global_config: dict,
) -> Union[BaseGraphStorage, None]:
time.sleep(20)
use_llm_func: callable = global_config["llm_model_func"]
entity_extract_max_gleaning = global_config["entity_extract_max_gleaning"]

Expand Down Expand Up @@ -1179,13 +1178,13 @@ async def _find_most_related_edges_from_entities3(
edge1 = await knowledge_graph_inst.get_edge(path[1],path[2]) or await knowledge_graph_inst.get_edge(path[2], path[1])
edge2 = await knowledge_graph_inst.get_edge(path[2],path[3]) or await knowledge_graph_inst.get_edge(path[3], path[2])
if edge0==None or edge1==None or edge2==None:
print(path,"边丢失")
print(path, "edge missing")
if edge0==None:
print("edge0丢失")
print("edge0 missing")
if edge1==None:
print("edge1丢失")
print("edge1 missing")
if edge2==None:
print("edge2丢失")
print("edge2 missing")
continue
e1 = "through edge ("+edge0["keywords"]+") to connect to "+s_name+" and "+b1_name+"."
e2 = "through edge ("+edge1["keywords"]+") to connect to "+b1_name+" and "+b2_name+"."
Expand All @@ -1204,7 +1203,7 @@ async def _find_most_related_edges_from_entities3(
edge0 = await knowledge_graph_inst.get_edge(path[0], path[1]) or await knowledge_graph_inst.get_edge(path[1], path[0])
edge1 = await knowledge_graph_inst.get_edge(path[1],path[2]) or await knowledge_graph_inst.get_edge(path[2], path[1])
if edge0==None or edge1==None:
print(path,"边丢失")
print(path, "edge missing")
continue
e1 = "through edge("+edge0["keywords"]+") to connect to "+s_name+" and "+b_name+"."
e2 = "through edge("+edge1["keywords"]+") to connect to "+b_name+" and "+t_name+"."
Expand All @@ -1219,7 +1218,7 @@ async def _find_most_related_edges_from_entities3(
s_name,t_name = path[0],path[1]
edge0 = await knowledge_graph_inst.get_edge(path[0], path[1]) or await knowledge_graph_inst.get_edge(path[1], path[0])
if edge0==None:
print(path,"边丢失")
print(path, "edge missing")
continue
e = "through edge("+edge0["keywords"]+") to connect to "+s_name+" and "+t_name+"."
s = await knowledge_graph_inst.get_node(s_name)
Expand Down
28 changes: 28 additions & 0 deletions setup.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
"""Packaging configuration for the PathRAG distribution."""
from setuptools import find_packages, setup

# Third-party packages required at runtime (alphabetical).
RUNTIME_DEPENDENCIES = [
    "accelerate",
    "aioboto3",
    "aiohttp",
    "graspologic",
    "hnswlib",
    "nano-vectordb",
    "networkx",
    "numpy",
    "ollama",
    "openai",
    "pydantic",
    "tenacity",
    "tiktoken",
    "torch",
    "tqdm",
    "transformers",
]

setup(
    name="PathRAG",
    version="0.1.0",
    description=(
        "PathRAG: Pruning Graph-based Retrieval Augmented Generation "
        "with Relational Paths"
    ),
    author="PathRAG Authors",
    # Discover all importable packages under the project root.
    packages=find_packages(),
    python_requires=">=3.9",
    install_requires=RUNTIME_DEPENDENCIES,
)
8 changes: 4 additions & 4 deletions v1_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,9 @@
from PathRAG import PathRAG, QueryParam
from PathRAG.llm import gpt_4o_mini_complete

WORKING_DIR = ""
WORKING_DIR = "./pathrag_cache" # Set your working directory

api_key=""
api_key="your-openai-api-key-here" # Set your OpenAI API key
os.environ["OPENAI_API_KEY"] = api_key
base_url="https://api.openai.com/v1"
os.environ["OPENAI_API_BASE"]=base_url
Expand All @@ -18,8 +18,8 @@
llm_model_func=gpt_4o_mini_complete,
)

data_file=""
question=""
data_file="./text.txt" # Path to your input document
question="What is the main topic of this document?" # Your query
with open(data_file) as f:
rag.insert(f.read())

Expand Down