-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathingest.py
More file actions
62 lines (47 loc) · 2.03 KB
/
ingest.py
File metadata and controls
62 lines (47 loc) · 2.03 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
import os
from langchain_community.document_loaders import PyPDFLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma
from dotenv import load_dotenv

# 1. Load environment variables (e.g. OPENAI_API_KEY) from a local .env file.
load_dotenv()

# Paths used by the ingestion pipeline.
DOCS_DIR = "./docs"
DB_DIR = "./chroma_db"  # local folder where the Chroma vector database is persisted
def ingest_docs():
    """Build the local RAG knowledge base from PDFs.

    Pipeline: scan DOCS_DIR for PDF files, load every page, split the pages
    into overlapping character chunks, embed them with OpenAI, and persist
    the resulting vectors to a local Chroma database at DB_DIR.

    Returns:
        None. Side effects: writes the Chroma store under DB_DIR and prints
        progress messages to stdout.
    """
    # 2. Load PDFs.
    documents = []
    print(f"📂 正在扫描 {DOCS_DIR} 目录...")
    if not os.path.isdir(DOCS_DIR):
        # Guard: os.listdir would raise FileNotFoundError on a missing folder;
        # treat it the same as "no PDFs found" and bail out gracefully.
        print("❌ 错误: docs 文件夹里没有找到 PDF 文件!")
        return
    # sorted() gives a deterministic ingest order across platforms;
    # lower() also accepts .PDF / .Pdf extensions.
    for file in sorted(os.listdir(DOCS_DIR)):
        if file.lower().endswith(".pdf"):
            file_path = os.path.join(DOCS_DIR, file)
            print(f" 📄 发现文件: {file},正在读取...")
            loader = PyPDFLoader(file_path)
            documents.extend(loader.load())
    if not documents:
        print("❌ 错误: docs 文件夹里没有找到 PDF 文件!")
        return
    print(f"✅ 成功读取 {len(documents)} 页原始文档。")

    # 3. Chunking — the core data-preparation step.
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,  # maximum characters per chunk
        chunk_overlap=200,  # overlap between chunks (avoids cutting semantics mid-thought)
        separators=["\n\n", "\n", " ", ""],  # prefer paragraph, then line, then word boundaries
    )
    chunks = text_splitter.split_documents(documents)
    print(f"✂️ 文档已切分为 {len(chunks)} 个碎片 (Chunks)。")

    # 4. Embedding & storage.
    print("🧠 正在将文本转化为向量并存入数据库 (这一步可能需要几秒钟)...")
    embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
    # Build and persist the vector store; persist_directory makes Chroma
    # write to disk automatically, so the return value is not needed here.
    Chroma.from_documents(
        documents=chunks, embedding=embeddings, persist_directory=DB_DIR
    )
    print("-" * 30)
    print("🎉 成功!知识库已构建完成。")
    print(f"💾 数据已保存在: {DB_DIR}")
    print("-" * 30)
# Script entry point: build the knowledge base when run directly.
if __name__ == "__main__":
    ingest_docs()