-
Notifications
You must be signed in to change notification settings - Fork 7
Expand file tree
/
Copy pathchat.py
More file actions
151 lines (127 loc) · 4.71 KB
/
chat.py
File metadata and controls
151 lines (127 loc) · 4.71 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
"""
Local SAAB Knowledge chatbot — DeepSeek via Ollama + ChromaDB RAG.
Runs 100% on your machine, no internet needed after setup.
"""
import os
import json
import requests
import chromadb
from sentence_transformers import SentenceTransformer
# Directory containing this script; the vector DB lives alongside it.
DOCS_DIR = os.path.dirname(os.path.abspath(__file__))
# On-disk ChromaDB persistence directory (created by ingest.py).
DB_DIR = os.path.join(DOCS_DIR, ".vectordb")
# Name of the ChromaDB collection holding the knowledge chunks.
COLLECTION = "saab_knowledge"
# Ollama server endpoint; override with the OLLAMA_URL environment variable.
OLLAMA_URL = os.environ.get("OLLAMA_URL", "http://localhost:11434")
MODEL = "deepseek-r1:8b" # change to deepseek-r1:14b or deepseek-r1:32b for better quality
TOP_K = 5 # number of context chunks to retrieve
# System message sent with every chat request; constrains the model to the
# retrieved context. Runtime string — do not reformat.
SYSTEM_PROMPT = """You are a SAAB vehicle expert assistant. You help owners maintain, repair, and upgrade their SAAB cars.
You answer questions using ONLY the provided context from the SAAB knowledge base.
If the context doesn't contain enough information to answer, say so honestly.
Always mention the specific SAAB model (96, C900, NG9-3, NG9-5) when relevant.
Include links from the knowledge base when available."""
def check_ollama() -> bool:
    """Verify Ollama is reachable and ensure MODEL is installed.

    Queries the local Ollama server's /api/tags endpoint; if MODEL is not
    among the installed models, streams a /api/pull request to download it,
    echoing progress status lines as they arrive.

    Returns:
        True if Ollama is running and the model is (now) available;
        False if the server is unreachable or any request fails.
    """
    try:
        r = requests.get(f"{OLLAMA_URL}/api/tags", timeout=5)
        r.raise_for_status()
        models = [m["name"] for m in r.json().get("models", [])]
        # Substring match so MODEL "deepseek-r1:8b" also matches variant
        # tags that embed the same name.
        if not any(MODEL in m for m in models):
            print(f"Model '{MODEL}' not found. Pulling it now (this may take a while)...")
            pull = requests.post(
                f"{OLLAMA_URL}/api/pull",
                json={"name": MODEL},
                stream=True,
                timeout=600,
            )
            # Fail fast (into the handler below) if the pull was rejected,
            # instead of silently streaming an error body.
            pull.raise_for_status()
            for line in pull.iter_lines():
                if line:
                    status = json.loads(line).get("status", "")
                    if status:
                        print(f" {status}", end="\r")
            print()
        return True
    except requests.ConnectionError:
        print("ERROR: Ollama is not running.")
        print("Install it from https://ollama.com and run: ollama serve")
        return False
    except requests.RequestException as exc:
        # HTTP error status, timeout, or other transport failure:
        # report it rather than crashing with a traceback.
        print(f"ERROR: Ollama request failed: {exc}")
        return False
def query_ollama(prompt: str, context: str) -> str:
    """Stream a context-grounded answer from the local DeepSeek model.

    Wraps the user's question with the retrieved knowledge-base context,
    posts it to Ollama's /api/chat endpoint with streaming enabled, and
    prints each token as it arrives.

    Returns:
        The complete response text, concatenated from the streamed tokens.
    """
    full_prompt = f"""Context from SAAB knowledge base:
---
{context}
---
User question: {prompt}
Provide a helpful answer based on the context above."""
    resp = requests.post(
        f"{OLLAMA_URL}/api/chat",
        json={
            "model": MODEL,
            "messages": [
                {"role": "system", "content": SYSTEM_PROMPT},
                {"role": "user", "content": full_prompt},
            ],
            "stream": True,
        },
        stream=True,
        timeout=300,
    )
    resp.raise_for_status()

    # Collect tokens in a list and join once at the end.
    pieces = []
    for raw_line in resp.iter_lines():
        if not raw_line:
            continue
        chunk = json.loads(raw_line)
        token = chunk.get("message", {}).get("content", "")
        print(token, end="", flush=True)
        pieces.append(token)
        if chunk.get("done"):
            break
    print() # newline after streaming
    return "".join(pieces)
def main():
    """Interactive REPL: embed each question, retrieve TOP_K chunks from
    the vector store, and stream an answer from the local model."""
    if not check_ollama():
        return

    print("Loading embedding model...")
    embedder = SentenceTransformer("all-MiniLM-L6-v2")

    print("Connecting to vector store...")
    client = chromadb.PersistentClient(path=DB_DIR)
    try:
        collection = client.get_collection(COLLECTION)
    except ValueError:
        print("ERROR: Vector store not found. Run 'python ingest.py' first.")
        return

    print(f"Loaded {collection.count()} knowledge chunks.\n")
    banner = "=" * 60
    print(banner)
    print(" SAAB Knowledge Assistant (DeepSeek local)")
    print(" Type your question, or 'quit' to exit.")
    print(banner)

    while True:
        try:
            question = input("\nYou: ").strip()
        except (EOFError, KeyboardInterrupt):
            print("\nGoodbye!")
            break
        if not question:
            continue
        if question.lower() in ("quit", "exit", "q"):
            print("Goodbye!")
            break

        # Embed the question and fetch the nearest chunks.
        hits = collection.query(
            query_embeddings=embedder.encode([question]).tolist(),
            n_results=TOP_K,
        )

        # Assemble the prompt context, tracking which files contributed.
        sources = set()
        context_parts = []
        for doc, meta in zip(hits["documents"][0], hits["metadatas"][0]):
            src = meta.get("source", "unknown")
            section = meta.get("section", "")
            sources.add(src)
            context_parts.append(f"[{src} — {section}]\n{doc}")

        print(f"\n(Sources: {', '.join(sources)})")
        print("\nAssistant: ", end="")
        query_ollama(question, "\n\n".join(context_parts))
# Script entry point: launch the REPL only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()