
Commit 0c6bc59

Author: RobuRishabh
Commit message: UI_Langchain_SemanticSearch_added
1 parent fb69f1b commit 0c6bc59

File tree

4 files changed: +320 -125 lines changed

app.py

Lines changed: 75 additions & 0 deletions
@@ -0,0 +1,75 @@
+"""
+UI using Streamlit
+"""
+import streamlit as st
+from query_handler import process_query
+from ollama_api import check_ollama_availability, generate_response
+
+# ✅ Set page config
+st.set_page_config(
+    page_title="Flight Information Assistant",
+    page_icon="✈️"
+)
+
+# ✅ Initialize session state for chat history
+if "messages" not in st.session_state:
+    st.session_state.messages = []
+
+# ✅ Check Ollama server availability
+ollama_available, ollama_message = check_ollama_availability()
+
+def display_chat_message(role, content):
+    """
+    Display messages in the chat interface.
+
+    Args:
+        role (str): Role of the message sender (user or assistant).
+        content (str): Message content.
+    """
+    with st.chat_message(role):
+        st.markdown(content)
+
+# ✅ Main UI
+st.title("✈️ Flight Information Assistant")
+
+# ✅ Display Ollama server availability message
+if not ollama_available:
+    st.warning(f"⚠️ {ollama_message}\nUsing simplified responses until Ollama becomes available.")
+
+# ✅ Instructions for the user
+st.markdown("""
+**Ask me about flights!** Try questions like:
+- What flights are available from New York to London?
+- Show me flight NY100.
+- Are there any flights from Chicago?
+""")
+
+# ✅ Display chat history
+for message in st.session_state.messages:
+    display_chat_message(message["role"], message["content"])
+
+# ✅ Chat input handling
+if prompt := st.chat_input("Ask about flights..."):
+    # ✅ Store and display user message
+    st.session_state.messages.append({"role": "user", "content": prompt})
+    display_chat_message("user", prompt)
+
+    try:
+        # ✅ Process user query
+        success, message, flights = process_query(prompt)
+
+        if not success:
+            st.error(message)  # Show error message in UI
+            response = "⚠️ I couldn't process your request. Try rephrasing your question."
+        else:
+            # ✅ Generate assistant response
+            response = generate_response(prompt, flights)
+
+        # ✅ Store and display assistant response
+        st.session_state.messages.append({"role": "assistant", "content": response})
+        display_chat_message("assistant", response)
+
+    except Exception as e:
+        error_message = f"❌ An error occurred: {str(e)}"
+        st.session_state.messages.append({"role": "assistant", "content": error_message})
+        display_chat_message("assistant", error_message)
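Note: app.py imports process_query from query_handler, which is not shown in this excerpt; the call site implies it returns a (success, message, flights) triple. A minimal hypothetical stub matching that contract, handy for running the UI without the real handler (all names and behavior below are illustrative, not part of the commit):

# query_handler_stub.py: hypothetical stand-in for the real query_handler module.
# Mirrors the (success, message, flights) contract that app.py's call site expects.
from typing import List, Tuple

def process_query(prompt: str) -> Tuple[bool, str, List[dict]]:
    """Return (success, message, flights); this stub never finds flights."""
    if not prompt.strip():
        return False, "Empty query", []
    return True, "ok", []  # a real handler would parse the prompt and search the flight data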

mock_database.py

Lines changed: 83 additions & 18 deletions
@@ -1,3 +1,7 @@
+import ollama
+import numpy as np
+from sklearn.metrics.pairwise import cosine_similarity
+
 flights = [
     {
         "flight_number": "NY100",
@@ -36,21 +40,82 @@
     }
 ]
 
-def search_flights(query_params):
-    """
-    Search for flights based on query parameters
-    """
-    results = []
-    for flight in flights:
-        matches = True
-        for key, value in query_params.items():
-            if key not in flight:
-                matches = False
-                break
-            # Handle the exact matches
-            if value.lower() != flight[key].lower():
-                matches = False
-                break
-        if matches:
-            results.append(flight)
-    return results
+### ✅ Check if Ollama is Running Before Sending API Calls
+def check_ollama_availability():
+    """
+    Check if Ollama server is available.
+    Returns: True if available, False otherwise.
+    """
+    import requests
+    try:
+        response = requests.get("http://localhost:11434/api/tags", timeout=3)
+        return response.status_code == 200
+    except requests.exceptions.RequestException:
+        return False
+
+OLLAMA_AVAILABLE = check_ollama_availability()
+
+### ✅ Lazy Embedding Generation with Error Handling
+flight_embeddings = {}
+
+def generate_embedding(text):
+    """
+    Generate text embeddings using Ollama's LLaMA2 API.
+    Only calls Ollama if the server is running.
+    """
+    if not OLLAMA_AVAILABLE:
+        print("⚠️ Ollama server is not available. Using fallback search.")
+        return None
+
+    try:
+        response = ollama.embeddings(model="llama2:latest", prompt=text)
+        return np.array(response["embedding"])
+    except Exception as e:
+        print(f"⚠️ Ollama embedding failed: {e}")
+        return None
+
+def get_flight_embedding(flight_number):
+    """
+    Retrieve or generate an embedding for a flight.
+    """
+    if flight_number in flight_embeddings:
+        return flight_embeddings[flight_number]
+
+    # Get flight details
+    flight = next((f for f in flights if f["flight_number"] == flight_number), None)
+    if not flight:
+        return None
+
+    text = f"{flight['origin']} {flight['destination']} {flight['airline']} {flight['time']}"
+    embedding = generate_embedding(text)
+    if embedding is not None:
+        flight_embeddings[flight_number] = embedding
+    return embedding
+
+def search_flights_semantic(query):
+    """
+    Perform semantic search on flight records using cosine similarity.
+    """
+    print(f"🟢 Searching flights for: {query}")  # Debug print
+
+    try:
+        query_embedding = generate_embedding(query)
+
+        similarities = {}
+        for flight in flights:
+            flight_num = flight["flight_number"]
+            flight_embedding = get_flight_embedding(flight_num)
+
+            if flight_embedding is not None:
+                similarity_score = cosine_similarity([query_embedding], [flight_embedding])[0][0]
+                similarities[flight_num] = similarity_score
+
+        matching_flights = [flight for flight in flights if similarities.get(flight["flight_number"], 0) > 0.6]
+
+        print(f"🟢 Flights Found: {matching_flights}")  # Debug print
+
+        return matching_flights
+
+    except Exception as e:
+        print(f"❌ Error in search_flights_semantic: {str(e)}")  # Debug print
+        return []
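A quick way to exercise the new semantic search from a Python shell (a sketch, assuming a local Ollama server with the llama2 model pulled; results depend on the embeddings, and the 0.6 cosine-similarity threshold in the diff decides what is returned):

# Hypothetical smoke test for search_flights_semantic.
from mock_database import search_flights_semantic, OLLAMA_AVAILABLE

if OLLAMA_AVAILABLE:
    # Query and flight embeddings both come from ollama.embeddings(model="llama2:latest").
    for flight in search_flights_semantic("flights from New York to London"):
        print(flight["flight_number"], flight["origin"], "->", flight["destination"])
else:
    # With Ollama down, generate_embedding returns None, so the search comes back empty.
    print("Ollama unavailable; semantic search returns []")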

ollama_api.py

Lines changed: 64 additions & 55 deletions
@@ -1,95 +1,104 @@
-"""
-Integration with the Ollama API for natural language generation.
-"""
 import os
 import json
 import requests
-from dotenv import load_dotenv
+from langchain_ollama import OllamaLLM
 from typing import Tuple, List
+from dotenv import load_dotenv
 
+# ✅ Load environment variables
 load_dotenv()
 
+print(f"🟢 Loaded OLLAMA_MODEL from .env: {os.getenv('OLLAMA_MODEL')}")  # Debugging check
+
+
+# ✅ Use environment variables dynamically
 OLLAMA_URL = os.getenv("OLLAMA_URL")
-if not OLLAMA_URL:
-    raise ValueError("OLLAMA_URL environment variable is not set.")
-MODEL = os.getenv("OLLAMA_MODEL", "llama2")
+OLLAMA_MODEL = os.getenv("OLLAMA_MODEL")
+if not OLLAMA_MODEL:
+    print("❌ OLLAMA_MODEL is missing! Check your .env file.")
+    exit(1)
+
+# ✅ Initialize Ollama LLM
+ollama_llm = OllamaLLM(model=OLLAMA_MODEL)
 
 def check_ollama_availability() -> Tuple[bool, str]:
     """
-    Check if the Ollama server is available by querying its health endpoint.
-    Returns: Tuple[bool, str]: (success, message)
+    Check if the locally running Ollama server is available.
+
+    Returns:
+        Tuple[bool, str]: (is_available, message)
     """
     try:
-        response = requests.get(f"{OLLAMA_URL}/api/tags")
+        response = requests.get(f"{OLLAMA_URL}/api/tags", timeout=3)  # ✅ Use .env URL and set timeout
         if response.status_code == 200:
-            return True, "Ollama server is available."
-        return False, "Ollama server is not available. status code: {response.status_code}"
+            return True, "Ollama server is available"
+        return False, f"Ollama server returned status code: {response.status_code}"
+    except requests.exceptions.Timeout:
+        return False, "⚠️ Ollama server timeout. It may be overloaded or down."
     except requests.exceptions.ConnectionError:
-        return False, "Ollama server is not available. Connection error."
+        return False, "❌ Cannot connect to Ollama server. Ensure it is running."
     except Exception as e:
-        return False, f"An error occurred while checking Ollama availability: {str(e)}"
-
-def generate_fallback_response(query:str, flights:List[dict]) -> str:
+        return False, f"⚠️ Error checking Ollama server: {str(e)}"
+
+def generate_fallback_response(query: str, flights: List[dict]) -> str:
     """
     Generate a simple response when Ollama is not available.
-    Args: query (str): User's question
-        flights (List[dict]): List of matching flights
-    Returns: str: Fallback response
+
+    Args:
+        query (str): User's original question.
+        flights (list): Matching flight information.
+
+    Returns:
+        str: Fallback response.
     """
     if not flights:
-        return "No flights found matching your criteria."
-
-    response = "Here are the flights that match your criteria:\n"
+        return "I couldn't find any flights matching your criteria. Please try again with different details."
+
+    response = "Here are the flights that match your search:\n\n"
     for flight in flights:
-        response += f"Flight {flight['flight_number']} from {flight['origin']} to {flight['destination']} on {flight['time']} by {flight['airline']}\n"
+        response += (f"Flight {flight['flight_number']} from {flight['origin']} to {flight['destination']}\n"
+                     f"Time: {flight['time']}\n"
+                     f"Airline: {flight['airline']}\n\n")
     return response
-
+
 def generate_response(query: str, flights: List[dict]) -> str:
     """
-    Generate a natural language response using the Ollama model.
-    Args: query (str): User's question
-        flights (List[dict]): List of matching flights
-    Returns: str: Natural language response
+    Generate a natural language response using the LangChain Ollama integration.
+
+    Args:
+        query (str): User's original question.
+        flights (list): Matching flight information.
+
+    Returns:
+        str: Generated response.
     """
-    # Check is ollama server is available
+    # ✅ First check if Ollama is available
     is_available, message = check_ollama_availability()
     if not is_available:
         return generate_fallback_response(query, flights)
+
     try:
-        # Prepare the prompt for Ollama model
+        # Prepare flight information for LLM
         if flights:
-            flight_info = json.dumps(flights, indent = 2)
+            flight_info = json.dumps(flights, indent=2)
             prompt = f"""
-            User Question: {query}
+            User Query: {query}
 
-            Available Flights Information:
+            Available Flights:
             {flight_info}
-            Please provide a concise, natural language response that summarizes these flights, including flight numbers, times, and destinations.
+
+            Generate a natural language response summarizing these flights, including flight number, time, and airline details.
             """
         else:
             prompt = f"""
-            User Question: {query}
+            User Query: {query}
 
-            No flights found matching your criteria.
+            No matching flights found. Please provide a response indicating this politely.
             """
-        # Make the api call to Ollama
-        response = requests.post(f"{OLLAMA_URL}/api/generate",
-                                 json={
-                                     "model": MODEL,
-                                     "prompt": prompt,
-                                     "stream": False},
-                                 timeout=10  # Timeout after 10 seconds
-                                 )
-        if response.status_code == 200:
-            try:
-                response_data = response.json()
-                return response_data.get("response", "Sorry, I couldn't generate a response.")
-            except ValueError:
-                return "There was an error processing the response from the server."
-        else:
-            return generate_fallback_response(query, flights)
 
-    except requests.exceptions.RequestException as e:
-        return generate_fallback_response(query, flights) + f" Error: {str(e)}"
+        # ✅ Generate response using LangChain's Ollama LLM
+        response = ollama_llm(prompt)
+        return response.strip()
+
     except Exception as e:
-        return generate_fallback_response(query, flights) + f" Unexpected error: {str(e)}"
+        return generate_fallback_response(query, flights) + f" (⚠️ Error: {str(e)})"
