|
1 | | -""" |
2 | | -Integration with the Ollama API for natural language generation. |
3 | | -""" |
4 | 1 | import os |
5 | 2 | import json |
6 | 3 | import requests |
7 | | -from dotenv import load_dotenv |
| 4 | +from langchain_ollama import OllamaLLM |
8 | 5 | from typing import Tuple, List |
| 6 | +from dotenv import load_dotenv |
9 | 7 |
| 8 | +# ✅ Load environment variables |
10 | 9 | load_dotenv() |
11 | 10 |
| 11 | +print(f"🟢 Loaded OLLAMA_MODEL from .env: {os.getenv('OLLAMA_MODEL')}") # Debugging check |
| 12 | + |
| 13 | + |
| 14 | +# ✅ Use environment variables dynamically |
12 | | -OLLAMA_URL = os.getenv("OLLAMA_URL")
| 15 | +OLLAMA_URL = os.getenv("OLLAMA_URL", "http://localhost:11434")  # ✅ Fall back to Ollama's local default if unset
13 | | -if not OLLAMA_URL: |
14 | | - raise ValueError("OLLAMA_URL environment variable is not set.") |
15 | | -MODEL = os.getenv("OLLAMA_MODEL", "llama2") |
| 16 | +OLLAMA_MODEL = os.getenv("OLLAMA_MODEL") |
| 17 | +if not OLLAMA_MODEL: |
| 18 | + print("❌ OLLAMA_MODEL is missing! Check your .env file.") |
| 19 | + raise SystemExit(1)
| 20 | + |
| 21 | +# ✅ Initialize Ollama LLM |
| 22 | +ollama_llm = OllamaLLM(model=OLLAMA_MODEL, base_url=OLLAMA_URL)  # ✅ Point LangChain at the same server the health check uses
16 | 23 |
17 | 24 | def check_ollama_availability() -> Tuple[bool, str]: |
18 | 25 | """ |
19 | | - Check if the Ollama server is available by querying its health endpoint. |
20 | | - Returns: Tuple[bool, str]: (success, message) |
| 26 | + Check if the locally running Ollama server is available. |
| 27 | + |
| 28 | + Returns: |
| 29 | + Tuple[bool, str]: (is_available, message) |
21 | 30 | """ |
22 | 31 | try: |
23 | | - response = requests.get(f"{OLLAMA_URL}/api/tags") |
| 32 | + response = requests.get(f"{OLLAMA_URL}/api/tags", timeout=3) # ✅ Use .env URL and set timeout |
24 | 33 | if response.status_code == 200: |
25 | | - return True, "Ollama server is available." |
26 | | - return False, "Ollama server is not available. status code: {response.status_code}" |
| 34 | + return True, "Ollama server is available" |
| 35 | + return False, f"Ollama server returned status code: {response.status_code}" |
| 36 | + except requests.exceptions.Timeout: |
| 37 | + return False, "⚠️ Ollama server timeout. It may be overloaded or down." |
27 | 38 | except requests.exceptions.ConnectionError: |
28 | | - return False, "Ollama server is not available. Connection error." |
| 39 | + return False, "❌ Cannot connect to Ollama server. Ensure it is running." |
29 | 40 | except Exception as e: |
30 | | - return False, f"An error occurred while checking Ollama availability: {str(e)}" |
31 | | - |
32 | | -def generate_fallback_response(query:str, flights:List[dict]) -> str: |
| 41 | + return False, f"⚠️ Error checking Ollama server: {str(e)}" |
| 42 | + |
| 43 | +def generate_fallback_response(query: str, flights: List[dict]) -> str: |
33 | 44 | """ |
34 | 45 | Generate a simple response when Ollama is not available. |
35 | | - Args: query (str): User's question |
36 | | - flights (List[dict]): List of matching flights |
37 | | - Returns: str: Fallback response |
| 46 | + |
| 47 | + Args: |
| 48 | + query (str): User's original question. |
| 49 | + flights (list): Matching flight information. |
| 50 | + |
| 51 | + Returns: |
| 52 | + str: Fallback response. |
38 | 53 | """ |
39 | 54 | if not flights: |
40 | | - return "No flights found matching your criteria." |
41 | | - |
42 | | - response = "Here are the flights that match your criteria:\n" |
| 55 | + return "I couldn't find any flights matching your criteria. Please try again with different details." |
| 56 | + |
| 57 | + response = "Here are the flights that match your search:\n\n" |
43 | 58 | for flight in flights: |
44 | | - response += f"Flight {flight['flight_number']} from {flight['origin']} to {flight['destination']} on {flight['time']} by {flight['airline']}\n" |
| 59 | + response += (f"Flight {flight['flight_number']} from {flight['origin']} to {flight['destination']}\n" |
| 60 | + f"Time: {flight['time']}\n" |
| 61 | + f"Airline: {flight['airline']}\n\n") |
45 | 62 | return response |
46 | | - |
| 63 | + |
47 | 64 | def generate_response(query: str, flights: List[dict]) -> str: |
48 | 65 | """ |
49 | | - Generate a natural language response using the Ollama model. |
50 | | - Args: query (str): User's question |
51 | | - flights (List[dict]): List of matching flights |
52 | | - Returns: str: Natural language response |
| 66 | + Generate a natural language response using the LangChain Ollama integration. |
| 67 | +
| 68 | + Args: |
| 69 | + query (str): User's original question. |
| 70 | + flights (list): Matching flight information. |
| 71 | +
| 72 | + Returns: |
| 73 | + str: Generated response. |
53 | 74 | """ |
54 | | - # Check is ollama server is available |
| 75 | + # ✅ First check if Ollama is available |
55 | 76 | is_available, message = check_ollama_availability() |
56 | 77 | if not is_available: |
57 | 78 | return generate_fallback_response(query, flights) |
| 79 | + |
58 | 80 | try: |
59 | | - # Prepare the prompt for Ollama model |
| 81 | + # ✅ Prepare flight information for LLM |
60 | 82 | if flights: |
61 | | - flight_info = json.dumps(flights, indent = 2) |
| 83 | + flight_info = json.dumps(flights, indent=2) |
62 | 84 | prompt = f""" |
63 | | - User Question: {query} |
| 85 | + User Query: {query} |
64 | 86 |
65 | | - Available Flights Information: |
| 87 | + Available Flights: |
66 | 88 | {flight_info} |
67 | | - Please provide a concise, natural language response that summarizes these flights, including flight numbers, times, and destinations. |
| 89 | +
| 90 | + Generate a natural language response summarizing these flights, including flight number, time, and airline details. |
68 | 91 | """ |
69 | 92 | else: |
70 | 93 | prompt = f""" |
71 | | - User Question: {query} |
| 94 | + User Query: {query} |
72 | 95 |
73 | | - No flights found matching your criteria. |
| 96 | + No matching flights found. Please provide a response indicating this politely. |
74 | 97 | """ |
75 | | - # Make the api call to Ollama |
76 | | - response = requests.post(f"{OLLAMA_URL}/api/generate", |
77 | | - json={ |
78 | | - "model": MODEL, |
79 | | - "prompt": prompt, |
80 | | - "stream": False}, |
81 | | - timeout=10 # Timeout after 10 seconds |
82 | | - ) |
83 | | - if response.status_code == 200: |
84 | | - try: |
85 | | - response_data = response.json() |
86 | | - return response_data.get("response", "Sorry, I couldn't generate a response.") |
87 | | - except ValueError: |
88 | | - return "There was an error processing the response from the server." |
89 | | - else: |
90 | | - return generate_fallback_response(query, flights) |
91 | 98 |
92 | | - except requests.exceptions.RequestException as e: |
93 | | - return generate_fallback_response(query, flights) + f" Error: {str(e)}" |
| 99 | + # ✅ Generate response using LangChain's Ollama LLM |
| 100 | + response = ollama_llm.invoke(prompt)  # ✅ invoke() is the supported LangChain call style; calling the LLM directly is deprecated
| 101 | + return response.strip() |
| 102 | + |
94 | 103 | except Exception as e: |
95 | | - return generate_fallback_response(query, flights) + f" Unexpected error: {str(e)}" |
| 104 | + return generate_fallback_response(query, flights) + f" (⚠️ Error: {str(e)})" |
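
For reference, a minimal usage sketch of the new code path (assumptions: the module is saved as ollama_integration.py, a .env provides OLLAMA_URL=http://localhost:11434 and OLLAMA_MODEL=llama2, and the flight data below is illustrative — none of this is part of the commit):

    # usage_example.py — hypothetical driver, not part of this commit
    from ollama_integration import check_ollama_availability, generate_response  # module name assumed

    sample_flights = [
        {
            "flight_number": "BA142",
            "origin": "LHR",
            "destination": "JFK",
            "time": "09:30",
            "airline": "British Airways",
        },
    ]

    ok, status = check_ollama_availability()
    print(status)

    # generate_response() falls back to a plain-text summary if the server is unreachable
    print(generate_response("Which flights go from LHR to JFK?", sample_flights))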