|
| 1 | +""" |
| 2 | +Integration with the Ollama API for natural language generation. |
| 3 | +""" |
| 4 | +import os |
| 5 | +import json |
| 6 | +import requests |
| 7 | +from dotenv import load_dotenv |
| 8 | +from typing import Tuple, List |
| 9 | + |
# Load environment variables from a local .env file, if present.
load_dotenv()

# Base URL of the Ollama server (e.g. "http://localhost:11434"); required —
# fail fast at import time rather than on the first request.
OLLAMA_URL = os.getenv("OLLAMA_URL")
if not OLLAMA_URL:
    raise ValueError("OLLAMA_URL environment variable is not set.")
# Model name passed to /api/generate; defaults to "llama2" when unset.
MODEL = os.getenv("OLLAMA_MODEL", "llama2")
| 16 | + |
def check_ollama_availability() -> Tuple[bool, str]:
    """
    Check whether the Ollama server is reachable by querying /api/tags.

    Returns:
        Tuple[bool, str]: (success, message). ``success`` is True only when
        the server answers with HTTP 200; ``message`` describes the outcome.
    """
    try:
        # Bounded timeout so a hung or unreachable server cannot block the
        # caller forever (the original call had no timeout).
        response = requests.get(f"{OLLAMA_URL}/api/tags", timeout=5)
        if response.status_code == 200:
            return True, "Ollama server is available."
        # BUG FIX: the original literal was missing the f-prefix, so the
        # text "{response.status_code}" was returned verbatim instead of
        # the actual status code.
        return False, f"Ollama server is not available. status code: {response.status_code}"
    except requests.exceptions.ConnectionError:
        return False, "Ollama server is not available. Connection error."
    except Exception as e:
        # Catch-all so availability checks never raise into the caller.
        return False, f"An error occurred while checking Ollama availability: {str(e)}"
| 31 | + |
def generate_fallback_response(query: str, flights: List[dict]) -> str:
    """
    Build a plain-text flight summary used when Ollama is unreachable.

    Args:
        query: User's question (not used in the fallback text itself).
        flights: Matching flight records; each dict must provide the keys
            'flight_number', 'origin', 'destination', 'time' and 'airline'.

    Returns:
        str: A human-readable flight listing, or a "no flights" notice when
        the list is empty.
    """
    if not flights:
        return "No flights found matching your criteria."

    # One summary line per flight, assembled with join instead of repeated
    # string concatenation.
    flight_lines = (
        f"Flight {f['flight_number']} from {f['origin']} to {f['destination']} on {f['time']} by {f['airline']}\n"
        for f in flights
    )
    return "Here are the flights that match your criteria:\n" + "".join(flight_lines)
| 46 | + |
def generate_response(query: str, flights: List[dict]) -> str:
    """
    Generate a natural language response using the Ollama model.

    Degrades gracefully: falls back to generate_fallback_response() when the
    server is unavailable, returns a non-200 status, or the request errors.

    Args: query (str): User's question
    flights (List[dict]): List of matching flights
    Returns: str: Natural language response
    """
    # Check is ollama server is available
    # NOTE(review): `message` is discarded here — consider logging it so
    # availability failures are diagnosable.
    is_available, message = check_ollama_availability()
    if not is_available:
        return generate_fallback_response(query, flights)
    try:
        # Prepare the prompt for Ollama model — flight data is embedded as
        # pretty-printed JSON so the model can read and summarize it.
        if flights:
            flight_info = json.dumps(flights, indent = 2)
            prompt = f"""
            User Question: {query}

            Available Flights Information:
            {flight_info}
            Please provide a concise, natural language response that summarizes these flights, including flight numbers, times, and destinations.
            """
        else:
            prompt = f"""
            User Question: {query}

            No flights found matching your criteria.
            """
        # Make the api call to Ollama (non-streaming: one complete JSON reply).
        response = requests.post(f"{OLLAMA_URL}/api/generate",
            json={
                "model": MODEL,
                "prompt": prompt,
                "stream": False},
            timeout=10 # Timeout after 10 seconds
            )
        if response.status_code == 200:
            try:
                response_data = response.json()
                # Ollama's /api/generate puts the generated text under "response".
                return response_data.get("response", "Sorry, I couldn't generate a response.")
            except ValueError:
                # Body was not valid JSON.
                return "There was an error processing the response from the server."
        else:
            # Non-200 status: fall back to the plain-text summary.
            return generate_fallback_response(query, flights)

    except requests.exceptions.RequestException as e:
        # Network-level failure (timeout, refused connection, DNS, ...).
        # NOTE(review): the raw exception text is appended to the user-facing
        # reply — confirm that leaking these details is intended.
        return generate_fallback_response(query, flights) + f" Error: {str(e)}"
    except Exception as e:
        # Last-resort guard so the caller always receives a string.
        return generate_fallback_response(query, flights) + f" Unexpected error: {str(e)}"
0 commit comments