diff --git a/ai/gen-ai-agents/travel_agent/README.md b/ai/gen-ai-agents/travel_agent/README.md new file mode 100644 index 000000000..58a706f32 --- /dev/null +++ b/ai/gen-ai-agents/travel_agent/README.md @@ -0,0 +1,16 @@ +# Travel Agent +This repository contains all the code for a demo of a Travel Agent. +The AI Agent enables a customer to get information about available destinations and to organize a trip, booking flights and hotels. + +The agent has been developed using OCI Generative AI and LangGraph. + +## List of packages +* oci +* langchain-community +* langgraph +* streamlit +* fastapi +* black +* uvicorn + + diff --git a/ai/gen-ai-agents/travel_agent/base_node.py b/ai/gen-ai-agents/travel_agent/base_node.py new file mode 100644 index 000000000..6a3ef0dc2 --- /dev/null +++ b/ai/gen-ai-agents/travel_agent/base_node.py @@ -0,0 +1,63 @@ +""" +Base Node class for LangGraph nodes. + +This module defines a base class `BaseNode` for all LangGraph nodes, +providing a standard logging interface via `log_info` and `log_error` methods. +Each subclass should implement the `invoke(input, config=None)` method. +""" + +import logging +from langchain_core.runnables import Runnable + + +class BaseNode(Runnable): + """ + Abstract base class for LangGraph nodes. + + All node classes in the graph should inherit from this base class. + It provides convenient logging utilities and stores a unique node name + for identification in logs and debugging. + + Attributes: + name (str): Identifier for the node, used in logging. + logger (logging.Logger): Configured logger instance for the node. + """ + + def __init__(self, name: str): + """ + Initialize the base node with a logger. + + Args: + name (str): Unique name of the node for logging purposes. 
+ """ + self.name = name + self.logger = logging.getLogger(name) + self.logger.setLevel(logging.INFO) + + # Attach a default console handler if no handlers are present + if not self.logger.handlers: + handler = logging.StreamHandler() + handler.setLevel(logging.INFO) + formatter = logging.Formatter( + "[%(asctime)s] %(levelname)s in %(name)s: %(message)s" + ) + handler.setFormatter(formatter) + self.logger.addHandler(handler) + + def log_info(self, message: str): + """ + Log an informational message. + + Args: + message (str): The message to log. + """ + self.logger.info("[%s] %s", self.name, message) + + def log_error(self, message: str): + """ + Log an error message. + + Args: + message (str): The error message to log. + """ + self.logger.error("[%s] %s", self.name, message) diff --git a/ai/gen-ai-agents/travel_agent/config.py b/ai/gen-ai-agents/travel_agent/config.py new file mode 100644 index 000000000..ddb6c78d6 --- /dev/null +++ b/ai/gen-ai-agents/travel_agent/config.py @@ -0,0 +1,36 @@ +""" +General configuration options +""" + +# +# application configs +# +DEBUG = False + +# this is the list of the mandatory fields in user input +# if any of these fields is missing, the agent will ask for clarification +REQUIRED_FIELDS = [ + "place_of_departure", + "destination", + "start_date", + "end_date", + "num_persons", + "transport_type", +] + +# OCI GenAI services configuration +REGION = "eu-frankfurt-1" +SERVICE_ENDPOINT = f"https://inference.generativeai.{REGION}.oci.oraclecloud.com" + +# seems to work with both models +MODEL_ID = "meta.llama-3.3-70b-instruct" +# MODEL_ID = "cohere.command-a-03-2025" + +MAX_TOKENS = 2048 + +# Mock API configuration +HOTEL_API_URL = "http://localhost:8000/search/hotels" +TRANSPORT_API_URL = "http://localhost:8000/search/transport" + +# Hotel Map +MAP_STYLE = "https://basemaps.cartocdn.com/gl/positron-gl-style/style.json" diff --git a/ai/gen-ai-agents/travel_agent/mock_api.py b/ai/gen-ai-agents/travel_agent/mock_api.py new file mode 
100644 index 000000000..b70df0303 --- /dev/null +++ b/ai/gen-ai-agents/travel_agent/mock_api.py @@ -0,0 +1,111 @@ +""" +mock_api.py + +A simplified mock FastAPI server with two endpoints: +- /search/transport +- /search/hotels +""" + +from fastapi import FastAPI, Query +from fastapi.responses import JSONResponse + +app = FastAPI() + + +@app.get("/search/transport") +def search_transport( + destination: str = Query(...), + start_date: str = Query(...), + transport_type: str = Query(...), +): + """ + Mock endpoint to simulate transport search. + Args: + destination (str): Destination city. + start_date (str): Start date of the trip in 'YYYY-MM-DD' format. + transport_type (str): Type of transport (e.g., "airplane", "train"). + Returns: + JSONResponse: Mocked transport options. + """ + return JSONResponse( + content={ + "options": [ + { + "provider": ( + "TrainItalia" if transport_type == "train" else "Ryanair" + ), + "price": 45.50, + "departure": f"{start_date}T09:00", + "arrival": f"{start_date}T13:00", + "type": transport_type, + } + ] + } + ) + + +@app.get("/search/hotels") +def search_hotels(destination: str = Query(...), stars: int = Query(3)): + """ + Mock endpoint to simulate hotel search. + Args: + destination (str): Destination city. + stars (int): Number of stars for hotel preference. + Returns: + JSONResponse: Mocked hotel options. 
+ """ + hotels_by_city = { + "valencia": { + "name": "Hotel Vincci Lys", + "price": 135.0, + "stars": stars, + "location": "Central district", + "amenities": ["WiFi", "Breakfast"], + "latitude": 39.4702, + "longitude": -0.3750, + }, + "barcelona": { + "name": "Hotel Jazz", + "price": 160.0, + "stars": stars, + "location": "Eixample", + "amenities": ["WiFi", "Rooftop pool"], + "latitude": 41.3849, + "longitude": 2.1675, + }, + "madrid": { + "name": "Only YOU Hotel Atocha", + "price": 170.0, + "stars": stars, + "location": "Retiro", + "amenities": ["WiFi", "Gym", "Restaurant"], + "latitude": 40.4093, + "longitude": -3.6828, + }, + "florence": { + "name": "Hotel L'Orologio Firenze", + "price": 185.0, + "stars": stars, + "location": "Santa Maria Novella", + "amenities": ["WiFi", "Spa", "Bar"], + "latitude": 43.7760, + "longitude": 11.2486, + }, + "amsterdam": { + "name": "INK Hotel Amsterdam", + "price": 190.0, + "stars": stars, + "location": "City Center", + "amenities": ["WiFi", "Breakfast", "Bar"], + "latitude": 52.3745, + "longitude": 4.8901, + }, + } + + hotel_key = destination.strip().lower() + hotel = hotels_by_city.get(hotel_key) + + if not hotel: + return JSONResponse(content={"hotels": []}, status_code=404) + + return JSONResponse(content={"hotels": [hotel]}) diff --git a/ai/gen-ai-agents/travel_agent/model_factory.py b/ai/gen-ai-agents/travel_agent/model_factory.py new file mode 100644 index 000000000..97a8887fd --- /dev/null +++ b/ai/gen-ai-agents/travel_agent/model_factory.py @@ -0,0 +1,29 @@ +""" +Factory for Chat models +""" + +from langchain_community.chat_models import ChatOCIGenAI + +from config import MODEL_ID, SERVICE_ENDPOINT +from config_private import COMPARTMENT_OCID + + +def get_chat_model( + model_id: str = MODEL_ID, + service_endpoint: str = SERVICE_ENDPOINT, + temperature=0, + max_tokens=2048, +) -> ChatOCIGenAI: + """ + Factory function to create and return a ChatOCIGenAI model instance. 
+ + Returns: + ChatOCIGenAI: Configured chat model instance. + """ + # Create and return the chat model + return ChatOCIGenAI( + model_id=model_id, + service_endpoint=service_endpoint, + model_kwargs={"temperature": temperature, "max_tokens": max_tokens}, + compartment_id=COMPARTMENT_OCID, + ) diff --git a/ai/gen-ai-agents/travel_agent/nodes/__pycache__/answer_info_node.cpython-311.pyc b/ai/gen-ai-agents/travel_agent/nodes/__pycache__/answer_info_node.cpython-311.pyc new file mode 100644 index 000000000..0185a0fae Binary files /dev/null and b/ai/gen-ai-agents/travel_agent/nodes/__pycache__/answer_info_node.cpython-311.pyc differ diff --git a/ai/gen-ai-agents/travel_agent/nodes/__pycache__/clarification_node.cpython-311.pyc b/ai/gen-ai-agents/travel_agent/nodes/__pycache__/clarification_node.cpython-311.pyc new file mode 100644 index 000000000..1c136fef9 Binary files /dev/null and b/ai/gen-ai-agents/travel_agent/nodes/__pycache__/clarification_node.cpython-311.pyc differ diff --git a/ai/gen-ai-agents/travel_agent/nodes/__pycache__/generate_itinerary_node.cpython-311.pyc b/ai/gen-ai-agents/travel_agent/nodes/__pycache__/generate_itinerary_node.cpython-311.pyc new file mode 100644 index 000000000..4fd1be166 Binary files /dev/null and b/ai/gen-ai-agents/travel_agent/nodes/__pycache__/generate_itinerary_node.cpython-311.pyc differ diff --git a/ai/gen-ai-agents/travel_agent/nodes/__pycache__/hotel_node.cpython-311.pyc b/ai/gen-ai-agents/travel_agent/nodes/__pycache__/hotel_node.cpython-311.pyc new file mode 100644 index 000000000..1abc0abaa Binary files /dev/null and b/ai/gen-ai-agents/travel_agent/nodes/__pycache__/hotel_node.cpython-311.pyc differ diff --git a/ai/gen-ai-agents/travel_agent/nodes/__pycache__/parse_input_node.cpython-311.pyc b/ai/gen-ai-agents/travel_agent/nodes/__pycache__/parse_input_node.cpython-311.pyc new file mode 100644 index 000000000..4988f3b54 Binary files /dev/null and 
b/ai/gen-ai-agents/travel_agent/nodes/__pycache__/parse_input_node.cpython-311.pyc differ diff --git a/ai/gen-ai-agents/travel_agent/nodes/__pycache__/router_node.cpython-311.pyc b/ai/gen-ai-agents/travel_agent/nodes/__pycache__/router_node.cpython-311.pyc new file mode 100644 index 000000000..fdda045c0 Binary files /dev/null and b/ai/gen-ai-agents/travel_agent/nodes/__pycache__/router_node.cpython-311.pyc differ diff --git a/ai/gen-ai-agents/travel_agent/nodes/__pycache__/synthesize_plan_node.cpython-311.pyc b/ai/gen-ai-agents/travel_agent/nodes/__pycache__/synthesize_plan_node.cpython-311.pyc new file mode 100644 index 000000000..1007efcf7 Binary files /dev/null and b/ai/gen-ai-agents/travel_agent/nodes/__pycache__/synthesize_plan_node.cpython-311.pyc differ diff --git a/ai/gen-ai-agents/travel_agent/nodes/__pycache__/transport_node.cpython-311.pyc b/ai/gen-ai-agents/travel_agent/nodes/__pycache__/transport_node.cpython-311.pyc new file mode 100644 index 000000000..f2dd18575 Binary files /dev/null and b/ai/gen-ai-agents/travel_agent/nodes/__pycache__/transport_node.cpython-311.pyc differ diff --git a/ai/gen-ai-agents/travel_agent/nodes/answer_info_node.py b/ai/gen-ai-agents/travel_agent/nodes/answer_info_node.py new file mode 100644 index 000000000..316c36ec0 --- /dev/null +++ b/ai/gen-ai-agents/travel_agent/nodes/answer_info_node.py @@ -0,0 +1,76 @@ +# answer_info_node.py +# -*- coding: utf-8 -*- +""" +AnswerInfoNode + +This module defines the AnswerInfoNode class, which is responsible for handling +general travel information queries within the LangGraph-based travel assistant. + +When a user request is classified as an "info" intent by the router node, +this node generates a markdown-formatted response using a language model. + +Author: L. 
Saetta +Date: 20/05/2025 + +""" +from langchain_core.runnables import Runnable +from langchain_core.output_parsers import StrOutputParser +from base_node import BaseNode +from model_factory import get_chat_model +from prompt_template import answer_prompt +from config import MODEL_ID, SERVICE_ENDPOINT, MAX_TOKENS, DEBUG + + +class AnswerInfoNode(BaseNode): + """ + Node in the LangGraph workflow responsible for handling general travel information queries. + + This node is used when the user's intent is classified as an information request + (rather than a booking). + It uses a language model to generate a helpful, markdown-formatted response + based on the user's input. + + Attributes: + prompt (PromptTemplate): The prompt template for generating the informational response. + llm (Runnable): The configured language model used for generation. + chain (Runnable): Composed chain of prompt → model → output parser. + """ + + def __init__(self): + """ + Initialize the AnswerInfoNode with a pre-defined prompt and LLM configuration. + + The chain is constructed from: + - `answer_prompt` (PromptTemplate) + - A chat model initialized via `get_chat_model` + - A `StrOutputParser` for plain string output + """ + super().__init__("answer_info") + + self.prompt = answer_prompt + self.llm = get_chat_model( + model_id=MODEL_ID, + service_endpoint=SERVICE_ENDPOINT, + temperature=0.5, + max_tokens=MAX_TOKENS, + ) + self.chain: Runnable = self.prompt | self.llm | StrOutputParser() + + def invoke(self, state, config=None, **kwargs): + """ + Generate a general travel information response from the user's question. + + Args: + state (dict): The current LangGraph state, which must include a 'user_input' key. + config (optional): Reserved for compatibility; not used. + + Returns: + dict: Updated state with the 'final_plan' field set to the LLM-generated response. 
+ """ + response = self.chain.invoke({"user_input": state["user_input"]}).strip() + + if DEBUG: + self.log_info("Generated informational response.") + + state["final_plan"] = response + return state diff --git a/ai/gen-ai-agents/travel_agent/nodes/clarification_node.py b/ai/gen-ai-agents/travel_agent/nodes/clarification_node.py new file mode 100644 index 000000000..60c9afe05 --- /dev/null +++ b/ai/gen-ai-agents/travel_agent/nodes/clarification_node.py @@ -0,0 +1,84 @@ +# clarification_node.py +# -*- coding: utf-8 -*- +""" +ClarificationNode + +This module defines the ClarificationNode class, which is responsible for checking whether +the user's input is missing required travel information fields. If any fields are missing, +the node uses an LLM to generate a user-friendly clarification prompt. + +Author: L. Saetta +Date: 20/05/2025 + +""" + +from base_node import BaseNode +from model_factory import get_chat_model +from translations import TRANSLATIONS +from config import DEBUG, MODEL_ID, SERVICE_ENDPOINT, REQUIRED_FIELDS, MAX_TOKENS + + +class ClarificationNode(BaseNode): + """ + Node in the LangGraph workflow responsible for identifying missing information + in the user's travel request and generating a clarification prompt if necessary. + + This node checks for any fields listed in `REQUIRED_FIELDS` that are missing from the state. + If any are missing, it uses an LLM to generate a natural-language follow-up question + and sets flags in the state to trigger a clarification cycle. + + Attributes: + llm (Runnable): The language model used to generate the clarification prompt. + """ + + def __init__(self): + """ + Initialize the ClarificationNode with a configured chat model. + + The LLM is used to rephrase missing-field prompts into natural user questions. 
+ """ + super().__init__("ClarificationNode") + + self.llm = get_chat_model( + model_id=MODEL_ID, + service_endpoint=SERVICE_ENDPOINT, + temperature=0.2, + max_tokens=MAX_TOKENS, + ) + + def invoke(self, state: dict, config=None, **kwargs) -> dict: + """ + Check for missing required fields and, if needed, generate a clarification prompt. + + Args: + state (dict): The current shared workflow state containing user input and parsed fields. + config (optional): Reserved for LangGraph compatibility; not used here. + + Returns: + dict: Updated state with: + - 'clarification_needed' (bool): True if any required field is missing. + - 'clarification_prompt' (str): A user-friendly follow-up question + generated by the LLM (if needed). + """ + missing = [field for field in REQUIRED_FIELDS if not state.get(field)] + + language = config.get("configurable", {}).get("language", "EN") + # get the relevant translations + t = TRANSLATIONS[language] + + if missing: + if DEBUG: + self.log_info(f"Missing fields: {missing}") + + state["clarification_needed"] = True + + # Prompt localized for the missing fields + question_prompt = t["clarification_prompt_template"].format(fields=", ".join(missing)) + + # Generate user-friendly clarification message using LLM + followup_prompt = self.llm.invoke(question_prompt).content + state["clarification_prompt"] = followup_prompt + else: + state["clarification_needed"] = False + + return state diff --git a/ai/gen-ai-agents/travel_agent/nodes/generate_itinerary_node.py b/ai/gen-ai-agents/travel_agent/nodes/generate_itinerary_node.py new file mode 100644 index 000000000..d7ec9d91e --- /dev/null +++ b/ai/gen-ai-agents/travel_agent/nodes/generate_itinerary_node.py @@ -0,0 +1,55 @@ +# generate_itinerary_node.py +# -*- coding: utf-8 -*- +""" +GenerateItineraryNode + +This LangGraph node uses an LLM to generate a personalized day-by-day travel itinerary +based on the selected destination, number of days, and user preferences. + +Author: L. 
Saetta +Date: 20/10/2025 +""" + +from base_node import BaseNode +from langchain_core.runnables import Runnable +from langchain_core.output_parsers import StrOutputParser +from model_factory import get_chat_model +from config import MODEL_ID, SERVICE_ENDPOINT, MAX_TOKENS, DEBUG +from translations import TRANSLATIONS + + +class GenerateItineraryNode(BaseNode): + def __init__(self): + super().__init__("GenerateItineraryNode") + + self.llm = get_chat_model( + model_id=MODEL_ID, + service_endpoint=SERVICE_ENDPOINT, + temperature=0.7, + max_tokens=MAX_TOKENS, + ) + + def invoke(self, state: dict, config=None, **kwargs) -> dict: + language = config.get("configurable", {}).get("language", "EN") + t = TRANSLATIONS[language] + + destination = state.get("destination", "") + num_days = state.get("num_days", 3) + interests = state.get("hotel_preferences", {}).get("location", "central") + hotel = state.get("hotel_options", [{}])[0].get("name", "a hotel") + + itinerary_prompt = t["itinerary_prompt_template"].format( + destination=destination, + num_days=num_days, + hotel=hotel, + location=interests, + ) + + if DEBUG: + self.log_info("Generating itinerary...") + + response = self.llm.invoke(itinerary_prompt).content + + state["final_plan"] += f"\n\n{t['suggested_itinerary_title']}\n{response}" + + return state diff --git a/ai/gen-ai-agents/travel_agent/nodes/hotel_node.py b/ai/gen-ai-agents/travel_agent/nodes/hotel_node.py new file mode 100644 index 000000000..27fec11ea --- /dev/null +++ b/ai/gen-ai-agents/travel_agent/nodes/hotel_node.py @@ -0,0 +1,46 @@ +# hotel_node.py +# -*- coding: utf-8 -*- +""" +SearchHotelNode + +This module defines the SearchHotelNode class, which queries available hotel options +for a given destination and preferences, and stores the result in the LangGraph state. + +Author: L. 
Saetta +Date: 20/05/2025 +""" + +import requests +from base_node import BaseNode +from config import HOTEL_API_URL + + +class SearchHotelNode(BaseNode): + """ + Hotel node in the LangGraph workflow responsible for searching hotel options. + """ + + def __init__(self): + super().__init__("SearchHotelNode") + + def invoke(self, state: dict, config=None, **kwargs) -> dict: + self.log_info("Searching for hotels") + try: + prefs = state.get("hotel_preferences", {}) + + # here we call the API + response = requests.get( + HOTEL_API_URL, + params={ + "destination": state.get("destination"), + # default 3 stars + "stars": prefs.get("stars", 3), + }, + timeout=5, + ) + data = response.json() + state["hotel_options"] = data.get("hotels", []) + self.log_info(f"Found hotels: {data}") + except Exception as e: + self.log_error(f"Hotel search failed: {e}") + return state diff --git a/ai/gen-ai-agents/travel_agent/nodes/parse_input_node.py b/ai/gen-ai-agents/travel_agent/nodes/parse_input_node.py new file mode 100644 index 000000000..f9e3fe268 --- /dev/null +++ b/ai/gen-ai-agents/travel_agent/nodes/parse_input_node.py @@ -0,0 +1,83 @@ +""" +ParseInputNode + +LangGraph node responsible for parsing natural language travel requests +into structured data using an LLM hosted on Oracle Cloud Infrastructure (OCI). +""" + +from datetime import date +from prompt_template import input_parser_prompt +from model_factory import get_chat_model + +from base_node import BaseNode +from utils import extract_json_from_text +from config import MODEL_ID, SERVICE_ENDPOINT, DEBUG, MAX_TOKENS + + +class ParseInputNode(BaseNode): + """ + LangGraph node that uses an OCI-hosted LLM to extract structured travel planning + data from a user's natural language input. + + It parses values such as destination, dates, number of persons, transport type, + and hotel preferences. + """ + + def __init__(self): + """ + Initialize the node and configure the LLM (Meta Llama 3.3 via OCI Generative AI). 
+ """ + super().__init__("ParseInputNode") + + self.llm = get_chat_model( + model_id=MODEL_ID, + service_endpoint=SERVICE_ENDPOINT, + temperature=0.0, + max_tokens=MAX_TOKENS, + ) + + def invoke(self, state: dict, config=None, **kwargs) -> dict: + """ + Invoke the node: parse user input using the LLM and update the state with structured values. + + Args: + state (dict): The current shared workflow state, must contain the key 'user_input'. + config (optional): Unused config passed by LangGraph. + + Returns: + dict: Updated state with extracted travel fields added. + """ + # to tell to the LLM that we are using the current date + # in the prompt, we need to pass it as a string + # in ISO format (YYYY-MM-DD) + today_str = date.today().isoformat() + + # Format the input using the prompt template + formatted_prompt = input_parser_prompt.format( + user_input=state["user_input"], today=today_str + ) + + try: + # Call the LLM with the formatted prompt + result = self.llm.invoke(formatted_prompt) + + if DEBUG: + self.log_info(f"LLM response: {result.content}") + + # Extract JSON structure from model response + structured_data = extract_json_from_text(result.content) + + if DEBUG: + self.log_info(f"Extracted JSON: {structured_data}") + + # Update state with parsed values + state.update(structured_data) + + if DEBUG: + self.log_info("Parsed user input successfully.") + + except Exception as e: + self.log_error(f"Failed to parse user input: {e}") + raise e + + return state diff --git a/ai/gen-ai-agents/travel_agent/nodes/router_node.py b/ai/gen-ai-agents/travel_agent/nodes/router_node.py new file mode 100644 index 000000000..86c0bd916 --- /dev/null +++ b/ai/gen-ai-agents/travel_agent/nodes/router_node.py @@ -0,0 +1,71 @@ +# router_node.py +# -*- coding: utf-8 -*- +""" +RouterNode + +This module defines the RouterNode class, which acts as an intent classifier in the LangGraph-based +travel assistant. 
It uses an LLM to determine whether the user's input is a travel booking request +or a general information query. + +Author: L. Saetta +Date: 20/05/2025 +""" +from langchain_core.runnables import Runnable +from langchain_core.output_parsers import StrOutputParser +from base_node import BaseNode +from model_factory import get_chat_model +from prompt_template import router_prompt +from config import MODEL_ID, SERVICE_ENDPOINT, DEBUG + + +class RouterNode(BaseNode): + """ + Node in the LangGraph workflow responsible for classifying the user's intent. + + This node invokes an LLM with a predefined prompt to determine whether the user's + request is for booking a trip or asking for general travel information. + + It updates the state with a new key: 'intent', which can be either 'booking' or 'info'. + + Attributes: + prompt (PromptTemplate): The classification prompt template. + llm (Runnable): The language model used for intent classification. + chain (Runnable): Composed chain of prompt → model → output parser. + """ + + def __init__(self): + """ + Initialize the RouterNode with a classification prompt and a configured chat model. + + The prompt is used to elicit one of two intents: 'booking' or 'info'. + The LLM is invoked with a temperature of 0.0 for deterministic intent classification. + """ + super().__init__("router") + + self.prompt = router_prompt + self.llm = get_chat_model( + model_id=MODEL_ID, + service_endpoint=SERVICE_ENDPOINT, + temperature=0.0, + max_tokens=2048, + ) + self.chain: Runnable = self.prompt | self.llm | StrOutputParser() + + def invoke(self, state, config=None, **kwargs): + """ + Classify the user's intent using the LLM and update the state accordingly. + + Args: + state (dict): The current LangGraph state, which must include a 'user_input' key. + config (optional): Reserved for compatibility; not used. + + Returns: + dict: Updated state with a new key 'intent' set to either 'booking' or 'info'. 
+ """ + user_input = state["user_input"] + intent = self.chain.invoke({"user_input": user_input}).strip().lower() + + self.log_info(f"Router classified intent as: {intent}") + + state["intent"] = intent + return state diff --git a/ai/gen-ai-agents/travel_agent/nodes/synthesize_plan_node.py b/ai/gen-ai-agents/travel_agent/nodes/synthesize_plan_node.py new file mode 100644 index 000000000..68d4ed927 --- /dev/null +++ b/ai/gen-ai-agents/travel_agent/nodes/synthesize_plan_node.py @@ -0,0 +1,126 @@ +# hotel_node.py +# -*- coding: utf-8 -*- +""" +# SynthesizePlanNode +This module defines the SynthesizePlanNode class, which synthesizes a final travel plan + +Author: L. Saetta +Date: 20/05/2025 +""" + +from base_node import BaseNode +from config import DEBUG + + +class SynthesizePlanNode(BaseNode): + """ + Node in the LangGraph workflow responsible for synthesizing the final travel plan. + This node collects transport and hotel options from the state and formats them into + a user-friendly markdown response. + It also includes the travel dates, number of travelers, and other relevant details. + Attributes: + name (str): Node identifier for logging purposes. + """ + + def __init__(self): + super().__init__("SynthesizePlanNode") + + def invoke(self, state: dict, config=None, **kwargs) -> dict: + """ + Synthesize the final travel plan based on transport and hotel options. + This method retrieves the transport and hotel options from the state, + formats them into a markdown response, and updates the state with the final plan. + Args: + state (dict): The shared workflow state containing travel preferences and options. + config (optional): Reserved for compatibility; not used here. + Returns: + dict: Updated state with 'final_plan' key containing the synthesized travel plan. 
+ """ + language = config.get("configurable", {}).get("language", "EN") + + if DEBUG: + self.log_info("Synthesizing final travel plan") + + transport = state.get("flight_options", []) + hotels = state.get("hotel_options", []) + + num_days = state.get("num_days", 0) + + transport_summary = "" + if transport: + option = transport[0] # use the first result + + if language == "IT": + transport_summary = ( + f"**Opzione di Trasporto**\n" + f"- Tipo: {option.get('type')}\n" + f"- Fornitore: {option.get('provider')}\n" + f"- Partenza: {option.get('departure')}\n" + f"- Arrivo: {option.get('arrival')}\n" + f"- Prezzo: €{option.get('price')}\n" + ) + else: + # Default to English if not Italian + transport_summary = ( + f"**Transport Option**\n" + f"- Type: {option.get('type')}\n" + f"- Provider: {option.get('provider')}\n" + f"- Departure: {option.get('departure')}\n" + f"- Arrival: {option.get('arrival')}\n" + f"- Price: €{option.get('price')}\n" + ) + else: + transport_summary = "_No transport options found._" + + hotel_summary = "" + if hotels: + hotel = hotels[0] + + if language == "IT": + hotel_summary = ( + f"**Opzione di Hotel**\n" + f"- Nome: {hotel.get('name')}\n" + f"- Stelle: {hotel.get('stars')}\n" + f"- Posizione: {hotel.get('location')}\n" + f"- Servizi: {', '.join(hotel.get('amenities', []))}\n" + f"- Prezzo per notte: €{hotel.get('price')}\n" + f"- Totale per {num_days} notti: €{hotel.get('price') * num_days}\n" + ) + else: + # Default to English if not Italian + hotel_summary = ( + f"**Hotel Option**\n" + f"- Name: {hotel.get('name')}\n" + f"- Stars: {hotel.get('stars')}\n" + f"- Location: {hotel.get('location')}\n" + f"- Amenities: {', '.join(hotel.get('amenities', []))}\n" + f"- Price per night: €{hotel.get('price')}\n" + f"- Total for {num_days} nights: €{hotel.get('price') * num_days}\n" + ) + else: + hotel_summary = "_No hotel options found._" + + if language == "IT": + final_text = ( + f"### ✈️ Piano di Viaggio da {state.get('place_of_departure')} a 
{state.get('destination')}\n\n" + f"📅 **Date**: {state.get('start_date')} → {state.get('end_date')}\n" + f"👥 **Viaggiatori**: {state.get('num_persons')}\n\n" + f"{transport_summary}\n\n" + f"{hotel_summary}\n" + ) + else: + # Default to English if not Italian + final_text = ( + f"### ✈️ Travel Plan from {state.get('place_of_departure')} to {state.get('destination')}\n\n" + f"📅 **Dates**: {state.get('start_date')} → {state.get('end_date')}\n" + f"👥 **Travelers**: {state.get('num_persons')}\n\n" + f"{transport_summary}\n\n" + f"{hotel_summary}\n" + ) + + state["final_plan"] = final_text + + if DEBUG: + self.log_info("Final plan synthesized.") + + return state diff --git a/ai/gen-ai-agents/travel_agent/nodes/transport_node.py b/ai/gen-ai-agents/travel_agent/nodes/transport_node.py new file mode 100644 index 000000000..ab9f57fe8 --- /dev/null +++ b/ai/gen-ai-agents/travel_agent/nodes/transport_node.py @@ -0,0 +1,71 @@ +# search_transport_node.py +# -*- coding: utf-8 -*- +""" +SearchTransportNode + +This module defines the SearchTransportNode class, which is responsible for retrieving +transport options (e.g., flights or trains) based on user-provided travel details. + +It queries a mock or real API and stores the results in the workflow state under 'flight_options'. + +Author: L. Saetta +Date: 20/05/2025 + +""" +import time +import requests +from base_node import BaseNode +from config import TRANSPORT_API_URL + + +class SearchTransportNode(BaseNode): + """ + Node in the LangGraph workflow responsible for retrieving transport options. + + This node uses an HTTP API to search for available transport (e.g., flights or trains) + based on destination, start date, and preferred transport type. The results are stored + in the shared workflow state for further processing. + + Attributes: + name (str): Node identifier for logging purposes. + """ + + def __init__(self): + """ + Initialize the SearchTransportNode and configure its name for logging. 
+ """ + super().__init__("SearchTransportNode") + + def invoke(self, state: dict, config=None, **kwargs) -> dict: + """ + Query a transport search API and update the state with transport options. + + Args: + state (dict): The shared workflow state containing travel preferences. + config (optional): Reserved for compatibility; not used here. + + Returns: + dict: Updated state with 'flight_options' key containing a list of transport results. + """ + self.log_info("Searching for transport options") + + # Simulate network delay or latency + time.sleep(5) + + try: + response = requests.get( + TRANSPORT_API_URL, + params={ + "destination": state.get("destination"), + "start_date": state.get("start_date"), + "transport_type": state.get("transport_type"), + }, + timeout=5, + ) + data = response.json() + state["flight_options"] = data.get("options", []) + self.log_info(f"Found transport: {data}") + except Exception as e: + self.log_error(f"Transport search failed: {e}") + + return state diff --git a/ai/gen-ai-agents/travel_agent/prompt_template.py b/ai/gen-ai-agents/travel_agent/prompt_template.py new file mode 100644 index 000000000..0cd43ad16 --- /dev/null +++ b/ai/gen-ai-agents/travel_agent/prompt_template.py @@ -0,0 +1,87 @@ +""" +Prompt templates for managing user input + +These prompts don't need to be translated, +as they are used internally by the LangGraph workflow. +""" + +from langchain_core.prompts import PromptTemplate + +# +# Prompt template for extracting structured travel details from user input +# +input_parser_prompt = PromptTemplate.from_template( + """ +Today's date is: **{today}** + +Extract structured travel details from the following user input. 
+ +Return only a JSON, enclosed in triple backticks, with the keys: +- place_of_departure (str) +- destination (str) +- start_date (YYYY-MM-DD) +- end_date (YYYY-MM-DD) +- num_days (int) +- num_persons (int) +- transport_type (e.g., "airplane", "train", "other") +- hotel_preferences (dictionary with keys like "stars", "location", "amenities") + +If some information is not clear, use null for the value. + +## **User input:** +{user_input} + +### **Output Format:** +Return a **JSON object** with the following format: +```json +{{ + "place_of_departure": "Rome", + "destination": "Barcelona", + "start_date": null, + "end_date": null, + "num_days": null, + "num_persons": null, + "transport_type": "train", + "hotel_preferences": {{ + "stars": 4, + "location": "central", + "amenities": null + }} +}} +``` + +Return: +""" +) + +# +# Prompt template for classifying user intent (Routing) +# +router_prompt = PromptTemplate.from_template( + """You are an intent classifier for a travel assistant. + + Classify the user request as either: + - "booking": when the user wants to plan or book a trip + - "info": when the user wants general travel information + + Return only "booking" or "info". + + User input: + {user_input} + """ +) + +# +# Prompt template for generating travel information +# +answer_prompt = PromptTemplate.from_template( + """You are a helpful travel assistant. + + Based on the user question below, provide clear and useful travel information. + + Respond in markdown format (with bullet points or sections if needed). + + User question: + {user_input} + """ +) diff --git a/ai/gen-ai-agents/travel_agent/run_agent.py b/ai/gen-ai-agents/travel_agent/run_agent.py new file mode 100644 index 000000000..e7195c08a --- /dev/null +++ b/ai/gen-ai-agents/travel_agent/run_agent.py @@ -0,0 +1,49 @@ +""" +run_agent.py + +Defines the asynchronous entry point to invoke the LangGraph-based travel planning agent. 
async def run_agent(user_input: str, config: dict = None) -> dict:
    """
    Run the travel planning agent with the given user input.

    Args:
        user_input (str): A free-form natural language string describing
            the user's travel needs.
        config (dict, optional): LangGraph run config. The request id is
            read from config["configurable"]["thread_id"] when present.

    Returns:
        dict: A dict with keys 'final_plan' (Markdown string),
        'hotel_options' (list) and 'clarification_needed' (bool).
    """
    # Normalize before any lookup: the original dereferenced `config`
    # before the `config or {}` guard, crashing when config was omitted.
    config = config or {}
    # Defensive nested lookup: default to a dict (not ""), otherwise the
    # chained .get() raises AttributeError when "configurable" is absent.
    thread_id = config.get("configurable", {}).get("thread_id", "")

    initial_state = TravelState(
        request_id=thread_id,
        user_input=user_input,
    )

    final_state = await travel_agent_graph.ainvoke(initial_state, config=config)

    # If clarification is needed, return the clarification message
    # in place of the final plan.
    if final_state.get("clarification_needed"):
        return {
            "final_plan": final_state.get(
                "clarification_prompt", "I need more details to proceed."
            ),
            "hotel_options": [],
            "clarification_needed": True,
        }

    # Final, complete answer
    return {
        "final_plan": final_state.get(
            "final_plan", "Sorry, I couldn't generate a travel plan."
        ),
        "hotel_options": final_state.get("hotel_options", []),
        "clarification_needed": False,
    }
st.session_state.conversation.append(("user", user_input)) + st.session_state.last_input = combined_input + + # Run the agent + with st.spinner(t["spinner"]): + # language is injected through the config + REQUEST_ID = str(uuid.uuid4()) + my_config = { + "configurable": { + "language": st.session_state.language, + "thread_id": REQUEST_ID, + } + } + + result = asyncio.run(run_agent(combined_input, config=my_config)) + + if DEBUG: + print("Final result from agent:") + print(result) + + st.session_state.conversation.append(("agent", result["final_plan"])) + st.session_state.hotel_options = result["hotel_options"] + st.session_state.clarification_mode = result.get( + "clarification_needed", False + ) + + +with col2: + if st.button(t["clear"]): + st.session_state.conversation = [] + st.session_state.clarification_mode = False + st.session_state.last_input = "" + st.rerun() + +# --- Display only last two messages --- +if len(st.session_state.conversation) >= 2: + last_user = st.session_state.conversation[-2] + last_agent = st.session_state.conversation[-1] + + if last_user[0] == "user": + st.markdown(f"**🧑 {t['user']}:** {last_user[1]}") + if last_agent[0] == "agent": + st.markdown(f"**🤖 {t['planner']}:**") + st.markdown(last_agent[1]) + + # Visualize Hotel on map if available + if ( + hasattr(st.session_state, "hotel_options") + and st.session_state.hotel_options + ): + # get Hotel location + hotel = st.session_state.hotel_options[0] + lat = hotel.get("latitude") + lon = hotel.get("longitude") + + if lat and lon: + layer = pdk.Layer( + "ScatterplotLayer", + data=[{"position": [lon, lat]}], + get_position="position", + get_radius=100, + get_fill_color=[255, 0, 0], + pickable=True, + ) + + view_state = pdk.ViewState(latitude=lat, longitude=lon, zoom=14) + + st.pydeck_chart( + pdk.Deck( + layers=[layer], + initial_view_state=view_state, + map_style=MAP_STYLE, + ) + ) diff --git a/ai/gen-ai-agents/travel_agent/translations.py b/ai/gen-ai-agents/travel_agent/translations.py 
new file mode 100644 index 000000000..95af4c62e --- /dev/null +++ b/ai/gen-ai-agents/travel_agent/translations.py @@ -0,0 +1,71 @@ +""" +This file contains all the translations for the OCI AI Travel Planner application. +The translations are stored in a dictionary format, where each key is a language code +(e.g., "EN" for English, "IT" for Italian) and the value is another dictionary containing +the translated strings for that language. + +clarification_keywords: Keywords that indicate a need for clarification in user input. +clarification_prompt_template: Template for generating a clarification prompt +based on missing fields. +""" + +TRANSLATIONS = { + "EN": { + "title": "✈️ OCI AI Travel Planner", + "input_label": "Tell me about your trip (or respond to clarification):", + "input_placeholder": """I want to go to Valencia from June 12 to June 18 with my girlfriend. + I want to leave from Rome. + Prefer train and a central hotel.""", + "send": "Send", + "clear": "🧹 Clear", + "user": "User", + "planner": "Planner", + "spinner": "Processing...", + "clarification_keywords": [ + "missing", + "please provide", + "provide", + "clarification", + ], + "clarification_prompt_template": ( + "The following fields are missing from the user's message: {fields}. " + "Please formulate a polite question to request this information." + ), + "itinerary_prompt_template": ( + "You are a travel assistant. Create a {num_days}-day travel itinerary " + "for a trip to {destination}, " + "staying at {hotel} in a {location} area. Suggest realistic, " + "enjoyable activities for each day." + ), + "suggested_itinerary_title": "### 🗓️ Suggested Itinerary", + }, + "IT": { + "title": "✈️ OCI AI Travel Planner", + "input_label": "Parlami del tuo viaggio (o rispondi alla richiesta di chiarimento):", + "input_placeholder": """Voglio andare a Valencia dal 12 al 18 giugno con la mia ragazza. + Voglio partire da Roma. 
class TravelState(TypedDict, total=False):
    """
    Shared state for the travel planning agent.

    Each field in this structure represents a piece of information
    extracted, enriched, or used by the AI agent during the travel
    planning process. With ``total=False`` every key is optional and is
    filled incrementally as the workflow progresses.

    Fields:
        request_id (str): Unique identifier of the request (thread id).
        user_input (str): Raw natural language input from the user.
        place_of_departure (str): Starting city or region for the trip.
        destination (str): Target destination city or region.
        start_date (str): Start date of the trip in 'YYYY-MM-DD' format.
        end_date (str): End date of the trip in 'YYYY-MM-DD' format.
        num_days (int): Duration of the trip in days.
        num_persons (int): Number of travelers.
        transport_type (str): Preferred transport method ("airplane", "train", etc.).
        hotel_preferences (Dict): Preferences for hotel (e.g., stars, location).
        flight_options (List): Available or suggested flight options.
        hotel_options (List): Available or suggested hotel options.
        final_plan (str): Generated summary plan to return to the user.
        clarification_needed (bool): True when mandatory fields are missing.
        clarification_prompt (str): Question to ask the user for the missing details.
    """

    # unique identifier for the request
    request_id: str
    # the string with user input
    user_input: str

    # the structured output
    place_of_departure: str
    destination: str
    start_date: str
    end_date: str
    num_days: int
    num_persons: int
    transport_type: str
    hotel_preferences: Dict
    flight_options: List
    hotel_options: List

    # the final plan to return to the user
    final_plan: str

    # fields for the clarification loop
    clarification_needed: bool
    clarification_prompt: str
+ """ + try: + # Remove triple backticks and leading/trailing whitespace + text = remove_triple_backtics(text).strip() + # Use regex to extract JSON content (contained between {}) + json_match = re.search(r"\{.*\}", text, re.DOTALL) + if json_match: + json_content = json_match.group(0) + return json.loads(json_content) + + # If no JSON content is found, raise an error + raise ValueError("No JSON content found in the text.") + except json.JSONDecodeError as e: + raise ValueError(f"Invalid JSON format: {e}") from e + + +def remove_triple_backtics(input_text: str) -> str: + """ + Remove triple backticks and language markers (e.g., ```json, ```python) from LLM responses. + + Args: + input_text (str): Text that may include markdown code fences. + + Returns: + str: Cleaned text without triple backticks. + """ + _text = re.sub(r"```(json|python)?", "", input_text, flags=re.IGNORECASE) + _text = _text.replace("```", "") + return _text diff --git a/ai/gen-ai-agents/travel_agent/workflow.py b/ai/gen-ai-agents/travel_agent/workflow.py new file mode 100644 index 000000000..508356b63 --- /dev/null +++ b/ai/gen-ai-agents/travel_agent/workflow.py @@ -0,0 +1,77 @@ +""" +Travel Planner Workflow Definition + +Defines the LangGraph-based workflow for the travel planning AI agent. +This initial version includes only the input parsing step. +""" + +from langgraph.graph import StateGraph, START, END +from travel_state import TravelState +from nodes.router_node import RouterNode +from nodes.parse_input_node import ParseInputNode +from nodes.transport_node import SearchTransportNode +from nodes.hotel_node import SearchHotelNode +from nodes.synthesize_plan_node import SynthesizePlanNode +from nodes.clarification_node import ClarificationNode +from nodes.answer_info_node import AnswerInfoNode +from nodes.generate_itinerary_node import GenerateItineraryNode + + +def create_travel_planner_graph(): + """ + Construct and compile the LangGraph workflow for the travel planning agent. 
def create_travel_planner_graph():
    """
    Construct and compile the LangGraph workflow for the travel planning agent.

    The graph first classifies intent, then routes:
      - "info": answer_info -> END
      - "booking": parse_input -> clarify, then either END (when
        clarification is needed) or search_transport -> search_hotel ->
        synthesize_plan -> generate_itinerary -> END.

    Returns:
        graph (CompiledGraph): A runnable LangGraph instance.
    """
    builder = StateGraph(TravelState)

    # the node for intent classification
    builder.add_node("router", RouterNode())
    # Add the node responsible for parsing the user input
    builder.add_node("parse_input", ParseInputNode())
    # node to check if there are missing fields
    builder.add_node("clarify", ClarificationNode())
    builder.add_node("search_transport", SearchTransportNode())
    builder.add_node("search_hotel", SearchHotelNode())
    builder.add_node("synthesize_plan", SynthesizePlanNode())
    builder.add_node("answer_info", AnswerInfoNode())
    builder.add_node("generate_itinerary", GenerateItineraryNode())

    # Entry point of the workflow: intent classification
    builder.add_edge(START, "router")

    # Route on the intent set by the router node
    # (assumes the router always writes state["intent"] — TODO confirm).
    builder.add_conditional_edges(
        "router",
        lambda state: state["intent"],
        {
            "booking": "parse_input",
            "info": "answer_info",
        },
    )

    builder.add_edge("parse_input", "clarify")

    # Stop and ask the user when mandatory fields are missing
    builder.add_conditional_edges(
        "clarify",
        lambda state: (
            "search_transport" if not state.get("clarification_needed") else "end"
        ),
        {"search_transport": "search_transport", "end": END},
    )

    # for now, sequential; could run in parallel
    builder.add_edge("search_transport", "search_hotel")
    builder.add_edge("search_hotel", "synthesize_plan")
    builder.add_edge("synthesize_plan", "generate_itinerary")
    builder.add_edge("generate_itinerary", END)

    # info branch goes straight to the end
    builder.add_edge("answer_info", END)

    # Compile and return the runnable graph
    return builder.compile()