diff --git a/aibi/aibi-customer-support/_resources/bundle_config.py b/aibi/aibi-customer-support/_resources/bundle_config.py
index a0df8ff6..bec531af 100644
--- a/aibi/aibi-customer-support/_resources/bundle_config.py
+++ b/aibi/aibi-customer-support/_resources/bundle_config.py
@@ -38,20 +38,17 @@
{
"source_folder": "aibi/dbdemos_aibi_customer_support/agents_bronze",
"source_format": "parquet",
- "target_volume_folder": "agents_bronze",
- "target_format": "parquet"
+ "target_volume_folder": "agents_bronze"
},
{
"source_folder": "aibi/dbdemos_aibi_customer_support/tickets_bronze",
"source_format": "parquet",
- "target_volume_folder": "tickets_bronze",
- "target_format": "parquet"
+ "target_volume_folder": "tickets_bronze"
},
{
"source_folder": "aibi/dbdemos_aibi_customer_support/sla_bronze",
"source_format": "parquet",
- "target_volume_folder": "sla_bronze",
- "target_format": "parquet"
+ "target_volume_folder": "sla_bronze"
}
],
"sql_queries": [
@@ -256,6 +253,9 @@
"function_names": [
"{{CATALOG}}.{{SCHEMA}}.get_top_agents_by_survey_score"
],
+ "metrics": {"yaml": """
+
+ """}
"curated_questions": [
"Proportion of tickets per month that violate first response SLA",
"Agent performance by tickets closed per month",
diff --git a/aibi/aibi-sales-pipeline-review/_resources/bundle_config.py b/aibi/aibi-sales-pipeline-review/_resources/bundle_config.py
index 8a01028b..2789313b 100644
--- a/aibi/aibi-sales-pipeline-review/_resources/bundle_config.py
+++ b/aibi/aibi-sales-pipeline-review/_resources/bundle_config.py
@@ -38,32 +38,27 @@
{
"source_folder": "aibi/dbdemos_aibi_sales_pipeline/raw_data/raw_accounts",
"source_format": "parquet",
- "target_volume_folder": "raw_accounts",
- "target_format": "delta"
+ "target_volume_folder": "raw_accounts"
},
{
"source_folder": "aibi/dbdemos_aibi_sales_pipeline/raw_data/raw_opportunity",
"source_format": "parquet",
- "target_volume_folder": "raw_opportunity",
- "target_format": "delta"
+ "target_volume_folder": "raw_opportunity"
},
{
"source_folder": "aibi/dbdemos_aibi_sales_pipeline/raw_data/raw_user",
"source_format": "parquet",
- "target_volume_folder": "raw_user",
- "target_format": "delta"
+ "target_volume_folder": "raw_user"
},
{
"source_folder": "aibi/dbdemos_aibi_sales_pipeline/raw_data/raw_dim_country",
"source_format": "parquet",
- "target_volume_folder": "raw_dim_country",
- "target_format": "delta"
+ "target_volume_folder": "raw_dim_country"
},
{
"source_folder": "aibi/dbdemos_aibi_sales_pipeline/raw_data/raw_employee_hierarchy",
"source_format": "parquet",
- "target_volume_folder": "raw_employee_hierarchy",
- "target_format": "delta"
+ "target_volume_folder": "raw_employee_hierarchy"
}
],
"sql_queries": [
diff --git a/product_demos/Data-Science/llm-ai-agent/01-create-tools/01-first-tools.py b/product_demos/Data-Science/llm-ai-agent/01-create-tools/01-first-tools.py
new file mode 100644
index 00000000..d45a1eb1
--- /dev/null
+++ b/product_demos/Data-Science/llm-ai-agent/01-create-tools/01-first-tools.py
@@ -0,0 +1,208 @@
+# Databricks notebook source
+# see https://github.com/databricks/tmm/blob/main/agents-workshop/01_create_tools/01_create_tools.py
+
+# COMMAND ----------
+
+# MAGIC %md-sandbox
+# MAGIC
+# MAGIC # 1/ Create your first tool
+# MAGIC
+# MAGIC TODO: add an image like this one showing the different tools & the app
+# MAGIC
+# MAGIC See deck template: https://docs.google.com/presentation/d/18aIGHtwmrYFhVtF7gE6fi5g7xeA8jRgoCgNLwle9IfM/edit#slide=id.g2ee8efb26f5_4_0
+# MAGIC
+# MAGIC Cal can help to accept the image PR on the dbdemos-resources repo
+# MAGIC
+# MAGIC -- Don't forget the tracker
+
+# COMMAND ----------
+
+#Note: don't pin databricks langchain/agent/mlflow, pin the rest
+%pip install -U --quiet databricks-sdk==0.40.0 databricks-langchain databricks-agents mlflow[databricks] databricks-vectorsearch==0.49 langchain==0.3.19 langchain_core==0.3.37 bs4==0.0.2 markdownify==0.14.1 pydantic==2.10.1
+dbutils.library.restartPython()
+
+# COMMAND ----------
+
+#Note: Load the dataset directly as delta table
+%run ../_resources/00-init $reset_all_data=false
+
+# COMMAND ----------
+
+# MAGIC %sql
+# MAGIC -- Note: Run some selects to review the different datasets available
+# MAGIC SELECT * FROM databricks_documentation ...
+
+# COMMAND ----------
+
+# MAGIC %md
+# MAGIC ## Creating our tools: Using Unity Catalog Functions
+# MAGIC
+# MAGIC Let's start by defining the functions our LLM will be able to execute. These functions can contain any logic, from simple SQL to advanced python.
+
+# COMMAND ----------
+
+# MAGIC %md
+# MAGIC ### First tool: Converting inches to centimeters
+# MAGIC
+# MAGIC Let's start with a trivial tool: a SQL function converting a size in inches to centimeters.
+# MAGIC
+# MAGIC We'll save this function within Unity Catalog. You can open the explorer to review the functions created in this notebook.
+# MAGIC
+# MAGIC *Note: This is a very simple first example for this demo. We'll implement a broader math tool later on.*
+
+# COMMAND ----------
+
+# MAGIC %sql
+# MAGIC CREATE OR REPLACE FUNCTION convert_inch_to_cm(size_in_inch FLOAT COMMENT 'the size in inches')
+# MAGIC RETURNS FLOAT
+# MAGIC LANGUAGE SQL
+# MAGIC COMMENT 'convert size from inch to cm'
+# MAGIC RETURN size_in_inch * 2.54;
+# MAGIC
+# MAGIC -- let's test our function:
+# MAGIC SELECT convert_inch_to_cm(10) as `10_inches_in_cm`;
+
+# COMMAND ----------
+
+# MAGIC %md
+# MAGIC ### Second tool: Querying the customer invoice history
+# MAGIC
+# MAGIC Our agent will also need to fetch a customer's past invoices. Below is a minimal sketch as a SQL table function; the `customer_invoices` table is an assumption for this demo, adapt it to the dataset loaded by the init notebook.
+# MAGIC
+# MAGIC We'll save this function within Unity Catalog. You can open the explorer to review the functions created in this notebook.
+
+# COMMAND ----------
+
+# MAGIC %sql
+# MAGIC -- minimal sketch, assuming a customer_invoices table with (invoice_id, invoice_date, amount, customer_id)
+# MAGIC CREATE OR REPLACE FUNCTION query_customer_invoice_history(input_customer_id BIGINT COMMENT 'id of the customer to look up')
+# MAGIC RETURNS TABLE (invoice_id BIGINT, invoice_date DATE, amount DOUBLE)
+# MAGIC COMMENT 'return the invoice history for the given customer'
+# MAGIC RETURN SELECT invoice_id, invoice_date, amount FROM customer_invoices WHERE customer_id = input_customer_id;
+# MAGIC
+# MAGIC -- let's test our function:
+# MAGIC SELECT * FROM query_customer_invoice_history(42);
+
+# COMMAND ----------
+
+# MAGIC %md
+# MAGIC ### Last tool: Running math computations in Python
+# MAGIC
+# MAGIC LLMs are often unreliable at precise math, so let's add the broader math tool mentioned above. This one is written in Python instead of SQL, showing how a UC function can embed python logic.
+# MAGIC
+# MAGIC We'll save this function within Unity Catalog. You can open the explorer to review the functions created in this notebook.
+# MAGIC
+# MAGIC *TODO: also add the coupon-generation tool used later in the demo.*
+
+# COMMAND ----------
+
+# MAGIC %sql
+# MAGIC -- a Python tool: UC functions can also embed python code
+# MAGIC CREATE OR REPLACE FUNCTION compute_math(expr STRING)
+# MAGIC RETURNS STRING
+# MAGIC LANGUAGE PYTHON
+# MAGIC COMMENT 'Run any mathematical function and returns the result as output. Supports python syntax like math.sqrt(13)'
+# MAGIC AS
+# MAGIC $$
+# MAGIC   import ast
+# MAGIC   import operator
+# MAGIC   import math
+# MAGIC   # whitelist of the operators we allow
+# MAGIC   operators = {ast.Add: operator.add, ast.Sub: operator.sub, ast.Mult: operator.mul,
+# MAGIC                ast.Div: operator.truediv, ast.Pow: operator.pow, ast.Mod: operator.mod,
+# MAGIC                ast.USub: operator.neg}
+# MAGIC   def eval_node(node):
+# MAGIC     if isinstance(node, ast.Constant):
+# MAGIC       return node.value
+# MAGIC     if isinstance(node, ast.BinOp):
+# MAGIC       return operators[type(node.op)](eval_node(node.left), eval_node(node.right))
+# MAGIC     if isinstance(node, ast.UnaryOp):
+# MAGIC       return operators[type(node.op)](eval_node(node.operand))
+# MAGIC     # allow math.xxx(...) calls such as math.sqrt(13)
+# MAGIC     if isinstance(node, ast.Call) and isinstance(node.func, ast.Attribute) and node.func.value.id == 'math':
+# MAGIC       return getattr(math, node.func.attr)(*[eval_node(arg) for arg in node.args])
+# MAGIC     raise ValueError('unsupported expression')
+# MAGIC   try:
+# MAGIC     if expr.startswith('```') and expr.endswith('```'):
+# MAGIC       expr = expr[3:-3].strip()
+# MAGIC     node = ast.parse(expr, mode='eval').body
+# MAGIC     return str(eval_node(node))
+# MAGIC   except Exception as ex:
+# MAGIC     return str(ex)
+# MAGIC $$;
+# MAGIC
+# MAGIC -- let's test our function:
+# MAGIC SELECT compute_math("(2+2)/3") as result;
+
+# COMMAND ----------
+
+# MAGIC %md
+# MAGIC ### ... Continue: expose the UC functions to the agent (see the sketch below)
+
+# COMMAND ----------
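+
+# Minimal sketch of wiring the UC functions above into LangChain tools.
+# Assumptions: the UCFunctionToolkit helper from the databricks_langchain
+# package installed above, and `catalog` / `dbName` variables set by the
+# 00-init resource notebook - adjust the names to your setup.
+from databricks_langchain import UCFunctionToolkit
+
+toolkit = UCFunctionToolkit(
+    function_names=[f"{catalog}.{dbName}.convert_inch_to_cm",
+                    f"{catalog}.{dbName}.query_customer_invoice_history",
+                    f"{catalog}.{dbName}.compute_math"])
+tools = toolkit.tools
+
+# COMMAND ----------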
+
+# MAGIC %md
+# MAGIC ### ... Try on Playground
+
+# COMMAND ----------
+
+from langchain.agents import AgentExecutor, create_tool_calling_agent
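+# get_prompt, get_tools and llm are helpers defined in the 00-init resource notebook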
+prompt = get_prompt(prompt="You are an assistant for a python developer. Internally, you have a tool named execute_python_code that can generate and run python code to help answering what the customer is asking. input: valid python code as a string. output: the result of the return value from the code execution. Don't mention you have tools or the tools name. Make sure you send the full python code at once to the function and that the code has a return statement at the end to capture the result. Don't print anything in the code you write, return the result you need as final instruction. Make sure the python code is valid. Only send python. Here is an example: 'def square_function(number):\\n return number*number\\n\\nreturn square_function(3)'")
+tools = get_tools()
+agent = create_tool_calling_agent(llm, tools, prompt)
+agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
+
+#Let's print the answer! Note that the LLM often makes a few errors, but then analyzes them and self-corrects.
+answer = agent_executor.invoke({"input": "What's the result of the fibonacci suite? Display its result for 5."})
+displayHTML(answer['output'].replace('\n', ' '))
+
+# COMMAND ----------
+
+# TODO: write this as a separate file if you want to deploy it properly
+from langchain.schema.runnable import RunnableLambda
+from langchain_core.output_parsers import StrOutputParser
+
+# Function to extract the user's query
+def extract_user_query_string(chat_messages_array):
+ return chat_messages_array[-1]["content"]
+
+# Wrapping the agent_executor invocation
+def agent_executor_wrapper(input_data):
+ result = agent_executor.invoke({"input": input_data})
+ return result["output"]
+
+# Create the chain using the | operator with StrOutputParser
+chain = (
+ RunnableLambda(lambda data: extract_user_query_string(data["messages"])) # Extract the user query
+ | RunnableLambda(agent_executor_wrapper) # Pass the query to the agent executor
+ | StrOutputParser() # Optionally parse the output to ensure it's a clean string
+)
+
+# COMMAND ----------
+
+# Example input data
+input_data = {
+ "messages": [
+ {"content": "Write a function that computes the Fibonacci sequence in Python and displays its result for 5."}
+ ]
+}
+# Run the chain
+answer = chain.invoke(input_data)
+displayHTML(answer.replace('\n', ' '))
+
+# COMMAND ----------
+
+# MAGIC %md
+# MAGIC ## Deploying the agent as a model serving endpoint
+# MAGIC
+# MAGIC Databricks automatically generates all the required notebooks and setup for you to deploy these agents!
+# MAGIC
+# MAGIC To deploy them with the latest configuration, open [Databricks Playground ](/ml/playground), select the tools you want to use and click on the "Export Notebook" button on the top!
+# MAGIC
+# MAGIC This will generate notebooks pre-configured for you, including the review application to start testing and collecting your new model!
+
+# COMMAND ----------
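+
+# For reference, a programmatic alternative to the Playground export: once the
+# agent is logged to Unity Catalog, the databricks-agents package can deploy it.
+# The UC model name and version below are placeholders.
+from databricks import agents
+
+deployment = agents.deploy("main.dbdemos.my_agent", 1)
+
+# COMMAND ----------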
+
+# MAGIC %md Next: deploy the agent
diff --git a/product_demos/Data-Science/llm-ai-agent/02-evaluate-and-deploy/agent.py b/product_demos/Data-Science/llm-ai-agent/02-evaluate-and-deploy/agent.py
new file mode 100644
index 00000000..34c04253
--- /dev/null
+++ b/product_demos/Data-Science/llm-ai-agent/02-evaluate-and-deploy/agent.py
@@ -0,0 +1,6 @@
+# Databricks notebook source
+# TODO: add automatic data generation?
+
+# COMMAND ----------
+
+# see https://github.com/databricks/tmm/tree/main/agents-workshop/02_agent_eval
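+
+# COMMAND ----------
+
+# Minimal sketch of evaluating the agent with Mosaic AI Agent Evaluation.
+# Assumptions: the databricks-agents package is installed, `eval_df` is a
+# hypothetical dataframe with `request`/`expected_response` columns, and the
+# agent is logged to UC as main.dbdemos.agent - adjust names to your setup.
+import mlflow
+
+eval_results = mlflow.evaluate(
+    data=eval_df,
+    model="models:/main.dbdemos.agent/1",
+    model_type="databricks-agent",  # enables the built-in agent judges
+)
+display(eval_results.tables["eval_results"])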
diff --git a/product_demos/Data-Science/llm-ai-agent/02-evaluate-and-deploy/config.yml.py b/product_demos/Data-Science/llm-ai-agent/02-evaluate-and-deploy/config.yml.py
new file mode 100644
index 00000000..cdad7908
--- /dev/null
+++ b/product_demos/Data-Science/llm-ai-agent/02-evaluate-and-deploy/config.yml.py
@@ -0,0 +1,2 @@
+# Databricks notebook source
+
diff --git a/product_demos/Data-Science/llm-ai-agent/02-evaluate-and-deploy/driver.py b/product_demos/Data-Science/llm-ai-agent/02-evaluate-and-deploy/driver.py
new file mode 100644
index 00000000..389fe3fe
--- /dev/null
+++ b/product_demos/Data-Science/llm-ai-agent/02-evaluate-and-deploy/driver.py
@@ -0,0 +1,2 @@
+# Databricks notebook source
+# see https://github.com/databricks/tmm/blob/main/agents-workshop/02_agent_eval/driver.py
diff --git a/product_demos/Data-Science/llm-ai-agent/03-documentation-rag-tool/03.1-First-Step-RAG-On-Databricks.py b/product_demos/Data-Science/llm-ai-agent/03-documentation-rag-tool/03.1-First-Step-RAG-On-Databricks.py
new file mode 100644
index 00000000..0cd23b5f
--- /dev/null
+++ b/product_demos/Data-Science/llm-ai-agent/03-documentation-rag-tool/03.1-First-Step-RAG-On-Databricks.py
@@ -0,0 +1,20 @@
+# Databricks notebook source
+# Documentation
+# See notebook https://github.com/databricks-demos/dbdemos-notebooks/blob/main/product_demos/Data-Science/chatbot-rag-llm/01-first-step/01-First-Step-RAG-On-Databricks.py to set up the VS endpoint, create the VS table, etc.
+
+# COMMAND ----------
+
+# TODO: should we plug that with AI Builder KBQA directly?
+
+# https://docs.google.com/presentation/d/1oyiQrt9tVQGJvZW0G1vi2R6y4ukZaDoy3IwEMY5WXF4/edit#slide=id.g32e304af4b9_0_412
+
+# COMMAND ----------
+
+# MAGIC %sql
+# MAGIC -- show the source table with the documentation
+# MAGIC -- TODO: review potential integration with
+# MAGIC SELECT xxx
+
+# COMMAND ----------
+
+# TODO: add it to the agent's tools - see the sketch below
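+
+# COMMAND ----------
+
+# Minimal sketch of exposing the documentation index as a retriever tool.
+# Assumptions: the VectorSearchRetrieverTool helper from databricks_langchain,
+# and a vector search index created as in the chatbot-rag-llm demo linked above
+# (the index name below is illustrative).
+from databricks_langchain import VectorSearchRetrieverTool
+
+retriever_tool = VectorSearchRetrieverTool(
+    index_name="main.dbdemos.databricks_documentation_vs_index",
+    tool_name="fetch_product_documentation",
+    tool_description="Search the Databricks product documentation",
+)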
diff --git a/product_demos/Data-Science/llm-ai-agent/04-advanced-tool/04.1-Genie-As-A-Tool.py b/product_demos/Data-Science/llm-ai-agent/04-advanced-tool/04.1-Genie-As-A-Tool.py
new file mode 100644
index 00000000..0102fc5c
--- /dev/null
+++ b/product_demos/Data-Science/llm-ai-agent/04-advanced-tool/04.1-Genie-As-A-Tool.py
@@ -0,0 +1,3 @@
+# Databricks notebook source
+# TODO: add Genie as a tool (sketch below). TBD: how do we easily update the demo yaml/chain to add the new tool?
+
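+# COMMAND ----------
+
+# Minimal sketch of wrapping a Genie space as an agent tool.
+# Assumptions: the GenieAgent helper from databricks_langchain and an existing
+# Genie space - the space id below is a placeholder.
+from databricks_langchain.genie import GenieAgent
+
+genie_agent = GenieAgent(
+    genie_space_id="01ef-placeholder-space-id",
+    genie_agent_name="customer_data_genie",
+    description="Answers analytical questions over the customer data tables",
+)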
diff --git a/product_demos/Data-Science/llm-ai-agent/04-advanced-tool/04.2-OLTP.py b/product_demos/Data-Science/llm-ai-agent/04-advanced-tool/04.2-OLTP.py
new file mode 100644
index 00000000..0fb84468
--- /dev/null
+++ b/product_demos/Data-Science/llm-ai-agent/04-advanced-tool/04.2-OLTP.py
@@ -0,0 +1,2 @@
+# Databricks notebook source
+# TODO (V2?): add a tool using an OLTP database
diff --git a/product_demos/Data-Science/llm-ai-agent/05-full-application/.cursor/rules/max-line-len.mdc b/product_demos/Data-Science/llm-ai-agent/05-full-application/.cursor/rules/max-line-len.mdc
new file mode 100644
index 00000000..c5c0d9eb
--- /dev/null
+++ b/product_demos/Data-Science/llm-ai-agent/05-full-application/.cursor/rules/max-line-len.mdc
@@ -0,0 +1,8 @@
+---
description: code style - maximum line length
+globs: *.py
+---
+
+# Maximum line length code style
+
+- Lines can be longer than 80 characters, but don't make them excessively long either. For function definitions especially, put multiple parameters on the same line.
\ No newline at end of file
diff --git a/product_demos/Data-Science/llm-ai-agent/05-full-application/.cursorignore b/product_demos/Data-Science/llm-ai-agent/05-full-application/.cursorignore
new file mode 100644
index 00000000..30ce736e
--- /dev/null
+++ b/product_demos/Data-Science/llm-ai-agent/05-full-application/.cursorignore
@@ -0,0 +1,16 @@
+__pycache__
+frontend/dist
+backend/app_local.yaml
+app_local.yaml
+.env
+.app.yaml
+.eggs
+.DS_Store
+build
+dist
+*.egg-info/
+backend/__pycache__
+.idea
diff --git a/product_demos/Data-Science/llm-ai-agent/05-full-application/.gitignore b/product_demos/Data-Science/llm-ai-agent/05-full-application/.gitignore
new file mode 100644
index 00000000..c86a7ace
--- /dev/null
+++ b/product_demos/Data-Science/llm-ai-agent/05-full-application/.gitignore
@@ -0,0 +1,55 @@
+**/package-lock.json
+# Environment variables
+.env
+**/.env
+
+# Python
+**/__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+env/
+build/
+**/app_local.yaml
+frontend/dist
+frontend/static
+backend/app_local.yaml
+develop-eggs/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# Node
+node_modules/
+**/node_modules/
+npm-debug.log
+yarn-error.log
+yarn-debug.log
+.pnp/
+.pnp.js
+coverage/
+.DS_Store
+.env.local
+.env.development.local
+.env.test.local
+.env.production.local
+
+# Database
+*.sqlite3
+*.db
+
+# IDE
+.idea/
+.vscode/
+*.swp
+*.swo
diff --git a/product_demos/Data-Science/llm-ai-agent/05-full-application/05.1-deploy-app.ipynb b/product_demos/Data-Science/llm-ai-agent/05-full-application/05.1-deploy-app.ipynb
new file mode 100644
index 00000000..13b3a883
--- /dev/null
+++ b/product_demos/Data-Science/llm-ai-agent/05-full-application/05.1-deploy-app.ipynb
@@ -0,0 +1,64 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "application/vnd.databricks.v1+cell": {
+ "cellMetadata": {},
+ "inputWidgets": {},
+ "nuid": "7966902b-3376-4052-8b12-0a1644ac5ff5",
+ "showTitle": false,
+ "tableResultSettingsMap": {},
+ "title": ""
+ }
+ },
+ "source": [
+ "# Deploy our chatbot application\n",
+ "\n",
+    "This notebook packages and deploys the chat application in this folder as a Databricks App."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "application/vnd.databricks.v1+cell": {
+ "cellMetadata": {},
+ "inputWidgets": {},
+ "nuid": "85f74566-f97e-4d9e-88df-ebb6bea7754e",
+ "showTitle": false,
+ "tableResultSettingsMap": {},
+ "title": ""
+ }
+ },
+ "outputs": [],
+ "source": [
+ "#TODO: use databricks python sdk to deploy the app. \n",
+ "#It'll requires to add the endpoint / warehouse as external resource \n",
+ "#"
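+    "\n",
+    "# Assumption: the Apps API of databricks-sdk - exact arguments can differ\n",
+    "# between SDK versions, check the databricks-sdk docs. App name and path\n",
+    "# below are the defaults used by deploy.sh in this folder.\n",
+    "from databricks.sdk import WorkspaceClient\n",
+    "\n",
+    "w = WorkspaceClient()\n",
+    "w.apps.deploy(\n",
+    "    app_name=\"dbdemos-genai-agent-support\",\n",
+    "    source_code_path=\"/Workspace/dbdemos/dbdemos-genai/dbdemos-genai-agent-support\",\n",
+    ")"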
+ ]
+ }
+ ],
+ "metadata": {
+ "application/vnd.databricks.v1+notebook": {
+ "computePreferences": null,
+ "dashboards": [],
+ "environmentMetadata": {
+ "base_environment": "",
+ "environment_version": "2"
+ },
+ "inputWidgetPreferences": null,
+ "language": "python",
+ "notebookMetadata": {
+ "pythonIndentUnit": 2
+ },
+ "notebookName": "05.1-deploy-app",
+ "widgets": {}
+ },
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
diff --git a/product_demos/Data-Science/llm-ai-agent/05-full-application/README.md b/product_demos/Data-Science/llm-ai-agent/05-full-application/README.md
new file mode 100644
index 00000000..65fadeb7
--- /dev/null
+++ b/product_demos/Data-Science/llm-ai-agent/05-full-application/README.md
@@ -0,0 +1,148 @@
+# Databricks GenAI Agent Demo
+
+A web application demonstrating Databricks GenAI capabilities with a chat interface powered by FastAPI and React.
+
+## Local Development Setup
+
+### Python Environment Setup
+
+1. Install Miniconda (recommended) or Anaconda if not already installed:
+ - Download Miniconda from [here](https://docs.conda.io/en/latest/miniconda.html)
+ - Or Anaconda from [here](https://www.anaconda.com/download)
+
+2. Create and activate a new conda environment:
+```bash
+# Create a new environment with Python 3.11
+conda create -n dbdemos-agent python=3.11
+
+# Activate the environment
+conda activate dbdemos-agent
+```
+
+### Python Backend Setup
+
+1. Install Python dependencies:
+```bash
+pip install -r requirements.txt
+```
+
+2. Create `app_local.yaml` from the example file and configure your Databricks credentials:
+```bash
+cp app_local.yaml.example app_local.yaml
+# Edit app_local.yaml with your Databricks credentials
+```
+
+3. Start the FastAPI server:
+```bash
+python -m uvicorn app.main:app --reload
+```
+
+The backend will be available at:
+- API: http://localhost:8000
+- API Documentation: http://localhost:8000/docs
+
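+You can sanity-check the streaming chat endpoint (defined in `app/routes/agent.py`); the JSON payload below mirrors the route's body parameters:
+
+```python
+# Streams server-sent events from the (mock) agent service
+import requests
+
+resp = requests.post(
+    "http://localhost:8000/api/agent/chat",
+    json={"messages": [{"role": "user", "content": "Hello"}],
+          "intelligence_enabled": True, "use_case": "telco"},
+    stream=True,
+)
+for line in resp.iter_lines():
+    if line:
+        print(line.decode())
+```
+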
+### Frontend Setup
+
+1. Install Node.js (v18+ recommended) if not already installed:
+ - Download from [Node.js website](https://nodejs.org/)
+ - Or use nvm (Node Version Manager)
+
+2. Install frontend dependencies:
+```bash
+cd frontend
+npm install
+```
+
+3. Start the development server:
+```bash
+npm run dev
+```
+
+The frontend will be available at http://localhost:5173
+
+### Local Development Tips
+
+1. **IDE Setup**: We recommend using CursorAI for development:
+ - Provides intelligent code completion
+ - Built-in AI assistance for development
+ - Download from [cursor.sh](https://cursor.sh)
+
+2. **Hot Reload**:
+ - Backend: The `--reload` flag enables auto-reload on code changes
+ - Frontend: Vite provides fast hot module replacement
+
+3. **Testing**:
+ - Backend tests: `pytest`
+ - Frontend tests: `cd frontend && npm test`
+
+## Deployment
+
+The application can be deployed to Databricks using the provided `deploy.sh` script:
+
+```bash
+# Deploy to a specific workspace folder and app name
+./deploy.sh "/Workspace/your/path" "your-app-name"
+
+# Use defaults
+./deploy.sh
+```
+
+The deployment process:
+1. Builds the frontend (`npm run build`)
+2. Packages the Python code
+3. Uploads both to your Databricks workspace
+4. Deploys as a Databricks Application
+
+### Manual Deployment Steps
+
+If you prefer to deploy manually:
+
+1. Build the frontend:
+```bash
+cd frontend
+npm run build
+```
+
+2. Copy the frontend build to the static folder:
+```bash
+cp -r frontend/dist/* static/
+```
+
+3. Package and upload the Python code:
+```bash
+# Create a clean build directory
+rm -rf build
+mkdir -p build
+
+# Copy Python files (excluding dev files)
+rsync -av \
+ --exclude='**/__pycache__/' \
+ --exclude='**/app_local.yaml' \
+ --exclude='**/*.pyc' \
+ --exclude='.*' \
+ --exclude='build' \
+ --exclude='frontend' \
+ app/ build/
+
+# Upload to Databricks
+databricks workspace import-dir build "/Workspace/your/path" --overwrite
+```
+
+4. Deploy the application in your Databricks workspace
+
+## Project Structure
+
+```
+├── app/ # Python FastAPI application
+│ ├── routes/ # API endpoints
+│ ├── services/ # Business logic
+│ └── main.py # Application entry
+├── frontend/ # React frontend
+│ ├── src/
+│ │ ├── components/ # React components
+│ │ └── App.tsx # Main application
+│ └── package.json
+├── static/ # Static files served by FastAPI
+├── requirements.txt # Python dependencies
+└── deploy.sh # Deployment script
+```
\ No newline at end of file
diff --git a/product_demos/Data-Science/llm-ai-agent/05-full-application/app.yaml b/product_demos/Data-Science/llm-ai-agent/05-full-application/app.yaml
new file mode 100644
index 00000000..096f470a
--- /dev/null
+++ b/product_demos/Data-Science/llm-ai-agent/05-full-application/app.yaml
@@ -0,0 +1,5 @@
+command: ["uvicorn", "app.main:app"]
+
+env:
+ - name: 'ENV'
+ value: 'prod'
\ No newline at end of file
diff --git a/product_demos/Data-Science/llm-ai-agent/05-full-application/app/config.py b/product_demos/Data-Science/llm-ai-agent/05-full-application/app/config.py
new file mode 100644
index 00000000..b708e86c
--- /dev/null
+++ b/product_demos/Data-Science/llm-ai-agent/05-full-application/app/config.py
@@ -0,0 +1,46 @@
+import yaml
+import os
+from pathlib import Path
+from typing import Dict, List
+
+class Config:
+ _instance = None
+ _initialized = False
+
+ def __new__(cls):
+ if cls._instance is None:
+ cls._instance = super(Config, cls).__new__(cls)
+ return cls._instance
+
+ def __init__(self):
+ if not self._initialized:
+ self._config = self._load_config()
+ self._initialized = True
+
+    def _load_config(self) -> List[Dict]:
+ """Load configuration from YAML files with fallback to default config."""
+ config_dir = Path(__file__).parent.parent
+ local_config = config_dir / 'app_local.yaml'
+ default_config = config_dir / 'app.yaml'
+
+ try:
+ config_file = local_config if local_config.exists() else default_config
+ with open(config_file) as f:
+                return yaml.safe_load(f).get('env', [])
+ except Exception as e:
+ print(f"Failed to load configuration: {e}")
+            return [{'name': 'ENV', 'value': 'prod'}]  # Safe default, same shape as the yaml 'env' list
+
+ def get_value(self, key: str, default_value: str = '') -> str:
+ """Get a configuration value from the config list of dictionaries"""
+ return next((item['value'] for item in self._config if item['name'] == key), default_value)
+
+ def setup_databricks_env(self):
+ """Set up Databricks environment variables"""
+ os.environ['DATABRICKS_HOST'] = self.get_value('DATABRICKS_HOST')
+ os.environ['DATABRICKS_TOKEN'] = self.get_value('DATABRICKS_TOKEN')
+
+ @property
+ def environment(self) -> str:
+ """Get the current environment (dev/prod)"""
+ return self.get_value('ENV', 'prod')
\ No newline at end of file
diff --git a/product_demos/Data-Science/llm-ai-agent/05-full-application/app/main.py b/product_demos/Data-Science/llm-ai-agent/05-full-application/app/main.py
new file mode 100644
index 00000000..3c524420
--- /dev/null
+++ b/product_demos/Data-Science/llm-ai-agent/05-full-application/app/main.py
@@ -0,0 +1,113 @@
+from fastapi import FastAPI, Request, Response
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.staticfiles import StaticFiles
+from fastapi.responses import JSONResponse
+from fastapi.exceptions import RequestValidationError
+from starlette.exceptions import HTTPException as StarletteHTTPException
+from starlette.middleware.base import BaseHTTPMiddleware
+from .routes import agent
+import os
+import traceback
+import time
+import logging
+from .config import Config
+
+# Set up application logging
+logger = logging.getLogger("app")
+
+# Initialize configuration
+config = Config()
+
+environment = config.environment
+
+app = FastAPI(title="AI Agent demo")
+
+# Request-Response logging middleware
+class RequestLoggingMiddleware(BaseHTTPMiddleware):
+ async def dispatch(self, request: Request, call_next):
+ request_id = id(request)
+ method = request.method
+ endpoint = request.url.path
+
+ # Log request
+ logger.info(f"Request {request_id}: {method} {endpoint} started")
+
+ # Process request and measure time
+ start_time = time.time()
+
+ try:
+ response = await call_next(request)
+ process_time = time.time() - start_time
+
+ # Log successful response
+ logger.info(f"{method} {endpoint} completed with status {response.status_code} in {process_time:.3f}s")
+
+ return response
+ except Exception as e:
+ process_time = time.time() - start_time
+ logger.error(f"{method} {endpoint} failed in {process_time:.3f}s: {str(e)}\n{traceback.format_exc()}")
+ raise
+
+# Add the request logging middleware
+app.add_middleware(RequestLoggingMiddleware)
+
+# Add CORS middleware only in development environment
+if environment == 'dev':
+ config.setup_databricks_env()
+ print("STARTING IN DEV MODE - This won't work in a deployed environment on Databricks. If you see this message in your databricks logs, change the ENV to prod in the app.yaml file.")
+ app.add_middleware(
+ CORSMiddleware,
+ allow_origins=["http://127.0.0.1:5173", "http://localhost:5173"],
+ allow_credentials=True,
+ allow_methods=["*"],
+ allow_headers=["*"],
+ )
+ @app.get("/")
+ async def root():
+ return {"message": "Databricks GenAI API"}
+else:
+ print("STARTING IN PROD MODE - will serve the /static folder. This will work in a deployed environment on Databricks.")
+
+# Initialize API routes first
+app.include_router(agent.router, prefix="/api/agent")
+
+# Then mount static files in prod mode
+if environment == 'prod':
+ try:
+ target_dir = "static"
+ app.mount("/", StaticFiles(directory=target_dir, html=True), name="site")
+    except Exception as e:
+        print(f'ERROR - static folder not found: {e}')
+
+# Global exception handler for all unhandled exceptions
+@app.exception_handler(Exception)
+async def unhandled_exception_handler(request: Request, exc: Exception):
+ method = request.method
+ endpoint = request.url.path
+ logger.error(f"{method} {endpoint} error: {str(exc)}\n{traceback.format_exc()}")
+ return JSONResponse(
+ status_code=500,
+ content={"detail": f"Internal server error : {str(exc)} Check logs for details."}
+ )
+
+# Handle validation errors
+@app.exception_handler(RequestValidationError)
+async def validation_exception_handler(request: Request, exc: RequestValidationError):
+ method = request.method
+ endpoint = request.url.path
+ logger.error(f"{method} {endpoint} validation error: {str(exc)}")
+ return JSONResponse(
+ status_code=422,
+ content={"detail": f"Validation error: {str(exc)}"}
+ )
+
+# Handle HTTP exceptions
+@app.exception_handler(StarletteHTTPException)
+async def http_exception_handler(request: Request, exc: StarletteHTTPException):
+ method = request.method
+ endpoint = request.url.path
+ logger.error(f"{method} {endpoint} HTTP {exc.status_code}: {exc.detail}")
+ return JSONResponse(
+ status_code=exc.status_code,
+ content={"detail": exc.detail}
+ )
diff --git a/product_demos/Data-Science/llm-ai-agent/05-full-application/app/routes/agent.py b/product_demos/Data-Science/llm-ai-agent/05-full-application/app/routes/agent.py
new file mode 100644
index 00000000..7b597f2c
--- /dev/null
+++ b/product_demos/Data-Science/llm-ai-agent/05-full-application/app/routes/agent.py
@@ -0,0 +1,32 @@
+from fastapi import APIRouter, Depends, HTTPException, Body, Query
+from fastapi.responses import StreamingResponse
+from typing import List
+from ..services.agent_service import AgentService
+import mlflow.deployments
+import mlflow
+
+router = APIRouter()
+agent_service = AgentService()
+
+
+@router.get("/questions", response_model=List[dict])
+async def get_questions(use_case: str = Query("telco", description="The use case to get questions for")):
+ """Get the list of predefined questions"""
+ return await agent_service.get_questions(use_case)
+
+@router.post("/chat")
+async def process_message(
+ messages: List[dict] = Body(...),
+ intelligence_enabled: bool = Body(True),
+ use_case: str = Body("telco", description="The use case to process messages for")
+):
+ """Process a chat message and return streaming response"""
+ return StreamingResponse(
+ agent_service.process_message(messages, intelligence_enabled, use_case),
+ media_type="text/event-stream",
+ headers={
+ "Cache-Control": "no-cache",
+ "Connection": "keep-alive",
+ "Content-Type": "text/event-stream"
+ }
+ )
\ No newline at end of file
diff --git a/product_demos/Data-Science/llm-ai-agent/05-full-application/app/services/agent_prompt.py b/product_demos/Data-Science/llm-ai-agent/05-full-application/app/services/agent_prompt.py
new file mode 100644
index 00000000..7c254f28
--- /dev/null
+++ b/product_demos/Data-Science/llm-ai-agent/05-full-application/app/services/agent_prompt.py
@@ -0,0 +1,19 @@
+PROMPTS = {
+ "telco": """You are a telco customer service assistant. You have access to customer data and can help with billing, technical support, and account management.
+You can use tools to:
+- Query customer details and billing history
+- Check customer propensity scores
+- Generate coupon codes
+- Access product documentation
+
+Please provide detailed, professional responses and use tools when needed to get accurate information.""",
+
+ "retail": """You are a retail customer service assistant. You can help with order tracking, returns, and product support.
+You can use tools to:
+- Query order details and customer information
+- Access product documentation
+- Generate coupon codes
+- Forecast customer consumption
+
+Please provide detailed, professional responses and use tools when needed to get accurate information."""
+}
\ No newline at end of file
diff --git a/product_demos/Data-Science/llm-ai-agent/05-full-application/app/services/agent_service.py b/product_demos/Data-Science/llm-ai-agent/05-full-application/app/services/agent_service.py
new file mode 100644
index 00000000..ff16e8ce
--- /dev/null
+++ b/product_demos/Data-Science/llm-ai-agent/05-full-application/app/services/agent_service.py
@@ -0,0 +1,160 @@
+from .mockup_agent import USE_CASES
+from .agent_prompt import PROMPTS
+import json
+import mlflow.deployments
+import mlflow
+from typing import List
+import asyncio
+
+class AgentService:
+ def __init__(self, use_mockup: bool = True):
+ self.use_cases = USE_CASES
+ self.use_mockup = use_mockup
+
+ async def get_questions(self, use_case: str = "telco"):
+ """Return the list of predefined questions for the given use case"""
+ responses = self.use_cases.get(use_case, self.use_cases["telco"])["responses"]
+ return [{"preview": q["question"][:50] + "..." if len(q["question"]) > 50 else q["question"],
+ "text": q["question"]} for q in responses]
+
+ def _cleanup_message_sequence(self, messages: List[dict]) -> List[dict]:
+ """Clean up message sequence to ensure proper alternation and remove redundant messages.
+
+ Rules:
+ 1. Remove first message if it's from assistant
+ 2. For consecutive user messages, only keep the last one
+ """
+ cleaned_messages = []
+ prev_role = None
+
+ for msg in messages:
+ # Skip first message if it's from assistant
+ if not cleaned_messages and msg["role"] == "assistant":
+ continue
+
+ # If we have consecutive user messages, remove the previous one
+ if msg["role"] == "user" and prev_role == "user":
+ cleaned_messages.pop()
+
+ cleaned_messages.append(msg)
+ prev_role = msg["role"]
+
+ return cleaned_messages
+
+ async def process_message(self, messages, intelligence_enabled: bool = True, use_case: str = "telco"):
+ """Process a message and yield streaming responses"""
+ # First emit thinking start
+ yield f"data: {json.dumps({'type': 'thinking-start', 'data': None})}\n\n"
+
+ if self.use_mockup:
+ async for response in self._process_message_mock(messages, intelligence_enabled, use_case):
+ yield response
+ else:
+ async for response in self._process_message_real(messages, intelligence_enabled, use_case):
+ yield response
+
+ async def _process_message_real(self, messages, intelligence_enabled: bool, use_case: str):
+ """Process messages using real Databricks endpoints"""
+ # Add system prompt based on use case
+ system_messages = [{"role": "system", "content": PROMPTS[use_case]}]
+
+ # Clean up message sequence
+ cleaned_messages = self._cleanup_message_sequence(messages)
+ full_messages = system_messages + cleaned_messages
+
+ # Call the appropriate endpoint based on intelligence mode
+ endpoint_name = "agents_main-dbdemos_agent_tools-agent_tools_test" if intelligence_enabled else "databricks-llama3"
+
+ # Get streaming response
+ response_stream = self.call_endpoint(full_messages, endpoint_name)
+
+ current_tool = None
+ for chunk in response_stream:
+ if isinstance(chunk, str):
+ chunk = json.loads(chunk)
+
+ delta = chunk.get('delta', {})
+
+ if delta.get('tool_calls') and intelligence_enabled:
+ # Tool call started
+ for tool in delta['tool_calls']:
+ tool_data = {
+ "tool_name": tool['function']['name'].replace('main__dbdemos_agent_tools__', ''),
+ "description": tool['function']['arguments'],
+ "type": "TOOL_CALL",
+ "reasoning": f"Calling tool {tool['function']['name']}"
+ }
+ current_tool = tool['id']
+ yield f"data: {json.dumps({'type': 'tool', 'data': tool_data})}\n\n"
+
+ elif delta.get('role') == 'tool' and intelligence_enabled:
+ # Tool response
+ if delta.get('tool_call_id') == current_tool:
+ tool_response = json.loads(delta.get('content', '{}'))
+ yield f"data: {json.dumps({'type': 'tool', 'data': {'informations': [tool_response.get('value', '')]}})}\n\n"
+
+ elif delta.get('content'):
+ # Final answer
+ yield f"data: {json.dumps({'type': 'final-answer', 'data': {'final_answer': delta['content']}})}\n\n"
+
+ def call_endpoint(self, messages: List[dict], endpoint_name: str = "agents_main-dbdemos_agent_tools-agent_tools_test"):
+ """Call MLflow endpoint with proper Databricks configuration"""
+ # Create client and call endpoint
+ client = mlflow.deployments.get_deploy_client("databricks")
+ response_stream = client.predict_stream(
+ endpoint=endpoint_name,
+ inputs={"messages": messages, "stream": True}
+ )
+ return response_stream
+
+ def get_agent(self, agent_id: int):
+ pass
+
+ ###########################################
+ # MOCK IMPLEMENTATION
+ ###########################################
+
+ def _calculate_word_similarity(self, text1: str, text2: str) -> float:
+ """Calculate similarity score between two texts based on word overlap"""
+ words1 = set(text1.lower().split())
+ words2 = set(text2.lower().split())
+ intersection = len(words1.intersection(words2))
+ union = len(words1.union(words2))
+ return intersection / union if union > 0 else 0
+
+ async def find_best_response(self, question: str, use_case: str = "telco") -> dict:
+ """Find the most relevant response by matching the question with predefined questions"""
+ responses = self.use_cases.get(use_case, self.use_cases["telco"])["responses"]
+
+ if not question:
+ return responses[0]
+
+ # Find most similar question
+ best_match = max(responses, key=lambda r: self._calculate_word_similarity(question, r["question"]))
+ return best_match
+
+ async def _process_message_mock(self, messages, intelligence_enabled: bool, use_case: str):
+ """Process messages using mock data and simulated delays"""
+ # Get the last user message
+ user_messages = [m for m in messages if m["role"] == "user"]
+ last_question = user_messages[-1]["content"] if user_messages else ""
+
+ response = await self.find_best_response(last_question, use_case)
+
+ # Use shorter thinking time for non-intelligent mode
+ await asyncio.sleep(2 if intelligence_enabled else 1)
+
+ if intelligence_enabled:
+ # Emit each tool one by one with delays
+ for tool in response["tools"]:
+ yield f"data: {json.dumps({'type': 'tool', 'data': tool})}\n\n"
+ await asyncio.sleep(2)
+
+ # Wait before emitting final answer
+ await asyncio.sleep(1.5)
+
+ # Emit final answer and informations
+ yield f"data: {json.dumps({'type': 'final-answer', 'data': {'final_answer': response['final_answer'], 'final_informations': response.get('final_informations', [])}})}\n\n"
+ else:
+ # Emit completion event
+ yield f"data: {json.dumps({'type': 'final-answer', 'data': {'final_answer': response['non_intelligent_answer']}})}\n\n"
\ No newline at end of file
diff --git a/product_demos/Data-Science/llm-ai-agent/05-full-application/app/services/mockup_agent.py b/product_demos/Data-Science/llm-ai-agent/05-full-application/app/services/mockup_agent.py
new file mode 100644
index 00000000..0bcb6bc4
--- /dev/null
+++ b/product_demos/Data-Science/llm-ai-agent/05-full-application/app/services/mockup_agent.py
@@ -0,0 +1,156 @@
+USE_CASES = {
+ "telco": {
+ "name": "Telco Subscription Support",
+ "responses": [
+ {
+ "question": "I had a customer asking about his high bill last month after upgrading to a new phone. Could you give me the support steps?",
+ "non_intelligent_answer": "Your bill may be higher due to charges related to the phone upgrade. Please review your billing statement or contact support for more details.",
+ "tools": [
+ {
+ "tool_name": "query_customer_details",
+ "description": "Query customer information",
+ "type": "DATABASE",
+ "reasoning": "I need to access customer information to answer this question. Calling the tool query_customer_details",
+ "informations": [
+                            "Databricks enforces security with Access Control across all components, ensuring the LLM doesn't access private information.\nThe Agent won't be able to see sensitive customer details.\n[Discover how to enforce security](https://www.databricks.com/product/security)"
+ ]
+ },
+ {
+ "tool_name": "query_billing",
+ "description": "Fetch customer billing history details",
+ "type": "DATABASE",
+ "reasoning": "Let's get the latest billing information for this customer. Calling the tool query_billing",
+ "informations": [
+                            "Agents need to have access to a variety of data.\nAll your data sources can be ingested and centralized, instantly available for Analytics and GenAI use cases. [Discover how to ingest data with LakeFlow](https://www.databricks.com/product/lakehouse)"
+ ]
+ }
+ ],
+ "final_answer": "Let's get the latest billing information for this customer. Calling the tool query_billing...\n\nI see that this billing data is for Layla Yang, home address **redacted**, billing cycle starting 01/20/2025.\nHere's a summary of the current charges:\n\n| Bill Description | Amount | Notes |\n| ------------| ------ | ------- |\n| Plan Charge (Unlimited Max) | $85.00 | Recurring monthly |\n| Device Installment (S25 Ultra) | $50.00 | Phone upgrade |\n| Taxes & Surcharges | $19.99 | Recurring monthly |\n\nIt looks like the promotion for the S25 Ultra wasn't applied, so the customer is currently being billed the full device installment — which explains the $50 increase they're seeing. Would you like me to troubleshoot the issue instead? We might be able to avoid the return.",
+ "final_informations": [
+ "GenAI can make the system vulnerable to leaking **sensitive PII** information. Databricks provides a defense layer, securing your business integrity while tracking usage and cost.\n[Discover Databricks AI Governance](https://www.databricks.com/product/ai-governance)"
+ ]
+ },
+ {
+ "question": "The customer says the phone bill increased by $50 and the customer's new phone is the item 'Samsung Galaxy S25 Ultra'. She also says she used the promotion coupon to upgrade for free.",
+ "non_intelligent_answer": "The $50 increase could be related to charges from the phone upgrade or plan changes. Please refer to your bill for more details.\n\nHere's a general guideline to review phone bill while we find out more:\n\n### 🛠️General Guideline to Review a Phone Bill Increase\nReview the billing statement for any new or changed charges.\nCheck for recent upgrades, plan changes, or added services.\nCompare with the previous bill to spot differences.\nIf unclear, direct them to **return** the new purchased device.",
+ "tools": [
+ {
+ "tool_name": "fetch_product_documentation",
+ "description": "Perform RAG on PDF documentation",
+ "type": "DATABASE",
+ "reasoning": "Let's try to troubleshoot the issue before asking for a return. ",
+ "informations": [
+ "Databricks makes it easy to plug any unstructured data sources (Past tickets, PDF, Office documents and other)[Discover how to perform RAG and Vector Search ](https://www.databricks.com/product/lakehouse)"
+ ]
+ }
+ ],
+ "final_answer": "Based on the billing details, the increase is due to the promotion not being applied to the current billing cycle. As a result, the customer was charged the full monthly device installment of $50.00 for the Samsung Galaxy S25 Ultra.\n\nOnce the promo is correctly applied, this charge should be removed or credited in future cycles, and the customer's bill will return to the expected amount",
+ "final_informations": [
+                    "Databricks lets you plug any LLM foundation model (openai, llama, anthropic...). But these LLMs are quickly evolving! You need to make sure your AI application will provide relevant information over time and focus on increasing customer satisfaction. Databricks lets you benchmark your applications against your dataset and instruction, making software iteration easy and safe. [Discover how MLFlow Evaluate enforces your quality](https://www.databricks.com/product/mlflow)"
+ ]
+ },
+ {
+ "question": "Ok, the customer seems angry and has churn risk, can you craft a friendly message for me to handle that?",
+ "non_intelligent_answer": "# Customer Service Response\n\nDear [Customer's Name],\n\nThanks for contacting us.\nWe understand there may be some confusion with your recent bill after the phone upgrade. Please note that charges may vary depending on your account activity.\nIf you're not satisfied with the new phone, we recommend returning it and reviewing your billing details in your online account.\n\nLet us know if you have further questions.",
+ "tools": [
+ {
+ "tool_name": "query_customer_propensity",
+ "description": "Check customer propensity score",
+ "type": "FORECASTING_MODEL",
+                        "reasoning": "I need to generate a service message for the customer — but first, let's check if this customer qualifies for any incentives, as we'll adjust the tone and offer accordingly. Calling query_customer_propensity...",
+ "informations": [
+ "GenAI relies on classic, specialized models such as forecasting, customer segmentation or CLV.\nDatabricks offers a full platform to build classic ML models, with all the traditional MLOps capabilities to enforce trust and simplify the deployment."
+ ]
+ },
+ {
+ "tool_name": "generate_coupon_code",
+ "description": "Generate unique coupon code",
+ "type": "EXTERNAL_API",
+ "reasoning": "The model flags Layla as a high-value long term customer, with high projected spend in upcoming cycles",
+ "informations": [
+ "Databricks can be plugged with any other external systems, including partner solutions"
+ ]
+ }
+ ],
+ "final_answer": "It looks like this customer is a loyal high-value client. I recommend offering a 10% discount coupon for 1 month.\n\nHere's the message I've drafted for the customer service team:\n---\n\n**Subject:** We Fixed the Upgrade — and Added a Little Thank You 🎁\n\nDear Layla,\n\nThanks again for reaching out — I understand how frustrating a surprise bill can be. I took another look and made sure the promotion was correctly applied to your upgrade.\nThat means your phone is now fully covered under the promo, and you won't see any increase related to the device going forward.\n\nAs a thank you for your patience, I've also added a 10% discount coupon to your account, which will be applied to your next bill.\n\nWe really appreciate having you with us, and I'm here if there's anything else I can help with.\n"
+ }
+ ]
+ },
+ "retail": {
+ "name": "Retail Order Support",
+ "responses": [
+ {
+ "question": "I have a customer asking about his order number #1231, can you get me the order details?",
+ "non_intelligent_answer": "I can't directly access order systems or databases. But if you can share the order details or data (e.g. a spreadsheet, database export, or screenshot), I can help you look it up or extract the relevant info for order #1231. Want to upload something?",
+ "tools": [
+ {
+ "tool_name": "query_customer_details",
+ "description": "Query customer information",
+ "type": "DATABASE",
+ "reasoning": "I need to access customer information to answer this question. Calling the tool query_customer_details",
+ "informations": [
+                            "Databricks enforces security with Access Control across all components, ensuring the LLM doesn't access private information.\nThe Agent won't be able to see sensitive customer details.\n[Discover how to enforce security](https://www.databricks.com/product/security)"
+ ]
+ },
+ {
+ "tool_name": "query_customer_invoice_history",
+ "description": "Fetch order details",
+ "type": "DATABASE",
+                        "reasoning": "Let's get the latest orders for this customer. Calling the tool query_customer_invoice_history",
+ "informations": [
+                            "Agents need to have access to a variety of data.\nAll your data sources can be ingested and centralized, instantly available for Analytics and GenAI use cases. [Discover how to ingest data with LakeFlow](https://www.databricks.com/product/lakehouse)"
+ ]
+ }
+ ],
+                    "final_answer": "I see that this order is from Quentin Ambard, on 12/23/2025, delivery address is *** MASKED DATA ***.\n\nHere is the order detail:\n\n| Product | Quantity | Price | Status |\n| ------- | -------- | ----- | ------ |\n| Wireless Headphones soundWAVE PRO X5 | 1 | $129.99 | Return Requested |\n| Phone Charger | 2 | $24.99 | Shipped |\n| Screen Protector | 1 | $19.99 | Processing |\n\nI see the customer asked for a return on the Headphones. Would you like me to troubleshoot the issue instead? We might be able to avoid the return.",
+ "final_informations": [
+ "GenAI can make it very easy to leak sensitive PII information. Databricks provides a defense layer, securing your business integrity while tracking usage and cost.\n[Discover Databricks AI Governance](https://www.databricks.com/product/ai-governance)"
+ ]
+ },
+ {
+ "question": "Yes, the customer says they have an error code E12, what could we suggest?",
+ "non_intelligent_answer": "To help with error code E12, I'd need to know the type of product or system the customer is referring to (e.g., a coffee machine, HVAC unit, industrial sensor, etc.), since error codes can vary widely between manufacturers and products.\n\nBut here's a general approach while we find out more:\n\n## 🛠️ General Troubleshooting Steps for Error Code E12\n\n1. Check the Manual: If available, look up error code E12 in the product's manual or support site.\n2. Restart the Device: A power cycle often resolves temporary glitches.\n3. Common E12 Meanings (by type of device):\n - Appliances (e.g., dishwashers, coffee machines): Often linked to water flow, temperature sensor failure, or internal communication issues.\n - Industrial/IoT Equipment: Might mean sensor disconnection, data timeout, or firmware mismatch.\n - Electronics (e.g., routers or controllers): Could be a firmware or internal component issue.",
+ "tools": [
+ {
+ "tool_name": "fetch_product_documentation",
+ "description": "Perform RAG on PDF documentation",
+ "type": "DATABASE",
+ "reasoning": "Let's try to troubleshoot the issue before asking for a return. I need to fetch the product documentation for the headphones Wireless Headphones soundWAVE PRO X5.",
+ "informations": [
+ "Databricks makes it easy to plug any unstructured data sources (Past tickets, PDF, Office documents and other)[Discover how to perform RAG and Vector Search ](https://www.databricks.com/product/lakehouse)"
+ ]
+ }
+ ],
+ "final_answer": "From the headphone manual ([headphone-001-AZE1.pdf](https://www.databricks.com/product/lakehouse)), I see that the following can be performed to do a hard reset:\n\n- press the play button for 30 seconds\n- if it doesn't work, use the pin to reset the headphone to factory settings\n- Make sure the user installed the application in his mobile phone to do the latest firmware update\n\nLet me know if it helps",
+ "final_informations": [
+                    "Databricks lets you plug any LLM foundation model (openai, llama, anthropic...). But these LLMs are quickly evolving! You need to make sure your genAI application will provide relevant information over time and focus on reducing returns. Databricks lets you benchmark your applications against your dataset and instruction, making software iteration easy and safe. [Discover how MLFlow Evaluate enforces your quality](https://www.databricks.com/product/mlflow)"
+ ]
+ },
+ {
+ "question": "Ok, the customer seems upset, can you craft an email for me?",
+ "non_intelligent_answer": "# Customer Service Response\n\nDear [Customer's Name],\n\nThank you for reaching out, and I'm very sorry to hear about the trouble you've encountered with your order #1231.\n\nWe understand how frustrating this situation must be, and we want to make it right. We've gone ahead and approved the return for your item. Please follow the return instructions below to send the product back to us:\n\n## Return Instructions\n[Insert return steps, address, or link to return portal]\n\nOnce we receive the item, we'll process your refund or replacement promptly, depending on your preference.\n\nIf there's anything else we can do in the meantime, please don't hesitate to let me know. We really appreciate your patience and understanding.",
+ "tools": [
+ {
+ "tool_name": "forecast_customer_consumption",
+ "description": "Forecast future consumption",
+ "type": "FORECASTING_MODEL",
+                        "reasoning": "I need to generate an email, first let's see if this customer qualifies for any incentive, as we'll adjust the email accordingly...",
+ "informations": [
+ "GenAI relies on classic, specialized models such as forecasting or CLV.\nDatabricks offers a full platform to build classic ML models, with all the traditional MLOps capabilities to enforce trust and simplify the deployment."
+ ]
+ },
+ {
+ "tool_name": "generate_coupon_code",
+ "description": "Generate unique coupon code",
+ "type": "EXTERNAL_API",
+ "reasoning": "This seems to be a premium customer, as per guidance let's generate a 5% discount coupon for 1 month using the COUPON API",
+ "informations": [
+ "Databricks can be plugged with any other external systems, including partner solutions"
+ ]
+ }
+ ],
+ "final_answer": "It looks like this customer is a loyal high-value client. I recommend offering a 5% discount coupon for 1 month.\n\nHere's the email I've drafted for you:\n---\n\n**Subject:** Quick Fix + A Special Thank You for Your Loyalty 🎧\n\nDear Quentin,\n\nThank you for reaching out regarding your recent order. We're happy to assist!\n\nBased on your inquiry about the headphones, here are a few steps you can perform that should solve the issue:\n\n- Press and hold the **play** button for 30 seconds\n- If that doesn't work, use a pin to press the **reset** hole (usually near the charging port)\n- Lastly, Ensure the **official mobile app is installed** to update to the latest firmware\n\nWe also noticed your continued trust in our products — and we truly appreciate it!\nAs a token of our gratitude, please enjoy **5% off your next order** using the exclusive code:\n\n🎁 **THANKYOU-QA5DISC**\n*(Valid for one month from today)*\n\nIf you have any further questions, don't hesitate to reach out — we're always here to help!\n\nWarm regards,\nThe Customer Success Team\n\n---\nLet me know if you'd like to modify the tone or highlight other products based on recent activity."
+ }
+ ]
+ }
+}
diff --git a/product_demos/Data-Science/llm-ai-agent/05-full-application/app/tests/__init__.py b/product_demos/Data-Science/llm-ai-agent/05-full-application/app/tests/__init__.py
new file mode 100644
index 00000000..0aaa8d89
--- /dev/null
+++ b/product_demos/Data-Science/llm-ai-agent/05-full-application/app/tests/__init__.py
@@ -0,0 +1 @@
+# Initialize tests package
\ No newline at end of file
diff --git a/product_demos/Data-Science/llm-ai-agent/05-full-application/app/tests/test_agent.py b/product_demos/Data-Science/llm-ai-agent/05-full-application/app/tests/test_agent.py
new file mode 100644
index 00000000..6e340a3d
--- /dev/null
+++ b/product_demos/Data-Science/llm-ai-agent/05-full-application/app/tests/test_agent.py
@@ -0,0 +1,37 @@
+import os
+import sys
+import pytest
+
+# Add the backend directory to the Python path
+backend_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
+sys.path.insert(0, backend_dir)
+
+from app.services.agent_service import AgentService
+from app.config import Config
+
+@pytest.fixture(scope="session", autouse=True)
+def setup_environment():
+ """Setup environment variables for all tests"""
+ config = Config()
+ config.setup_databricks_env()
+
+def test_call_endpoint():
+ """Test real MLflow endpoint call with Databricks configuration"""
+ # Test data
+ history = [
+ {
+ "role": "user",
+ "content": "What is 5+5"
+ }
+ ]
+
+ # Make the actual call
+ agent_service = AgentService()
+    response_stream = agent_service.call_endpoint(history)
+    print("--------------------------------")
+    # the endpoint returns a stream of chunks - check we get at least one back
+    chunks = list(response_stream)
+    print(chunks)
+    assert len(chunks) > 0
+    print("--------------------------------")
+
+if __name__ == "__main__":
+ pytest.main([__file__])
\ No newline at end of file
diff --git a/product_demos/Data-Science/llm-ai-agent/05-full-application/app_local.yaml.example b/product_demos/Data-Science/llm-ai-agent/05-full-application/app_local.yaml.example
new file mode 100644
index 00000000..c8cb44b0
--- /dev/null
+++ b/product_demos/Data-Science/llm-ai-agent/05-full-application/app_local.yaml.example
@@ -0,0 +1,9 @@
+command: ["uvicorn", "app.main:app"]
+
+env:
+ - name: 'ENV'
+ value: 'dev'
+ - name: 'DATABRICKS_HOST'
+ value: 'https://xxx.com'
+ - name: 'DATABRICKS_TOKEN'
+ value: 'xxx'
diff --git a/product_demos/Data-Science/llm-ai-agent/05-full-application/deploy.sh b/product_demos/Data-Science/llm-ai-agent/05-full-application/deploy.sh
new file mode 100755
index 00000000..2d471212
--- /dev/null
+++ b/product_demos/Data-Science/llm-ai-agent/05-full-application/deploy.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+# Accept parameters
+APP_FOLDER_IN_WORKSPACE=${1:-"/Workspace/dbdemos/dbdemos-genai/dbdemos-genai-agent-support"}
+LAKEHOUSE_APP_NAME=${2:-"dbdemos-genai-agent-support"}
+
+# Frontend build and import
+(
+ cd frontend
+ npm run build
+ rm -rf ../static/
+ mv dist ../static
+ databricks workspace delete "$APP_FOLDER_IN_WORKSPACE/static" --recursive --profile WEST
+ databricks workspace import-dir ../static "$APP_FOLDER_IN_WORKSPACE/static" --overwrite --profile WEST
+) &
+
+# Backend packaging
+(
+ rm -rf build
+ mkdir -p build
+ rsync -av \
+ --exclude='**/__pycache__/' \
+ --exclude='**/app_local.yaml' \
+ --exclude='frontend' \
+ --exclude='**/app_local.yaml.example' \
+ --exclude='**/*.pyc' \
+ --exclude='.*' \
+ --exclude='tests' \
+ --exclude='deploy.sh' \
+ --exclude='test' \
+ --exclude='build' \
+ --exclude='local_conf*' \
+ ./ build/
+ if [ -f app_prod.py ]; then
+ cp app_prod.py build/app.py
+ fi
+ databricks workspace delete "$APP_FOLDER_IN_WORKSPACE/app" --recursive --profile WEST
+ databricks workspace import-dir build "$APP_FOLDER_IN_WORKSPACE" --overwrite --profile WEST
+ rm -rf build
+) &
+
+# Wait for both background processes to finish
+wait
+
+# Deploy the application
+databricks apps deploy "$LAKEHOUSE_APP_NAME" --profile WEST
+
+# Print the app page URL
+echo "Open the app page for details and permission: https://e2-demo-west.cloud.databricks.com/apps/$LAKEHOUSE_APP_NAME"
diff --git a/product_demos/Data-Science/llm-ai-agent/05-full-application/frontend/.env.example b/product_demos/Data-Science/llm-ai-agent/05-full-application/frontend/.env.example
new file mode 100644
index 00000000..e6b26972
--- /dev/null
+++ b/product_demos/Data-Science/llm-ai-agent/05-full-application/frontend/.env.example
@@ -0,0 +1 @@
+REACT_APP_API_URL=http://localhost:8000/api
\ No newline at end of file
diff --git a/product_demos/Data-Science/llm-ai-agent/05-full-application/frontend/.gitignore b/product_demos/Data-Science/llm-ai-agent/05-full-application/frontend/.gitignore
new file mode 100644
index 00000000..94c0d593
--- /dev/null
+++ b/product_demos/Data-Science/llm-ai-agent/05-full-application/frontend/.gitignore
@@ -0,0 +1,27 @@
+# Logs
+logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+pnpm-debug.log*
+lerna-debug.log*
+
+node_modules
+.DS_Store
+dist-ssr
+coverage
+*.local
+
+/cypress/videos/
+/cypress/screenshots/
+
+# Editor directories and files
+.vscode/*
+!.vscode/extensions.json
+.idea
+*.suo
+*.ntvs*
+*.njsproj
+*.sln
+*.sw?
diff --git a/product_demos/Data-Science/llm-ai-agent/05-full-application/frontend/.prettierrc b/product_demos/Data-Science/llm-ai-agent/05-full-application/frontend/.prettierrc
new file mode 100644
index 00000000..a267d871
--- /dev/null
+++ b/product_demos/Data-Science/llm-ai-agent/05-full-application/frontend/.prettierrc
@@ -0,0 +1,6 @@
+{
+ "semi": false,
+ "singleQuote": true,
+ "tabWidth": 2,
+ "trailingComma": "none"
+}
\ No newline at end of file
diff --git a/product_demos/Data-Science/llm-ai-agent/05-full-application/frontend/README.md b/product_demos/Data-Science/llm-ai-agent/05-full-application/frontend/README.md
new file mode 100644
index 00000000..e35430eb
--- /dev/null
+++ b/product_demos/Data-Science/llm-ai-agent/05-full-application/frontend/README.md
@@ -0,0 +1,24 @@
+# AI Agent Demo Frontend
+
+A vibe-coded application to showcase Databricks Mosaic AI.
+
+
+## Project Setup
+
+### Frontend
+
+
+```bash
+cd frontend
+# Install dependencies
+npm install
+
+# Start development server
+npm run dev
+
+# Build for production
+npm run build
+```
+
+### Backend
+
+See the main [README](../README.md) for backend setup and deployment.
diff --git a/product_demos/Data-Science/llm-ai-agent/05-full-application/frontend/index.html b/product_demos/Data-Science/llm-ai-agent/05-full-application/frontend/index.html
new file mode 100644
index 00000000..ddae9967
--- /dev/null
+++ b/product_demos/Data-Science/llm-ai-agent/05-full-application/frontend/index.html
@@ -0,0 +1,14 @@