From 6d9f89433a21645177a346c9230aaebba6684fec Mon Sep 17 00:00:00 2001
From: Prasanjeet-Microsoft
Date: Wed, 7 May 2025 13:58:35 +0530
Subject: [PATCH 01/19] Added .dockerignore file to exclude unnecessary files
from Docker build context (#537)
---
src/.dockerignore | 162 ++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 162 insertions(+)
create mode 100644 src/.dockerignore
diff --git a/src/.dockerignore b/src/.dockerignore
new file mode 100644
index 000000000..68dc84378
--- /dev/null
+++ b/src/.dockerignore
@@ -0,0 +1,162 @@
+# Include any files or directories that you don't want to be copied to your
+# container here (e.g., local build artifacts, temporary files, etc.).
+#
+# For more help, visit the .dockerignore file reference guide at
+# https://docs.docker.com/engine/reference/builder/#dockerignore-file
+
+**/.DS_Store
+**/__pycache__
+**/.venv
+**/.classpath
+**/.dockerignore
+**/.env
+**/.git
+**/.gitignore
+**/.project
+**/.settings
+**/.toolstarget
+**/.vs
+**/.vscode
+**/*.*proj.user
+**/*.dbmdl
+**/*.jfm
+**/bin
+**/charts
+**/docker-compose*
+**/compose*
+**/Dockerfile*
+**/*.Dockerfile
+**/node_modules
+**/npm-debug.log
+**/obj
+**/secrets.dev.yaml
+**/values.dev.yaml
+LICENSE
+README.md
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.log
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# VS Code
+.vscode/
+
+# Ignore other unnecessary files
+*.bak
+*.swp
+.DS_Store
+*.pdb
+*.sqlite3
From b4ba302775981241b6c6af06596a1d0d4ceabdae Mon Sep 17 00:00:00 2001
From: AjitPadhi-Microsoft
Date: Wed, 14 May 2025 17:12:16 +0530
Subject: [PATCH 02/19] fixed date time response issue (#542)
---
src/App/backend/chat_logic_handler.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/App/backend/chat_logic_handler.py b/src/App/backend/chat_logic_handler.py
index 75fdc2c34..2b992d0aa 100644
--- a/src/App/backend/chat_logic_handler.py
+++ b/src/App/backend/chat_logic_handler.py
@@ -86,7 +86,7 @@ def greeting(self, input: Annotated[str, "the question"]) -> Annotated[str, "The
answer = f"Error retrieving greeting response: {str(e)}"
return answer
- @kernel_function(name="ChatWithSQLDatabase", description="Given a query about client assets, investements and meeting dates or times, get details from the database based on the provided question and client id")
+ @kernel_function(name="ChatWithSQLDatabase", description="Given a query about client assets, investments and meeting scheduled (including upcoming or next meeting dates/times), get details from the database based on the provided question and client id")
def get_SQL_Response(
self,
input: Annotated[str, "the question"],
From 03417fe3307e9fadcaf3d8bb2965a87b6625766c Mon Sep 17 00:00:00 2001
From: Abdul-Microsoft
Date: Mon, 19 May 2025 14:48:59 +0530
Subject: [PATCH 03/19] Update main.bicepparam
---
infra/main.bicepparam | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/infra/main.bicepparam b/infra/main.bicepparam
index b7ac77755..42c04971b 100644
--- a/infra/main.bicepparam
+++ b/infra/main.bicepparam
@@ -8,4 +8,4 @@ param gptDeploymentCapacity = int(readEnvironmentVariable('AZURE_ENV_MODEL_CAPAC
param embeddingDeploymentCapacity = int(readEnvironmentVariable('AZURE_ENV_EMBEDDING_MODEL_CAPACITY', '80'))
param AzureOpenAILocation = readEnvironmentVariable('AZURE_ENV_OPENAI_LOCATION', 'eastus2')
-param AZURE_LOCATION = readEnvironmentVariable('AZURE_ENV_LOCATION', '')
+param AZURE_LOCATION = readEnvironmentVariable('AZURE_LOCATION', '')
From 43a464be73fe5ef9cab3f28a9cc35ef57977b5ee Mon Sep 17 00:00:00 2001
From: Roopan-Microsoft <168007406+Roopan-Microsoft@users.noreply.github.com>
Date: Wed, 21 May 2025 10:50:47 +0530
Subject: [PATCH 04/19] Update src/App/backend/chat_logic_handler.py
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---
src/App/backend/chat_logic_handler.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/App/backend/chat_logic_handler.py b/src/App/backend/chat_logic_handler.py
index 2b992d0aa..f848a011f 100644
--- a/src/App/backend/chat_logic_handler.py
+++ b/src/App/backend/chat_logic_handler.py
@@ -86,7 +86,7 @@ def greeting(self, input: Annotated[str, "the question"]) -> Annotated[str, "The
answer = f"Error retrieving greeting response: {str(e)}"
return answer
- @kernel_function(name="ChatWithSQLDatabase", description="Given a query about client assets, investments and meeting scheduled (including upcoming or next meeting dates/times), get details from the database based on the provided question and client id")
+ @kernel_function(name="ChatWithSQLDatabase", description="Given a query about client assets, investments and scheduled meetings (including upcoming or next meeting dates/times), get details from the database based on the provided question and client id")
def get_SQL_Response(
self,
input: Annotated[str, "the question"],
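The description string edited in patches 02 and 04 is what the agent's function-calling step reads when deciding whether to route a question to the SQL tool, which is why refining its wording resolves the date/time question routing. A minimal, self-contained sketch of a plugin method carrying that description, assuming semantic-kernel 1.x for Python (the class name, parameter names, and stub body are illustrative, not the repository's implementation):

```python
# Illustrative sketch only: shows where the ChatWithSQLDatabase description lives
# and why its wording matters for tool selection. Assumes semantic-kernel 1.x.
from typing import Annotated

from semantic_kernel.functions import kernel_function


class WealthAdvisorPlugin:  # hypothetical plugin class
    @kernel_function(
        name="ChatWithSQLDatabase",
        description=(
            "Given a query about client assets, investments and scheduled meetings "
            "(including upcoming or next meeting dates/times), get details from the "
            "database based on the provided question and client id"
        ),
    )
    def get_SQL_Response(
        self,
        input: Annotated[str, "the question"],
        clientid: Annotated[str, "the client id"],
    ) -> Annotated[str, "the SQL query result"]:
        # The real implementation generates and runs a T-SQL query; stubbed here.
        return f"SQL lookup for client {clientid}: {input}"
```

Because the model compares the user's question against such descriptions, explicitly mentioning "scheduled meetings (including upcoming or next meeting dates/times)" is what lets date/time questions reach the database path.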
From cb49449fd68f68232604e0715a7dd971e618157a Mon Sep 17 00:00:00 2001
From: Roopan-Microsoft <168007406+Roopan-Microsoft@users.noreply.github.com>
Date: Wed, 21 May 2025 10:50:54 +0530
Subject: [PATCH 05/19] Update src/.dockerignore
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---
src/.dockerignore | 1 -
1 file changed, 1 deletion(-)
diff --git a/src/.dockerignore b/src/.dockerignore
index 68dc84378..8cdbb515f 100644
--- a/src/.dockerignore
+++ b/src/.dockerignore
@@ -157,6 +157,5 @@ cython_debug/
# Ignore other unnecessary files
*.bak
*.swp
-.DS_Store
*.pdb
*.sqlite3
From 72eb23a2852dc7cd5fd22340373bcfac360a2278 Mon Sep 17 00:00:00 2001
From: Priyanka-Microsoft
Date: Wed, 21 May 2025 17:06:34 +0530
Subject: [PATCH 06/19] feat: added opentelemetry log (#545)
* added opentelemetry log in apis
* resolved pylint issues
* resolved pylint issues
* resolved pylint issues
* resolved pylint issues
* removed space
* updated requirement file
---
infra/deploy_ai_foundry.bicep | 2 +
infra/deploy_app_service.bicep | 5 +
infra/main.bicep | 1 +
infra/main.json | 39 ++--
src/App/app.py | 320 ++++++++++++++++++++++++++++++++-
src/App/backend/event_utils.py | 29 +++
src/App/requirements.txt | 12 +-
7 files changed, 389 insertions(+), 19 deletions(-)
create mode 100644 src/App/backend/event_utils.py
diff --git a/infra/deploy_ai_foundry.bicep b/infra/deploy_ai_foundry.bicep
index d6a8c611b..b8954277d 100644
--- a/infra/deploy_ai_foundry.bicep
+++ b/infra/deploy_ai_foundry.bicep
@@ -492,3 +492,5 @@ output aiProjectName string = aiHubProject.name
output applicationInsightsId string = applicationInsights.id
output logAnalyticsWorkspaceResourceName string = logAnalytics.name
output storageAccountName string = storageNameCleaned
+output applicationInsightsConnectionString string = applicationInsights.properties.ConnectionString
+
diff --git a/infra/deploy_app_service.bicep b/infra/deploy_app_service.bicep
index 3d30f5291..a5531ce4f 100644
--- a/infra/deploy_app_service.bicep
+++ b/infra/deploy_app_service.bicep
@@ -178,6 +178,7 @@ param streamTextSystemPrompt string
param aiProjectConnectionString string
param useAIProjectClientFlag string = 'false'
param aiProjectName string
+param applicationInsightsConnectionString string
// var WebAppImageName = 'DOCKER|byoaiacontainer.azurecr.io/byoaia-app:latest'
@@ -215,6 +216,10 @@ resource Website 'Microsoft.Web/sites@2020-06-01' = {
name: 'APPINSIGHTS_INSTRUMENTATIONKEY'
value: reference(applicationInsightsId, '2015-05-01').InstrumentationKey
}
+ {
+ name: 'APPLICATIONINSIGHTS_CONNECTION_STRING'
+ value: applicationInsightsConnectionString
+ }
{
name: 'AZURE_SEARCH_SERVICE'
value: AzureSearchService
diff --git a/infra/main.bicep b/infra/main.bicep
index 3e286e79f..e437e94d1 100644
--- a/infra/main.bicep
+++ b/infra/main.bicep
@@ -246,6 +246,7 @@ module appserviceModule 'deploy_app_service.bicep' = {
streamTextSystemPrompt: functionAppStreamTextSystemPrompt
aiProjectConnectionString:keyVault.getSecret('AZURE-AI-PROJECT-CONN-STRING')
aiProjectName:aifoundry.outputs.aiProjectName
+ applicationInsightsConnectionString:aifoundry.outputs.applicationInsightsConnectionString
}
scope: resourceGroup(resourceGroup().name)
}
diff --git a/infra/main.json b/infra/main.json
index 0e4dc7597..ce81ca845 100644
--- a/infra/main.json
+++ b/infra/main.json
@@ -5,7 +5,7 @@
"_generator": {
"name": "bicep",
"version": "0.34.44.8038",
- "templateHash": "1797657337218629559"
+ "templateHash": "9713836480105967098"
}
},
"parameters": {
@@ -708,7 +708,7 @@
"_generator": {
"name": "bicep",
"version": "0.34.44.8038",
- "templateHash": "3569608512312433081"
+ "templateHash": "18186919711353368589"
}
},
"parameters": {
@@ -1016,11 +1016,11 @@
"name": "[format('{0}/{1}', variables('aiHubName'), format('{0}-connection-AzureOpenAI', variables('aiHubName')))]",
"properties": {
"category": "AIServices",
- "target": "[reference(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2021-10-01').endpoint]",
+ "target": "[reference(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2024-04-01-preview').endpoint]",
"authType": "ApiKey",
"isSharedToAll": true,
"credentials": {
- "key": "[listKeys(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2021-10-01').key1]"
+ "key": "[listKeys(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2024-04-01-preview').key1]"
},
"metadata": {
"ApiType": "Azure",
@@ -1122,7 +1122,7 @@
},
{
"type": "Microsoft.CognitiveServices/accounts",
- "apiVersion": "2021-10-01",
+ "apiVersion": "2024-04-01-preview",
"name": "[variables('aiServicesName')]",
"location": "[variables('location')]",
"sku": {
@@ -1131,9 +1131,6 @@
"kind": "AIServices",
"properties": {
"customSubDomainName": "[variables('aiServicesName')]",
- "apiProperties": {
- "statisticsEnabled": false
- },
"publicNetworkAccess": "Enabled"
}
},
@@ -1303,7 +1300,7 @@
"apiVersion": "2021-11-01-preview",
"name": "[format('{0}/{1}', parameters('keyVaultName'), 'AZURE-OPENAI-KEY')]",
"properties": {
- "value": "[listKeys(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2021-10-01').key1]"
+ "value": "[listKeys(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2024-04-01-preview').key1]"
},
"dependsOn": [
"[resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName'))]"
@@ -1330,7 +1327,7 @@
"apiVersion": "2021-11-01-preview",
"name": "[format('{0}/{1}', parameters('keyVaultName'), 'AZURE-OPENAI-ENDPOINT')]",
"properties": {
- "value": "[reference(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2021-10-01').endpoint]"
+ "value": "[reference(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2024-04-01-preview').endpoint]"
},
"dependsOn": [
"[resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName'))]"
@@ -1393,7 +1390,7 @@
"apiVersion": "2021-11-01-preview",
"name": "[format('{0}/{1}', parameters('keyVaultName'), 'COG-SERVICES-ENDPOINT')]",
"properties": {
- "value": "[reference(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2021-10-01').endpoint]"
+ "value": "[reference(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2024-04-01-preview').endpoint]"
},
"dependsOn": [
"[resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName'))]"
@@ -1404,7 +1401,7 @@
"apiVersion": "2021-11-01-preview",
"name": "[format('{0}/{1}', parameters('keyVaultName'), 'COG-SERVICES-KEY')]",
"properties": {
- "value": "[listKeys(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2021-10-01').key1]"
+ "value": "[listKeys(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2024-04-01-preview').key1]"
},
"dependsOn": [
"[resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName'))]"
@@ -1454,7 +1451,7 @@
},
"aiServicesTarget": {
"type": "string",
- "value": "[reference(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2021-10-01').endpoint]"
+ "value": "[reference(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2024-04-01-preview').endpoint]"
},
"aiServicesName": {
"type": "string",
@@ -1495,6 +1492,10 @@
"storageAccountName": {
"type": "string",
"value": "[variables('storageNameCleaned')]"
+ },
+ "applicationInsightsConnectionString": {
+ "type": "string",
+ "value": "[reference(resourceId('Microsoft.Insights/components', variables('applicationInsightsName')), '2020-02-02').ConnectionString]"
}
}
}
@@ -2296,6 +2297,9 @@
},
"aiProjectName": {
"value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.aiProjectName.value]"
+ },
+ "applicationInsightsConnectionString": {
+ "value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.applicationInsightsConnectionString.value]"
}
},
"template": {
@@ -2305,7 +2309,7 @@
"_generator": {
"name": "bicep",
"version": "0.34.44.8038",
- "templateHash": "8701343999231764795"
+ "templateHash": "15866728948176241669"
}
},
"parameters": {
@@ -2693,6 +2697,9 @@
},
"aiProjectName": {
"type": "string"
+ },
+ "applicationInsightsConnectionString": {
+ "type": "string"
}
},
"variables": {
@@ -2732,6 +2739,10 @@
"name": "APPINSIGHTS_INSTRUMENTATIONKEY",
"value": "[reference(parameters('applicationInsightsId'), '2015-05-01').InstrumentationKey]"
},
+ {
+ "name": "APPLICATIONINSIGHTS_CONNECTION_STRING",
+ "value": "[parameters('applicationInsightsConnectionString')]"
+ },
{
"name": "AZURE_SEARCH_SERVICE",
"value": "[parameters('AzureSearchService')]"
diff --git a/src/App/app.py b/src/App/app.py
index 411829551..4c9357573 100644
--- a/src/App/app.py
+++ b/src/App/app.py
@@ -37,6 +37,10 @@
from db import dict_cursor
from backend.chat_logic_handler import stream_response_from_wealth_assistant
+from backend.event_utils import track_event_if_configured
+from azure.monitor.opentelemetry import configure_azure_monitor
+from opentelemetry import trace
+from opentelemetry.trace import Status, StatusCode
bp = Blueprint("routes", __name__, static_folder="static", template_folder="static")
@@ -61,6 +65,30 @@
UI_FAVICON = os.environ.get("UI_FAVICON") or "/favicon.ico"
UI_SHOW_SHARE_BUTTON = os.environ.get("UI_SHOW_SHARE_BUTTON", "true").lower() == "true"
+# Check if the Application Insights Instrumentation Key is set in the environment variables
+instrumentation_key = os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING")
+if instrumentation_key:
+ # Configure Application Insights if the Instrumentation Key is found
+ configure_azure_monitor(connection_string=instrumentation_key)
+ logging.info("Application Insights configured with the provided Instrumentation Key")
+else:
+ # Log a warning if the Instrumentation Key is not found
+ logging.warning("No Application Insights Instrumentation Key found. Skipping configuration")
+
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+
+# Suppress INFO logs from 'azure.core.pipeline.policies.http_logging_policy'
+logging.getLogger("azure.core.pipeline.policies.http_logging_policy").setLevel(
+ logging.WARNING
+)
+logging.getLogger("azure.identity.aio._internal").setLevel(logging.WARNING)
+
+# Suppress info logs from OpenTelemetry exporter
+logging.getLogger("azure.monitor.opentelemetry.exporter.export._base").setLevel(
+ logging.WARNING
+)
+
def create_app():
app = Quart(__name__)
@@ -384,9 +412,19 @@ def init_openai_client(use_data=SHOULD_USE_DATA):
azure_endpoint=endpoint,
)
+ track_event_if_configured("AzureOpenAIClientInitialized", {
+ "status": "success",
+ "endpoint": endpoint,
+ "use_api_key": bool(aoai_api_key),
+ })
+
return azure_openai_client
except Exception as e:
logging.exception("Exception in Azure OpenAI initialization", e)
+ span = trace.get_current_span()
+ if span is not None:
+ span.record_exception(e)
+ span.set_status(Status(StatusCode.ERROR, str(e)))
azure_openai_client = None
raise e
@@ -411,8 +449,20 @@ def init_cosmosdb_client():
container_name=AZURE_COSMOSDB_CONVERSATIONS_CONTAINER,
enable_message_feedback=AZURE_COSMOSDB_ENABLE_FEEDBACK,
)
+
+ track_event_if_configured("CosmosDBClientInitialized", {
+ "status": "success",
+ "endpoint": cosmos_endpoint,
+ "database": AZURE_COSMOSDB_DATABASE,
+ "container": AZURE_COSMOSDB_CONVERSATIONS_CONTAINER,
+ "feedback_enabled": AZURE_COSMOSDB_ENABLE_FEEDBACK,
+ })
except Exception as e:
logging.exception("Exception in CosmosDB initialization", e)
+ span = trace.get_current_span()
+ if span is not None:
+ span.record_exception(e)
+ span.set_status(Status(StatusCode.ERROR, str(e)))
cosmos_conversation_client = None
raise e
else:
@@ -425,6 +475,7 @@ def get_configured_data_source():
data_source = {}
query_type = "simple"
if DATASOURCE_TYPE == "AzureCognitiveSearch":
+ track_event_if_configured("datasource_selected", {"type": "AzureCognitiveSearch"})
# Set query type
if AZURE_SEARCH_QUERY_TYPE:
query_type = AZURE_SEARCH_QUERY_TYPE
@@ -433,6 +484,7 @@ def get_configured_data_source():
and AZURE_SEARCH_SEMANTIC_SEARCH_CONFIG
):
query_type = "semantic"
+ track_event_if_configured("query_type_determined", {"query_type": query_type})
# Set filter
filter = None
@@ -441,11 +493,13 @@ def get_configured_data_source():
userToken = request.headers.get("X-MS-TOKEN-AAD-ACCESS-TOKEN", "")
logging.debug(f"USER TOKEN is {'present' if userToken else 'not present'}")
if not userToken:
+ track_event_if_configured("user_token_missing", {})
raise Exception(
"Document-level access control is enabled, but user access token could not be fetched."
)
filter = generateFilterString(userToken)
+ track_event_if_configured("filter_generated", {"filter": filter})
logging.debug(f"FILTER: {filter}")
# Set authentication
@@ -455,6 +509,7 @@ def get_configured_data_source():
else:
# If key is not provided, assume AOAI resource identity has been granted access to the search service
authentication = {"type": "system_assigned_managed_identity"}
+ track_event_if_configured("authentication_set", {"auth_type": authentication["type"]})
data_source = {
"type": "azure_search",
@@ -508,6 +563,7 @@ def get_configured_data_source():
}
elif DATASOURCE_TYPE == "AzureCosmosDB":
query_type = "vector"
+ track_event_if_configured("datasource_selected", {"type": "AzureCosmosDB"})
data_source = {
"type": "azure_cosmos_db",
@@ -566,8 +622,10 @@ def get_configured_data_source():
},
}
elif DATASOURCE_TYPE == "Elasticsearch":
+ track_event_if_configured("datasource_selected", {"type": "Elasticsearch"})
if ELASTICSEARCH_QUERY_TYPE:
query_type = ELASTICSEARCH_QUERY_TYPE
+ track_event_if_configured("query_type_determined", {"query_type": query_type})
data_source = {
"type": "elasticsearch",
@@ -621,8 +679,10 @@ def get_configured_data_source():
},
}
elif DATASOURCE_TYPE == "AzureMLIndex":
+ track_event_if_configured("datasource_selected", {"type": "AzureMLIndex"})
if AZURE_MLINDEX_QUERY_TYPE:
query_type = AZURE_MLINDEX_QUERY_TYPE
+ track_event_if_configured("query_type_determined", {"query_type": query_type})
data_source = {
"type": "azure_ml_index",
@@ -674,6 +734,7 @@ def get_configured_data_source():
}
elif DATASOURCE_TYPE == "Pinecone":
query_type = "vector"
+ track_event_if_configured("datasource_selected", {"type": "Pinecone"})
data_source = {
"type": "pinecone",
@@ -716,6 +777,7 @@ def get_configured_data_source():
},
}
else:
+ track_event_if_configured("unknown_datasource_type", {"type": DATASOURCE_TYPE})
raise Exception(
f"DATASOURCE_TYPE is not configured or unknown: {DATASOURCE_TYPE}"
)
@@ -742,15 +804,26 @@ def get_configured_data_source():
"model_id": ELASTICSEARCH_EMBEDDING_MODEL_ID,
}
else:
+ track_event_if_configured("embedding_dependency_missing", {
+ "datasource_type": DATASOURCE_TYPE,
+ "query_type": query_type
+ })
raise Exception(
f"Vector query type ({query_type}) is selected for data source type {DATASOURCE_TYPE} but no embedding dependency is configured"
)
+ track_event_if_configured("embedding_dependency_set", {
+ "embedding_type": embeddingDependency.get("type")
+ })
data_source["parameters"]["embedding_dependency"] = embeddingDependency
-
+ track_event_if_configured("get_configured_data_source_complete", {
+ "datasource_type": DATASOURCE_TYPE,
+ "query_type": query_type
+ })
return data_source
def prepare_model_args(request_body, request_headers):
+ track_event_if_configured("prepare_model_args_start", {})
request_messages = request_body.get("messages", [])
messages = []
if not SHOULD_USE_DATA:
@@ -775,6 +848,7 @@ def prepare_model_args(request_body, request_headers):
),
}
user_json = json.dumps(user_args)
+ track_event_if_configured("ms_defender_user_info_added", {"user_id": user_args["EndUserId"]})
model_args = {
"messages": messages,
@@ -792,6 +866,7 @@ def prepare_model_args(request_body, request_headers):
}
if SHOULD_USE_DATA:
+ track_event_if_configured("ms_defender_user_info_added", {"user_id": user_args["EndUserId"]})
model_args["extra_body"] = {"data_sources": [get_configured_data_source()]}
model_args_clean = copy.deepcopy(model_args)
@@ -829,11 +904,13 @@ def prepare_model_args(request_body, request_headers):
]["authentication"][field] = "*****"
logging.debug(f"REQUEST BODY: {json.dumps(model_args_clean, indent=4)}")
+ track_event_if_configured("prepare_model_args_complete", {"model": AZURE_OPENAI_MODEL})
return model_args
async def promptflow_request(request):
+ track_event_if_configured("promptflow_request_start", {})
try:
headers = {
"Content-Type": "application/json",
@@ -861,12 +938,18 @@ async def promptflow_request(request):
)
resp = response.json()
resp["id"] = request["messages"][-1]["id"]
+ track_event_if_configured("promptflow_request_success", {})
return resp
except Exception as e:
+ span = trace.get_current_span()
+ if span is not None:
+ span.record_exception(e)
+ span.set_status(Status(StatusCode.ERROR, str(e)))
logging.error(f"An error occurred while making promptflow_request: {e}")
async def send_chat_request(request_body, request_headers):
+ track_event_if_configured("send_chat_request_start", {})
filtered_messages = []
messages = request_body.get("messages", [])
for message in messages:
@@ -885,13 +968,20 @@ async def send_chat_request(request_body, request_headers):
)
response = raw_response.parse()
apim_request_id = raw_response.headers.get("apim-request-id")
+
+ track_event_if_configured("send_chat_request_success", {"model": model_args.get("model")})
except Exception as e:
+ span = trace.get_current_span()
+ if span is not None:
+ span.record_exception(e)
+ span.set_status(Status(StatusCode.ERROR, str(e)))
logging.exception("Exception in send_chat_request")
raise e
return response, apim_request_id
async def complete_chat_request(request_body, request_headers):
+ track_event_if_configured("complete_chat_request_start", {})
if USE_PROMPTFLOW and PROMPTFLOW_ENDPOINT and PROMPTFLOW_API_KEY:
response = await promptflow_request(request_body)
history_metadata = request_body.get("history_metadata", {})
@@ -902,6 +992,7 @@ async def complete_chat_request(request_body, request_headers):
PROMPTFLOW_CITATIONS_FIELD_NAME,
)
elif USE_INTERNAL_STREAM:
+ track_event_if_configured("internal_stream_selected", {})
request_body = await request.get_json()
client_id = request_body.get("client_id")
print(request_body)
@@ -963,10 +1054,13 @@ async def complete_chat_request(request_body, request_headers):
{"role": "assistant", "content": query_response}
)
+ track_event_if_configured("complete_chat_request_success", {"client_id": client_id})
+
return response
async def stream_chat_request(request_body, request_headers):
+ track_event_if_configured("stream_chat_request_start", {})
if USE_INTERNAL_STREAM:
history_metadata = request_body.get("history_metadata", {})
# function_url = STREAMING_AZUREFUNCTION_ENDPOINT
@@ -974,8 +1068,10 @@ async def stream_chat_request(request_body, request_headers):
client_id = request_body.get("client_id")
if client_id is None:
+ track_event_if_configured("client_id_missing", {})
return jsonify({"error": "No client ID provided"}), 400
query = request_body.get("messages")[-1].get("content")
+ track_event_if_configured("stream_internal_selected", {"client_id": client_id})
sk_response = await stream_response_from_wealth_assistant(query, client_id)
@@ -1028,11 +1124,16 @@ async def generate():
yield format_stream_response(
completionChunk, history_metadata, apim_request_id
)
-
+ track_event_if_configured("stream_openai_selected", {})
return generate()
async def conversation_internal(request_body, request_headers):
+ track_event_if_configured("conversation_internal_start", {
+ "streaming": SHOULD_STREAM,
+ "promptflow": USE_PROMPTFLOW,
+ "internal_stream": USE_INTERNAL_STREAM
+ })
try:
if SHOULD_STREAM:
return await stream_chat_request(request_body, request_headers)
@@ -1042,9 +1143,14 @@ async def conversation_internal(request_body, request_headers):
# return response
else:
result = await complete_chat_request(request_body, request_headers)
+ track_event_if_configured("conversation_internal_success", {})
return jsonify(result)
except Exception as ex:
+ span = trace.get_current_span()
+ if span is not None:
+ span.record_exception(ex)
+ span.set_status(Status(StatusCode.ERROR, str(ex)))
logging.exception(ex)
if hasattr(ex, "status_code"):
return jsonify({"error": str(ex)}), ex.status_code
@@ -1055,9 +1161,10 @@ async def conversation_internal(request_body, request_headers):
@bp.route("/conversation", methods=["POST"])
async def conversation():
if not request.is_json:
+ track_event_if_configured("invalid_request_format", {})
return jsonify({"error": "request must be json"}), 415
request_json = await request.get_json()
-
+ track_event_if_configured("conversation_api_invoked", {})
return await conversation_internal(request_json, request.headers)
@@ -1067,6 +1174,10 @@ def get_frontend_settings():
return jsonify(frontend_settings), 200
except Exception as e:
logging.exception("Exception in /frontend_settings")
+ span = trace.get_current_span()
+ if span is not None:
+ span.record_exception(e)
+ span.set_status(Status(StatusCode.ERROR, str(e)))
return jsonify({"error": str(e)}), 500
@@ -1075,6 +1186,10 @@ def get_frontend_settings():
async def add_conversation():
authenticated_user = get_authenticated_user_details(request_headers=request.headers)
user_id = authenticated_user["user_principal_id"]
+ track_event_if_configured(
+ "HistoryGenerate_Start",
+ {"user_id": user_id}
+ )
# check request for conversation_id
request_json = await request.get_json()
@@ -1097,6 +1212,15 @@ async def add_conversation():
history_metadata["title"] = title
history_metadata["date"] = conversation_dict["createdAt"]
+ track_event_if_configured(
+ "ConversationCreated",
+ {
+ "user_id": user_id,
+ "conversation_id": conversation_id,
+ "title": title
+ }
+ )
+
# Format the incoming message object in the "chat/completions" messages format
# then write it to the conversation history in cosmos
messages = request_json["messages"]
@@ -1113,6 +1237,14 @@ async def add_conversation():
+ conversation_id
+ "."
)
+ track_event_if_configured(
+ "UserMessageAdded",
+ {
+ "user_id": user_id,
+ "conversation_id": conversation_id,
+ "message": messages[-1],
+ }
+ )
else:
raise Exception("No user message found")
@@ -1122,9 +1254,28 @@ async def add_conversation():
request_body = await request.get_json()
history_metadata["conversation_id"] = conversation_id
request_body["history_metadata"] = history_metadata
+ track_event_if_configured(
+ "SendingToChatCompletions",
+ {
+ "user_id": user_id,
+ "conversation_id": conversation_id
+ }
+ )
+
+ track_event_if_configured(
+ "HistoryGenerate_Completed",
+ {
+ "user_id": user_id,
+ "conversation_id": conversation_id
+ }
+ )
return await conversation_internal(request_body, request.headers)
except Exception as e:
+ span = trace.get_current_span()
+ if span is not None:
+ span.record_exception(e)
+ span.set_status(Status(StatusCode.ERROR, str(e)))
logging.exception("Exception in /history/generate")
return jsonify({"error": str(e)}), 500
@@ -1138,6 +1289,11 @@ async def update_conversation():
request_json = await request.get_json()
conversation_id = request_json.get("conversation_id", None)
+ track_event_if_configured("UpdateConversation_Start", {
+ "user_id": user_id,
+ "conversation_id": conversation_id
+ })
+
try:
# make sure cosmos is configured
cosmos_conversation_client = init_cosmosdb_client()
@@ -1160,6 +1316,10 @@ async def update_conversation():
user_id=user_id,
input_message=messages[-2],
)
+ track_event_if_configured("ToolMessageStored", {
+ "user_id": user_id,
+ "conversation_id": conversation_id
+ })
# write the assistant message
await cosmos_conversation_client.create_message(
uuid=messages[-1]["id"],
@@ -1167,16 +1327,28 @@ async def update_conversation():
user_id=user_id,
input_message=messages[-1],
)
+ track_event_if_configured("AssistantMessageStored", {
+ "user_id": user_id,
+ "conversation_id": conversation_id,
+ "message": messages[-1]
+ })
else:
raise Exception("No bot messages found")
-
# Submit request to Chat Completions for response
await cosmos_conversation_client.cosmosdb_client.close()
+ track_event_if_configured("UpdateConversation_Success", {
+ "user_id": user_id,
+ "conversation_id": conversation_id
+ })
response = {"success": True}
return jsonify(response), 200
except Exception as e:
logging.exception("Exception in /history/update")
+ span = trace.get_current_span()
+ if span is not None:
+ span.record_exception(e)
+ span.set_status(Status(StatusCode.ERROR, str(e)))
return jsonify({"error": str(e)}), 500
@@ -1190,6 +1362,11 @@ async def update_message():
request_json = await request.get_json()
message_id = request_json.get("message_id", None)
message_feedback = request_json.get("message_feedback", None)
+
+ track_event_if_configured("MessageFeedback_Start", {
+ "user_id": user_id,
+ "message_id": message_id
+ })
try:
if not message_id:
return jsonify({"error": "message_id is required"}), 400
@@ -1202,6 +1379,11 @@ async def update_message():
user_id, message_id, message_feedback
)
if updated_message:
+ track_event_if_configured("MessageFeedback_Updated", {
+ "user_id": user_id,
+ "message_id": message_id,
+ "feedback": message_feedback
+ })
return (
jsonify(
{
@@ -1212,6 +1394,10 @@ async def update_message():
200,
)
else:
+ track_event_if_configured("MessageFeedback_NotFound", {
+ "user_id": user_id,
+ "message_id": message_id
+ })
return (
jsonify(
{
@@ -1223,6 +1409,10 @@ async def update_message():
except Exception as e:
logging.exception("Exception in /history/message_feedback")
+ span = trace.get_current_span()
+ if span is not None:
+ span.record_exception(e)
+ span.set_status(Status(StatusCode.ERROR, str(e)))
return jsonify({"error": str(e)}), 500
@@ -1236,6 +1426,11 @@ async def delete_conversation():
request_json = await request.get_json()
conversation_id = request_json.get("conversation_id", None)
+ track_event_if_configured("DeleteConversation_Start", {
+ "user_id": user_id,
+ "conversation_id": conversation_id
+ })
+
try:
if not conversation_id:
return jsonify({"error": "conversation_id is required"}), 400
@@ -1253,6 +1448,11 @@ async def delete_conversation():
await cosmos_conversation_client.cosmosdb_client.close()
+ track_event_if_configured("DeleteConversation_Success", {
+ "user_id": user_id,
+ "conversation_id": conversation_id
+ })
+
return (
jsonify(
{
@@ -1264,6 +1464,10 @@ async def delete_conversation():
)
except Exception as e:
logging.exception("Exception in /history/delete")
+ span = trace.get_current_span()
+ if span is not None:
+ span.record_exception(e)
+ span.set_status(Status(StatusCode.ERROR, str(e)))
return jsonify({"error": str(e)}), 500
@@ -1273,6 +1477,11 @@ async def list_conversations():
authenticated_user = get_authenticated_user_details(request_headers=request.headers)
user_id = authenticated_user["user_principal_id"]
+ track_event_if_configured("ListConversations_Start", {
+ "user_id": user_id,
+ "offset": offset
+ })
+
# make sure cosmos is configured
cosmos_conversation_client = init_cosmosdb_client()
if not cosmos_conversation_client:
@@ -1284,10 +1493,19 @@ async def list_conversations():
)
await cosmos_conversation_client.cosmosdb_client.close()
if not isinstance(conversations, list):
+ track_event_if_configured("ListConversations_Empty", {
+ "user_id": user_id,
+ "offset": offset
+ })
return jsonify({"error": f"No conversations for {user_id} were found"}), 404
# return the conversation ids
+ track_event_if_configured("ListConversations_Success", {
+ "user_id": user_id,
+ "conversation_count": len(conversations)
+ })
+
return jsonify(conversations), 200
@@ -1300,7 +1518,17 @@ async def get_conversation():
request_json = await request.get_json()
conversation_id = request_json.get("conversation_id", None)
+ track_event_if_configured("GetConversation_Start", {
+ "user_id": user_id,
+ "conversation_id": conversation_id,
+ })
+
if not conversation_id:
+ track_event_if_configured("GetConversation_Failed", {
+ "user_id": user_id,
+ "conversation_id": conversation_id,
+ "error": f"Conversation {conversation_id} not found",
+ })
return jsonify({"error": "conversation_id is required"}), 400
# make sure cosmos is configured
@@ -1341,6 +1569,11 @@ async def get_conversation():
]
await cosmos_conversation_client.cosmosdb_client.close()
+ track_event_if_configured("GetConversation_Success", {
+ "user_id": user_id,
+ "conversation_id": conversation_id,
+ "message_count": len(messages)
+ })
return jsonify({"conversation_id": conversation_id, "messages": messages}), 200
@@ -1353,7 +1586,17 @@ async def rename_conversation():
request_json = await request.get_json()
conversation_id = request_json.get("conversation_id", None)
+ track_event_if_configured("RenameConversation_Start", {
+ "user_id": user_id,
+ "conversation_id": conversation_id
+ })
+
if not conversation_id:
+ track_event_if_configured("RenameConversation_Failed", {
+ "user_id": user_id,
+ "conversation_id": conversation_id,
+ "error": f"Conversation {conversation_id} not found",
+ })
return jsonify({"error": "conversation_id is required"}), 400
# make sure cosmos is configured
@@ -1385,6 +1628,12 @@ async def rename_conversation():
)
await cosmos_conversation_client.cosmosdb_client.close()
+
+ track_event_if_configured("RenameConversation_Success", {
+ "user_id": user_id,
+ "conversation_id": conversation_id,
+ "new_title": title
+ })
return jsonify(updated_conversation), 200
@@ -1394,6 +1643,10 @@ async def delete_all_conversations():
authenticated_user = get_authenticated_user_details(request_headers=request.headers)
user_id = authenticated_user["user_principal_id"]
+ track_event_if_configured("DeleteAllConversations_Start", {
+ "user_id": user_id
+ })
+
# get conversations for user
try:
# make sure cosmos is configured
@@ -1405,6 +1658,9 @@ async def delete_all_conversations():
user_id, offset=0, limit=None
)
if not conversations:
+ track_event_if_configured("DeleteAllConversations_Empty", {
+ "user_id": user_id,
+ })
return jsonify({"error": f"No conversations for {user_id} were found"}), 404
# delete each conversation
@@ -1419,6 +1675,12 @@ async def delete_all_conversations():
user_id, conversation["id"]
)
await cosmos_conversation_client.cosmosdb_client.close()
+
+ track_event_if_configured("DeleteAllConversations_Success", {
+ "user_id": user_id,
+ "conversation_count": len(conversations)
+ })
+
return (
jsonify(
{
@@ -1430,6 +1692,10 @@ async def delete_all_conversations():
except Exception as e:
logging.exception("Exception in /history/delete_all")
+ span = trace.get_current_span()
+ if span is not None:
+ span.record_exception(e)
+ span.set_status(Status(StatusCode.ERROR, str(e)))
return jsonify({"error": str(e)}), 500
@@ -1443,8 +1709,18 @@ async def clear_messages():
request_json = await request.get_json()
conversation_id = request_json.get("conversation_id", None)
+ track_event_if_configured("ClearConversationMessages_Start", {
+ "user_id": user_id,
+ "conversation_id": conversation_id,
+ })
+
try:
if not conversation_id:
+ track_event_if_configured("ClearConversationMessages_Failed", {
+ "user_id": user_id,
+ "conversation_id": conversation_id,
+ "error": "conversation_id is required"
+ })
return jsonify({"error": "conversation_id is required"}), 400
# make sure cosmos is configured
@@ -1455,6 +1731,11 @@ async def clear_messages():
# delete the conversation messages from cosmos
await cosmos_conversation_client.delete_messages(conversation_id, user_id)
+ track_event_if_configured("ClearConversationMessages_Success", {
+ "user_id": user_id,
+ "conversation_id": conversation_id
+ })
+
return (
jsonify(
{
@@ -1466,12 +1747,19 @@ async def clear_messages():
)
except Exception as e:
logging.exception("Exception in /history/clear_messages")
+ span = trace.get_current_span()
+ if span is not None:
+ span.record_exception(e)
+ span.set_status(Status(StatusCode.ERROR, str(e)))
return jsonify({"error": str(e)}), 500
@bp.route("/history/ensure", methods=["GET"])
async def ensure_cosmos():
if not AZURE_COSMOSDB_ACCOUNT:
+ track_event_if_configured("EnsureCosmosDB_Failed", {
+ "error": "CosmosDB is not configured",
+ })
return jsonify({"error": "CosmosDB is not configured"}), 404
try:
@@ -1479,13 +1767,23 @@ async def ensure_cosmos():
success, err = await cosmos_conversation_client.ensure()
if not cosmos_conversation_client or not success:
if err:
+ track_event_if_configured("EnsureCosmosDB_Failed", {
+ "error": err,
+ })
return jsonify({"error": err}), 422
return jsonify({"error": "CosmosDB is not configured or not working"}), 500
await cosmos_conversation_client.cosmosdb_client.close()
+ track_event_if_configured("EnsureCosmosDB_Failed", {
+ "error": "CosmosDB is not configured or not working",
+ })
return jsonify({"message": "CosmosDB is configured and working"}), 200
except Exception as e:
logging.exception("Exception in /history/ensure")
+ span = trace.get_current_span()
+ if span is not None:
+ span.record_exception(e)
+ span.set_status(Status(StatusCode.ERROR, str(e)))
cosmos_exception = str(e)
if "Invalid credentials" in cosmos_exception:
return jsonify({"error": cosmos_exception}), 401
@@ -1512,6 +1810,7 @@ async def ensure_cosmos():
async def generate_title(conversation_messages):
+
# make sure the messages are sorted by _ts descending
title_prompt = 'Summarize the conversation so far into a 4-word or less title. Do not use any quotation marks or punctuation. Respond with a json object in the format {{"title": string}}. Do not include any other commentary or description.'
@@ -1540,6 +1839,8 @@ async def generate_title(conversation_messages):
@bp.route("/api/users", methods=["GET"])
def get_users():
+
+ track_event_if_configured("UserFetch_Start", {})
conn = None
try:
conn = get_connection()
@@ -1594,6 +1895,9 @@ def get_users():
rows = dict_cursor(cursor)
if len(rows) <= 6:
+ track_event_if_configured("UserFetch_SampleUpdate", {
+ "rows_count": len(rows),
+ })
# update ClientMeetings,Assets,Retirement tables sample data to current date
cursor = conn.cursor()
combined_stmt = """
@@ -1678,9 +1982,17 @@ def get_users():
}
users.append(user)
+ track_event_if_configured("UserFetch_Success", {
+ "user_count": len(users),
+ })
+
return jsonify(users)
except Exception as e:
+ span = trace.get_current_span()
+ if span is not None:
+ span.record_exception(e)
+ span.set_status(Status(StatusCode.ERROR, str(e)))
print("Exception occurred:", e)
return str(e), 500
finally:
diff --git a/src/App/backend/event_utils.py b/src/App/backend/event_utils.py
new file mode 100644
index 000000000..c04214b64
--- /dev/null
+++ b/src/App/backend/event_utils.py
@@ -0,0 +1,29 @@
+import logging
+import os
+from azure.monitor.events.extension import track_event
+
+
+def track_event_if_configured(event_name: str, event_data: dict):
+ """Track an event if Application Insights is configured.
+
+ This function safely wraps the Azure Monitor track_event function
+ to handle potential errors with the ProxyLogger.
+
+ Args:
+ event_name: The name of the event to track
+ event_data: Dictionary of event data/dimensions
+ """
+ try:
+ instrumentation_key = os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING")
+ if instrumentation_key:
+ track_event(event_name, event_data)
+ else:
+ logging.warning(
+ f"Skipping track_event for {event_name} as Application Insights is not configured"
+ )
+ except AttributeError as e:
+ # Handle the 'ProxyLogger' object has no attribute 'resource' error
+ logging.warning(f"ProxyLogger error in track_event: {e}")
+ except Exception as e:
+ # Catch any other exceptions to prevent them from bubbling up
+ logging.warning(f"Error in track_event: {e}")
diff --git a/src/App/requirements.txt b/src/App/requirements.txt
index 1a87b8001..4d52ee10d 100644
--- a/src/App/requirements.txt
+++ b/src/App/requirements.txt
@@ -31,4 +31,14 @@ pyodbc==5.2.0
semantic_kernel==1.21.3
azure-search-documents==11.6.0b9
azure-ai-projects==1.0.0b9
-azure-ai-inference==1.0.0b9
\ No newline at end of file
+azure-ai-inference==1.0.0b9
+
+opentelemetry-exporter-otlp-proto-grpc
+opentelemetry-exporter-otlp-proto-http
+opentelemetry-exporter-otlp-proto-grpc
+azure-monitor-events-extension
+opentelemetry-sdk==1.31.1
+opentelemetry-api==1.31.1
+opentelemetry-semantic-conventions==0.52b1
+opentelemetry-instrumentation==0.52b1
+azure-monitor-opentelemetry==1.6.8
\ No newline at end of file
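Taken together, patch 06 wires the app to Application Insights via azure-monitor-opentelemetry and then instruments each route with a start/success event plus span-level error recording. A condensed sketch of that pattern outside the Quart routes, assuming APPLICATIONINSIGHTS_CONNECTION_STRING is set and the backend.event_utils helper introduced above is importable (the function and event names below are illustrative):

```python
# Condensed illustration of the telemetry pattern this patch applies across app.py.
import logging
import os

from azure.monitor.opentelemetry import configure_azure_monitor
from opentelemetry import trace
from opentelemetry.trace import Status, StatusCode

from backend.event_utils import track_event_if_configured

# Configure Azure Monitor only when a connection string is available.
connection_string = os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING")
if connection_string:
    configure_azure_monitor(connection_string=connection_string)


def delete_conversation(user_id: str, conversation_id: str) -> None:
    """Hypothetical operation wrapped with the start/success-event and span-error pattern."""
    track_event_if_configured(
        "DeleteConversation_Start",
        {"user_id": user_id, "conversation_id": conversation_id},
    )
    try:
        ...  # in the real route, the CosmosDB work happens here
        track_event_if_configured(
            "DeleteConversation_Success",
            {"user_id": user_id, "conversation_id": conversation_id},
        )
    except Exception as e:
        # Attach the failure to the current OpenTelemetry span, as the patch does.
        span = trace.get_current_span()
        if span is not None:
            span.record_exception(e)
            span.set_status(Status(StatusCode.ERROR, str(e)))
        logging.exception("Exception in delete_conversation")
        raise
```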
From 58a3f42d805f34d09903314c61c42b647732610a Mon Sep 17 00:00:00 2001
From: Harsh-Microsoft
Date: Tue, 27 May 2025 11:57:12 +0530
Subject: [PATCH 07/19] Update main.json
---
infra/main.json | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/infra/main.json b/infra/main.json
index 859f12668..ccc9b833d 100644
--- a/infra/main.json
+++ b/infra/main.json
@@ -5,7 +5,7 @@
"_generator": {
"name": "bicep",
"version": "0.35.1.17967",
- "templateHash": "12824324392196719415"
+ "templateHash": "10579732773480527563"
}
},
"parameters": {
@@ -339,9 +339,9 @@
"uniqueId": "[toLower(uniqueString(parameters('environmentName'), subscription().id, variables('solutionLocation')))]",
"solutionPrefix": "[format('ca{0}', padLeft(take(variables('uniqueId'), 12), 12, '0'))]",
"abbrs": "[variables('$fxv#0')]",
- "functionAppSqlPrompt": "Generate a valid T-SQL query to find {query} for tables and columns provided below:\r\n 1. Table: Clients\r\n Columns: ClientId, Client, Email, Occupation, MaritalStatus, Dependents\r\n 2. Table: InvestmentGoals\r\n Columns: ClientId, InvestmentGoal\r\n 3. Table: Assets\r\n Columns: ClientId, AssetDate, Investment, ROI, Revenue, AssetType\r\n 4. Table: ClientSummaries\r\n Columns: ClientId, ClientSummary\r\n 5. Table: InvestmentGoalsDetails\r\n Columns: ClientId, InvestmentGoal, TargetAmount, Contribution\r\n 6. Table: Retirement\r\n Columns: ClientId, StatusDate, RetirementGoalProgress, EducationGoalProgress\r\n 7. Table: ClientMeetings\r\n Columns: ClientId, ConversationId, Title, StartTime, EndTime, Advisor, ClientEmail\r\n Always use the Investment column from the Assets table as the value.\r\n Assets table has snapshots of values by date. Do not add numbers across different dates for total values.\r\n Do not use client name in filters.\r\n Do not include assets values unless asked for.\r\n ALWAYS use ClientId = {clientid} in the query filter.\r\n ALWAYS select Client Name (Column: Client) in the query.\r\n Query filters are IMPORTANT. Add filters like AssetType, AssetDate, etc. if needed.\r\n Only return the generated SQL query. Do not return anything else.",
- "functionAppCallTranscriptSystemPrompt": "You are an assistant who supports wealth advisors in preparing for client meetings. \r\n You have access to the client’s past meeting call transcripts. \r\n When answering questions, especially summary requests, provide a detailed and structured response that includes key topics, concerns, decisions, and trends. \r\n If no data is available, state 'No relevant data found for previous meetings.",
- "functionAppStreamTextSystemPrompt": "You are a helpful assistant to a Wealth Advisor. \r\n The currently selected client's name is '{SelectedClientName}', and any case-insensitive or partial mention should be understood as referring to this client.\r\n If no name is provided, assume the question is about '{SelectedClientName}'.\r\n If the query references a different client or includes comparative terms like 'compare' or 'other client', please respond with: 'Please only ask questions about the selected client or select another client.'\r\n Otherwise, provide thorough answers using only data from SQL or call transcripts. \r\n If no data is found, please respond with 'No data found for that client.' Remove any client identifiers from the final response."
+ "functionAppSqlPrompt": "Generate a valid T-SQL query to find {query} for tables and columns provided below:\n 1. Table: Clients\n Columns: ClientId, Client, Email, Occupation, MaritalStatus, Dependents\n 2. Table: InvestmentGoals\n Columns: ClientId, InvestmentGoal\n 3. Table: Assets\n Columns: ClientId, AssetDate, Investment, ROI, Revenue, AssetType\n 4. Table: ClientSummaries\n Columns: ClientId, ClientSummary\n 5. Table: InvestmentGoalsDetails\n Columns: ClientId, InvestmentGoal, TargetAmount, Contribution\n 6. Table: Retirement\n Columns: ClientId, StatusDate, RetirementGoalProgress, EducationGoalProgress\n 7. Table: ClientMeetings\n Columns: ClientId, ConversationId, Title, StartTime, EndTime, Advisor, ClientEmail\n Always use the Investment column from the Assets table as the value.\n Assets table has snapshots of values by date. Do not add numbers across different dates for total values.\n Do not use client name in filters.\n Do not include assets values unless asked for.\n ALWAYS use ClientId = {clientid} in the query filter.\n ALWAYS select Client Name (Column: Client) in the query.\n Query filters are IMPORTANT. Add filters like AssetType, AssetDate, etc. if needed.\n Only return the generated SQL query. Do not return anything else.",
+ "functionAppCallTranscriptSystemPrompt": "You are an assistant who supports wealth advisors in preparing for client meetings. \n You have access to the client’s past meeting call transcripts. \n When answering questions, especially summary requests, provide a detailed and structured response that includes key topics, concerns, decisions, and trends. \n If no data is available, state 'No relevant data found for previous meetings.",
+ "functionAppStreamTextSystemPrompt": "You are a helpful assistant to a Wealth Advisor. \n The currently selected client's name is '{SelectedClientName}', and any case-insensitive or partial mention should be understood as referring to this client.\n If no name is provided, assume the question is about '{SelectedClientName}'.\n If the query references a different client or includes comparative terms like 'compare' or 'other client', please respond with: 'Please only ask questions about the selected client or select another client.'\n Otherwise, provide thorough answers using only data from SQL or call transcripts. \n If no data is found, please respond with 'No data found for that client.' Remove any client identifiers from the final response."
},
"resources": [
{
@@ -708,7 +708,7 @@
"_generator": {
"name": "bicep",
"version": "0.35.1.17967",
- "templateHash": "15504864984003912125"
+ "templateHash": "16963364971780216238"
}
},
"parameters": {
@@ -2310,7 +2310,7 @@
"_generator": {
"name": "bicep",
"version": "0.35.1.17967",
- "templateHash": "9862570739171059712"
+ "templateHash": "18358947382114771550"
}
},
"parameters": {
From 99738aa64f13518a5c70d15439a37762277a86f4 Mon Sep 17 00:00:00 2001
From: Harsh-Microsoft
Date: Thu, 29 May 2025 12:39:28 +0530
Subject: [PATCH 08/19] fix: Replace Gunicorn with Uvicorn for the backend
server (#555)
* Replace Gunicorn with Uvicorn for the backend server and remove Gunicorn configuration
* Update deployment instructions to use Uvicorn instead of Gunicorn
---
docs/LocalSetupAndDeploy.md | 8 +++++---
src/App/WebApp.Dockerfile | 2 +-
src/App/gunicorn.conf.py | 13 -------------
src/App/requirements-dev.txt | 1 -
src/App/requirements.txt | 1 -
5 files changed, 6 insertions(+), 19 deletions(-)
delete mode 100644 src/App/gunicorn.conf.py
diff --git a/docs/LocalSetupAndDeploy.md b/docs/LocalSetupAndDeploy.md
index ca09606fc..6b7547e3e 100644
--- a/docs/LocalSetupAndDeploy.md
+++ b/docs/LocalSetupAndDeploy.md
@@ -40,9 +40,11 @@ Follow these steps to deploy the application to Azure App Service:
If this is your first time deploying the app, use the `az webapp up` command. Run the following commands from the `App` folder, replacing the placeholders with your desired values:
```sh
-az webapp up --runtime PYTHON:3.11 --sku B1 --name --resource-group --location --subscription
+az webapp up --runtime PYTHON:3.11 --sku B1 --name --resource-group --location --subscription
-az webapp config set --startup-file "python3 -m gunicorn app:app" --name --resource-group
+az webapp config set --startup-file "python3 -m uvicorn app:app --host 0.0.0.0 --port 8000" --name --resource-group
+
+az webapp config appsettings set --resource-group --name --settings WEBSITES_PORT=8000
```
Next, configure the required environment variables in the deployed app to ensure it functions correctly.
@@ -83,7 +85,7 @@ az webapp up \
--resource-group
az webapp config set \
- --startup-file "python3 -m gunicorn app:app" \
+ --startup-file "python3 -m uvicorn app:app --host 0.0.0.0 --port 8000" \
--name --resource-group
```
diff --git a/src/App/WebApp.Dockerfile b/src/App/WebApp.Dockerfile
index f54e2e30c..48bcd5ff5 100644
--- a/src/App/WebApp.Dockerfile
+++ b/src/App/WebApp.Dockerfile
@@ -36,4 +36,4 @@ COPY --from=frontend /home/node/app/static /usr/src/app/static/
WORKDIR /usr/src/app
EXPOSE 80
-CMD ["gunicorn", "-b", "0.0.0.0:80", "app:app"]
+CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "80", "--workers", "4", "--log-level", "info", "--access-log"]
diff --git a/src/App/gunicorn.conf.py b/src/App/gunicorn.conf.py
deleted file mode 100644
index b1aded069..000000000
--- a/src/App/gunicorn.conf.py
+++ /dev/null
@@ -1,13 +0,0 @@
-import multiprocessing
-
-max_requests = 1000
-max_requests_jitter = 50
-log_file = "-"
-bind = "0.0.0.0"
-
-timeout = 230
-# https://learn.microsoft.com/en-us/troubleshoot/azure/app-service/web-apps-performance-faqs#why-does-my-request-time-out-after-230-seconds
-
-num_cpus = multiprocessing.cpu_count()
-workers = (num_cpus * 2) + 1
-worker_class = "uvicorn.workers.UvicornWorker"
diff --git a/src/App/requirements-dev.txt b/src/App/requirements-dev.txt
index aacba54f0..302b39b8b 100644
--- a/src/App/requirements-dev.txt
+++ b/src/App/requirements-dev.txt
@@ -7,7 +7,6 @@ python-dotenv==1.0.1
azure-cosmos==4.9.0
quart==0.20.0
uvicorn==0.34.0
-gunicorn==23.0.0
aiohttp==3.11.12
quart-session==3.0.0
pymssql==2.3.2
diff --git a/src/App/requirements.txt b/src/App/requirements.txt
index 4d52ee10d..a02606dfd 100644
--- a/src/App/requirements.txt
+++ b/src/App/requirements.txt
@@ -8,7 +8,6 @@ python-dotenv==1.0.1
azure-cosmos==4.9.0
quart==0.20.0
uvicorn==0.34.0
-gunicorn==23.0.0
aiohttp==3.11.12
quart-session==3.0.0
pymssql==2.3.2
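With gunicorn.conf.py removed, the container CMD above becomes the server entry point; for a comparable local run, uvicorn can also be started from Python. A minimal sketch, assuming the Quart app object is importable as "app:app" (the port and worker count are illustrative, not mandated by the patch):

```python
# Local-run sketch mirroring the new container CMD in WebApp.Dockerfile.
# Assumes this file sits next to app.py so "app:app" resolves; values are illustrative.
import uvicorn

if __name__ == "__main__":
    # Pass the app as an import string so that workers > 1 is honored by uvicorn.
    uvicorn.run("app:app", host="0.0.0.0", port=8000, workers=4, log_level="info")
```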
From 2cc84ded8799468681186e145153f5b449f8adec Mon Sep 17 00:00:00 2001
From: Vamshi-Microsoft
Date: Fri, 30 May 2025 09:06:09 +0000
Subject: [PATCH 09/19] EXP environment changes for Log Analytics workspace
---
docs/CustomizingAzdParameters.md | 5 +++++
docs/DeploymentGuide.md | 2 ++
infra/deploy_ai_foundry.bicep | 19 ++++++++++++++++---
infra/main.bicep | 4 ++++
infra/main.bicepparam | 1 +
5 files changed, 28 insertions(+), 3 deletions(-)
diff --git a/docs/CustomizingAzdParameters.md b/docs/CustomizingAzdParameters.md
index fbc1f73d3..fc02f6d17 100644
--- a/docs/CustomizingAzdParameters.md
+++ b/docs/CustomizingAzdParameters.md
@@ -40,4 +40,9 @@ Change the Embedding Deployment Capacity (choose a number based on available emb
```shell
azd env set AZURE_ENV_EMBEDDING_MODEL_CAPACITY 80
+```
+
+Set the Log Analytics Workspace ID if you want to reuse an existing workspace instead of creating a new one:
+```shell
+azd env set AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID '<existing-log-analytics-workspace-resource-id>'
```
\ No newline at end of file
diff --git a/docs/DeploymentGuide.md b/docs/DeploymentGuide.md
index 96362bb6a..b77c23450 100644
--- a/docs/DeploymentGuide.md
+++ b/docs/DeploymentGuide.md
@@ -114,6 +114,8 @@ When you start the deployment, most parameters will have **default values**, but
| **GPT Model Deployment Capacity** | Configure capacity for **GPT models**. | 30k |
| **Embedding Model** | OpenAI embedding model | text-embedding-ada-002 |
| **Embedding Model Capacity** | Set the capacity for **embedding models**. | 80k |
+| **Existing Log Analytics workspace** | Reuse an existing Log Analytics workspace by providing its resource ID. | |
+
diff --git a/infra/deploy_ai_foundry.bicep b/infra/deploy_ai_foundry.bicep
index b8954277d..ef2e81fc7 100644
--- a/infra/deploy_ai_foundry.bicep
+++ b/infra/deploy_ai_foundry.bicep
@@ -9,6 +9,7 @@ param gptDeploymentCapacity int
param embeddingModel string
param embeddingDeploymentCapacity int
param managedIdentityObjectId string
+param existingLogAnalyticsWorkspaceId string = ''
// Load the abbrevations file required to name the azure resources.
var abbrs = loadJsonContent('./abbreviations.json')
@@ -54,7 +55,16 @@ resource keyVault 'Microsoft.KeyVault/vaults@2022-07-01' existing = {
name: keyVaultName
}
-resource logAnalytics 'Microsoft.OperationalInsights/workspaces@2023-09-01' = {
+var useExisting = !empty(existingLogAnalyticsWorkspaceId)
+var existingLawResourceGroup = useExisting ? split(existingLogAnalyticsWorkspaceId, '/')[4] : ''
+var existingLawName = useExisting ? split(existingLogAnalyticsWorkspaceId, '/')[8] : ''
+
+resource existingLogAnalyticsWorkspace 'Microsoft.OperationalInsights/workspaces@2023-09-01' existing = if (useExisting) {
+ name: existingLawName
+ scope: resourceGroup(existingLawResourceGroup)
+}
+
+resource logAnalytics 'Microsoft.OperationalInsights/workspaces@2023-09-01' = if (!useExisting) {
name: workspaceName
location: location
tags: {}
@@ -93,7 +103,7 @@ resource applicationInsights 'Microsoft.Insights/components@2020-02-02' = {
Application_Type: 'web'
publicNetworkAccessForIngestion: 'Enabled'
publicNetworkAccessForQuery: 'Enabled'
- WorkspaceResourceId: logAnalytics.id
+ WorkspaceResourceId: useExisting ? existingLogAnalyticsWorkspace.id : logAnalytics.id
}
}
@@ -490,7 +500,10 @@ output aiSearchService string = aiSearch.name
output aiProjectName string = aiHubProject.name
output applicationInsightsId string = applicationInsights.id
-output logAnalyticsWorkspaceResourceName string = logAnalytics.name
+output logAnalyticsWorkspaceResourceName string = useExisting ? existingLogAnalyticsWorkspace.name : logAnalytics.name
+output logAnalyticsWorkspaceResourceGroup string = useExisting ? existingLawResourceGroup : resourceGroup().name
+
+
output storageAccountName string = storageNameCleaned
output applicationInsightsConnectionString string = applicationInsights.properties.ConnectionString
diff --git a/infra/main.bicep b/infra/main.bicep
index e437e94d1..b92388651 100644
--- a/infra/main.bicep
+++ b/infra/main.bicep
@@ -6,6 +6,9 @@ targetScope = 'resourceGroup'
@description('A unique prefix for all resources in this deployment. This should be 3-20 characters long:')
param environmentName string
+@description('Optional: Existing Log Analytics Workspace Resource ID')
+param existingLogAnalyticsWorkspaceId string = ''
+
@description('CosmosDB Location')
param cosmosLocation string
@@ -140,6 +143,7 @@ module aifoundry 'deploy_ai_foundry.bicep' = {
embeddingModel: embeddingModel
embeddingDeploymentCapacity: embeddingDeploymentCapacity
managedIdentityObjectId:managedIdentityModule.outputs.managedIdentityOutput.objectId
+ existingLogAnalyticsWorkspaceId: existingLogAnalyticsWorkspaceId
}
scope: resourceGroup(resourceGroup().name)
}
diff --git a/infra/main.bicepparam b/infra/main.bicepparam
index 42c04971b..1e5c053c1 100644
--- a/infra/main.bicepparam
+++ b/infra/main.bicepparam
@@ -9,3 +9,4 @@ param gptDeploymentCapacity = int(readEnvironmentVariable('AZURE_ENV_MODEL_CAPAC
param embeddingDeploymentCapacity = int(readEnvironmentVariable('AZURE_ENV_EMBEDDING_MODEL_CAPACITY', '80'))
param AzureOpenAILocation = readEnvironmentVariable('AZURE_ENV_OPENAI_LOCATION', 'eastus2')
param AZURE_LOCATION = readEnvironmentVariable('AZURE_LOCATION', '')
+param existingLogAnalyticsWorkspaceId = readEnvironmentVariable('AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID', '')
From b88f111bf079d1f380a2ee8dd7915875e476137b Mon Sep 17 00:00:00 2001
From: Abdul-Microsoft
Date: Mon, 2 Jun 2025 13:09:39 +0530
Subject: [PATCH 10/19] refactor: Cleanup the unused variables in all the files
(#557)
* cleanup the unused variables in all the files
* fix: remove unused OPENAI_API_VERSION variable
* fix: resolve unit tests issue
---
.github/dependabot.yml | 9 -
infra/deploy_app_service.bicep | 51 ---
infra/main.bicep | 5 +-
infra/main.json | 83 +----
src/App/.env.sample | 154 +++-----
src/App/app.py | 503 +-------------------------
src/App/backend/chat_logic_handler.py | 4 +-
src/App/tests/backend/test_utils.py | 2 +-
src/App/tests/test_app.py | 13 +-
9 files changed, 64 insertions(+), 760 deletions(-)
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 164355b62..3f0b6a97a 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -20,15 +20,6 @@ updates:
target-branch: "dependabotchanges"
open-pull-requests-limit: 100
- - package-ecosystem: "pip"
- directory: "/src/AzureFunction"
- schedule:
- interval: "monthly"
- commit-message:
- prefix: "build"
- target-branch: "dependabotchanges"
- open-pull-requests-limit: 100
-
- package-ecosystem: "pip"
directory: "/src/infra/scripts/fabric_scripts"
schedule:
diff --git a/infra/deploy_app_service.bicep b/infra/deploy_app_service.bicep
index a5531ce4f..d06cf2f74 100644
--- a/infra/deploy_app_service.bicep
+++ b/infra/deploy_app_service.bicep
@@ -32,9 +32,6 @@ param AzureSearchUseSemanticSearch string = 'False'
@description('Semantic search config')
param AzureSearchSemanticSearchConfig string = 'default'
-@description('Is the index prechunked')
-param AzureSearchIndexIsPrechunked string = 'False'
-
@description('Top K results')
param AzureSearchTopK string = '5'
@@ -59,9 +56,6 @@ param AzureOpenAIResource string
@description('Azure OpenAI Model Deployment Name')
param AzureOpenAIModel string
-@description('Azure OpenAI Model Name')
-param AzureOpenAIModelName string = 'gpt-4o-mini'
-
@description('Azure Open AI Endpoint')
param AzureOpenAIEndpoint string = ''
@@ -116,15 +110,9 @@ param AzureOpenAIEmbeddingkey string = ''
@description('Azure Open AI Embedding Endpoint')
param AzureOpenAIEmbeddingEndpoint string = ''
-@description('Enable chat history by deploying a Cosmos DB instance')
-param WebAppEnableChatHistory string = 'False'
-
@description('Use Azure Function')
param USE_INTERNAL_STREAM string = 'True'
-@description('Azure Function Endpoint')
-param STREAMING_AZUREFUNCTION_ENDPOINT string = ''
-
@description('SQL Database Server Name')
param SQLDB_SERVER string = ''
@@ -163,8 +151,6 @@ param userassignedIdentityId string
param userassignedIdentityClientId string
param applicationInsightsId string
-@secure()
-param azureSearchAdminKey string
param azureSearchServiceEndpoint string
@description('Azure Function App SQL System Prompt')
@@ -240,10 +226,6 @@ resource Website 'Microsoft.Web/sites@2020-06-01' = {
name: 'AZURE_SEARCH_SEMANTIC_SEARCH_CONFIG'
value: AzureSearchSemanticSearchConfig
}
- {
- name: 'AZURE_SEARCH_INDEX_IS_PRECHUNKED'
- value: AzureSearchIndexIsPrechunked
- }
{
name: 'AZURE_SEARCH_TOP_K'
value: AzureSearchTopK
@@ -284,10 +266,6 @@ resource Website 'Microsoft.Web/sites@2020-06-01' = {
name: 'AZURE_OPENAI_KEY'
value: AzureOpenAIKey
}
- {
- name: 'AZURE_OPENAI_MODEL_NAME'
- value: AzureOpenAIModelName
- }
{
name: 'AZURE_OPENAI_TEMPERATURE'
value: AzureOpenAITemperature
@@ -346,11 +324,6 @@ resource Website 'Microsoft.Web/sites@2020-06-01' = {
name: 'AZURE_OPENAI_EMBEDDING_ENDPOINT'
value: AzureOpenAIEmbeddingEndpoint
}
-
- {
- name: 'WEB_APP_ENABLE_CHAT_HISTORY'
- value: WebAppEnableChatHistory
- }
{name: 'SQLDB_SERVER'
value: SQLDB_SERVER
@@ -372,10 +345,6 @@ resource Website 'Microsoft.Web/sites@2020-06-01' = {
value: USE_INTERNAL_STREAM
}
- {name: 'STREAMING_AZUREFUNCTION_ENDPOINT'
- value: STREAMING_AZUREFUNCTION_ENDPOINT
- }
-
{name: 'AZURE_COSMOSDB_ACCOUNT'
value: AZURE_COSMOSDB_ACCOUNT
}
@@ -391,30 +360,10 @@ resource Website 'Microsoft.Web/sites@2020-06-01' = {
//{name: 'VITE_POWERBI_EMBED_URL'
// value: VITE_POWERBI_EMBED_URL
//}
- {
- name: 'SCM_DO_BUILD_DURING_DEPLOYMENT'
- value: 'true'
- }
- {
- name: 'UWSGI_PROCESSES'
- value: '2'
- }
- {
- name: 'UWSGI_THREADS'
- value: '2'
- }
{
name: 'SQLDB_USER_MID'
value: userassignedIdentityClientId
}
- {
- name: 'OPENAI_API_VERSION'
- value: AzureOpenAIApiVersion
- }
- {
- name: 'AZURE_AI_SEARCH_API_KEY'
- value: azureSearchAdminKey
- }
{
name: 'AZURE_AI_SEARCH_ENDPOINT'
value: azureSearchServiceEndpoint
diff --git a/infra/main.bicep b/infra/main.bicep
index e437e94d1..d7fea84da 100644
--- a/infra/main.bicep
+++ b/infra/main.bicep
@@ -200,17 +200,15 @@ module appserviceModule 'deploy_app_service.bicep' = {
AzureSearchKey:keyVault.getSecret('AZURE-SEARCH-KEY')
AzureSearchUseSemanticSearch:'True'
AzureSearchSemanticSearchConfig:'my-semantic-config'
- AzureSearchIndexIsPrechunked:'False'
AzureSearchTopK:'5'
AzureSearchContentColumns:'content'
AzureSearchFilenameColumn:'chunk_id'
AzureSearchTitleColumn:'client_id'
AzureSearchUrlColumn:'sourceurl'
- AzureOpenAIResource:aifoundry.outputs.aiServicesTarget
+ AzureOpenAIResource:aifoundry.outputs.aiServicesName
AzureOpenAIEndpoint:aifoundry.outputs.aiServicesTarget
AzureOpenAIModel:gptModelName
AzureOpenAIKey:keyVault.getSecret('AZURE-OPENAI-KEY')
- AzureOpenAIModelName:gptModelName
AzureOpenAITemperature:'0'
AzureOpenAITopP:'1'
AzureOpenAIMaxTokens:'1000'
@@ -239,7 +237,6 @@ module appserviceModule 'deploy_app_service.bicep' = {
userassignedIdentityClientId:managedIdentityModule.outputs.managedIdentityWebAppOutput.clientId
userassignedIdentityId:managedIdentityModule.outputs.managedIdentityWebAppOutput.id
applicationInsightsId: aifoundry.outputs.applicationInsightsId
- azureSearchAdminKey:keyVault.getSecret('AZURE-SEARCH-KEY')
azureSearchServiceEndpoint:aifoundry.outputs.aiSearchTarget
sqlSystemPrompt: functionAppSqlPrompt
callTranscriptSystemPrompt: functionAppCallTranscriptSystemPrompt
diff --git a/infra/main.json b/infra/main.json
index ccc9b833d..fee4c39e0 100644
--- a/infra/main.json
+++ b/infra/main.json
@@ -2139,9 +2139,6 @@
"AzureSearchSemanticSearchConfig": {
"value": "my-semantic-config"
},
- "AzureSearchIndexIsPrechunked": {
- "value": "False"
- },
"AzureSearchTopK": {
"value": "5"
},
@@ -2158,7 +2155,7 @@
"value": "sourceurl"
},
"AzureOpenAIResource": {
- "value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.aiServicesTarget.value]"
+ "value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.aiServicesName.value]"
},
"AzureOpenAIEndpoint": {
"value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.aiServicesTarget.value]"
@@ -2174,9 +2171,6 @@
"secretName": "AZURE-OPENAI-KEY"
}
},
- "AzureOpenAIModelName": {
- "value": "[parameters('gptModelName')]"
- },
"AzureOpenAITemperature": {
"value": "0"
},
@@ -2268,14 +2262,6 @@
"applicationInsightsId": {
"value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.applicationInsightsId.value]"
},
- "azureSearchAdminKey": {
- "reference": {
- "keyVault": {
- "id": "[extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.KeyVault/vaults', reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.keyvaultName.value)]"
- },
- "secretName": "AZURE-SEARCH-KEY"
- }
- },
"azureSearchServiceEndpoint": {
"value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.aiSearchTarget.value]"
},
@@ -2383,13 +2369,6 @@
"description": "Semantic search config"
}
},
- "AzureSearchIndexIsPrechunked": {
- "type": "string",
- "defaultValue": "False",
- "metadata": {
- "description": "Is the index prechunked"
- }
- },
"AzureSearchTopK": {
"type": "string",
"defaultValue": "5",
@@ -2444,13 +2423,6 @@
"description": "Azure OpenAI Model Deployment Name"
}
},
- "AzureOpenAIModelName": {
- "type": "string",
- "defaultValue": "gpt-4o-mini",
- "metadata": {
- "description": "Azure OpenAI Model Name"
- }
- },
"AzureOpenAIEndpoint": {
"type": "string",
"defaultValue": "",
@@ -2576,13 +2548,6 @@
"description": "Azure Open AI Embedding Endpoint"
}
},
- "WebAppEnableChatHistory": {
- "type": "string",
- "defaultValue": "False",
- "metadata": {
- "description": "Enable chat history by deploying a Cosmos DB instance"
- }
- },
"USE_INTERNAL_STREAM": {
"type": "string",
"defaultValue": "True",
@@ -2590,13 +2555,6 @@
"description": "Use Azure Function"
}
},
- "STREAMING_AZUREFUNCTION_ENDPOINT": {
- "type": "string",
- "defaultValue": "",
- "metadata": {
- "description": "Azure Function Endpoint"
- }
- },
"SQLDB_SERVER": {
"type": "string",
"defaultValue": "",
@@ -2665,9 +2623,6 @@
"applicationInsightsId": {
"type": "string"
},
- "azureSearchAdminKey": {
- "type": "securestring"
- },
"azureSearchServiceEndpoint": {
"type": "string"
},
@@ -2764,10 +2719,6 @@
"name": "AZURE_SEARCH_SEMANTIC_SEARCH_CONFIG",
"value": "[parameters('AzureSearchSemanticSearchConfig')]"
},
- {
- "name": "AZURE_SEARCH_INDEX_IS_PRECHUNKED",
- "value": "[parameters('AzureSearchIndexIsPrechunked')]"
- },
{
"name": "AZURE_SEARCH_TOP_K",
"value": "[parameters('AzureSearchTopK')]"
@@ -2808,10 +2759,6 @@
"name": "AZURE_OPENAI_KEY",
"value": "[parameters('AzureOpenAIKey')]"
},
- {
- "name": "AZURE_OPENAI_MODEL_NAME",
- "value": "[parameters('AzureOpenAIModelName')]"
- },
{
"name": "AZURE_OPENAI_TEMPERATURE",
"value": "[parameters('AzureOpenAITemperature')]"
@@ -2868,10 +2815,6 @@
"name": "AZURE_OPENAI_EMBEDDING_ENDPOINT",
"value": "[parameters('AzureOpenAIEmbeddingEndpoint')]"
},
- {
- "name": "WEB_APP_ENABLE_CHAT_HISTORY",
- "value": "[parameters('WebAppEnableChatHistory')]"
- },
{
"name": "SQLDB_SERVER",
"value": "[parameters('SQLDB_SERVER')]"
@@ -2892,10 +2835,6 @@
"name": "USE_INTERNAL_STREAM",
"value": "[parameters('USE_INTERNAL_STREAM')]"
},
- {
- "name": "STREAMING_AZUREFUNCTION_ENDPOINT",
- "value": "[parameters('STREAMING_AZUREFUNCTION_ENDPOINT')]"
- },
{
"name": "AZURE_COSMOSDB_ACCOUNT",
"value": "[parameters('AZURE_COSMOSDB_ACCOUNT')]"
@@ -2912,30 +2851,10 @@
"name": "AZURE_COSMOSDB_ENABLE_FEEDBACK",
"value": "[parameters('AZURE_COSMOSDB_ENABLE_FEEDBACK')]"
},
- {
- "name": "SCM_DO_BUILD_DURING_DEPLOYMENT",
- "value": "true"
- },
- {
- "name": "UWSGI_PROCESSES",
- "value": "2"
- },
- {
- "name": "UWSGI_THREADS",
- "value": "2"
- },
{
"name": "SQLDB_USER_MID",
"value": "[parameters('userassignedIdentityClientId')]"
},
- {
- "name": "OPENAI_API_VERSION",
- "value": "[parameters('AzureOpenAIApiVersion')]"
- },
- {
- "name": "AZURE_AI_SEARCH_API_KEY",
- "value": "[parameters('azureSearchAdminKey')]"
- },
{
"name": "AZURE_AI_SEARCH_ENDPOINT",
"value": "[parameters('azureSearchServiceEndpoint')]"
diff --git a/src/App/.env.sample b/src/App/.env.sample
index 50f33c7e3..7dc66e86e 100644
--- a/src/App/.env.sample
+++ b/src/App/.env.sample
@@ -1,28 +1,19 @@
-# Chat
-DEBUG=True
+# Azure OpenAI settings
AZURE_OPENAI_RESOURCE=
-AZURE_OPENAI_MODEL=gpt-35-turbo-16k
+AZURE_OPENAI_MODEL="gpt-4o-mini"
AZURE_OPENAI_KEY=
-AZURE_OPENAI_MODEL_NAME=gpt-35-turbo-16k
-AZURE_OPENAI_TEMPERATURE=0
-AZURE_OPENAI_TOP_P=1.0
-AZURE_OPENAI_MAX_TOKENS=1000
+AZURE_OPENAI_TEMPERATURE="0"
+AZURE_OPENAI_TOP_P="1"
+AZURE_OPENAI_MAX_TOKENS="1000"
AZURE_OPENAI_STOP_SEQUENCE=
-AZURE_OPENAI_SEED=
-AZURE_OPENAI_CHOICES_COUNT=1
-AZURE_OPENAI_PRESENCE_PENALTY=0.0
-AZURE_OPENAI_FREQUENCY_PENALTY=0.0
-AZURE_OPENAI_LOGIT_BIAS=
-AZURE_OPENAI_USER=
-AZURE_OPENAI_TOOLS=
-AZURE_OPENAI_TOOL_CHOICE=
-AZURE_OPENAI_SYSTEM_MESSAGE=You are an AI assistant that helps people find information.
-AZURE_OPENAI_PREVIEW_API_VERSION=2024-05-01-preview
-AZURE_OPENAI_STREAM=True
+AZURE_OPENAI_SYSTEM_MESSAGE="You are a helpful Wealth Advisor assistant"
+AZURE_OPENAI_PREVIEW_API_VERSION="2025-01-01-preview"
+AZURE_OPENAI_STREAM="True"
AZURE_OPENAI_ENDPOINT=
-AZURE_OPENAI_EMBEDDING_NAME=text-embedding-ada-002
+AZURE_OPENAI_EMBEDDING_NAME="text-embedding-ada-002"
AZURE_OPENAI_EMBEDDING_ENDPOINT=
AZURE_OPENAI_EMBEDDING_KEY=
+
# User Interface
UI_TITLE=
UI_LOGO=
@@ -30,100 +21,49 @@ UI_CHAT_LOGO=
UI_CHAT_TITLE=
UI_CHAT_DESCRIPTION=
UI_FAVICON=
-# Chat history
+
+# Cosmos DB settings
AZURE_COSMOSDB_ACCOUNT=
-AZURE_COSMOSDB_DATABASE=db_conversation_history
-AZURE_COSMOSDB_CONVERSATIONS_CONTAINER=conversations
-AZURE_COSMOSDB_ACCOUNT_KEY=
-AZURE_COSMOSDB_ENABLE_FEEDBACK=True
-# Chat with data: common settings
-SEARCH_TOP_K=5
-SEARCH_STRICTNESS=3
-SEARCH_ENABLE_IN_DOMAIN=True
-# Chat with data: Azure AI Search
+AZURE_COSMOSDB_DATABASE="db_conversation_history"
+AZURE_COSMOSDB_CONVERSATIONS_CONTAINER="conversations"
+AZURE_COSMOSDB_ENABLE_FEEDBACK="True"
+
+# Azure Search settings
AZURE_SEARCH_SERVICE=
-AZURE_SEARCH_INDEX=
+AZURE_SEARCH_INDEX="transcripts_index"
AZURE_SEARCH_KEY=
-AZURE_SEARCH_SEMANTIC_SEARCH_CONFIG=
-AZURE_SEARCH_INDEX_IS_PRECHUNKED=False
-AZURE_SEARCH_TOP_K=5
-AZURE_SEARCH_ENABLE_IN_DOMAIN=True
-AZURE_SEARCH_CONTENT_COLUMNS=content
-AZURE_SEARCH_FILENAME_COLUMN=sourceurl
-AZURE_SEARCH_TITLE_COLUMN=client_id
-AZURE_SEARCH_URL_COLUMN=sourceurl
-AZURE_SEARCH_VECTOR_COLUMNS=
-AZURE_SEARCH_QUERY_TYPE=simple
+AZURE_SEARCH_SEMANTIC_SEARCH_CONFIG="my-semantic-config"
+AZURE_SEARCH_TOP_K="5"
+AZURE_SEARCH_ENABLE_IN_DOMAIN="False"
+AZURE_SEARCH_CONTENT_COLUMNS="content"
+AZURE_SEARCH_FILENAME_COLUMN="chunk_id"
+AZURE_SEARCH_TITLE_COLUMN="client_id"
+AZURE_SEARCH_URL_COLUMN="sourceurl"
+AZURE_SEARCH_VECTOR_COLUMNS="contentVector"
+AZURE_SEARCH_QUERY_TYPE="simple"
AZURE_SEARCH_PERMITTED_GROUPS_COLUMN=
-AZURE_SEARCH_STRICTNESS=3
-# Chat with data: Azure CosmosDB Mongo VCore
-AZURE_COSMOSDB_MONGO_VCORE_CONNECTION_STRING=
-AZURE_COSMOSDB_MONGO_VCORE_DATABASE=
-AZURE_COSMOSDB_MONGO_VCORE_CONTAINER=
-AZURE_COSMOSDB_MONGO_VCORE_INDEX=
-AZURE_COSMOSDB_MONGO_VCORE_INDEX=
-AZURE_COSMOSDB_MONGO_VCORE_TOP_K=
-AZURE_COSMOSDB_MONGO_VCORE_STRICTNESS=
-AZURE_COSMOSDB_MONGO_VCORE_ENABLE_IN_DOMAIN=
-AZURE_COSMOSDB_MONGO_VCORE_CONTENT_COLUMNS=
-AZURE_COSMOSDB_MONGO_VCORE_FILENAME_COLUMN=
-AZURE_COSMOSDB_MONGO_VCORE_TITLE_COLUMN=
-AZURE_COSMOSDB_MONGO_VCORE_URL_COLUMN=
-AZURE_COSMOSDB_MONGO_VCORE_VECTOR_COLUMNS=
-# Chat with data: Elasticsearch
-ELASTICSEARCH_ENDPOINT=
-ELASTICSEARCH_ENCODED_API_KEY=
-ELASTICSEARCH_INDEX=
-ELASTICSEARCH_QUERY_TYPE=
-ELASTICSEARCH_TOP_K=
-ELASTICSEARCH_ENABLE_IN_DOMAIN=
-ELASTICSEARCH_CONTENT_COLUMNS=
-ELASTICSEARCH_FILENAME_COLUMN=
-ELASTICSEARCH_TITLE_COLUMN=
-ELASTICSEARCH_URL_COLUMN=
-ELASTICSEARCH_VECTOR_COLUMNS=
-ELASTICSEARCH_STRICTNESS=
-ELASTICSEARCH_EMBEDDING_MODEL_ID=
-# Chat with data: Pinecone
-PINECONE_ENVIRONMENT=
-PINECONE_API_KEY=
-PINECONE_INDEX_NAME=
-PINECONE_TOP_K=
-PINECONE_STRICTNESS=
-PINECONE_ENABLE_IN_DOMAIN=
-PINECONE_CONTENT_COLUMNS=
-PINECONE_FILENAME_COLUMN=
-PINECONE_TITLE_COLUMN=
-PINECONE_URL_COLUMN=
-PINECONE_VECTOR_COLUMNS=
-# Chat with data: Azure Machine Learning MLIndex
-AZURE_MLINDEX_NAME=
-AZURE_MLINDEX_VERSION=
-AZURE_ML_PROJECT_RESOURCE_ID=
-AZURE_MLINDEX_TOP_K=
-AZURE_MLINDEX_STRICTNESS=
-AZURE_MLINDEX_ENABLE_IN_DOMAIN=
-AZURE_MLINDEX_CONTENT_COLUMNS=
-AZURE_MLINDEX_FILENAME_COLUMN=
-AZURE_MLINDEX_TITLE_COLUMN=
-AZURE_MLINDEX_URL_COLUMN=
-AZURE_MLINDEX_VECTOR_COLUMNS=
-AZURE_MLINDEX_QUERY_TYPE=
-# Chat with data: Prompt flow API
-USE_PROMPTFLOW=False
-PROMPTFLOW_ENDPOINT=
-PROMPTFLOW_API_KEY=
-PROMPTFLOW_RESPONSE_TIMEOUT=120
-PROMPTFLOW_REQUEST_FIELD_NAME=query
-PROMPTFLOW_RESPONSE_FIELD_NAME=reply
-PROMPTFLOW_CITATIONS_FIELD_NAME=documents
-STREAMING_AZUREFUNCTION_ENDPOINT=
-USE_AZUREFUNCTION=True
-SQL_CONNECTION=
+AZURE_SEARCH_STRICTNESS="3"
+AZURE_SEARCH_USE_SEMANTIC_SEARCH="True"
+AZURE_AI_SEARCH_ENDPOINT=
+
+# Azure SQL settings
SQLDB_CONNECTION_STRING=
SQLDB_SERVER=
SQLDB_DATABASE=
SQLDB_USERNAME=
SQLDB_PASSWORD=
-SQLDB_DRIVER=
-VITE_POWERBI_EMBED_URL=
\ No newline at end of file
+SQLDB_USER_MID=
+
+# AI Project
+AZURE_AI_PROJECT_CONN_STRING=
+USE_AI_PROJECT_CLIENT="false"
+
+# Prompts
+AZURE_CALL_TRANSCRIPT_SYSTEM_PROMPT="You are an assistant who supports wealth advisors in preparing for client meetings. \n You have access to the client’s past meeting call transcripts. \n When answering questions, especially summary requests, provide a detailed and structured response that includes key topics, concerns, decisions, and trends. \n If no data is available, state 'No relevant data found for previous meetings."
+AZURE_OPENAI_STREAM_TEXT_SYSTEM_PROMPT="You are a helpful assistant to a Wealth Advisor. \n The currently selected client's name is '{SelectedClientName}', and any case-insensitive or partial mention should be understood as referring to this client.\n If no name is provided, assume the question is about '{SelectedClientName}'.\n If the query references a different client or includes comparative terms like 'compare' or 'other client', please respond with: 'Please only ask questions about the selected client or select another client.'\n Otherwise, provide thorough answers using only data from SQL or call transcripts. \n If no data is found, please respond with 'No data found for that client.' Remove any client identifiers from the final response."
+AZURE_SQL_SYSTEM_PROMPT="Generate a valid T-SQL query to find {query} for tables and columns provided below:\n 1. Table: Clients\n Columns: ClientId, Client, Email, Occupation, MaritalStatus, Dependents\n 2. Table: InvestmentGoals\n Columns: ClientId, InvestmentGoal\n 3. Table: Assets\n Columns: ClientId, AssetDate, Investment, ROI, Revenue, AssetType\n 4. Table: ClientSummaries\n Columns: ClientId, ClientSummary\n 5. Table: InvestmentGoalsDetails\n Columns: ClientId, InvestmentGoal, TargetAmount, Contribution\n 6. Table: Retirement\n Columns: ClientId, StatusDate, RetirementGoalProgress, EducationGoalProgress\n 7. Table: ClientMeetings\n Columns: ClientId, ConversationId, Title, StartTime, EndTime, Advisor, ClientEmail\n Always use the Investment column from the Assets table as the value.\n Assets table has snapshots of values by date. Do not add numbers across different dates for total values.\n Do not use client name in filters.\n Do not include assets values unless asked for.\n ALWAYS use ClientId = {clientid} in the query filter.\n ALWAYS select Client Name (Column: Client) in the query.\n Query filters are IMPORTANT. Add filters like AssetType, AssetDate, etc. if needed.\n Only return the generated SQL query. Do not return anything else."
+
+# Misc
+APPINSIGHTS_INSTRUMENTATIONKEY=
+AUTH_ENABLED="false"
+USE_INTERNAL_STREAM="True"
\ No newline at end of file
diff --git a/src/App/app.py b/src/App/app.py
index 4c9357573..b1559eb25 100644
--- a/src/App/app.py
+++ b/src/App/app.py
@@ -6,8 +6,6 @@
import uuid
from types import SimpleNamespace
-import httpx
-import requests
from azure.identity import DefaultAzureCredential, get_bearer_token_provider
from dotenv import load_dotenv
@@ -26,12 +24,9 @@
from backend.auth.auth_utils import get_authenticated_user_details, get_tenantid
from backend.history.cosmosdbservice import CosmosConversationClient
from backend.utils import (
- convert_to_pf_format,
- format_as_ndjson,
- format_pf_non_streaming_response,
format_stream_response,
generateFilterString,
- parse_multi_columns,
+ parse_multi_columns
)
from db import get_connection
from db import dict_cursor
@@ -123,9 +118,6 @@ async def assets(path):
# On Your Data Settings
DATASOURCE_TYPE = os.environ.get("DATASOURCE_TYPE", "AzureCognitiveSearch")
-SEARCH_TOP_K = os.environ.get("SEARCH_TOP_K", 5)
-SEARCH_STRICTNESS = os.environ.get("SEARCH_STRICTNESS", 3)
-SEARCH_ENABLE_IN_DOMAIN = os.environ.get("SEARCH_ENABLE_IN_DOMAIN", "true")
# ACS Integration Settings
AZURE_SEARCH_SERVICE = os.environ.get("AZURE_SEARCH_SERVICE")
@@ -137,9 +129,9 @@ async def assets(path):
AZURE_SEARCH_SEMANTIC_SEARCH_CONFIG = os.environ.get(
"AZURE_SEARCH_SEMANTIC_SEARCH_CONFIG", "default"
)
-AZURE_SEARCH_TOP_K = os.environ.get("AZURE_SEARCH_TOP_K", SEARCH_TOP_K)
+AZURE_SEARCH_TOP_K = os.environ.get("AZURE_SEARCH_TOP_K", 5)
AZURE_SEARCH_ENABLE_IN_DOMAIN = os.environ.get(
- "AZURE_SEARCH_ENABLE_IN_DOMAIN", SEARCH_ENABLE_IN_DOMAIN
+ "AZURE_SEARCH_ENABLE_IN_DOMAIN", "true"
)
AZURE_SEARCH_CONTENT_COLUMNS = os.environ.get("AZURE_SEARCH_CONTENT_COLUMNS")
AZURE_SEARCH_FILENAME_COLUMN = os.environ.get("AZURE_SEARCH_FILENAME_COLUMN")
@@ -150,7 +142,7 @@ async def assets(path):
AZURE_SEARCH_PERMITTED_GROUPS_COLUMN = os.environ.get(
"AZURE_SEARCH_PERMITTED_GROUPS_COLUMN"
)
-AZURE_SEARCH_STRICTNESS = os.environ.get("AZURE_SEARCH_STRICTNESS", SEARCH_STRICTNESS)
+AZURE_SEARCH_STRICTNESS = os.environ.get("AZURE_SEARCH_STRICTNESS", 3)
# AOAI Integration Settings
AZURE_OPENAI_RESOURCE = os.environ.get("AZURE_OPENAI_RESOURCE")
@@ -170,49 +162,10 @@ async def assets(path):
MINIMUM_SUPPORTED_AZURE_OPENAI_PREVIEW_API_VERSION,
)
AZURE_OPENAI_STREAM = os.environ.get("AZURE_OPENAI_STREAM", "true")
-AZURE_OPENAI_MODEL_NAME = os.environ.get(
- "AZURE_OPENAI_MODEL_NAME", "gpt-35-turbo-16k"
-) # Name of the model, e.g. 'gpt-35-turbo-16k' or 'gpt-4'
AZURE_OPENAI_EMBEDDING_ENDPOINT = os.environ.get("AZURE_OPENAI_EMBEDDING_ENDPOINT")
AZURE_OPENAI_EMBEDDING_KEY = os.environ.get("AZURE_OPENAI_EMBEDDING_KEY")
AZURE_OPENAI_EMBEDDING_NAME = os.environ.get("AZURE_OPENAI_EMBEDDING_NAME", "")
-# CosmosDB Mongo vcore vector db Settings
-AZURE_COSMOSDB_MONGO_VCORE_CONNECTION_STRING = os.environ.get(
- "AZURE_COSMOSDB_MONGO_VCORE_CONNECTION_STRING"
-) # This has to be secure string
-AZURE_COSMOSDB_MONGO_VCORE_DATABASE = os.environ.get(
- "AZURE_COSMOSDB_MONGO_VCORE_DATABASE"
-)
-AZURE_COSMOSDB_MONGO_VCORE_CONTAINER = os.environ.get(
- "AZURE_COSMOSDB_MONGO_VCORE_CONTAINER"
-)
-AZURE_COSMOSDB_MONGO_VCORE_INDEX = os.environ.get("AZURE_COSMOSDB_MONGO_VCORE_INDEX")
-AZURE_COSMOSDB_MONGO_VCORE_TOP_K = os.environ.get(
- "AZURE_COSMOSDB_MONGO_VCORE_TOP_K", AZURE_SEARCH_TOP_K
-)
-AZURE_COSMOSDB_MONGO_VCORE_STRICTNESS = os.environ.get(
- "AZURE_COSMOSDB_MONGO_VCORE_STRICTNESS", AZURE_SEARCH_STRICTNESS
-)
-AZURE_COSMOSDB_MONGO_VCORE_ENABLE_IN_DOMAIN = os.environ.get(
- "AZURE_COSMOSDB_MONGO_VCORE_ENABLE_IN_DOMAIN", AZURE_SEARCH_ENABLE_IN_DOMAIN
-)
-AZURE_COSMOSDB_MONGO_VCORE_CONTENT_COLUMNS = os.environ.get(
- "AZURE_COSMOSDB_MONGO_VCORE_CONTENT_COLUMNS", ""
-)
-AZURE_COSMOSDB_MONGO_VCORE_FILENAME_COLUMN = os.environ.get(
- "AZURE_COSMOSDB_MONGO_VCORE_FILENAME_COLUMN"
-)
-AZURE_COSMOSDB_MONGO_VCORE_TITLE_COLUMN = os.environ.get(
- "AZURE_COSMOSDB_MONGO_VCORE_TITLE_COLUMN"
-)
-AZURE_COSMOSDB_MONGO_VCORE_URL_COLUMN = os.environ.get(
- "AZURE_COSMOSDB_MONGO_VCORE_URL_COLUMN"
-)
-AZURE_COSMOSDB_MONGO_VCORE_VECTOR_COLUMNS = os.environ.get(
- "AZURE_COSMOSDB_MONGO_VCORE_VECTOR_COLUMNS"
-)
-
SHOULD_STREAM = True if AZURE_OPENAI_STREAM.lower() == "true" else False
# Chat History CosmosDB Integration Settings
@@ -225,78 +178,7 @@ async def assets(path):
AZURE_COSMOSDB_ENABLE_FEEDBACK = (
os.environ.get("AZURE_COSMOSDB_ENABLE_FEEDBACK", "false").lower() == "true"
)
-
-# Elasticsearch Integration Settings
-ELASTICSEARCH_ENDPOINT = os.environ.get("ELASTICSEARCH_ENDPOINT")
-ELASTICSEARCH_ENCODED_API_KEY = os.environ.get("ELASTICSEARCH_ENCODED_API_KEY")
-ELASTICSEARCH_INDEX = os.environ.get("ELASTICSEARCH_INDEX")
-ELASTICSEARCH_QUERY_TYPE = os.environ.get("ELASTICSEARCH_QUERY_TYPE", "simple")
-ELASTICSEARCH_TOP_K = os.environ.get("ELASTICSEARCH_TOP_K", SEARCH_TOP_K)
-ELASTICSEARCH_ENABLE_IN_DOMAIN = os.environ.get(
- "ELASTICSEARCH_ENABLE_IN_DOMAIN", SEARCH_ENABLE_IN_DOMAIN
-)
-ELASTICSEARCH_CONTENT_COLUMNS = os.environ.get("ELASTICSEARCH_CONTENT_COLUMNS")
-ELASTICSEARCH_FILENAME_COLUMN = os.environ.get("ELASTICSEARCH_FILENAME_COLUMN")
-ELASTICSEARCH_TITLE_COLUMN = os.environ.get("ELASTICSEARCH_TITLE_COLUMN")
-ELASTICSEARCH_URL_COLUMN = os.environ.get("ELASTICSEARCH_URL_COLUMN")
-ELASTICSEARCH_VECTOR_COLUMNS = os.environ.get("ELASTICSEARCH_VECTOR_COLUMNS")
-ELASTICSEARCH_STRICTNESS = os.environ.get("ELASTICSEARCH_STRICTNESS", SEARCH_STRICTNESS)
-ELASTICSEARCH_EMBEDDING_MODEL_ID = os.environ.get("ELASTICSEARCH_EMBEDDING_MODEL_ID")
-
-# Pinecone Integration Settings
-PINECONE_ENVIRONMENT = os.environ.get("PINECONE_ENVIRONMENT")
-PINECONE_API_KEY = os.environ.get("PINECONE_API_KEY")
-PINECONE_INDEX_NAME = os.environ.get("PINECONE_INDEX_NAME")
-PINECONE_TOP_K = os.environ.get("PINECONE_TOP_K", SEARCH_TOP_K)
-PINECONE_STRICTNESS = os.environ.get("PINECONE_STRICTNESS", SEARCH_STRICTNESS)
-PINECONE_ENABLE_IN_DOMAIN = os.environ.get(
- "PINECONE_ENABLE_IN_DOMAIN", SEARCH_ENABLE_IN_DOMAIN
-)
-PINECONE_CONTENT_COLUMNS = os.environ.get("PINECONE_CONTENT_COLUMNS", "")
-PINECONE_FILENAME_COLUMN = os.environ.get("PINECONE_FILENAME_COLUMN")
-PINECONE_TITLE_COLUMN = os.environ.get("PINECONE_TITLE_COLUMN")
-PINECONE_URL_COLUMN = os.environ.get("PINECONE_URL_COLUMN")
-PINECONE_VECTOR_COLUMNS = os.environ.get("PINECONE_VECTOR_COLUMNS")
-
-# Azure AI MLIndex Integration Settings - for use with MLIndex data assets created in Azure AI Studio
-AZURE_MLINDEX_NAME = os.environ.get("AZURE_MLINDEX_NAME")
-AZURE_MLINDEX_VERSION = os.environ.get("AZURE_MLINDEX_VERSION")
-AZURE_ML_PROJECT_RESOURCE_ID = os.environ.get(
- "AZURE_ML_PROJECT_RESOURCE_ID"
-) # /subscriptions/{sub ID}/resourceGroups/{rg name}/providers/Microsoft.MachineLearningServices/workspaces/{AML project name}
-AZURE_MLINDEX_TOP_K = os.environ.get("AZURE_MLINDEX_TOP_K", SEARCH_TOP_K)
-AZURE_MLINDEX_STRICTNESS = os.environ.get("AZURE_MLINDEX_STRICTNESS", SEARCH_STRICTNESS)
-AZURE_MLINDEX_ENABLE_IN_DOMAIN = os.environ.get(
- "AZURE_MLINDEX_ENABLE_IN_DOMAIN", SEARCH_ENABLE_IN_DOMAIN
-)
-AZURE_MLINDEX_CONTENT_COLUMNS = os.environ.get("AZURE_MLINDEX_CONTENT_COLUMNS", "")
-AZURE_MLINDEX_FILENAME_COLUMN = os.environ.get("AZURE_MLINDEX_FILENAME_COLUMN")
-AZURE_MLINDEX_TITLE_COLUMN = os.environ.get("AZURE_MLINDEX_TITLE_COLUMN")
-AZURE_MLINDEX_URL_COLUMN = os.environ.get("AZURE_MLINDEX_URL_COLUMN")
-AZURE_MLINDEX_VECTOR_COLUMNS = os.environ.get("AZURE_MLINDEX_VECTOR_COLUMNS")
-AZURE_MLINDEX_QUERY_TYPE = os.environ.get("AZURE_MLINDEX_QUERY_TYPE")
-# Promptflow Integration Settings
-USE_PROMPTFLOW = os.environ.get("USE_PROMPTFLOW", "false").lower() == "true"
-PROMPTFLOW_ENDPOINT = os.environ.get("PROMPTFLOW_ENDPOINT")
-PROMPTFLOW_API_KEY = os.environ.get("PROMPTFLOW_API_KEY")
-PROMPTFLOW_RESPONSE_TIMEOUT = os.environ.get("PROMPTFLOW_RESPONSE_TIMEOUT", 30.0)
-# default request and response field names are input -> 'query' and output -> 'reply'
-PROMPTFLOW_REQUEST_FIELD_NAME = os.environ.get("PROMPTFLOW_REQUEST_FIELD_NAME", "query")
-PROMPTFLOW_RESPONSE_FIELD_NAME = os.environ.get(
- "PROMPTFLOW_RESPONSE_FIELD_NAME", "reply"
-)
-PROMPTFLOW_CITATIONS_FIELD_NAME = os.environ.get(
- "PROMPTFLOW_CITATIONS_FIELD_NAME", "documents"
-)
USE_INTERNAL_STREAM = os.environ.get("USE_INTERNAL_STREAM", "false").lower() == "true"
-FUNCTIONAPP_RESPONSE_FIELD_NAME = os.environ.get(
- "FUNCTIONAPP_RESPONSE_FIELD_NAME", "reply"
-)
-FUNCTIONAPP_CITATIONS_FIELD_NAME = os.environ.get(
- "FUNCTIONAPP_CITATIONS_FIELD_NAME", "documents"
-)
-AZUREFUNCTION_ENDPOINT = os.environ.get("AZUREFUNCTION_ENDPOINT")
-STREAMING_AZUREFUNCTION_ENDPOINT = os.environ.get("STREAMING_AZUREFUNCTION_ENDPOINT")
# Frontend Settings via Environment Variables
AUTH_ENABLED = os.environ.get("AUTH_ENABLED", "true").lower() == "true"
CHAT_HISTORY_ENABLED = (
@@ -321,7 +203,7 @@ async def assets(path):
# Enable Microsoft Defender for Cloud Integration
MS_DEFENDER_ENABLED = os.environ.get("MS_DEFENDER_ENABLED", "false").lower() == "true"
-VITE_POWERBI_EMBED_URL = os.environ.get("VITE_POWERBI_EMBED_URL")
+# VITE_POWERBI_EMBED_URL = os.environ.get("VITE_POWERBI_EMBED_URL")
def should_use_data():
@@ -331,31 +213,6 @@ def should_use_data():
logging.debug("Using Azure Cognitive Search")
return True
- if (
- AZURE_COSMOSDB_MONGO_VCORE_DATABASE
- and AZURE_COSMOSDB_MONGO_VCORE_CONTAINER
- and AZURE_COSMOSDB_MONGO_VCORE_INDEX
- and AZURE_COSMOSDB_MONGO_VCORE_CONNECTION_STRING
- ):
- DATASOURCE_TYPE = "AzureCosmosDB"
- logging.debug("Using Azure CosmosDB Mongo vcore")
- return True
-
- if ELASTICSEARCH_ENDPOINT and ELASTICSEARCH_ENCODED_API_KEY and ELASTICSEARCH_INDEX:
- DATASOURCE_TYPE = "Elasticsearch"
- logging.debug("Using Elasticsearch")
- return True
-
- if PINECONE_ENVIRONMENT and PINECONE_API_KEY and PINECONE_INDEX_NAME:
- DATASOURCE_TYPE = "Pinecone"
- logging.debug("Using Pinecone")
- return True
-
- if AZURE_MLINDEX_NAME and AZURE_MLINDEX_VERSION and AZURE_ML_PROJECT_RESOURCE_ID:
- DATASOURCE_TYPE = "AzureMLIndex"
- logging.debug("Using Azure ML Index")
- return True
-
return False
@@ -544,7 +401,7 @@ def get_configured_data_source():
True if AZURE_SEARCH_ENABLE_IN_DOMAIN.lower() == "true" else False
),
"top_n_documents": (
- int(AZURE_SEARCH_TOP_K) if AZURE_SEARCH_TOP_K else int(SEARCH_TOP_K)
+ int(AZURE_SEARCH_TOP_K)
),
"query_type": query_type,
"semantic_configuration": (
@@ -556,224 +413,7 @@ def get_configured_data_source():
"filter": filter,
"strictness": (
int(AZURE_SEARCH_STRICTNESS)
- if AZURE_SEARCH_STRICTNESS
- else int(SEARCH_STRICTNESS)
- ),
- },
- }
- elif DATASOURCE_TYPE == "AzureCosmosDB":
- query_type = "vector"
- track_event_if_configured("datasource_selected", {"type": "AzureCosmosDB"})
-
- data_source = {
- "type": "azure_cosmos_db",
- "parameters": {
- "authentication": {
- "type": "connection_string",
- "connection_string": AZURE_COSMOSDB_MONGO_VCORE_CONNECTION_STRING,
- },
- "index_name": AZURE_COSMOSDB_MONGO_VCORE_INDEX,
- "database_name": AZURE_COSMOSDB_MONGO_VCORE_DATABASE,
- "container_name": AZURE_COSMOSDB_MONGO_VCORE_CONTAINER,
- "fields_mapping": {
- "content_fields": (
- parse_multi_columns(AZURE_COSMOSDB_MONGO_VCORE_CONTENT_COLUMNS)
- if AZURE_COSMOSDB_MONGO_VCORE_CONTENT_COLUMNS
- else []
- ),
- "title_field": (
- AZURE_COSMOSDB_MONGO_VCORE_TITLE_COLUMN
- if AZURE_COSMOSDB_MONGO_VCORE_TITLE_COLUMN
- else None
- ),
- "url_field": (
- AZURE_COSMOSDB_MONGO_VCORE_URL_COLUMN
- if AZURE_COSMOSDB_MONGO_VCORE_URL_COLUMN
- else None
- ),
- "filepath_field": (
- AZURE_COSMOSDB_MONGO_VCORE_FILENAME_COLUMN
- if AZURE_COSMOSDB_MONGO_VCORE_FILENAME_COLUMN
- else None
- ),
- "vector_fields": (
- parse_multi_columns(AZURE_COSMOSDB_MONGO_VCORE_VECTOR_COLUMNS)
- if AZURE_COSMOSDB_MONGO_VCORE_VECTOR_COLUMNS
- else []
- ),
- },
- "in_scope": (
- True
- if AZURE_COSMOSDB_MONGO_VCORE_ENABLE_IN_DOMAIN.lower() == "true"
- else False
),
- "top_n_documents": (
- int(AZURE_COSMOSDB_MONGO_VCORE_TOP_K)
- if AZURE_COSMOSDB_MONGO_VCORE_TOP_K
- else int(SEARCH_TOP_K)
- ),
- "strictness": (
- int(AZURE_COSMOSDB_MONGO_VCORE_STRICTNESS)
- if AZURE_COSMOSDB_MONGO_VCORE_STRICTNESS
- else int(SEARCH_STRICTNESS)
- ),
- "query_type": query_type,
- "role_information": AZURE_OPENAI_SYSTEM_MESSAGE,
- },
- }
- elif DATASOURCE_TYPE == "Elasticsearch":
- track_event_if_configured("datasource_selected", {"type": "Elasticsearch"})
- if ELASTICSEARCH_QUERY_TYPE:
- query_type = ELASTICSEARCH_QUERY_TYPE
- track_event_if_configured("query_type_determined", {"query_type": query_type})
-
- data_source = {
- "type": "elasticsearch",
- "parameters": {
- "endpoint": ELASTICSEARCH_ENDPOINT,
- "authentication": {
- "type": "encoded_api_key",
- "encoded_api_key": ELASTICSEARCH_ENCODED_API_KEY,
- },
- "index_name": ELASTICSEARCH_INDEX,
- "fields_mapping": {
- "content_fields": (
- parse_multi_columns(ELASTICSEARCH_CONTENT_COLUMNS)
- if ELASTICSEARCH_CONTENT_COLUMNS
- else []
- ),
- "title_field": (
- ELASTICSEARCH_TITLE_COLUMN
- if ELASTICSEARCH_TITLE_COLUMN
- else None
- ),
- "url_field": (
- ELASTICSEARCH_URL_COLUMN if ELASTICSEARCH_URL_COLUMN else None
- ),
- "filepath_field": (
- ELASTICSEARCH_FILENAME_COLUMN
- if ELASTICSEARCH_FILENAME_COLUMN
- else None
- ),
- "vector_fields": (
- parse_multi_columns(ELASTICSEARCH_VECTOR_COLUMNS)
- if ELASTICSEARCH_VECTOR_COLUMNS
- else []
- ),
- },
- "in_scope": (
- True if ELASTICSEARCH_ENABLE_IN_DOMAIN.lower() == "true" else False
- ),
- "top_n_documents": (
- int(ELASTICSEARCH_TOP_K)
- if ELASTICSEARCH_TOP_K
- else int(SEARCH_TOP_K)
- ),
- "query_type": query_type,
- "role_information": AZURE_OPENAI_SYSTEM_MESSAGE,
- "strictness": (
- int(ELASTICSEARCH_STRICTNESS)
- if ELASTICSEARCH_STRICTNESS
- else int(SEARCH_STRICTNESS)
- ),
- },
- }
- elif DATASOURCE_TYPE == "AzureMLIndex":
- track_event_if_configured("datasource_selected", {"type": "AzureMLIndex"})
- if AZURE_MLINDEX_QUERY_TYPE:
- query_type = AZURE_MLINDEX_QUERY_TYPE
- track_event_if_configured("query_type_determined", {"query_type": query_type})
-
- data_source = {
- "type": "azure_ml_index",
- "parameters": {
- "name": AZURE_MLINDEX_NAME,
- "version": AZURE_MLINDEX_VERSION,
- "project_resource_id": AZURE_ML_PROJECT_RESOURCE_ID,
- "fieldsMapping": {
- "content_fields": (
- parse_multi_columns(AZURE_MLINDEX_CONTENT_COLUMNS)
- if AZURE_MLINDEX_CONTENT_COLUMNS
- else []
- ),
- "title_field": (
- AZURE_MLINDEX_TITLE_COLUMN
- if AZURE_MLINDEX_TITLE_COLUMN
- else None
- ),
- "url_field": (
- AZURE_MLINDEX_URL_COLUMN if AZURE_MLINDEX_URL_COLUMN else None
- ),
- "filepath_field": (
- AZURE_MLINDEX_FILENAME_COLUMN
- if AZURE_MLINDEX_FILENAME_COLUMN
- else None
- ),
- "vector_fields": (
- parse_multi_columns(AZURE_MLINDEX_VECTOR_COLUMNS)
- if AZURE_MLINDEX_VECTOR_COLUMNS
- else []
- ),
- },
- "in_scope": (
- True if AZURE_MLINDEX_ENABLE_IN_DOMAIN.lower() == "true" else False
- ),
- "top_n_documents": (
- int(AZURE_MLINDEX_TOP_K)
- if AZURE_MLINDEX_TOP_K
- else int(SEARCH_TOP_K)
- ),
- "query_type": query_type,
- "role_information": AZURE_OPENAI_SYSTEM_MESSAGE,
- "strictness": (
- int(AZURE_MLINDEX_STRICTNESS)
- if AZURE_MLINDEX_STRICTNESS
- else int(SEARCH_STRICTNESS)
- ),
- },
- }
- elif DATASOURCE_TYPE == "Pinecone":
- query_type = "vector"
- track_event_if_configured("datasource_selected", {"type": "Pinecone"})
-
- data_source = {
- "type": "pinecone",
- "parameters": {
- "environment": PINECONE_ENVIRONMENT,
- "authentication": {"type": "api_key", "key": PINECONE_API_KEY},
- "index_name": PINECONE_INDEX_NAME,
- "fields_mapping": {
- "content_fields": (
- parse_multi_columns(PINECONE_CONTENT_COLUMNS)
- if PINECONE_CONTENT_COLUMNS
- else []
- ),
- "title_field": (
- PINECONE_TITLE_COLUMN if PINECONE_TITLE_COLUMN else None
- ),
- "url_field": PINECONE_URL_COLUMN if PINECONE_URL_COLUMN else None,
- "filepath_field": (
- PINECONE_FILENAME_COLUMN if PINECONE_FILENAME_COLUMN else None
- ),
- "vector_fields": (
- parse_multi_columns(PINECONE_VECTOR_COLUMNS)
- if PINECONE_VECTOR_COLUMNS
- else []
- ),
- },
- "in_scope": (
- True if PINECONE_ENABLE_IN_DOMAIN.lower() == "true" else False
- ),
- "top_n_documents": (
- int(PINECONE_TOP_K) if PINECONE_TOP_K else int(SEARCH_TOP_K)
- ),
- "strictness": (
- int(PINECONE_STRICTNESS)
- if PINECONE_STRICTNESS
- else int(SEARCH_STRICTNESS)
- ),
- "query_type": query_type,
- "role_information": AZURE_OPENAI_SYSTEM_MESSAGE,
},
}
else:
@@ -798,11 +438,6 @@ def get_configured_data_source():
"key": AZURE_OPENAI_EMBEDDING_KEY,
},
}
- elif DATASOURCE_TYPE == "Elasticsearch" and ELASTICSEARCH_EMBEDDING_MODEL_ID:
- embeddingDependency = {
- "type": "model_id",
- "model_id": ELASTICSEARCH_EMBEDDING_MODEL_ID,
- }
else:
track_event_if_configured("embedding_dependency_missing", {
"datasource_type": DATASOURCE_TYPE,
@@ -909,45 +544,6 @@ def prepare_model_args(request_body, request_headers):
return model_args
-async def promptflow_request(request):
- track_event_if_configured("promptflow_request_start", {})
- try:
- headers = {
- "Content-Type": "application/json",
- "Authorization": f"Bearer {PROMPTFLOW_API_KEY}",
- }
- # Adding timeout for scenarios where response takes longer to come back
- logging.debug(f"Setting timeout to {PROMPTFLOW_RESPONSE_TIMEOUT}")
- async with httpx.AsyncClient(
- timeout=float(PROMPTFLOW_RESPONSE_TIMEOUT)
- ) as client:
- pf_formatted_obj = convert_to_pf_format(
- request, PROMPTFLOW_REQUEST_FIELD_NAME, PROMPTFLOW_RESPONSE_FIELD_NAME
- )
- # NOTE: This only support question and chat_history parameters
- # If you need to add more parameters, you need to modify the request body
- response = await client.post(
- PROMPTFLOW_ENDPOINT,
- json={
- f"{PROMPTFLOW_REQUEST_FIELD_NAME}": pf_formatted_obj[-1]["inputs"][
- PROMPTFLOW_REQUEST_FIELD_NAME
- ],
- "chat_history": pf_formatted_obj[:-1],
- },
- headers=headers,
- )
- resp = response.json()
- resp["id"] = request["messages"][-1]["id"]
- track_event_if_configured("promptflow_request_success", {})
- return resp
- except Exception as e:
- span = trace.get_current_span()
- if span is not None:
- span.record_exception(e)
- span.set_status(Status(StatusCode.ERROR, str(e)))
- logging.error(f"An error occurred while making promptflow_request: {e}")
-
-
async def send_chat_request(request_body, request_headers):
track_event_if_configured("send_chat_request_start", {})
filtered_messages = []
@@ -980,90 +576,10 @@ async def send_chat_request(request_body, request_headers):
return response, apim_request_id
-async def complete_chat_request(request_body, request_headers):
- track_event_if_configured("complete_chat_request_start", {})
- if USE_PROMPTFLOW and PROMPTFLOW_ENDPOINT and PROMPTFLOW_API_KEY:
- response = await promptflow_request(request_body)
- history_metadata = request_body.get("history_metadata", {})
- return format_pf_non_streaming_response(
- response,
- history_metadata,
- PROMPTFLOW_RESPONSE_FIELD_NAME,
- PROMPTFLOW_CITATIONS_FIELD_NAME,
- )
- elif USE_INTERNAL_STREAM:
- track_event_if_configured("internal_stream_selected", {})
- request_body = await request.get_json()
- client_id = request_body.get("client_id")
- print(request_body)
-
- if client_id is None:
- return jsonify({"error": "No client ID provided"}), 400
- # client_id = '10005'
- print("Client ID in complete_chat_request: ", client_id)
- # answer = "Sample response from Azure Function"
- # Construct the URL of your Azure Function endpoint
- # function_url = STREAMING_AZUREFUNCTION_ENDPOINT
- # request_headers = {
- # "Content-Type": "application/json",
- # # 'Authorization': 'Bearer YOUR_TOKEN_HERE' # if applicable
- # }
- # print(request_body.get("messages")[-1].get("content"))
- # print(request_body)
-
- query = request_body.get("messages")[-1].get("content")
-
- print("Selected ClientId:", client_id)
- # print("Selected ClientName:", selected_client_name)
-
- # endpoint = STREAMING_AZUREFUNCTION_ENDPOINT + '?query=' + query + ' - for Client ' + selected_client_name + ':::' + selected_client_id
- endpoint = (
- STREAMING_AZUREFUNCTION_ENDPOINT + "?query=" + query + ":::" + client_id
- )
-
- print("Endpoint: ", endpoint)
- query_response = ""
- try:
- with requests.get(endpoint, stream=True) as r:
- for line in r.iter_lines(chunk_size=10):
- # query_response += line.decode('utf-8')
- query_response = query_response + "\n" + line.decode("utf-8")
- # print(line.decode('utf-8'))
- except Exception as e:
- print(format_as_ndjson({"error" + str(e)}))
-
- # print("query_response: " + query_response)
-
- history_metadata = request_body.get("history_metadata", {})
- response = {
- "id": "",
- "model": "",
- "created": 0,
- "object": "",
- "choices": [{"messages": []}],
- "apim-request-id": "",
- "history_metadata": history_metadata,
- }
-
- response["id"] = str(uuid.uuid4())
- response["model"] = AZURE_OPENAI_MODEL_NAME
- response["created"] = int(time.time())
- response["object"] = "extensions.chat.completion.chunk"
- # response["apim-request-id"] = headers.get("apim-request-id")
- response["choices"][0]["messages"].append(
- {"role": "assistant", "content": query_response}
- )
-
- track_event_if_configured("complete_chat_request_success", {"client_id": client_id})
-
- return response
-
-
async def stream_chat_request(request_body, request_headers):
track_event_if_configured("stream_chat_request_start", {})
if USE_INTERNAL_STREAM:
history_metadata = request_body.get("history_metadata", {})
- # function_url = STREAMING_AZUREFUNCTION_ENDPOINT
apim_request_id = ""
client_id = request_body.get("client_id")
@@ -1085,7 +601,7 @@ async def generate():
completionChunk = {
"id": chunk_id,
- "model": AZURE_OPENAI_MODEL_NAME,
+ "model": AZURE_OPENAI_MODEL,
"created": created_time,
"object": "extensions.chat.completion.chunk",
"choices": [
@@ -1131,7 +647,6 @@ async def generate():
async def conversation_internal(request_body, request_headers):
track_event_if_configured("conversation_internal_start", {
"streaming": SHOULD_STREAM,
- "promptflow": USE_PROMPTFLOW,
"internal_stream": USE_INTERNAL_STREAM
})
try:
@@ -1141,10 +656,6 @@ async def conversation_internal(request_body, request_headers):
# response.timeout = None
# response.mimetype = "application/json-lines"
# return response
- else:
- result = await complete_chat_request(request_body, request_headers)
- track_event_if_configured("conversation_internal_success", {})
- return jsonify(result)
except Exception as ex:
span = trace.get_current_span()
diff --git a/src/App/backend/chat_logic_handler.py b/src/App/backend/chat_logic_handler.py
index f848a011f..8d04a2384 100644
--- a/src/App/backend/chat_logic_handler.py
+++ b/src/App/backend/chat_logic_handler.py
@@ -17,10 +17,10 @@
# --------------------------
endpoint = os.environ.get("AZURE_OPENAI_ENDPOINT")
api_key = os.environ.get("AZURE_OPENAI_KEY")
-api_version = os.environ.get("OPENAI_API_VERSION")
+api_version = os.environ.get("AZURE_OPENAI_PREVIEW_API_VERSION")
deployment = os.environ.get("AZURE_OPENAI_MODEL")
search_endpoint = os.environ.get("AZURE_AI_SEARCH_ENDPOINT")
-search_key = os.environ.get("AZURE_AI_SEARCH_API_KEY")
+search_key = os.environ.get("AZURE_SEARCH_KEY")
project_connection_string = os.environ.get("AZURE_AI_PROJECT_CONN_STRING")
use_ai_project_client = os.environ.get("USE_AI_PROJECT_CLIENT", "false").lower() == "true"
diff --git a/src/App/tests/backend/test_utils.py b/src/App/tests/backend/test_utils.py
index 1585cd7fb..cf6c293e3 100644
--- a/src/App/tests/backend/test_utils.py
+++ b/src/App/tests/backend/test_utils.py
@@ -37,7 +37,7 @@ def test_parse_multi_columns(input_str, expected):
assert parse_multi_columns(input_str) == expected
-@patch("app.requests.get")
+@patch("backend.utils.requests.get")
def test_fetch_user_groups(mock_get):
mock_response = MagicMock()
mock_response.status_code = 200
diff --git a/src/App/tests/test_app.py b/src/App/tests/test_app.py
index bf82ccf3a..ff0ef42c2 100644
--- a/src/App/tests/test_app.py
+++ b/src/App/tests/test_app.py
@@ -1218,15 +1218,12 @@ async def test_conversation_route(client):
with patch("app.stream_chat_request", new_callable=AsyncMock) as mock_stream:
mock_stream.return_value = ["chunk1", "chunk2"]
- with patch(
- "app.complete_chat_request", new_callable=AsyncMock
- ) as mock_complete:
- mock_complete.return_value = {"response": "test response"}
- response = await client.post(
- "/conversation", json=request_body, headers=request_headers
- )
- assert response.status_code == 200
+ response = await client.post(
+ "/conversation", json=request_body, headers=request_headers
+ )
+
+ assert response.status_code == 200
@pytest.mark.asyncio
From 7ae9718f9b5d92d1dcfbcb5288cb03fd20c65cdb Mon Sep 17 00:00:00 2001
From: Vamshi-Microsoft
Date: Tue, 3 Jun 2025 06:55:11 +0000
Subject: [PATCH 11/19] EXP environment changes for Existing Fabric workspace
---
docs/FabricDeployment.md | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/docs/FabricDeployment.md b/docs/FabricDeployment.md
index 5e95f4b5f..232a2fd89 100644
--- a/docs/FabricDeployment.md
+++ b/docs/FabricDeployment.md
@@ -1,5 +1,8 @@
## Fabric Deployment
## Step 1: Create Fabric workspace
+
+ℹ️ Note: If you already have an existing Microsoft Fabric Workspace, you can **skip this step** and proceed to Step 2. To retrieve an existing Workspace ID, check **Point 5 below**.
+
1. Navigate to ([Fabric Workspace](https://app.fabric.microsoft.com/))
2. Click on Workspaces from left Navigation
3. Click on + New Workspace
@@ -19,7 +22,7 @@
- ```cd ./Build-your-own-copilot-Solution-Accelerator/infra/scripts/fabric_scripts```
- ```sh ./run_fabric_items_scripts.sh keyvault_param workspaceid_param solutionprefix_param```
1. keyvault_param - the name of the keyvault that was created in Step 1
- 2. workspaceid_param - the workspaceid created in Step 2
+ 2. workspaceid_param - the ID of an existing workspace, or the workspace ID created in Step 2
3. solutionprefix_param - prefix used to append to lakehouse upon creation
4. Get Fabric Lakehouse connection details:
5. Once deployment is complete, navigate to Fabric Workspace
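For illustration, a hypothetical invocation of the script documented above (the key vault name, workspace ID and prefix are placeholders, not values produced by this deployment):
```shell
cd ./Build-your-own-copilot-Solution-Accelerator/infra/scripts/fabric_scripts
# arguments: keyvault_param, workspaceid_param, solutionprefix_param
sh ./run_fabric_items_scripts.sh kv-clientadv-demo 11111111-2222-3333-4444-555555555555 demo
```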
From 17f48fa982ceaeae1698a29c0f4fb5d73887d15f Mon Sep 17 00:00:00 2001
From: Vamshi-Microsoft
Date: Tue, 3 Jun 2025 15:24:44 +0530
Subject: [PATCH 12/19] Updated Heading
---
docs/FabricDeployment.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/FabricDeployment.md b/docs/FabricDeployment.md
index 232a2fd89..4d85b2d82 100644
--- a/docs/FabricDeployment.md
+++ b/docs/FabricDeployment.md
@@ -1,5 +1,5 @@
## Fabric Deployment
-## Step 1: Create Fabric workspace
+## Step 1: Create or Use an Existing Microsoft Fabric Workspace
ℹ️ Note: If you already have an existing Microsoft Fabric Workspace, you can **skip this step** and proceed to Step 2. To retrieve an existing Workspace ID, check **Point 5 below**.
From 562533d556c7e9c5b932aae218a808e4feb6a1b3 Mon Sep 17 00:00:00 2001
From: Vamshi-Microsoft
Date: Wed, 4 Jun 2025 07:06:03 +0000
Subject: [PATCH 13/19] To reuse Log Analytics across subscriptions
---
docs/CustomizingAzdParameters.md | 2 +-
infra/deploy_ai_foundry.bicep | 3 ++-
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/docs/CustomizingAzdParameters.md b/docs/CustomizingAzdParameters.md
index fc02f6d17..d2ec2fef9 100644
--- a/docs/CustomizingAzdParameters.md
+++ b/docs/CustomizingAzdParameters.md
@@ -44,5 +44,5 @@ azd env set AZURE_ENV_EMBEDDING_MODEL_CAPACITY 80
Set the Log Analytics Workspace ID if you need to reuse an existing workspace instead of creating a new one
```shell
-azd env set AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID ''
+azd env set AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID '/subscriptions//resourceGroups//providers/Microsoft.OperationalInsights/workspaces/'
```
\ No newline at end of file
diff --git a/infra/deploy_ai_foundry.bicep b/infra/deploy_ai_foundry.bicep
index ef2e81fc7..4ba89548e 100644
--- a/infra/deploy_ai_foundry.bicep
+++ b/infra/deploy_ai_foundry.bicep
@@ -56,12 +56,13 @@ resource keyVault 'Microsoft.KeyVault/vaults@2022-07-01' existing = {
}
var useExisting = !empty(existingLogAnalyticsWorkspaceId)
+var existingLawSubscription = useExisting ? split(existingLogAnalyticsWorkspaceId, '/')[2] : ''
var existingLawResourceGroup = useExisting ? split(existingLogAnalyticsWorkspaceId, '/')[4] : ''
var existingLawName = useExisting ? split(existingLogAnalyticsWorkspaceId, '/')[8] : ''
resource existingLogAnalyticsWorkspace 'Microsoft.OperationalInsights/workspaces@2023-09-01' existing = if (useExisting) {
name: existingLawName
- scope: resourceGroup(existingLawResourceGroup)
+ scope: resourceGroup(existingLawSubscription, existingLawResourceGroup)
}
resource logAnalytics 'Microsoft.OperationalInsights/workspaces@2023-09-01' = if (!useExisting) {
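As a sanity check on the split() indices used above, here is a minimal shell sketch with a hypothetical workspace resource ID; bash arrays are zero-based like Bicep's split(), so indices 2, 4 and 8 pick out the subscription, resource group and workspace name:
```shell
# Hypothetical resource ID -- only the shape matters.
ID='/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg-shared/providers/Microsoft.OperationalInsights/workspaces/law-shared'
IFS='/' read -ra parts <<< "$ID"
echo "subscription:   ${parts[2]}"   # 00000000-0000-0000-0000-000000000000
echo "resource group: ${parts[4]}"   # rg-shared
echo "workspace name: ${parts[8]}"   # law-shared
```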
From 1a996f1f2688cf158c2e7becbfead199d2cc901c Mon Sep 17 00:00:00 2001
From: Venkateswarlu Marthula
Date: Fri, 6 Jun 2025 07:01:16 +0530
Subject: [PATCH 14/19] automate
---
.github/workflows/test_automation.yml | 111 ++++++++++++
tests/e2e-test/.gitignore | 166 ++++++++++++++++++
tests/e2e-test/README.md | 41 +++++
tests/e2e-test/base/__init__.py | 1 +
tests/e2e-test/base/base.py | 140 +++++++++++++++
tests/e2e-test/config/constants.py | 21 +++
tests/e2e-test/img.png | Bin 0 -> 85099 bytes
tests/e2e-test/img_1.png | Bin 0 -> 62274 bytes
tests/e2e-test/pages/__init__.py | 2 +
tests/e2e-test/pages/homePage.py | 83 +++++++++
tests/e2e-test/pages/loginPage.py | 43 +++++
tests/e2e-test/requirements.txt | 3 +
tests/e2e-test/sample_dotenv_file.txt | 6 +
tests/e2e-test/tests/__init__.py | 0
tests/e2e-test/tests/conftest.py | 59 +++++++
.../tests/test_poc_byoc_client_advisor.py | 141 +++++++++++++++
16 files changed, 817 insertions(+)
create mode 100644 .github/workflows/test_automation.yml
create mode 100644 tests/e2e-test/.gitignore
create mode 100644 tests/e2e-test/README.md
create mode 100644 tests/e2e-test/base/__init__.py
create mode 100644 tests/e2e-test/base/base.py
create mode 100644 tests/e2e-test/config/constants.py
create mode 100644 tests/e2e-test/img.png
create mode 100644 tests/e2e-test/img_1.png
create mode 100644 tests/e2e-test/pages/__init__.py
create mode 100644 tests/e2e-test/pages/homePage.py
create mode 100644 tests/e2e-test/pages/loginPage.py
create mode 100644 tests/e2e-test/requirements.txt
create mode 100644 tests/e2e-test/sample_dotenv_file.txt
create mode 100644 tests/e2e-test/tests/__init__.py
create mode 100644 tests/e2e-test/tests/conftest.py
create mode 100644 tests/e2e-test/tests/test_poc_byoc_client_advisor.py
diff --git a/.github/workflows/test_automation.yml b/.github/workflows/test_automation.yml
new file mode 100644
index 000000000..f49beee8d
--- /dev/null
+++ b/.github/workflows/test_automation.yml
@@ -0,0 +1,111 @@
+name: Test Automation ClientAdvisor
+
+on:
+ push:
+ branches:
+ - main
+ - dev
+ - VE-Automate
+ # paths:
+ # - 'byoc-client-advisor/**'
+ schedule:
+ - cron: '0 13 * * *' # Runs at 1 PM UTC
+ workflow_dispatch:
+
+env:
+ url: ${{ vars.CLIENT_ADVISOR_URL }}
+ accelerator_name: "Client Advisor"
+
+jobs:
+ test:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Set up Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.13'
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install -r byoc-client-advisor/requirements.txt
+
+ - name: Ensure browsers are installed
+ run: python -m playwright install --with-deps chromium
+
+ - name: Run tests(1)
+ id: test1
+ run: |
+ xvfb-run pytest --headed --html=report/report.html --self-contained-html
+ working-directory: byoc-client-advisor
+ continue-on-error: true
+
+ - name: Sleep for 30 seconds
+ if: ${{ steps.test1.outcome == 'failure' }}
+ run: sleep 30s
+ shell: bash
+
+ - name: Run tests(2)
+ if: ${{ steps.test1.outcome == 'failure' }}
+ id: test2
+ run: |
+ xvfb-run pytest --headed --html=report/report.html --self-contained-html
+ working-directory: byoc-client-advisor
+ continue-on-error: true
+
+ - name: Sleep for 60 seconds
+ if: ${{ steps.test2.outcome == 'failure' }}
+ run: sleep 60s
+ shell: bash
+
+ - name: Run tests(3)
+ if: ${{ steps.test2.outcome == 'failure' }}
+ id: test3
+ run: |
+ xvfb-run pytest --headed --html=report/report.html --self-contained-html
+ working-directory: byoc-client-advisor
+
+ - name: Upload test report
+ id: upload_report
+ uses: actions/upload-artifact@v4
+ if: ${{ !cancelled() }}
+ with:
+ name: test-report
+ path: byoc-client-advisor/report/*
+
+ - name: Send Notification
+ if: always()
+ run: |
+ RUN_URL="https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"
+ REPORT_URL=${{ steps.upload_report.outputs.artifact-url }}
+ IS_SUCCESS=${{ steps.test1.outcome == 'success' || steps.test2.outcome == 'success' || steps.test3.outcome == 'success' }}
+ # Construct the email body
+ if [ "$IS_SUCCESS" = "true" ]; then
+            EMAIL_BODY=$(cat <<EOF
+          {
+            "body": "Dear Team,<br><br>We would like to inform you that the ${{ env.accelerator_name }} Test Automation process has completed successfully.<br><br>Run URL: ${RUN_URL}<br><br>Test Report: ${REPORT_URL}<br><br>Best regards,<br>Your Automation Team",
+            "subject": "${{ env.accelerator_name }} Test Automation - Success"
+          }
+          EOF
+          )
+          else
+            EMAIL_BODY=$(cat <<EOF
+          {
+            "body": "Dear Team,<br><br>We would like to inform you that the ${{ env.accelerator_name }} Test Automation process has encountered an issue and has failed to complete successfully.<br><br>Run URL: ${RUN_URL}<br>${OUTPUT}<br><br>Test Report: ${REPORT_URL}<br><br>Please investigate the matter at your earliest convenience.<br><br>Best regards,<br>Your Automation Team",
+            "subject": "${{ env.accelerator_name }} Test Automation - Failure"
+          }
+          EOF
+          )
+          fi
+
+ # Send the notification
+ curl -X POST "${{ secrets.EMAILNOTIFICATION_LOGICAPP_URL_TA}}" \
+ -H "Content-Type: application/json" \
+ -d "$EMAIL_BODY" || echo "Failed to send notification"
diff --git a/tests/e2e-test/.gitignore b/tests/e2e-test/.gitignore
new file mode 100644
index 000000000..de16f2df0
--- /dev/null
+++ b/tests/e2e-test/.gitignore
@@ -0,0 +1,166 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+.pdm.toml
+.pdm-python
+.pdm-build/
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+microsoft/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+.idea/
+archive/
+report/
+screenshots/
diff --git a/tests/e2e-test/README.md b/tests/e2e-test/README.md
new file mode 100644
index 000000000..453eb273a
--- /dev/null
+++ b/tests/e2e-test/README.md
@@ -0,0 +1,41 @@
+# Automation Proof Of Concept for BYOc Client Advisor Accelerator
+
+
+
+Write end-to-end tests for your web apps with [Playwright](https://github.com/microsoft/playwright-python) and [pytest](https://docs.pytest.org/en/stable/).
+
+- Support for **all modern browsers** including Chromium, WebKit and Firefox.
+- Support for **headless and headed** execution.
+- **Built-in fixtures** that provide browser primitives to test functions.
+
+Prerequisites:
+- Install Visual Studio Code: download and install Visual Studio Code (VS Code).
+- Install Node.js: download and install Node.js.
+
+Create and Activate a Python Virtual Environment
+- From your project directory, open a command prompt and run: "python -m venv microsoft"
+  This creates a virtual environment directory named "microsoft" inside your current directory.
+- To activate the virtual environment, run "microsoft\Scripts\activate.bat" from the command prompt.
+
+
+Install Playwright and Pytest in the Virtual Environment
+- To install the required libraries, run "pip install -r requirements.txt"
+- To install the required browsers, run "playwright install"
+
+Run test cases
+- To run the test cases from your 'tests' folder, run: "pytest --headed --html=report/report.html"
+
+Steps to enable Access Tokens and Client Credentials
+- Go to the App Service in the resource group and select the Access Tokens check box in the 'Manage->Authentication' tab.
+
+- Go to the 'Manage->Certificates & secrets' tab to generate a Client Secret value.
+
+- Go to the 'Overview' tab to get the client id and tenant id.
+
+Create a .env file at the project root with the web app URL and client credentials
+- Create a .env file at the project root and add your user_name, pass_word, client_id, client_secret,
+  tenant_id and url for the resource group. Please refer to the 'sample_dotenv_file.txt' file (an example layout is also sketched after this README).
+
+## Documentation
+
+See [playwright.dev](https://playwright.dev/python/docs/test-runners) for examples and more detailed information.
\ No newline at end of file
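
For reference, a .env file with the keys this README lists might look like the sketch below. Every value is a placeholder; the authoritative key names and format are in 'sample_dotenv_file.txt', and this sketch is not part of the patch.

```
user_name=<test-user@example.com>
pass_word=<test-user-password>
client_id=<app-registration-client-id>
client_secret=<client-secret-value>
tenant_id=<azure-tenant-id>
url=https://<your-web-app>.azurewebsites.net
```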
diff --git a/tests/e2e-test/base/__init__.py b/tests/e2e-test/base/__init__.py
new file mode 100644
index 000000000..cf50d1ccd
--- /dev/null
+++ b/tests/e2e-test/base/__init__.py
@@ -0,0 +1 @@
+from . import base
\ No newline at end of file
diff --git a/tests/e2e-test/base/base.py b/tests/e2e-test/base/base.py
new file mode 100644
index 000000000..e36c7534e
--- /dev/null
+++ b/tests/e2e-test/base/base.py
@@ -0,0 +1,140 @@
+from config.constants import *
+import requests
+import json
+from dotenv import load_dotenv
+import os
+import re
+from datetime import datetime
+import uuid
+
+
+class BasePage:
+ def __init__(self, page):
+ self.page = page
+
+ def scroll_into_view(self,locator,text):
+ elements = locator.all()
+ for element in elements:
+ client_e = element.text_content()
+ if client_e == text:
+ element.scroll_into_view_if_needed()
+ break
+
+ def select_an_element(self,locator,text):
+ elements = locator.all()
+ for element in elements:
+ clientele = element.text_content()
+ if clientele == text:
+ element.click()
+ break
+
+ def is_visible(self, locator):
+ return locator.is_visible()
+
+ def validate_response_status(self):
+ load_dotenv()
+ # client_id = os.getenv('client_id')
+ # client_secret = os.getenv('client_secret')
+ # tenant_id = os.getenv('tenant_id')
+ # token_url = f"https://login.microsoft.com/{tenant_id}/oauth2/v2.0/token"
+ # The URL of the API endpoint you want to access
+ url = f"{URL}/history/update"
+
+ # Generate unique IDs for the messages
+ user_message_id = str(uuid.uuid4())
+ assistant_message_id = str(uuid.uuid4())
+ conversation_id = str(uuid.uuid4())
+
+ headers = {
+ "Content-Type": "application/json",
+ "Accept": "*/*"
+ }
+ payload = {
+ "conversation_id": conversation_id,
+ "messages": [
+ {
+ "id": user_message_id,
+ "role": "user",
+ "content":""
+ },
+ {
+ "id": assistant_message_id,
+ "role": "assistant",
+ "content":""
+ }
+ ]
+ }
+ # Make the POST request
+ response = self.page.request.post(url, headers=headers,data=json.dumps(payload))
+ # Check the response status code
+ assert response.status == 200, "response code is "+str(response.status)+" "+str(response.json())
+
+ # data = {
+ # 'grant_type': 'client_credentials',
+ # 'client_id': client_id,
+ # 'client_secret': client_secret,
+ # 'scope': f'api://{client_id}/.default'
+ # }
+ # response = requests.post(token_url, data=data)
+ # if response.status_code == 200:
+ # token_info = response.json()
+ # access_token = token_info['access_token']
+ # # Set the headers, including the access token
+ # headers = {
+ # "Content-Type": "application/json",
+ # "Authorization": f"Bearer {access_token}",
+ # "Accept": "*/*"
+ # }
+ # payload = {
+ # "conversation_id": conversation_id,
+ # "messages": [
+ # {
+ # "id": user_message_id,
+ # "role": "user",
+ # "content":""
+ # },
+ # {
+ # "id": assistant_message_id,
+ # "role": "assistant",
+ # "content":""
+ # }
+ # ]
+ # }
+ # # Make the POST request
+ # response = self.page.request.post(url, headers=headers,data=json.dumps(payload))
+ # # Check the response status code
+ # assert response.status == 200, "response code is "+str(response.status)+" "+str(response.json())
+ # else:
+ # assert response.status_code == 200,"Failed to get token "+response.text
+
+ def compare_raw_date_time(self,response_text,sidepanel_text):
+ # Extract date and time from response_text using regex
+ match = re.search(r"((\d{4}-\d{2}-\d{2}) from (\d{2}:\d{2}:\d{2}))|((\w+ \d{1,2}, \d{4}),? from (\d{2}:\d{2}))",response_text)
+ if match:
+ # check for YYYY-MM-DD format in response_text
+ if match.group(2) and match.group(3):
+ date1_str = match.group(2)
+ time1_str = match.group(3)
+ date_time1 = datetime.strptime(f"{date1_str} {time1_str}","%Y-%m-%d %H:%M:%S")
+
+ # check for 'Month DD, YYYY' format in response_text
+ elif match.group(5) and match.group(6):
+ date1_str = match.group(5)
+ time1_str = match.group(6)
+ date_time1 = datetime.strptime(f"{date1_str} {time1_str}", "%B %d, %Y %H:%M")
+
+ else:
+ raise ValueError("Date and time format not found in response_text: " + response_text)
+ # remove special chars in raw sidepanel_text
+ sidepanel_text_cleaned = re.sub(r"[\ue000-\uf8ff]", "",sidepanel_text)
+
+ # Extract date and time from sidepanel_text using regex
+ match2 = re.search(r"(\w+ \w+ \d{1,2}, \d{4})\s*(\d{2}:\d{2})",sidepanel_text_cleaned)
+ if match2:
+ date2_str = match2.group(1)
+ time2_str = match2.group(2)
+ date_time2 = datetime.strptime(f"{date2_str} {time2_str}", "%A %B %d, %Y %H:%M")
+ else:
+ raise ValueError("Date and time format not found in sidepanel_text: "+sidepanel_text)
+ # Compare the two datetime objects
+ assert date_time1 == date_time2
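
A minimal usage sketch (hypothetical, not part of this patch) of how BasePage might be driven from a test: it assumes the `page` fixture provided by pytest-playwright, the .env/constants setup described in the README, and made-up date strings chosen to satisfy the regexes above.

```python
# Hypothetical test sketch: exercises BasePage.validate_response_status and
# BasePage.compare_raw_date_time with made-up, regex-compatible inputs.
from base.base import BasePage


def test_history_update_and_datetime(page):  # `page` comes from pytest-playwright
    base = BasePage(page)

    # POSTs an empty conversation to {URL}/history/update and asserts HTTP 200.
    base.validate_response_status()

    # Both strings describe 2024-05-14 10:30, so the parsed datetimes compare equal.
    base.compare_raw_date_time(
        "The meeting was held on 2024-05-14 from 10:30:00 to 11:00:00",
        "Tuesday May 14, 2024 10:30",
    )
```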
diff --git a/tests/e2e-test/config/constants.py b/tests/e2e-test/config/constants.py
new file mode 100644
index 000000000..004a55f5d
--- /dev/null
+++ b/tests/e2e-test/config/constants.py
@@ -0,0 +1,21 @@
+from dotenv import load_dotenv
+import os
+
+load_dotenv()
+URL = os.getenv('url')
+if URL.endswith('/'):
+ URL = URL[:-1]
+
+# HomePage input data
+homepage_title = "Woodgrove Bank"
+client_name = "Karen Berg"
+# next_meeting_question = "when is the next meeting scheduled with this client?"
+golden_path_question1 = "What were karen's concerns during our last meeting?"
+golden_path_question2 = "Did karen express any concerns over market fluctuation in prior meetings?"
+golden_path_question3 = "What type of asset does karen own ?"
+golden_path_question4 = "Show latest asset value by asset type?"
+golden_path_question5 = "How did equities asset value change in the last six months?"
+# golden_path_question6 = "Give summary of previous meetings?"
+golden_path_question7 = "Summarize Arun sharma previous meetings?"
+invalid_response = "No data found for that client."
+# invalid_response = "I cannot answer this question from the data available. Please rephrase or add more details."
diff --git a/tests/e2e-test/img.png b/tests/e2e-test/img.png
new file mode 100644
index 0000000000000000000000000000000000000000..c7c891ad7ad009184078ab6b559e82638884fea4
GIT binary patch
literal 85099
[85099 bytes of base85-encoded binary image data for tests/e2e-test/img.png omitted]