diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index da8240658..7cca764a6 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -15,7 +15,7 @@ updates:
patterns:
- "*"
- # 2. Python dependencies – App
+ # 2. Python dependencies – App
- package-ecosystem: "pip"
directory: "/src/App"
schedule:
@@ -28,22 +28,8 @@ updates:
backend-deps:
patterns:
- "*"
-
- # 3. Python dependencies – Azure Function
- - package-ecosystem: "pip"
- directory: "/src/AzureFunction"
- schedule:
- interval: "monthly"
- commit-message:
- prefix: "build"
- target-branch: "dependabotchanges"
- open-pull-requests-limit: 10
- groups:
- backend-deps:
- patterns:
- - "*"
-
- # 4. Python dependencies – Fabric Scripts
+
+ # 3. Python dependencies – Fabric Scripts
- package-ecosystem: "pip"
directory: "/src/infra/scripts/fabric_scripts"
schedule:
@@ -57,7 +43,7 @@ updates:
patterns:
- "*"
- # 5. Python dependencies – Index Scripts
+ # 4. Python dependencies – Index Scripts
- package-ecosystem: "pip"
directory: "/src/infra/scripts/index_scripts"
schedule:
diff --git a/.github/workflows/test_automation.yml b/.github/workflows/test_automation.yml
new file mode 100644
index 000000000..64be66e1d
--- /dev/null
+++ b/.github/workflows/test_automation.yml
@@ -0,0 +1,111 @@
+name: Test Automation ClientAdvisor
+
+on:
+ push:
+ branches:
+ - main
+ - dev
+
+ paths:
+ - 'tests/e2e-test/**'
+ schedule:
+ - cron: '0 13 * * *' # Runs at 1 PM UTC
+ workflow_dispatch:
+
+env:
+ url: ${{ vars.CLIENT_ADVISOR_URL }}
+ accelerator_name: "Client Advisor"
+
+jobs:
+ test:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Set up Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.13'
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install -r tests/e2e-test/requirements.txt
+
+ - name: Ensure browsers are installed
+ run: python -m playwright install --with-deps chromium
+
+ - name: Run tests(1)
+ id: test1
+ run: |
+ xvfb-run pytest --headed --html=report/report.html --self-contained-html
+ working-directory: tests/e2e-test
+ continue-on-error: true
+
+ - name: Sleep for 30 seconds
+ if: ${{ steps.test1.outcome == 'failure' }}
+ run: sleep 30s
+ shell: bash
+
+ - name: Run tests(2)
+ if: ${{ steps.test1.outcome == 'failure' }}
+ id: test2
+ run: |
+ xvfb-run pytest --headed --html=report/report.html --self-contained-html
+ working-directory: tests/e2e-test
+ continue-on-error: true
+
+ - name: Sleep for 60 seconds
+ if: ${{ steps.test2.outcome == 'failure' }}
+ run: sleep 60s
+ shell: bash
+
+ - name: Run tests(3)
+ if: ${{ steps.test2.outcome == 'failure' }}
+ id: test3
+ run: |
+ xvfb-run pytest --headed --html=report/report.html --self-contained-html
+ working-directory: tests/e2e-test
+
+ - name: Upload test report
+ id: upload_report
+ uses: actions/upload-artifact@v4
+ if: ${{ !cancelled() }}
+ with:
+ name: test-report
+ path: tests/e2e-test/report/*
+
+ - name: Send Notification
+ if: always()
+ run: |
+ RUN_URL="https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"
+ REPORT_URL=${{ steps.upload_report.outputs.artifact-url }}
+ IS_SUCCESS=${{ steps.test1.outcome == 'success' || steps.test2.outcome == 'success' || steps.test3.outcome == 'success' }}
+ # Construct the email body
+ if [ "$IS_SUCCESS" = "true" ]; then
+          EMAIL_BODY=$(cat <<EOF
+          {
+          "body": "Dear Team,
+          We would like to inform you that the ${{ env.accelerator_name }} Test Automation process has completed successfully.
+          Run URL: ${RUN_URL}
+
+          Test Report: ${REPORT_URL}
+          Best regards,
+          Your Automation Team
+          ",
+          "subject": "${{ env.accelerator_name }} Test Automation - Success"
+          }
+          EOF
+          )
+ else
+          EMAIL_BODY=$(cat <<EOF
+          {
+          "body": "Dear Team,We would like to inform you that the ${{ env.accelerator_name }} Test Automation process has encountered an issue and has failed to complete successfully.
+          Run URL: ${RUN_URL}
+          ${OUTPUT}
+          Test Report: ${REPORT_URL}
+          Please investigate the matter at your earliest convenience.
+          Best regards,
+          Your Automation Team
+          ",
+          "subject": "${{ env.accelerator_name }} Test Automation - Failure"
+          }
+          EOF
+          )
+ fi
+
+ # Send the notification
+ curl -X POST "${{ secrets.EMAILNOTIFICATION_LOGICAPP_URL_TA}}" \
+ -H "Content-Type: application/json" \
+ -d "$EMAIL_BODY" || echo "Failed to send notification"
diff --git a/docs/CustomizingAzdParameters.md b/docs/CustomizingAzdParameters.md
index c5462592f..49d98701a 100644
--- a/docs/CustomizingAzdParameters.md
+++ b/docs/CustomizingAzdParameters.md
@@ -3,41 +3,35 @@
By default this template will use the environment name as the prefix to prevent naming collisions within Azure. The parameters below show the default values. You only need to run the statements below if you need to change the values.
-> To override any of the parameters, run `azd env set ` before running `azd up`. On the first azd command, it will prompt you for the environment name. Be sure to choose 3-20 characters alphanumeric unique name.
+> To override any of the parameters, run `azd env set <PARAMETER_NAME> <VALUE>` before running `azd up`. On the first azd command, it will prompt you for the environment name. Be sure to choose 3-20 characters alphanumeric unique name.
+
+## Parameters
+
+| Name | Type | Default Value | Purpose |
+| -----------------------------| ------- | ------------------- | ---------------------------------------------------------------------------------------------------- |
+| `AZURE_ENV_NAME` | string | `azdtemp` | Used as a prefix for all resource names to ensure uniqueness across environments. |
+| `AZURE_ENV_COSMOS_LOCATION` | string | `eastus2` | Location of the Cosmos DB instance. Choose from (allowed values: `swedencentral`, `australiaeast`). |
+| `AZURE_ENV_MODEL_DEPLOYMENT_TYPE` | string | `GlobalStandard` | Change the Model Deployment Type (allowed values: Standard, GlobalStandard). |
+| `AZURE_ENV_MODEL_NAME` | string | `gpt-4o-mini` | Set the GPT model name (allowed values: `gpt-4o`). |
+| `AZURE_ENV_MODEL_VERSION` | string | `2025-01-01-preview` | Set the Azure OpenAI API version (allowed values: 2024-08-06). |
+| `AZURE_ENV_MODEL_CAPACITY` | integer | `30` | Set the model capacity for GPT deployment. Choose based on your Azure quota and usage needs. |
+| `AZURE_ENV_EMBEDDING_MODEL_NAME` | string | `text-embedding-ada-002` | Set the model name used for embeddings. |
+| `AZURE_ENV_EMBEDDING_MODEL_CAPACITY` | integer | `80` | Set the capacity for embedding model deployment. |
+| `AZURE_ENV_IMAGETAG` | string | `latest` | Set the image tag (allowed values: `latest`, `dev`, `hotfix`). |
+| `AZURE_ENV_OPENAI_LOCATION` | string | `eastus2` | Location of the Azure OpenAI resource. Choose from (allowed values: `swedencentral`, `australiaeast`). |
+| `AZURE_LOCATION` | string | `japaneast` | Sets the Azure region for resource deployment. |
+| `AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID` | string | `` | Reuses an existing Log Analytics Workspace instead of provisioning a new one. |
+
+## How to Set a Parameter
+To customize any of the above values, run the following command **before** `azd up`:
+
+```bash
+azd env set <PARAMETER_NAME> <VALUE>
-
-Change the Secondary Location (example: eastus2, westus2, etc.)
-
-```shell
-azd env set AZURE_ENV_SECONDARY_LOCATION eastus2
-```
-
-Change the Model Deployment Type (allowed values: Standard, GlobalStandard)
-
-```shell
-azd env set AZURE_ENV_MODEL_DEPLOYMENT_TYPE Standard
-```
-
-Set the Model Name (allowed values: gpt-4, gpt-4o)
-
-```shell
-azd env set AZURE_ENV_MODEL_NAME gpt-4o
-```
-
-Change the Model Capacity (choose a number based on available GPT model capacity in your subscription)
-
-```shell
-azd env set AZURE_ENV_MODEL_CAPACITY 30
-```
-
-Change the Embedding Model
-
-```shell
-azd env set AZURE_ENV_EMBEDDING_MODEL_NAME text-embedding-ada-002
```
-Change the Embedding Deployment Capacity (choose a number based on available embedding model capacity in your subscription)
+**Example:**
-```shell
-azd env set AZURE_ENV_EMBEDDING_MODEL_CAPACITY 80
+```bash
+azd env set AZURE_LOCATION westus2
```
diff --git a/docs/DeploymentGuide.md b/docs/DeploymentGuide.md
index 2dd19d964..9fe502c90 100644
--- a/docs/DeploymentGuide.md
+++ b/docs/DeploymentGuide.md
@@ -104,16 +104,24 @@ Consider the following settings during your deployment to modify specific settin
When you start the deployment, most parameters will have **default values**, but you can update the below settings by following the steps [here](CustomizingAzdParameters.md):
-| **Setting** | **Description** | **Default value** |
-|------------|----------------| ------------|
-| **Azure OpenAI Location** | The region where OpenAI deploys | eastus2 |
-| **Environment Name** | A **3-20 character alphanumeric value** used to generate a unique ID to prefix the resources. | byocatemplate |
-| **Cosmos Location** | A **less busy** region for **CosmosDB**, useful in case of availability constraints. | eastus2 |
-| **Deployment Type** | Select from a drop-down list. | Global Standard |
-| **GPT Model** | OpenAI GPT model | gpt-4o-mini |
-| **GPT Model Deployment Capacity** | Configure capacity for **GPT models**. | 30k |
-| **Embedding Model** | OpenAI embedding model | text-embedding-ada-002 |
-| **Embedding Model Capacity** | Set the capacity for **embedding models**. | 80k |
+
+| **Setting** | **Description** | **Default value** |
+| ------------------------------------ | -------------------------------------------------------------------------------------------------- | ------------------------ |
+| **Azure OpenAI Location** | The region where Azure OpenAI deploys. Choose from `swedencentral`, `australiaeast`, etc. | `eastus2` |
+| **Environment Name** | A **3-20 character alphanumeric value** used to generate a unique ID to prefix the resources. | `azdtemp` |
+| **Cosmos Location** | A **less busy** region for **CosmosDB**, useful in case of availability constraints. | `eastus2` |
+| **Deployment Type** | Select from a drop-down list (`Standard`, `GlobalStandard`). | `GlobalStandard` |
+| **GPT Model** | Azure OpenAI GPT model to deploy. | `gpt-4o-mini` |
+| **GPT Model Deployment Capacity** | Configure capacity for **GPT models**. Choose based on Azure OpenAI quota. | `30` |
+| **Embedding Model** | OpenAI embedding model used for vector similarity. | `text-embedding-ada-002` |
+| **Embedding Model Capacity** | Set the capacity for **embedding models**. Choose based on usage and quota. | `80` |
+| **Image Tag** | The version of the Docker image to use (e.g., `latest`, `dev`, `hotfix`). | `latest` |
+| **Azure OpenAI API Version** | Set the API version for OpenAI model deployments. | `2025-01-01-preview` |
+| **AZURE\_LOCATION** | Sets the Azure region for resource deployment. | `japaneast` |
+| **Existing Log Analytics Workspace** | To reuse an existing Log Analytics Workspace ID instead of creating a new one. | *(empty)* |
+
+
+
diff --git a/docs/FabricDeployment.md b/docs/FabricDeployment.md
index 5e95f4b5f..4d85b2d82 100644
--- a/docs/FabricDeployment.md
+++ b/docs/FabricDeployment.md
@@ -1,5 +1,8 @@
## Fabric Deployment
-## Step 1: Create Fabric workspace
+## Step 1: Create or Use an Existing Microsoft Fabric Workspace
+
+ℹ️ Note: If you already have an existing Microsoft Fabric Workspace, you can **skip this step** and proceed to Step 2. To retrieve an existing Workspace ID, check **Point 5 below**.
+
1. Navigate to ([Fabric Workspace](https://app.fabric.microsoft.com/))
2. Click on Workspaces from left Navigation
3. Click on + New Workspace
@@ -19,7 +22,7 @@
- ```cd ./Build-your-own-copilot-Solution-Accelerator/infra/scripts/fabric_scripts```
- ```sh ./run_fabric_items_scripts.sh keyvault_param workspaceid_param solutionprefix_param```
1. keyvault_param - the name of the keyvault that was created in Step 1
- 2. workspaceid_param - the workspaceid created in Step 2
+ 2. workspaceid_param - Existing Workspaceid or workspaceid created in Step 2
3. solutionprefix_param - prefix used to append to lakehouse upon creation
4. Get Fabric Lakehouse connection details:
5. Once deployment is complete, navigate to Fabric Workspace
diff --git a/docs/LocalSetupAndDeploy.md b/docs/LocalSetupAndDeploy.md
index ca09606fc..6b7547e3e 100644
--- a/docs/LocalSetupAndDeploy.md
+++ b/docs/LocalSetupAndDeploy.md
@@ -40,9 +40,11 @@ Follow these steps to deploy the application to Azure App Service:
If this is your first time deploying the app, use the `az webapp up` command. Run the following commands from the `App` folder, replacing the placeholders with your desired values:
```sh
-az webapp up --runtime PYTHON:3.11 --sku B1 --name --resource-group --location --subscription
+az webapp up --runtime PYTHON:3.11 --sku B1 --name <web-app-name> --resource-group <resource-group-name> --location <azure-region> --subscription <subscription-id>
-az webapp config set --startup-file "python3 -m gunicorn app:app" --name --resource-group
+az webapp config set --startup-file "python3 -m uvicorn app:app --host 0.0.0.0 --port 8000" --name <web-app-name> --resource-group <resource-group-name>
+
+az webapp config appsettings set --resource-group <resource-group-name> --name <web-app-name> --settings WEBSITES_PORT=8000
```
Next, configure the required environment variables in the deployed app to ensure it functions correctly.
@@ -83,7 +85,7 @@ az webapp up \
--resource-group
az webapp config set \
- --startup-file "python3 -m gunicorn app:app" \
+ --startup-file "python3 -m uvicorn app:app --host 0.0.0.0 --port 8000" \
--name --resource-group
```
diff --git a/infra/deploy_ai_foundry.bicep b/infra/deploy_ai_foundry.bicep
index d6a8c611b..4ba89548e 100644
--- a/infra/deploy_ai_foundry.bicep
+++ b/infra/deploy_ai_foundry.bicep
@@ -9,6 +9,7 @@ param gptDeploymentCapacity int
param embeddingModel string
param embeddingDeploymentCapacity int
param managedIdentityObjectId string
+param existingLogAnalyticsWorkspaceId string = ''
// Load the abbrevations file required to name the azure resources.
var abbrs = loadJsonContent('./abbreviations.json')
@@ -54,7 +55,17 @@ resource keyVault 'Microsoft.KeyVault/vaults@2022-07-01' existing = {
name: keyVaultName
}
-resource logAnalytics 'Microsoft.OperationalInsights/workspaces@2023-09-01' = {
+var useExisting = !empty(existingLogAnalyticsWorkspaceId)
+var existingLawSubscription = useExisting ? split(existingLogAnalyticsWorkspaceId, '/')[2] : ''
+var existingLawResourceGroup = useExisting ? split(existingLogAnalyticsWorkspaceId, '/')[4] : ''
+var existingLawName = useExisting ? split(existingLogAnalyticsWorkspaceId, '/')[8] : ''
+
+resource existingLogAnalyticsWorkspace 'Microsoft.OperationalInsights/workspaces@2023-09-01' existing = if (useExisting) {
+ name: existingLawName
+ scope: resourceGroup(existingLawSubscription, existingLawResourceGroup)
+}
+
+resource logAnalytics 'Microsoft.OperationalInsights/workspaces@2023-09-01' = if (!useExisting) {
name: workspaceName
location: location
tags: {}
@@ -93,7 +104,7 @@ resource applicationInsights 'Microsoft.Insights/components@2020-02-02' = {
Application_Type: 'web'
publicNetworkAccessForIngestion: 'Enabled'
publicNetworkAccessForQuery: 'Enabled'
- WorkspaceResourceId: logAnalytics.id
+ WorkspaceResourceId: useExisting ? existingLogAnalyticsWorkspace.id : logAnalytics.id
}
}
@@ -490,5 +501,10 @@ output aiSearchService string = aiSearch.name
output aiProjectName string = aiHubProject.name
output applicationInsightsId string = applicationInsights.id
-output logAnalyticsWorkspaceResourceName string = logAnalytics.name
+output logAnalyticsWorkspaceResourceName string = useExisting ? existingLogAnalyticsWorkspace.name : logAnalytics.name
+output logAnalyticsWorkspaceResourceGroup string = useExisting ? existingLawResourceGroup : resourceGroup().name
+
+
output storageAccountName string = storageNameCleaned
+output applicationInsightsConnectionString string = applicationInsights.properties.ConnectionString
+
diff --git a/infra/deploy_app_service.bicep b/infra/deploy_app_service.bicep
index 3d30f5291..d06cf2f74 100644
--- a/infra/deploy_app_service.bicep
+++ b/infra/deploy_app_service.bicep
@@ -32,9 +32,6 @@ param AzureSearchUseSemanticSearch string = 'False'
@description('Semantic search config')
param AzureSearchSemanticSearchConfig string = 'default'
-@description('Is the index prechunked')
-param AzureSearchIndexIsPrechunked string = 'False'
-
@description('Top K results')
param AzureSearchTopK string = '5'
@@ -59,9 +56,6 @@ param AzureOpenAIResource string
@description('Azure OpenAI Model Deployment Name')
param AzureOpenAIModel string
-@description('Azure OpenAI Model Name')
-param AzureOpenAIModelName string = 'gpt-4o-mini'
-
@description('Azure Open AI Endpoint')
param AzureOpenAIEndpoint string = ''
@@ -116,15 +110,9 @@ param AzureOpenAIEmbeddingkey string = ''
@description('Azure Open AI Embedding Endpoint')
param AzureOpenAIEmbeddingEndpoint string = ''
-@description('Enable chat history by deploying a Cosmos DB instance')
-param WebAppEnableChatHistory string = 'False'
-
@description('Use Azure Function')
param USE_INTERNAL_STREAM string = 'True'
-@description('Azure Function Endpoint')
-param STREAMING_AZUREFUNCTION_ENDPOINT string = ''
-
@description('SQL Database Server Name')
param SQLDB_SERVER string = ''
@@ -163,8 +151,6 @@ param userassignedIdentityId string
param userassignedIdentityClientId string
param applicationInsightsId string
-@secure()
-param azureSearchAdminKey string
param azureSearchServiceEndpoint string
@description('Azure Function App SQL System Prompt')
@@ -178,6 +164,7 @@ param streamTextSystemPrompt string
param aiProjectConnectionString string
param useAIProjectClientFlag string = 'false'
param aiProjectName string
+param applicationInsightsConnectionString string
// var WebAppImageName = 'DOCKER|byoaiacontainer.azurecr.io/byoaia-app:latest'
@@ -215,6 +202,10 @@ resource Website 'Microsoft.Web/sites@2020-06-01' = {
name: 'APPINSIGHTS_INSTRUMENTATIONKEY'
value: reference(applicationInsightsId, '2015-05-01').InstrumentationKey
}
+ {
+ name: 'APPLICATIONINSIGHTS_CONNECTION_STRING'
+ value: applicationInsightsConnectionString
+ }
{
name: 'AZURE_SEARCH_SERVICE'
value: AzureSearchService
@@ -235,10 +226,6 @@ resource Website 'Microsoft.Web/sites@2020-06-01' = {
name: 'AZURE_SEARCH_SEMANTIC_SEARCH_CONFIG'
value: AzureSearchSemanticSearchConfig
}
- {
- name: 'AZURE_SEARCH_INDEX_IS_PRECHUNKED'
- value: AzureSearchIndexIsPrechunked
- }
{
name: 'AZURE_SEARCH_TOP_K'
value: AzureSearchTopK
@@ -279,10 +266,6 @@ resource Website 'Microsoft.Web/sites@2020-06-01' = {
name: 'AZURE_OPENAI_KEY'
value: AzureOpenAIKey
}
- {
- name: 'AZURE_OPENAI_MODEL_NAME'
- value: AzureOpenAIModelName
- }
{
name: 'AZURE_OPENAI_TEMPERATURE'
value: AzureOpenAITemperature
@@ -341,11 +324,6 @@ resource Website 'Microsoft.Web/sites@2020-06-01' = {
name: 'AZURE_OPENAI_EMBEDDING_ENDPOINT'
value: AzureOpenAIEmbeddingEndpoint
}
-
- {
- name: 'WEB_APP_ENABLE_CHAT_HISTORY'
- value: WebAppEnableChatHistory
- }
{name: 'SQLDB_SERVER'
value: SQLDB_SERVER
@@ -367,10 +345,6 @@ resource Website 'Microsoft.Web/sites@2020-06-01' = {
value: USE_INTERNAL_STREAM
}
- {name: 'STREAMING_AZUREFUNCTION_ENDPOINT'
- value: STREAMING_AZUREFUNCTION_ENDPOINT
- }
-
{name: 'AZURE_COSMOSDB_ACCOUNT'
value: AZURE_COSMOSDB_ACCOUNT
}
@@ -386,30 +360,10 @@ resource Website 'Microsoft.Web/sites@2020-06-01' = {
//{name: 'VITE_POWERBI_EMBED_URL'
// value: VITE_POWERBI_EMBED_URL
//}
- {
- name: 'SCM_DO_BUILD_DURING_DEPLOYMENT'
- value: 'true'
- }
- {
- name: 'UWSGI_PROCESSES'
- value: '2'
- }
- {
- name: 'UWSGI_THREADS'
- value: '2'
- }
{
name: 'SQLDB_USER_MID'
value: userassignedIdentityClientId
}
- {
- name: 'OPENAI_API_VERSION'
- value: AzureOpenAIApiVersion
- }
- {
- name: 'AZURE_AI_SEARCH_API_KEY'
- value: azureSearchAdminKey
- }
{
name: 'AZURE_AI_SEARCH_ENDPOINT'
value: azureSearchServiceEndpoint
diff --git a/infra/main.bicep b/infra/main.bicep
index 3e286e79f..a11faf2cc 100644
--- a/infra/main.bicep
+++ b/infra/main.bicep
@@ -6,6 +6,9 @@ targetScope = 'resourceGroup'
@description('A unique prefix for all resources in this deployment. This should be 3-20 characters long:')
param environmentName string
+@description('Optional: Existing Log Analytics Workspace Resource ID')
+param existingLogAnalyticsWorkspaceId string = ''
+
@description('CosmosDB Location')
param cosmosLocation string
@@ -140,6 +143,7 @@ module aifoundry 'deploy_ai_foundry.bicep' = {
embeddingModel: embeddingModel
embeddingDeploymentCapacity: embeddingDeploymentCapacity
managedIdentityObjectId:managedIdentityModule.outputs.managedIdentityOutput.objectId
+ existingLogAnalyticsWorkspaceId: existingLogAnalyticsWorkspaceId
}
scope: resourceGroup(resourceGroup().name)
}
@@ -200,17 +204,15 @@ module appserviceModule 'deploy_app_service.bicep' = {
AzureSearchKey:keyVault.getSecret('AZURE-SEARCH-KEY')
AzureSearchUseSemanticSearch:'True'
AzureSearchSemanticSearchConfig:'my-semantic-config'
- AzureSearchIndexIsPrechunked:'False'
AzureSearchTopK:'5'
AzureSearchContentColumns:'content'
AzureSearchFilenameColumn:'chunk_id'
AzureSearchTitleColumn:'client_id'
AzureSearchUrlColumn:'sourceurl'
- AzureOpenAIResource:aifoundry.outputs.aiServicesTarget
+ AzureOpenAIResource:aifoundry.outputs.aiServicesName
AzureOpenAIEndpoint:aifoundry.outputs.aiServicesTarget
AzureOpenAIModel:gptModelName
AzureOpenAIKey:keyVault.getSecret('AZURE-OPENAI-KEY')
- AzureOpenAIModelName:gptModelName
AzureOpenAITemperature:'0'
AzureOpenAITopP:'1'
AzureOpenAIMaxTokens:'1000'
@@ -239,13 +241,13 @@ module appserviceModule 'deploy_app_service.bicep' = {
userassignedIdentityClientId:managedIdentityModule.outputs.managedIdentityWebAppOutput.clientId
userassignedIdentityId:managedIdentityModule.outputs.managedIdentityWebAppOutput.id
applicationInsightsId: aifoundry.outputs.applicationInsightsId
- azureSearchAdminKey:keyVault.getSecret('AZURE-SEARCH-KEY')
azureSearchServiceEndpoint:aifoundry.outputs.aiSearchTarget
sqlSystemPrompt: functionAppSqlPrompt
callTranscriptSystemPrompt: functionAppCallTranscriptSystemPrompt
streamTextSystemPrompt: functionAppStreamTextSystemPrompt
aiProjectConnectionString:keyVault.getSecret('AZURE-AI-PROJECT-CONN-STRING')
aiProjectName:aifoundry.outputs.aiProjectName
+ applicationInsightsConnectionString:aifoundry.outputs.applicationInsightsConnectionString
}
scope: resourceGroup(resourceGroup().name)
}
diff --git a/infra/main.bicepparam b/infra/main.bicepparam
index 42c04971b..d61275246 100644
--- a/infra/main.bicepparam
+++ b/infra/main.bicepparam
@@ -4,8 +4,11 @@ param environmentName = readEnvironmentVariable('AZURE_ENV_NAME', 'byocatemplate
param cosmosLocation = readEnvironmentVariable('AZURE_ENV_COSMOS_LOCATION', 'eastus2')
param deploymentType = readEnvironmentVariable('AZURE_ENV_MODEL_DEPLOYMENT_TYPE', 'GlobalStandard')
param gptModelName = readEnvironmentVariable('AZURE_ENV_MODEL_NAME', 'gpt-4o-mini')
+param azureOpenaiAPIVersion = readEnvironmentVariable('AZURE_ENV_MODEL_VERSION', '2025-01-01-preview')
param gptDeploymentCapacity = int(readEnvironmentVariable('AZURE_ENV_MODEL_CAPACITY', '30'))
-
+param embeddingModel = readEnvironmentVariable('AZURE_ENV_EMBEDDING_MODEL_NAME', 'text-embedding-ada-002')
param embeddingDeploymentCapacity = int(readEnvironmentVariable('AZURE_ENV_EMBEDDING_MODEL_CAPACITY', '80'))
+param imageTag = readEnvironmentVariable('AZURE_ENV_IMAGETAG', 'latest')
param AzureOpenAILocation = readEnvironmentVariable('AZURE_ENV_OPENAI_LOCATION', 'eastus2')
param AZURE_LOCATION = readEnvironmentVariable('AZURE_LOCATION', '')
+param existingLogAnalyticsWorkspaceId = readEnvironmentVariable('AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID', '')
diff --git a/infra/main.json b/infra/main.json
index aaeb0dff4..fee4c39e0 100644
--- a/infra/main.json
+++ b/infra/main.json
@@ -5,7 +5,7 @@
"_generator": {
"name": "bicep",
"version": "0.35.1.17967",
- "templateHash": "12824324392196719415"
+ "templateHash": "10579732773480527563"
}
},
"parameters": {
@@ -339,9 +339,9 @@
"uniqueId": "[toLower(uniqueString(parameters('environmentName'), subscription().id, variables('solutionLocation')))]",
"solutionPrefix": "[format('ca{0}', padLeft(take(variables('uniqueId'), 12), 12, '0'))]",
"abbrs": "[variables('$fxv#0')]",
- "functionAppSqlPrompt": "Generate a valid T-SQL query to find {query} for tables and columns provided below:\r\n 1. Table: Clients\r\n Columns: ClientId, Client, Email, Occupation, MaritalStatus, Dependents\r\n 2. Table: InvestmentGoals\r\n Columns: ClientId, InvestmentGoal\r\n 3. Table: Assets\r\n Columns: ClientId, AssetDate, Investment, ROI, Revenue, AssetType\r\n 4. Table: ClientSummaries\r\n Columns: ClientId, ClientSummary\r\n 5. Table: InvestmentGoalsDetails\r\n Columns: ClientId, InvestmentGoal, TargetAmount, Contribution\r\n 6. Table: Retirement\r\n Columns: ClientId, StatusDate, RetirementGoalProgress, EducationGoalProgress\r\n 7. Table: ClientMeetings\r\n Columns: ClientId, ConversationId, Title, StartTime, EndTime, Advisor, ClientEmail\r\n Always use the Investment column from the Assets table as the value.\r\n Assets table has snapshots of values by date. Do not add numbers across different dates for total values.\r\n Do not use client name in filters.\r\n Do not include assets values unless asked for.\r\n ALWAYS use ClientId = {clientid} in the query filter.\r\n ALWAYS select Client Name (Column: Client) in the query.\r\n Query filters are IMPORTANT. Add filters like AssetType, AssetDate, etc. if needed.\r\n Only return the generated SQL query. Do not return anything else.",
- "functionAppCallTranscriptSystemPrompt": "You are an assistant who supports wealth advisors in preparing for client meetings. \r\n You have access to the client’s past meeting call transcripts. \r\n When answering questions, especially summary requests, provide a detailed and structured response that includes key topics, concerns, decisions, and trends. \r\n If no data is available, state 'No relevant data found for previous meetings.",
- "functionAppStreamTextSystemPrompt": "You are a helpful assistant to a Wealth Advisor. \r\n The currently selected client's name is '{SelectedClientName}', and any case-insensitive or partial mention should be understood as referring to this client.\r\n If no name is provided, assume the question is about '{SelectedClientName}'.\r\n If the query references a different client or includes comparative terms like 'compare' or 'other client', please respond with: 'Please only ask questions about the selected client or select another client.'\r\n Otherwise, provide thorough answers using only data from SQL or call transcripts. \r\n If no data is found, please respond with 'No data found for that client.' Remove any client identifiers from the final response."
+ "functionAppSqlPrompt": "Generate a valid T-SQL query to find {query} for tables and columns provided below:\n 1. Table: Clients\n Columns: ClientId, Client, Email, Occupation, MaritalStatus, Dependents\n 2. Table: InvestmentGoals\n Columns: ClientId, InvestmentGoal\n 3. Table: Assets\n Columns: ClientId, AssetDate, Investment, ROI, Revenue, AssetType\n 4. Table: ClientSummaries\n Columns: ClientId, ClientSummary\n 5. Table: InvestmentGoalsDetails\n Columns: ClientId, InvestmentGoal, TargetAmount, Contribution\n 6. Table: Retirement\n Columns: ClientId, StatusDate, RetirementGoalProgress, EducationGoalProgress\n 7. Table: ClientMeetings\n Columns: ClientId, ConversationId, Title, StartTime, EndTime, Advisor, ClientEmail\n Always use the Investment column from the Assets table as the value.\n Assets table has snapshots of values by date. Do not add numbers across different dates for total values.\n Do not use client name in filters.\n Do not include assets values unless asked for.\n ALWAYS use ClientId = {clientid} in the query filter.\n ALWAYS select Client Name (Column: Client) in the query.\n Query filters are IMPORTANT. Add filters like AssetType, AssetDate, etc. if needed.\n Only return the generated SQL query. Do not return anything else.",
+ "functionAppCallTranscriptSystemPrompt": "You are an assistant who supports wealth advisors in preparing for client meetings. \n You have access to the client’s past meeting call transcripts. \n When answering questions, especially summary requests, provide a detailed and structured response that includes key topics, concerns, decisions, and trends. \n If no data is available, state 'No relevant data found for previous meetings.",
+ "functionAppStreamTextSystemPrompt": "You are a helpful assistant to a Wealth Advisor. \n The currently selected client's name is '{SelectedClientName}', and any case-insensitive or partial mention should be understood as referring to this client.\n If no name is provided, assume the question is about '{SelectedClientName}'.\n If the query references a different client or includes comparative terms like 'compare' or 'other client', please respond with: 'Please only ask questions about the selected client or select another client.'\n Otherwise, provide thorough answers using only data from SQL or call transcripts. \n If no data is found, please respond with 'No data found for that client.' Remove any client identifiers from the final response."
},
"resources": [
{
@@ -708,7 +708,7 @@
"_generator": {
"name": "bicep",
"version": "0.35.1.17967",
- "templateHash": "15504864984003912125"
+ "templateHash": "16963364971780216238"
}
},
"parameters": {
@@ -1492,6 +1492,10 @@
"storageAccountName": {
"type": "string",
"value": "[variables('storageNameCleaned')]"
+ },
+ "applicationInsightsConnectionString": {
+ "type": "string",
+ "value": "[reference(resourceId('Microsoft.Insights/components', variables('applicationInsightsName')), '2020-02-02').ConnectionString]"
}
}
}
@@ -2135,9 +2139,6 @@
"AzureSearchSemanticSearchConfig": {
"value": "my-semantic-config"
},
- "AzureSearchIndexIsPrechunked": {
- "value": "False"
- },
"AzureSearchTopK": {
"value": "5"
},
@@ -2154,7 +2155,7 @@
"value": "sourceurl"
},
"AzureOpenAIResource": {
- "value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.aiServicesTarget.value]"
+ "value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.aiServicesName.value]"
},
"AzureOpenAIEndpoint": {
"value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.aiServicesTarget.value]"
@@ -2170,9 +2171,6 @@
"secretName": "AZURE-OPENAI-KEY"
}
},
- "AzureOpenAIModelName": {
- "value": "[parameters('gptModelName')]"
- },
"AzureOpenAITemperature": {
"value": "0"
},
@@ -2264,14 +2262,6 @@
"applicationInsightsId": {
"value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.applicationInsightsId.value]"
},
- "azureSearchAdminKey": {
- "reference": {
- "keyVault": {
- "id": "[extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.KeyVault/vaults', reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.keyvaultName.value)]"
- },
- "secretName": "AZURE-SEARCH-KEY"
- }
- },
"azureSearchServiceEndpoint": {
"value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.aiSearchTarget.value]"
},
@@ -2294,6 +2284,9 @@
},
"aiProjectName": {
"value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.aiProjectName.value]"
+ },
+ "applicationInsightsConnectionString": {
+ "value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.applicationInsightsConnectionString.value]"
}
},
"template": {
@@ -2303,7 +2296,7 @@
"_generator": {
"name": "bicep",
"version": "0.35.1.17967",
- "templateHash": "9862570739171059712"
+ "templateHash": "18358947382114771550"
}
},
"parameters": {
@@ -2376,13 +2369,6 @@
"description": "Semantic search config"
}
},
- "AzureSearchIndexIsPrechunked": {
- "type": "string",
- "defaultValue": "False",
- "metadata": {
- "description": "Is the index prechunked"
- }
- },
"AzureSearchTopK": {
"type": "string",
"defaultValue": "5",
@@ -2437,13 +2423,6 @@
"description": "Azure OpenAI Model Deployment Name"
}
},
- "AzureOpenAIModelName": {
- "type": "string",
- "defaultValue": "gpt-4o-mini",
- "metadata": {
- "description": "Azure OpenAI Model Name"
- }
- },
"AzureOpenAIEndpoint": {
"type": "string",
"defaultValue": "",
@@ -2569,13 +2548,6 @@
"description": "Azure Open AI Embedding Endpoint"
}
},
- "WebAppEnableChatHistory": {
- "type": "string",
- "defaultValue": "False",
- "metadata": {
- "description": "Enable chat history by deploying a Cosmos DB instance"
- }
- },
"USE_INTERNAL_STREAM": {
"type": "string",
"defaultValue": "True",
@@ -2583,13 +2555,6 @@
"description": "Use Azure Function"
}
},
- "STREAMING_AZUREFUNCTION_ENDPOINT": {
- "type": "string",
- "defaultValue": "",
- "metadata": {
- "description": "Azure Function Endpoint"
- }
- },
"SQLDB_SERVER": {
"type": "string",
"defaultValue": "",
@@ -2658,9 +2623,6 @@
"applicationInsightsId": {
"type": "string"
},
- "azureSearchAdminKey": {
- "type": "securestring"
- },
"azureSearchServiceEndpoint": {
"type": "string"
},
@@ -2691,6 +2653,9 @@
},
"aiProjectName": {
"type": "string"
+ },
+ "applicationInsightsConnectionString": {
+ "type": "string"
}
},
"variables": {
@@ -2730,6 +2695,10 @@
"name": "APPINSIGHTS_INSTRUMENTATIONKEY",
"value": "[reference(parameters('applicationInsightsId'), '2015-05-01').InstrumentationKey]"
},
+ {
+ "name": "APPLICATIONINSIGHTS_CONNECTION_STRING",
+ "value": "[parameters('applicationInsightsConnectionString')]"
+ },
{
"name": "AZURE_SEARCH_SERVICE",
"value": "[parameters('AzureSearchService')]"
@@ -2750,10 +2719,6 @@
"name": "AZURE_SEARCH_SEMANTIC_SEARCH_CONFIG",
"value": "[parameters('AzureSearchSemanticSearchConfig')]"
},
- {
- "name": "AZURE_SEARCH_INDEX_IS_PRECHUNKED",
- "value": "[parameters('AzureSearchIndexIsPrechunked')]"
- },
{
"name": "AZURE_SEARCH_TOP_K",
"value": "[parameters('AzureSearchTopK')]"
@@ -2794,10 +2759,6 @@
"name": "AZURE_OPENAI_KEY",
"value": "[parameters('AzureOpenAIKey')]"
},
- {
- "name": "AZURE_OPENAI_MODEL_NAME",
- "value": "[parameters('AzureOpenAIModelName')]"
- },
{
"name": "AZURE_OPENAI_TEMPERATURE",
"value": "[parameters('AzureOpenAITemperature')]"
@@ -2854,10 +2815,6 @@
"name": "AZURE_OPENAI_EMBEDDING_ENDPOINT",
"value": "[parameters('AzureOpenAIEmbeddingEndpoint')]"
},
- {
- "name": "WEB_APP_ENABLE_CHAT_HISTORY",
- "value": "[parameters('WebAppEnableChatHistory')]"
- },
{
"name": "SQLDB_SERVER",
"value": "[parameters('SQLDB_SERVER')]"
@@ -2878,10 +2835,6 @@
"name": "USE_INTERNAL_STREAM",
"value": "[parameters('USE_INTERNAL_STREAM')]"
},
- {
- "name": "STREAMING_AZUREFUNCTION_ENDPOINT",
- "value": "[parameters('STREAMING_AZUREFUNCTION_ENDPOINT')]"
- },
{
"name": "AZURE_COSMOSDB_ACCOUNT",
"value": "[parameters('AZURE_COSMOSDB_ACCOUNT')]"
@@ -2898,30 +2851,10 @@
"name": "AZURE_COSMOSDB_ENABLE_FEEDBACK",
"value": "[parameters('AZURE_COSMOSDB_ENABLE_FEEDBACK')]"
},
- {
- "name": "SCM_DO_BUILD_DURING_DEPLOYMENT",
- "value": "true"
- },
- {
- "name": "UWSGI_PROCESSES",
- "value": "2"
- },
- {
- "name": "UWSGI_THREADS",
- "value": "2"
- },
{
"name": "SQLDB_USER_MID",
"value": "[parameters('userassignedIdentityClientId')]"
},
- {
- "name": "OPENAI_API_VERSION",
- "value": "[parameters('AzureOpenAIApiVersion')]"
- },
- {
- "name": "AZURE_AI_SEARCH_API_KEY",
- "value": "[parameters('azureSearchAdminKey')]"
- },
{
"name": "AZURE_AI_SEARCH_ENDPOINT",
"value": "[parameters('azureSearchServiceEndpoint')]"
diff --git a/src/App/.env.sample b/src/App/.env.sample
index 50f33c7e3..7dc66e86e 100644
--- a/src/App/.env.sample
+++ b/src/App/.env.sample
@@ -1,28 +1,19 @@
-# Chat
-DEBUG=True
+# Azure OpenAI settings
AZURE_OPENAI_RESOURCE=
-AZURE_OPENAI_MODEL=gpt-35-turbo-16k
+AZURE_OPENAI_MODEL="gpt-4o-mini"
AZURE_OPENAI_KEY=
-AZURE_OPENAI_MODEL_NAME=gpt-35-turbo-16k
-AZURE_OPENAI_TEMPERATURE=0
-AZURE_OPENAI_TOP_P=1.0
-AZURE_OPENAI_MAX_TOKENS=1000
+AZURE_OPENAI_TEMPERATURE="0"
+AZURE_OPENAI_TOP_P="1"
+AZURE_OPENAI_MAX_TOKENS="1000"
AZURE_OPENAI_STOP_SEQUENCE=
-AZURE_OPENAI_SEED=
-AZURE_OPENAI_CHOICES_COUNT=1
-AZURE_OPENAI_PRESENCE_PENALTY=0.0
-AZURE_OPENAI_FREQUENCY_PENALTY=0.0
-AZURE_OPENAI_LOGIT_BIAS=
-AZURE_OPENAI_USER=
-AZURE_OPENAI_TOOLS=
-AZURE_OPENAI_TOOL_CHOICE=
-AZURE_OPENAI_SYSTEM_MESSAGE=You are an AI assistant that helps people find information.
-AZURE_OPENAI_PREVIEW_API_VERSION=2024-05-01-preview
-AZURE_OPENAI_STREAM=True
+AZURE_OPENAI_SYSTEM_MESSAGE="You are a helpful Wealth Advisor assistant"
+AZURE_OPENAI_PREVIEW_API_VERSION="2025-01-01-preview"
+AZURE_OPENAI_STREAM="True"
AZURE_OPENAI_ENDPOINT=
-AZURE_OPENAI_EMBEDDING_NAME=text-embedding-ada-002
+AZURE_OPENAI_EMBEDDING_NAME="text-embedding-ada-002"
AZURE_OPENAI_EMBEDDING_ENDPOINT=
AZURE_OPENAI_EMBEDDING_KEY=
+
# User Interface
UI_TITLE=
UI_LOGO=
@@ -30,100 +21,49 @@ UI_CHAT_LOGO=
UI_CHAT_TITLE=
UI_CHAT_DESCRIPTION=
UI_FAVICON=
-# Chat history
+
+# Cosmos DB settings
AZURE_COSMOSDB_ACCOUNT=
-AZURE_COSMOSDB_DATABASE=db_conversation_history
-AZURE_COSMOSDB_CONVERSATIONS_CONTAINER=conversations
-AZURE_COSMOSDB_ACCOUNT_KEY=
-AZURE_COSMOSDB_ENABLE_FEEDBACK=True
-# Chat with data: common settings
-SEARCH_TOP_K=5
-SEARCH_STRICTNESS=3
-SEARCH_ENABLE_IN_DOMAIN=True
-# Chat with data: Azure AI Search
+AZURE_COSMOSDB_DATABASE="db_conversation_history"
+AZURE_COSMOSDB_CONVERSATIONS_CONTAINER="conversations"
+AZURE_COSMOSDB_ENABLE_FEEDBACK="True"
+
+# Azure Search settings
AZURE_SEARCH_SERVICE=
-AZURE_SEARCH_INDEX=
+AZURE_SEARCH_INDEX="transcripts_index"
AZURE_SEARCH_KEY=
-AZURE_SEARCH_SEMANTIC_SEARCH_CONFIG=
-AZURE_SEARCH_INDEX_IS_PRECHUNKED=False
-AZURE_SEARCH_TOP_K=5
-AZURE_SEARCH_ENABLE_IN_DOMAIN=True
-AZURE_SEARCH_CONTENT_COLUMNS=content
-AZURE_SEARCH_FILENAME_COLUMN=sourceurl
-AZURE_SEARCH_TITLE_COLUMN=client_id
-AZURE_SEARCH_URL_COLUMN=sourceurl
-AZURE_SEARCH_VECTOR_COLUMNS=
-AZURE_SEARCH_QUERY_TYPE=simple
+AZURE_SEARCH_SEMANTIC_SEARCH_CONFIG="my-semantic-config"
+AZURE_SEARCH_TOP_K="5"
+AZURE_SEARCH_ENABLE_IN_DOMAIN="False"
+AZURE_SEARCH_CONTENT_COLUMNS="content"
+AZURE_SEARCH_FILENAME_COLUMN="chunk_id"
+AZURE_SEARCH_TITLE_COLUMN="client_id"
+AZURE_SEARCH_URL_COLUMN="sourceurl"
+AZURE_SEARCH_VECTOR_COLUMNS="contentVector"
+AZURE_SEARCH_QUERY_TYPE="simple"
AZURE_SEARCH_PERMITTED_GROUPS_COLUMN=
-AZURE_SEARCH_STRICTNESS=3
-# Chat with data: Azure CosmosDB Mongo VCore
-AZURE_COSMOSDB_MONGO_VCORE_CONNECTION_STRING=
-AZURE_COSMOSDB_MONGO_VCORE_DATABASE=
-AZURE_COSMOSDB_MONGO_VCORE_CONTAINER=
-AZURE_COSMOSDB_MONGO_VCORE_INDEX=
-AZURE_COSMOSDB_MONGO_VCORE_INDEX=
-AZURE_COSMOSDB_MONGO_VCORE_TOP_K=
-AZURE_COSMOSDB_MONGO_VCORE_STRICTNESS=
-AZURE_COSMOSDB_MONGO_VCORE_ENABLE_IN_DOMAIN=
-AZURE_COSMOSDB_MONGO_VCORE_CONTENT_COLUMNS=
-AZURE_COSMOSDB_MONGO_VCORE_FILENAME_COLUMN=
-AZURE_COSMOSDB_MONGO_VCORE_TITLE_COLUMN=
-AZURE_COSMOSDB_MONGO_VCORE_URL_COLUMN=
-AZURE_COSMOSDB_MONGO_VCORE_VECTOR_COLUMNS=
-# Chat with data: Elasticsearch
-ELASTICSEARCH_ENDPOINT=
-ELASTICSEARCH_ENCODED_API_KEY=
-ELASTICSEARCH_INDEX=
-ELASTICSEARCH_QUERY_TYPE=
-ELASTICSEARCH_TOP_K=
-ELASTICSEARCH_ENABLE_IN_DOMAIN=
-ELASTICSEARCH_CONTENT_COLUMNS=
-ELASTICSEARCH_FILENAME_COLUMN=
-ELASTICSEARCH_TITLE_COLUMN=
-ELASTICSEARCH_URL_COLUMN=
-ELASTICSEARCH_VECTOR_COLUMNS=
-ELASTICSEARCH_STRICTNESS=
-ELASTICSEARCH_EMBEDDING_MODEL_ID=
-# Chat with data: Pinecone
-PINECONE_ENVIRONMENT=
-PINECONE_API_KEY=
-PINECONE_INDEX_NAME=
-PINECONE_TOP_K=
-PINECONE_STRICTNESS=
-PINECONE_ENABLE_IN_DOMAIN=
-PINECONE_CONTENT_COLUMNS=
-PINECONE_FILENAME_COLUMN=
-PINECONE_TITLE_COLUMN=
-PINECONE_URL_COLUMN=
-PINECONE_VECTOR_COLUMNS=
-# Chat with data: Azure Machine Learning MLIndex
-AZURE_MLINDEX_NAME=
-AZURE_MLINDEX_VERSION=
-AZURE_ML_PROJECT_RESOURCE_ID=
-AZURE_MLINDEX_TOP_K=
-AZURE_MLINDEX_STRICTNESS=
-AZURE_MLINDEX_ENABLE_IN_DOMAIN=
-AZURE_MLINDEX_CONTENT_COLUMNS=
-AZURE_MLINDEX_FILENAME_COLUMN=
-AZURE_MLINDEX_TITLE_COLUMN=
-AZURE_MLINDEX_URL_COLUMN=
-AZURE_MLINDEX_VECTOR_COLUMNS=
-AZURE_MLINDEX_QUERY_TYPE=
-# Chat with data: Prompt flow API
-USE_PROMPTFLOW=False
-PROMPTFLOW_ENDPOINT=
-PROMPTFLOW_API_KEY=
-PROMPTFLOW_RESPONSE_TIMEOUT=120
-PROMPTFLOW_REQUEST_FIELD_NAME=query
-PROMPTFLOW_RESPONSE_FIELD_NAME=reply
-PROMPTFLOW_CITATIONS_FIELD_NAME=documents
-STREAMING_AZUREFUNCTION_ENDPOINT=
-USE_AZUREFUNCTION=True
-SQL_CONNECTION=
+AZURE_SEARCH_STRICTNESS="3"
+AZURE_SEARCH_USE_SEMANTIC_SEARCH="True"
+AZURE_AI_SEARCH_ENDPOINT=
+
+# Azure SQL settings
SQLDB_CONNECTION_STRING=
SQLDB_SERVER=
SQLDB_DATABASE=
SQLDB_USERNAME=
SQLDB_PASSWORD=
-SQLDB_DRIVER=
-VITE_POWERBI_EMBED_URL=
\ No newline at end of file
+SQLDB_USER_MID=
+
+# AI Project
+AZURE_AI_PROJECT_CONN_STRING=
+USE_AI_PROJECT_CLIENT="false"
+
+# Prompts
+AZURE_CALL_TRANSCRIPT_SYSTEM_PROMPT="You are an assistant who supports wealth advisors in preparing for client meetings. \n You have access to the client’s past meeting call transcripts. \n When answering questions, especially summary requests, provide a detailed and structured response that includes key topics, concerns, decisions, and trends. \n If no data is available, state 'No relevant data found for previous meetings.'"
+AZURE_OPENAI_STREAM_TEXT_SYSTEM_PROMPT="You are a helpful assistant to a Wealth Advisor. \n The currently selected client's name is '{SelectedClientName}', and any case-insensitive or partial mention should be understood as referring to this client.\n If no name is provided, assume the question is about '{SelectedClientName}'.\n If the query references a different client or includes comparative terms like 'compare' or 'other client', please respond with: 'Please only ask questions about the selected client or select another client.'\n Otherwise, provide thorough answers using only data from SQL or call transcripts. \n If no data is found, please respond with 'No data found for that client.' Remove any client identifiers from the final response."
+AZURE_SQL_SYSTEM_PROMPT="Generate a valid T-SQL query to find {query} for tables and columns provided below:\n 1. Table: Clients\n Columns: ClientId, Client, Email, Occupation, MaritalStatus, Dependents\n 2. Table: InvestmentGoals\n Columns: ClientId, InvestmentGoal\n 3. Table: Assets\n Columns: ClientId, AssetDate, Investment, ROI, Revenue, AssetType\n 4. Table: ClientSummaries\n Columns: ClientId, ClientSummary\n 5. Table: InvestmentGoalsDetails\n Columns: ClientId, InvestmentGoal, TargetAmount, Contribution\n 6. Table: Retirement\n Columns: ClientId, StatusDate, RetirementGoalProgress, EducationGoalProgress\n 7. Table: ClientMeetings\n Columns: ClientId, ConversationId, Title, StartTime, EndTime, Advisor, ClientEmail\n Always use the Investment column from the Assets table as the value.\n Assets table has snapshots of values by date. Do not add numbers across different dates for total values.\n Do not use client name in filters.\n Do not include assets values unless asked for.\n ALWAYS use ClientId = {clientid} in the query filter.\n ALWAYS select Client Name (Column: Client) in the query.\n Query filters are IMPORTANT. Add filters like AssetType, AssetDate, etc. if needed.\n Only return the generated SQL query. Do not return anything else."
+
+# Misc
+APPINSIGHTS_INSTRUMENTATIONKEY=
+AUTH_ENABLED="false"
+USE_INTERNAL_STREAM="True"
\ No newline at end of file
diff --git a/src/App/WebApp.Dockerfile b/src/App/WebApp.Dockerfile
index f54e2e30c..48bcd5ff5 100644
--- a/src/App/WebApp.Dockerfile
+++ b/src/App/WebApp.Dockerfile
@@ -36,4 +36,4 @@ COPY --from=frontend /home/node/app/static /usr/src/app/static/
WORKDIR /usr/src/app
EXPOSE 80
-CMD ["gunicorn", "-b", "0.0.0.0:80", "app:app"]
+CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "80", "--workers", "4", "--log-level", "info", "--access-log"]
diff --git a/src/App/app.py b/src/App/app.py
index 411829551..b1559eb25 100644
--- a/src/App/app.py
+++ b/src/App/app.py
@@ -6,8 +6,6 @@
import uuid
from types import SimpleNamespace
-import httpx
-import requests
from azure.identity import DefaultAzureCredential, get_bearer_token_provider
from dotenv import load_dotenv
@@ -26,17 +24,18 @@
from backend.auth.auth_utils import get_authenticated_user_details, get_tenantid
from backend.history.cosmosdbservice import CosmosConversationClient
from backend.utils import (
- convert_to_pf_format,
- format_as_ndjson,
- format_pf_non_streaming_response,
format_stream_response,
generateFilterString,
- parse_multi_columns,
+ parse_multi_columns
)
from db import get_connection
from db import dict_cursor
from backend.chat_logic_handler import stream_response_from_wealth_assistant
+from backend.event_utils import track_event_if_configured
+from azure.monitor.opentelemetry import configure_azure_monitor
+from opentelemetry import trace
+from opentelemetry.trace import Status, StatusCode
bp = Blueprint("routes", __name__, static_folder="static", template_folder="static")
@@ -61,6 +60,30 @@
UI_FAVICON = os.environ.get("UI_FAVICON") or "/favicon.ico"
UI_SHOW_SHARE_BUTTON = os.environ.get("UI_SHOW_SHARE_BUTTON", "true").lower() == "true"
+# Check if the Application Insights connection string is set in the environment variables
+instrumentation_key = os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING")
+if instrumentation_key:
+ # Configure Application Insights if the Instrumentation Key is found
+ configure_azure_monitor(connection_string=instrumentation_key)
+ logging.info("Application Insights configured with the provided Instrumentation Key")
+else:
+ # Log a warning if the Instrumentation Key is not found
+ logging.warning("No Application Insights Instrumentation Key found. Skipping configuration")
+
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+
+# Suppress INFO logs from 'azure.core.pipeline.policies.http_logging_policy'
+logging.getLogger("azure.core.pipeline.policies.http_logging_policy").setLevel(
+ logging.WARNING
+)
+logging.getLogger("azure.identity.aio._internal").setLevel(logging.WARNING)
+
+# Suppress info logs from OpenTelemetry exporter
+logging.getLogger("azure.monitor.opentelemetry.exporter.export._base").setLevel(
+ logging.WARNING
+)
+
def create_app():
app = Quart(__name__)
@@ -95,9 +118,6 @@ async def assets(path):
# On Your Data Settings
DATASOURCE_TYPE = os.environ.get("DATASOURCE_TYPE", "AzureCognitiveSearch")
-SEARCH_TOP_K = os.environ.get("SEARCH_TOP_K", 5)
-SEARCH_STRICTNESS = os.environ.get("SEARCH_STRICTNESS", 3)
-SEARCH_ENABLE_IN_DOMAIN = os.environ.get("SEARCH_ENABLE_IN_DOMAIN", "true")
# ACS Integration Settings
AZURE_SEARCH_SERVICE = os.environ.get("AZURE_SEARCH_SERVICE")
@@ -109,9 +129,9 @@ async def assets(path):
AZURE_SEARCH_SEMANTIC_SEARCH_CONFIG = os.environ.get(
"AZURE_SEARCH_SEMANTIC_SEARCH_CONFIG", "default"
)
-AZURE_SEARCH_TOP_K = os.environ.get("AZURE_SEARCH_TOP_K", SEARCH_TOP_K)
+AZURE_SEARCH_TOP_K = os.environ.get("AZURE_SEARCH_TOP_K", 5)
AZURE_SEARCH_ENABLE_IN_DOMAIN = os.environ.get(
- "AZURE_SEARCH_ENABLE_IN_DOMAIN", SEARCH_ENABLE_IN_DOMAIN
+ "AZURE_SEARCH_ENABLE_IN_DOMAIN", "true"
)
AZURE_SEARCH_CONTENT_COLUMNS = os.environ.get("AZURE_SEARCH_CONTENT_COLUMNS")
AZURE_SEARCH_FILENAME_COLUMN = os.environ.get("AZURE_SEARCH_FILENAME_COLUMN")
@@ -122,7 +142,7 @@ async def assets(path):
AZURE_SEARCH_PERMITTED_GROUPS_COLUMN = os.environ.get(
"AZURE_SEARCH_PERMITTED_GROUPS_COLUMN"
)
-AZURE_SEARCH_STRICTNESS = os.environ.get("AZURE_SEARCH_STRICTNESS", SEARCH_STRICTNESS)
+AZURE_SEARCH_STRICTNESS = os.environ.get("AZURE_SEARCH_STRICTNESS", 3)
# AOAI Integration Settings
AZURE_OPENAI_RESOURCE = os.environ.get("AZURE_OPENAI_RESOURCE")
@@ -142,49 +162,10 @@ async def assets(path):
MINIMUM_SUPPORTED_AZURE_OPENAI_PREVIEW_API_VERSION,
)
AZURE_OPENAI_STREAM = os.environ.get("AZURE_OPENAI_STREAM", "true")
-AZURE_OPENAI_MODEL_NAME = os.environ.get(
- "AZURE_OPENAI_MODEL_NAME", "gpt-35-turbo-16k"
-) # Name of the model, e.g. 'gpt-35-turbo-16k' or 'gpt-4'
AZURE_OPENAI_EMBEDDING_ENDPOINT = os.environ.get("AZURE_OPENAI_EMBEDDING_ENDPOINT")
AZURE_OPENAI_EMBEDDING_KEY = os.environ.get("AZURE_OPENAI_EMBEDDING_KEY")
AZURE_OPENAI_EMBEDDING_NAME = os.environ.get("AZURE_OPENAI_EMBEDDING_NAME", "")
-# CosmosDB Mongo vcore vector db Settings
-AZURE_COSMOSDB_MONGO_VCORE_CONNECTION_STRING = os.environ.get(
- "AZURE_COSMOSDB_MONGO_VCORE_CONNECTION_STRING"
-) # This has to be secure string
-AZURE_COSMOSDB_MONGO_VCORE_DATABASE = os.environ.get(
- "AZURE_COSMOSDB_MONGO_VCORE_DATABASE"
-)
-AZURE_COSMOSDB_MONGO_VCORE_CONTAINER = os.environ.get(
- "AZURE_COSMOSDB_MONGO_VCORE_CONTAINER"
-)
-AZURE_COSMOSDB_MONGO_VCORE_INDEX = os.environ.get("AZURE_COSMOSDB_MONGO_VCORE_INDEX")
-AZURE_COSMOSDB_MONGO_VCORE_TOP_K = os.environ.get(
- "AZURE_COSMOSDB_MONGO_VCORE_TOP_K", AZURE_SEARCH_TOP_K
-)
-AZURE_COSMOSDB_MONGO_VCORE_STRICTNESS = os.environ.get(
- "AZURE_COSMOSDB_MONGO_VCORE_STRICTNESS", AZURE_SEARCH_STRICTNESS
-)
-AZURE_COSMOSDB_MONGO_VCORE_ENABLE_IN_DOMAIN = os.environ.get(
- "AZURE_COSMOSDB_MONGO_VCORE_ENABLE_IN_DOMAIN", AZURE_SEARCH_ENABLE_IN_DOMAIN
-)
-AZURE_COSMOSDB_MONGO_VCORE_CONTENT_COLUMNS = os.environ.get(
- "AZURE_COSMOSDB_MONGO_VCORE_CONTENT_COLUMNS", ""
-)
-AZURE_COSMOSDB_MONGO_VCORE_FILENAME_COLUMN = os.environ.get(
- "AZURE_COSMOSDB_MONGO_VCORE_FILENAME_COLUMN"
-)
-AZURE_COSMOSDB_MONGO_VCORE_TITLE_COLUMN = os.environ.get(
- "AZURE_COSMOSDB_MONGO_VCORE_TITLE_COLUMN"
-)
-AZURE_COSMOSDB_MONGO_VCORE_URL_COLUMN = os.environ.get(
- "AZURE_COSMOSDB_MONGO_VCORE_URL_COLUMN"
-)
-AZURE_COSMOSDB_MONGO_VCORE_VECTOR_COLUMNS = os.environ.get(
- "AZURE_COSMOSDB_MONGO_VCORE_VECTOR_COLUMNS"
-)
-
SHOULD_STREAM = True if AZURE_OPENAI_STREAM.lower() == "true" else False
# Chat History CosmosDB Integration Settings
@@ -197,78 +178,7 @@ async def assets(path):
AZURE_COSMOSDB_ENABLE_FEEDBACK = (
os.environ.get("AZURE_COSMOSDB_ENABLE_FEEDBACK", "false").lower() == "true"
)
-
-# Elasticsearch Integration Settings
-ELASTICSEARCH_ENDPOINT = os.environ.get("ELASTICSEARCH_ENDPOINT")
-ELASTICSEARCH_ENCODED_API_KEY = os.environ.get("ELASTICSEARCH_ENCODED_API_KEY")
-ELASTICSEARCH_INDEX = os.environ.get("ELASTICSEARCH_INDEX")
-ELASTICSEARCH_QUERY_TYPE = os.environ.get("ELASTICSEARCH_QUERY_TYPE", "simple")
-ELASTICSEARCH_TOP_K = os.environ.get("ELASTICSEARCH_TOP_K", SEARCH_TOP_K)
-ELASTICSEARCH_ENABLE_IN_DOMAIN = os.environ.get(
- "ELASTICSEARCH_ENABLE_IN_DOMAIN", SEARCH_ENABLE_IN_DOMAIN
-)
-ELASTICSEARCH_CONTENT_COLUMNS = os.environ.get("ELASTICSEARCH_CONTENT_COLUMNS")
-ELASTICSEARCH_FILENAME_COLUMN = os.environ.get("ELASTICSEARCH_FILENAME_COLUMN")
-ELASTICSEARCH_TITLE_COLUMN = os.environ.get("ELASTICSEARCH_TITLE_COLUMN")
-ELASTICSEARCH_URL_COLUMN = os.environ.get("ELASTICSEARCH_URL_COLUMN")
-ELASTICSEARCH_VECTOR_COLUMNS = os.environ.get("ELASTICSEARCH_VECTOR_COLUMNS")
-ELASTICSEARCH_STRICTNESS = os.environ.get("ELASTICSEARCH_STRICTNESS", SEARCH_STRICTNESS)
-ELASTICSEARCH_EMBEDDING_MODEL_ID = os.environ.get("ELASTICSEARCH_EMBEDDING_MODEL_ID")
-
-# Pinecone Integration Settings
-PINECONE_ENVIRONMENT = os.environ.get("PINECONE_ENVIRONMENT")
-PINECONE_API_KEY = os.environ.get("PINECONE_API_KEY")
-PINECONE_INDEX_NAME = os.environ.get("PINECONE_INDEX_NAME")
-PINECONE_TOP_K = os.environ.get("PINECONE_TOP_K", SEARCH_TOP_K)
-PINECONE_STRICTNESS = os.environ.get("PINECONE_STRICTNESS", SEARCH_STRICTNESS)
-PINECONE_ENABLE_IN_DOMAIN = os.environ.get(
- "PINECONE_ENABLE_IN_DOMAIN", SEARCH_ENABLE_IN_DOMAIN
-)
-PINECONE_CONTENT_COLUMNS = os.environ.get("PINECONE_CONTENT_COLUMNS", "")
-PINECONE_FILENAME_COLUMN = os.environ.get("PINECONE_FILENAME_COLUMN")
-PINECONE_TITLE_COLUMN = os.environ.get("PINECONE_TITLE_COLUMN")
-PINECONE_URL_COLUMN = os.environ.get("PINECONE_URL_COLUMN")
-PINECONE_VECTOR_COLUMNS = os.environ.get("PINECONE_VECTOR_COLUMNS")
-
-# Azure AI MLIndex Integration Settings - for use with MLIndex data assets created in Azure AI Studio
-AZURE_MLINDEX_NAME = os.environ.get("AZURE_MLINDEX_NAME")
-AZURE_MLINDEX_VERSION = os.environ.get("AZURE_MLINDEX_VERSION")
-AZURE_ML_PROJECT_RESOURCE_ID = os.environ.get(
- "AZURE_ML_PROJECT_RESOURCE_ID"
-) # /subscriptions/{sub ID}/resourceGroups/{rg name}/providers/Microsoft.MachineLearningServices/workspaces/{AML project name}
-AZURE_MLINDEX_TOP_K = os.environ.get("AZURE_MLINDEX_TOP_K", SEARCH_TOP_K)
-AZURE_MLINDEX_STRICTNESS = os.environ.get("AZURE_MLINDEX_STRICTNESS", SEARCH_STRICTNESS)
-AZURE_MLINDEX_ENABLE_IN_DOMAIN = os.environ.get(
- "AZURE_MLINDEX_ENABLE_IN_DOMAIN", SEARCH_ENABLE_IN_DOMAIN
-)
-AZURE_MLINDEX_CONTENT_COLUMNS = os.environ.get("AZURE_MLINDEX_CONTENT_COLUMNS", "")
-AZURE_MLINDEX_FILENAME_COLUMN = os.environ.get("AZURE_MLINDEX_FILENAME_COLUMN")
-AZURE_MLINDEX_TITLE_COLUMN = os.environ.get("AZURE_MLINDEX_TITLE_COLUMN")
-AZURE_MLINDEX_URL_COLUMN = os.environ.get("AZURE_MLINDEX_URL_COLUMN")
-AZURE_MLINDEX_VECTOR_COLUMNS = os.environ.get("AZURE_MLINDEX_VECTOR_COLUMNS")
-AZURE_MLINDEX_QUERY_TYPE = os.environ.get("AZURE_MLINDEX_QUERY_TYPE")
-# Promptflow Integration Settings
-USE_PROMPTFLOW = os.environ.get("USE_PROMPTFLOW", "false").lower() == "true"
-PROMPTFLOW_ENDPOINT = os.environ.get("PROMPTFLOW_ENDPOINT")
-PROMPTFLOW_API_KEY = os.environ.get("PROMPTFLOW_API_KEY")
-PROMPTFLOW_RESPONSE_TIMEOUT = os.environ.get("PROMPTFLOW_RESPONSE_TIMEOUT", 30.0)
-# default request and response field names are input -> 'query' and output -> 'reply'
-PROMPTFLOW_REQUEST_FIELD_NAME = os.environ.get("PROMPTFLOW_REQUEST_FIELD_NAME", "query")
-PROMPTFLOW_RESPONSE_FIELD_NAME = os.environ.get(
- "PROMPTFLOW_RESPONSE_FIELD_NAME", "reply"
-)
-PROMPTFLOW_CITATIONS_FIELD_NAME = os.environ.get(
- "PROMPTFLOW_CITATIONS_FIELD_NAME", "documents"
-)
USE_INTERNAL_STREAM = os.environ.get("USE_INTERNAL_STREAM", "false").lower() == "true"
-FUNCTIONAPP_RESPONSE_FIELD_NAME = os.environ.get(
- "FUNCTIONAPP_RESPONSE_FIELD_NAME", "reply"
-)
-FUNCTIONAPP_CITATIONS_FIELD_NAME = os.environ.get(
- "FUNCTIONAPP_CITATIONS_FIELD_NAME", "documents"
-)
-AZUREFUNCTION_ENDPOINT = os.environ.get("AZUREFUNCTION_ENDPOINT")
-STREAMING_AZUREFUNCTION_ENDPOINT = os.environ.get("STREAMING_AZUREFUNCTION_ENDPOINT")
# Frontend Settings via Environment Variables
AUTH_ENABLED = os.environ.get("AUTH_ENABLED", "true").lower() == "true"
CHAT_HISTORY_ENABLED = (
@@ -293,7 +203,7 @@ async def assets(path):
# Enable Microsoft Defender for Cloud Integration
MS_DEFENDER_ENABLED = os.environ.get("MS_DEFENDER_ENABLED", "false").lower() == "true"
-VITE_POWERBI_EMBED_URL = os.environ.get("VITE_POWERBI_EMBED_URL")
+# VITE_POWERBI_EMBED_URL = os.environ.get("VITE_POWERBI_EMBED_URL")
def should_use_data():
@@ -303,31 +213,6 @@ def should_use_data():
logging.debug("Using Azure Cognitive Search")
return True
- if (
- AZURE_COSMOSDB_MONGO_VCORE_DATABASE
- and AZURE_COSMOSDB_MONGO_VCORE_CONTAINER
- and AZURE_COSMOSDB_MONGO_VCORE_INDEX
- and AZURE_COSMOSDB_MONGO_VCORE_CONNECTION_STRING
- ):
- DATASOURCE_TYPE = "AzureCosmosDB"
- logging.debug("Using Azure CosmosDB Mongo vcore")
- return True
-
- if ELASTICSEARCH_ENDPOINT and ELASTICSEARCH_ENCODED_API_KEY and ELASTICSEARCH_INDEX:
- DATASOURCE_TYPE = "Elasticsearch"
- logging.debug("Using Elasticsearch")
- return True
-
- if PINECONE_ENVIRONMENT and PINECONE_API_KEY and PINECONE_INDEX_NAME:
- DATASOURCE_TYPE = "Pinecone"
- logging.debug("Using Pinecone")
- return True
-
- if AZURE_MLINDEX_NAME and AZURE_MLINDEX_VERSION and AZURE_ML_PROJECT_RESOURCE_ID:
- DATASOURCE_TYPE = "AzureMLIndex"
- logging.debug("Using Azure ML Index")
- return True
-
return False
@@ -384,9 +269,19 @@ def init_openai_client(use_data=SHOULD_USE_DATA):
azure_endpoint=endpoint,
)
+ track_event_if_configured("AzureOpenAIClientInitialized", {
+ "status": "success",
+ "endpoint": endpoint,
+ "use_api_key": bool(aoai_api_key),
+ })
+
return azure_openai_client
except Exception as e:
logging.exception("Exception in Azure OpenAI initialization", e)
+ span = trace.get_current_span()
+ if span is not None:
+ span.record_exception(e)
+ span.set_status(Status(StatusCode.ERROR, str(e)))
azure_openai_client = None
raise e
@@ -411,8 +306,20 @@ def init_cosmosdb_client():
container_name=AZURE_COSMOSDB_CONVERSATIONS_CONTAINER,
enable_message_feedback=AZURE_COSMOSDB_ENABLE_FEEDBACK,
)
+
+ track_event_if_configured("CosmosDBClientInitialized", {
+ "status": "success",
+ "endpoint": cosmos_endpoint,
+ "database": AZURE_COSMOSDB_DATABASE,
+ "container": AZURE_COSMOSDB_CONVERSATIONS_CONTAINER,
+ "feedback_enabled": AZURE_COSMOSDB_ENABLE_FEEDBACK,
+ })
except Exception as e:
logging.exception("Exception in CosmosDB initialization", e)
+ span = trace.get_current_span()
+ if span is not None:
+ span.record_exception(e)
+ span.set_status(Status(StatusCode.ERROR, str(e)))
cosmos_conversation_client = None
raise e
else:
@@ -425,6 +332,7 @@ def get_configured_data_source():
data_source = {}
query_type = "simple"
if DATASOURCE_TYPE == "AzureCognitiveSearch":
+ track_event_if_configured("datasource_selected", {"type": "AzureCognitiveSearch"})
# Set query type
if AZURE_SEARCH_QUERY_TYPE:
query_type = AZURE_SEARCH_QUERY_TYPE
@@ -433,6 +341,7 @@ def get_configured_data_source():
and AZURE_SEARCH_SEMANTIC_SEARCH_CONFIG
):
query_type = "semantic"
+ track_event_if_configured("query_type_determined", {"query_type": query_type})
# Set filter
filter = None
@@ -441,11 +350,13 @@ def get_configured_data_source():
userToken = request.headers.get("X-MS-TOKEN-AAD-ACCESS-TOKEN", "")
logging.debug(f"USER TOKEN is {'present' if userToken else 'not present'}")
if not userToken:
+ track_event_if_configured("user_token_missing", {})
raise Exception(
"Document-level access control is enabled, but user access token could not be fetched."
)
filter = generateFilterString(userToken)
+ track_event_if_configured("filter_generated", {"filter": filter})
logging.debug(f"FILTER: {filter}")
# Set authentication
@@ -455,6 +366,7 @@ def get_configured_data_source():
else:
# If key is not provided, assume AOAI resource identity has been granted access to the search service
authentication = {"type": "system_assigned_managed_identity"}
+ track_event_if_configured("authentication_set", {"auth_type": authentication["type"]})
data_source = {
"type": "azure_search",
@@ -489,7 +401,7 @@ def get_configured_data_source():
True if AZURE_SEARCH_ENABLE_IN_DOMAIN.lower() == "true" else False
),
"top_n_documents": (
- int(AZURE_SEARCH_TOP_K) if AZURE_SEARCH_TOP_K else int(SEARCH_TOP_K)
+ int(AZURE_SEARCH_TOP_K)
),
"query_type": query_type,
"semantic_configuration": (
@@ -501,221 +413,11 @@ def get_configured_data_source():
"filter": filter,
"strictness": (
int(AZURE_SEARCH_STRICTNESS)
- if AZURE_SEARCH_STRICTNESS
- else int(SEARCH_STRICTNESS)
- ),
- },
- }
- elif DATASOURCE_TYPE == "AzureCosmosDB":
- query_type = "vector"
-
- data_source = {
- "type": "azure_cosmos_db",
- "parameters": {
- "authentication": {
- "type": "connection_string",
- "connection_string": AZURE_COSMOSDB_MONGO_VCORE_CONNECTION_STRING,
- },
- "index_name": AZURE_COSMOSDB_MONGO_VCORE_INDEX,
- "database_name": AZURE_COSMOSDB_MONGO_VCORE_DATABASE,
- "container_name": AZURE_COSMOSDB_MONGO_VCORE_CONTAINER,
- "fields_mapping": {
- "content_fields": (
- parse_multi_columns(AZURE_COSMOSDB_MONGO_VCORE_CONTENT_COLUMNS)
- if AZURE_COSMOSDB_MONGO_VCORE_CONTENT_COLUMNS
- else []
- ),
- "title_field": (
- AZURE_COSMOSDB_MONGO_VCORE_TITLE_COLUMN
- if AZURE_COSMOSDB_MONGO_VCORE_TITLE_COLUMN
- else None
- ),
- "url_field": (
- AZURE_COSMOSDB_MONGO_VCORE_URL_COLUMN
- if AZURE_COSMOSDB_MONGO_VCORE_URL_COLUMN
- else None
- ),
- "filepath_field": (
- AZURE_COSMOSDB_MONGO_VCORE_FILENAME_COLUMN
- if AZURE_COSMOSDB_MONGO_VCORE_FILENAME_COLUMN
- else None
- ),
- "vector_fields": (
- parse_multi_columns(AZURE_COSMOSDB_MONGO_VCORE_VECTOR_COLUMNS)
- if AZURE_COSMOSDB_MONGO_VCORE_VECTOR_COLUMNS
- else []
- ),
- },
- "in_scope": (
- True
- if AZURE_COSMOSDB_MONGO_VCORE_ENABLE_IN_DOMAIN.lower() == "true"
- else False
- ),
- "top_n_documents": (
- int(AZURE_COSMOSDB_MONGO_VCORE_TOP_K)
- if AZURE_COSMOSDB_MONGO_VCORE_TOP_K
- else int(SEARCH_TOP_K)
- ),
- "strictness": (
- int(AZURE_COSMOSDB_MONGO_VCORE_STRICTNESS)
- if AZURE_COSMOSDB_MONGO_VCORE_STRICTNESS
- else int(SEARCH_STRICTNESS)
- ),
- "query_type": query_type,
- "role_information": AZURE_OPENAI_SYSTEM_MESSAGE,
- },
- }
- elif DATASOURCE_TYPE == "Elasticsearch":
- if ELASTICSEARCH_QUERY_TYPE:
- query_type = ELASTICSEARCH_QUERY_TYPE
-
- data_source = {
- "type": "elasticsearch",
- "parameters": {
- "endpoint": ELASTICSEARCH_ENDPOINT,
- "authentication": {
- "type": "encoded_api_key",
- "encoded_api_key": ELASTICSEARCH_ENCODED_API_KEY,
- },
- "index_name": ELASTICSEARCH_INDEX,
- "fields_mapping": {
- "content_fields": (
- parse_multi_columns(ELASTICSEARCH_CONTENT_COLUMNS)
- if ELASTICSEARCH_CONTENT_COLUMNS
- else []
- ),
- "title_field": (
- ELASTICSEARCH_TITLE_COLUMN
- if ELASTICSEARCH_TITLE_COLUMN
- else None
- ),
- "url_field": (
- ELASTICSEARCH_URL_COLUMN if ELASTICSEARCH_URL_COLUMN else None
- ),
- "filepath_field": (
- ELASTICSEARCH_FILENAME_COLUMN
- if ELASTICSEARCH_FILENAME_COLUMN
- else None
- ),
- "vector_fields": (
- parse_multi_columns(ELASTICSEARCH_VECTOR_COLUMNS)
- if ELASTICSEARCH_VECTOR_COLUMNS
- else []
- ),
- },
- "in_scope": (
- True if ELASTICSEARCH_ENABLE_IN_DOMAIN.lower() == "true" else False
- ),
- "top_n_documents": (
- int(ELASTICSEARCH_TOP_K)
- if ELASTICSEARCH_TOP_K
- else int(SEARCH_TOP_K)
- ),
- "query_type": query_type,
- "role_information": AZURE_OPENAI_SYSTEM_MESSAGE,
- "strictness": (
- int(ELASTICSEARCH_STRICTNESS)
- if ELASTICSEARCH_STRICTNESS
- else int(SEARCH_STRICTNESS)
),
},
}
- elif DATASOURCE_TYPE == "AzureMLIndex":
- if AZURE_MLINDEX_QUERY_TYPE:
- query_type = AZURE_MLINDEX_QUERY_TYPE
-
- data_source = {
- "type": "azure_ml_index",
- "parameters": {
- "name": AZURE_MLINDEX_NAME,
- "version": AZURE_MLINDEX_VERSION,
- "project_resource_id": AZURE_ML_PROJECT_RESOURCE_ID,
- "fieldsMapping": {
- "content_fields": (
- parse_multi_columns(AZURE_MLINDEX_CONTENT_COLUMNS)
- if AZURE_MLINDEX_CONTENT_COLUMNS
- else []
- ),
- "title_field": (
- AZURE_MLINDEX_TITLE_COLUMN
- if AZURE_MLINDEX_TITLE_COLUMN
- else None
- ),
- "url_field": (
- AZURE_MLINDEX_URL_COLUMN if AZURE_MLINDEX_URL_COLUMN else None
- ),
- "filepath_field": (
- AZURE_MLINDEX_FILENAME_COLUMN
- if AZURE_MLINDEX_FILENAME_COLUMN
- else None
- ),
- "vector_fields": (
- parse_multi_columns(AZURE_MLINDEX_VECTOR_COLUMNS)
- if AZURE_MLINDEX_VECTOR_COLUMNS
- else []
- ),
- },
- "in_scope": (
- True if AZURE_MLINDEX_ENABLE_IN_DOMAIN.lower() == "true" else False
- ),
- "top_n_documents": (
- int(AZURE_MLINDEX_TOP_K)
- if AZURE_MLINDEX_TOP_K
- else int(SEARCH_TOP_K)
- ),
- "query_type": query_type,
- "role_information": AZURE_OPENAI_SYSTEM_MESSAGE,
- "strictness": (
- int(AZURE_MLINDEX_STRICTNESS)
- if AZURE_MLINDEX_STRICTNESS
- else int(SEARCH_STRICTNESS)
- ),
- },
- }
- elif DATASOURCE_TYPE == "Pinecone":
- query_type = "vector"
-
- data_source = {
- "type": "pinecone",
- "parameters": {
- "environment": PINECONE_ENVIRONMENT,
- "authentication": {"type": "api_key", "key": PINECONE_API_KEY},
- "index_name": PINECONE_INDEX_NAME,
- "fields_mapping": {
- "content_fields": (
- parse_multi_columns(PINECONE_CONTENT_COLUMNS)
- if PINECONE_CONTENT_COLUMNS
- else []
- ),
- "title_field": (
- PINECONE_TITLE_COLUMN if PINECONE_TITLE_COLUMN else None
- ),
- "url_field": PINECONE_URL_COLUMN if PINECONE_URL_COLUMN else None,
- "filepath_field": (
- PINECONE_FILENAME_COLUMN if PINECONE_FILENAME_COLUMN else None
- ),
- "vector_fields": (
- parse_multi_columns(PINECONE_VECTOR_COLUMNS)
- if PINECONE_VECTOR_COLUMNS
- else []
- ),
- },
- "in_scope": (
- True if PINECONE_ENABLE_IN_DOMAIN.lower() == "true" else False
- ),
- "top_n_documents": (
- int(PINECONE_TOP_K) if PINECONE_TOP_K else int(SEARCH_TOP_K)
- ),
- "strictness": (
- int(PINECONE_STRICTNESS)
- if PINECONE_STRICTNESS
- else int(SEARCH_STRICTNESS)
- ),
- "query_type": query_type,
- "role_information": AZURE_OPENAI_SYSTEM_MESSAGE,
- },
- }
else:
+ track_event_if_configured("unknown_datasource_type", {"type": DATASOURCE_TYPE})
raise Exception(
f"DATASOURCE_TYPE is not configured or unknown: {DATASOURCE_TYPE}"
)
@@ -736,21 +438,27 @@ def get_configured_data_source():
"key": AZURE_OPENAI_EMBEDDING_KEY,
},
}
- elif DATASOURCE_TYPE == "Elasticsearch" and ELASTICSEARCH_EMBEDDING_MODEL_ID:
- embeddingDependency = {
- "type": "model_id",
- "model_id": ELASTICSEARCH_EMBEDDING_MODEL_ID,
- }
else:
+ track_event_if_configured("embedding_dependency_missing", {
+ "datasource_type": DATASOURCE_TYPE,
+ "query_type": query_type
+ })
raise Exception(
f"Vector query type ({query_type}) is selected for data source type {DATASOURCE_TYPE} but no embedding dependency is configured"
)
+ track_event_if_configured("embedding_dependency_set", {
+ "embedding_type": embeddingDependency.get("type")
+ })
data_source["parameters"]["embedding_dependency"] = embeddingDependency
-
+ track_event_if_configured("get_configured_data_source_complete", {
+ "datasource_type": DATASOURCE_TYPE,
+ "query_type": query_type
+ })
return data_source
def prepare_model_args(request_body, request_headers):
+ track_event_if_configured("prepare_model_args_start", {})
request_messages = request_body.get("messages", [])
messages = []
if not SHOULD_USE_DATA:
@@ -775,6 +483,7 @@ def prepare_model_args(request_body, request_headers):
),
}
user_json = json.dumps(user_args)
+ track_event_if_configured("ms_defender_user_info_added", {"user_id": user_args["EndUserId"]})
model_args = {
"messages": messages,
@@ -792,6 +501,7 @@ def prepare_model_args(request_body, request_headers):
}
if SHOULD_USE_DATA:
+ track_event_if_configured("ms_defender_user_info_added", {"user_id": user_args["EndUserId"]})
model_args["extra_body"] = {"data_sources": [get_configured_data_source()]}
model_args_clean = copy.deepcopy(model_args)
@@ -829,44 +539,13 @@ def prepare_model_args(request_body, request_headers):
]["authentication"][field] = "*****"
logging.debug(f"REQUEST BODY: {json.dumps(model_args_clean, indent=4)}")
+ track_event_if_configured("prepare_model_args_complete", {"model": AZURE_OPENAI_MODEL})
return model_args
-async def promptflow_request(request):
- try:
- headers = {
- "Content-Type": "application/json",
- "Authorization": f"Bearer {PROMPTFLOW_API_KEY}",
- }
- # Adding timeout for scenarios where response takes longer to come back
- logging.debug(f"Setting timeout to {PROMPTFLOW_RESPONSE_TIMEOUT}")
- async with httpx.AsyncClient(
- timeout=float(PROMPTFLOW_RESPONSE_TIMEOUT)
- ) as client:
- pf_formatted_obj = convert_to_pf_format(
- request, PROMPTFLOW_REQUEST_FIELD_NAME, PROMPTFLOW_RESPONSE_FIELD_NAME
- )
- # NOTE: This only support question and chat_history parameters
- # If you need to add more parameters, you need to modify the request body
- response = await client.post(
- PROMPTFLOW_ENDPOINT,
- json={
- f"{PROMPTFLOW_REQUEST_FIELD_NAME}": pf_formatted_obj[-1]["inputs"][
- PROMPTFLOW_REQUEST_FIELD_NAME
- ],
- "chat_history": pf_formatted_obj[:-1],
- },
- headers=headers,
- )
- resp = response.json()
- resp["id"] = request["messages"][-1]["id"]
- return resp
- except Exception as e:
- logging.error(f"An error occurred while making promptflow_request: {e}")
-
-
async def send_chat_request(request_body, request_headers):
+ track_event_if_configured("send_chat_request_start", {})
filtered_messages = []
messages = request_body.get("messages", [])
for message in messages:
@@ -885,97 +564,30 @@ async def send_chat_request(request_body, request_headers):
)
response = raw_response.parse()
apim_request_id = raw_response.headers.get("apim-request-id")
+
+ track_event_if_configured("send_chat_request_success", {"model": model_args.get("model")})
except Exception as e:
+ span = trace.get_current_span()
+ if span is not None:
+ span.record_exception(e)
+ span.set_status(Status(StatusCode.ERROR, str(e)))
logging.exception("Exception in send_chat_request")
raise e
return response, apim_request_id
-async def complete_chat_request(request_body, request_headers):
- if USE_PROMPTFLOW and PROMPTFLOW_ENDPOINT and PROMPTFLOW_API_KEY:
- response = await promptflow_request(request_body)
- history_metadata = request_body.get("history_metadata", {})
- return format_pf_non_streaming_response(
- response,
- history_metadata,
- PROMPTFLOW_RESPONSE_FIELD_NAME,
- PROMPTFLOW_CITATIONS_FIELD_NAME,
- )
- elif USE_INTERNAL_STREAM:
- request_body = await request.get_json()
- client_id = request_body.get("client_id")
- print(request_body)
-
- if client_id is None:
- return jsonify({"error": "No client ID provided"}), 400
- # client_id = '10005'
- print("Client ID in complete_chat_request: ", client_id)
- # answer = "Sample response from Azure Function"
- # Construct the URL of your Azure Function endpoint
- # function_url = STREAMING_AZUREFUNCTION_ENDPOINT
- # request_headers = {
- # "Content-Type": "application/json",
- # # 'Authorization': 'Bearer YOUR_TOKEN_HERE' # if applicable
- # }
- # print(request_body.get("messages")[-1].get("content"))
- # print(request_body)
-
- query = request_body.get("messages")[-1].get("content")
-
- print("Selected ClientId:", client_id)
- # print("Selected ClientName:", selected_client_name)
-
- # endpoint = STREAMING_AZUREFUNCTION_ENDPOINT + '?query=' + query + ' - for Client ' + selected_client_name + ':::' + selected_client_id
- endpoint = (
- STREAMING_AZUREFUNCTION_ENDPOINT + "?query=" + query + ":::" + client_id
- )
-
- print("Endpoint: ", endpoint)
- query_response = ""
- try:
- with requests.get(endpoint, stream=True) as r:
- for line in r.iter_lines(chunk_size=10):
- # query_response += line.decode('utf-8')
- query_response = query_response + "\n" + line.decode("utf-8")
- # print(line.decode('utf-8'))
- except Exception as e:
- print(format_as_ndjson({"error" + str(e)}))
-
- # print("query_response: " + query_response)
-
- history_metadata = request_body.get("history_metadata", {})
- response = {
- "id": "",
- "model": "",
- "created": 0,
- "object": "",
- "choices": [{"messages": []}],
- "apim-request-id": "",
- "history_metadata": history_metadata,
- }
-
- response["id"] = str(uuid.uuid4())
- response["model"] = AZURE_OPENAI_MODEL_NAME
- response["created"] = int(time.time())
- response["object"] = "extensions.chat.completion.chunk"
- # response["apim-request-id"] = headers.get("apim-request-id")
- response["choices"][0]["messages"].append(
- {"role": "assistant", "content": query_response}
- )
-
- return response
-
-
async def stream_chat_request(request_body, request_headers):
+ track_event_if_configured("stream_chat_request_start", {})
if USE_INTERNAL_STREAM:
history_metadata = request_body.get("history_metadata", {})
- # function_url = STREAMING_AZUREFUNCTION_ENDPOINT
apim_request_id = ""
client_id = request_body.get("client_id")
if client_id is None:
+ track_event_if_configured("client_id_missing", {})
return jsonify({"error": "No client ID provided"}), 400
query = request_body.get("messages")[-1].get("content")
+ track_event_if_configured("stream_internal_selected", {"client_id": client_id})
sk_response = await stream_response_from_wealth_assistant(query, client_id)
@@ -989,7 +601,7 @@ async def generate():
completionChunk = {
"id": chunk_id,
- "model": AZURE_OPENAI_MODEL_NAME,
+ "model": AZURE_OPENAI_MODEL,
"created": created_time,
"object": "extensions.chat.completion.chunk",
"choices": [
@@ -1028,11 +640,15 @@ async def generate():
yield format_stream_response(
completionChunk, history_metadata, apim_request_id
)
-
+ track_event_if_configured("stream_openai_selected", {})
return generate()
async def conversation_internal(request_body, request_headers):
+ track_event_if_configured("conversation_internal_start", {
+ "streaming": SHOULD_STREAM,
+ "internal_stream": USE_INTERNAL_STREAM
+ })
try:
if SHOULD_STREAM:
return await stream_chat_request(request_body, request_headers)
@@ -1040,11 +656,12 @@ async def conversation_internal(request_body, request_headers):
# response.timeout = None
# response.mimetype = "application/json-lines"
# return response
- else:
- result = await complete_chat_request(request_body, request_headers)
- return jsonify(result)
except Exception as ex:
+ span = trace.get_current_span()
+ if span is not None:
+ span.record_exception(ex)
+ span.set_status(Status(StatusCode.ERROR, str(ex)))
logging.exception(ex)
if hasattr(ex, "status_code"):
return jsonify({"error": str(ex)}), ex.status_code
@@ -1055,9 +672,10 @@ async def conversation_internal(request_body, request_headers):
@bp.route("/conversation", methods=["POST"])
async def conversation():
if not request.is_json:
+ track_event_if_configured("invalid_request_format", {})
return jsonify({"error": "request must be json"}), 415
request_json = await request.get_json()
-
+ track_event_if_configured("conversation_api_invoked", {})
return await conversation_internal(request_json, request.headers)
@@ -1067,6 +685,10 @@ def get_frontend_settings():
return jsonify(frontend_settings), 200
except Exception as e:
logging.exception("Exception in /frontend_settings")
+ span = trace.get_current_span()
+ if span is not None:
+ span.record_exception(e)
+ span.set_status(Status(StatusCode.ERROR, str(e)))
return jsonify({"error": str(e)}), 500
@@ -1075,6 +697,10 @@ def get_frontend_settings():
async def add_conversation():
authenticated_user = get_authenticated_user_details(request_headers=request.headers)
user_id = authenticated_user["user_principal_id"]
+ track_event_if_configured(
+ "HistoryGenerate_Start",
+ {"user_id": user_id}
+ )
# check request for conversation_id
request_json = await request.get_json()
@@ -1097,6 +723,15 @@ async def add_conversation():
history_metadata["title"] = title
history_metadata["date"] = conversation_dict["createdAt"]
+ track_event_if_configured(
+ "ConversationCreated",
+ {
+ "user_id": user_id,
+ "conversation_id": conversation_id,
+ "title": title
+ }
+ )
+
# Format the incoming message object in the "chat/completions" messages format
# then write it to the conversation history in cosmos
messages = request_json["messages"]
@@ -1113,6 +748,14 @@ async def add_conversation():
+ conversation_id
+ "."
)
+ track_event_if_configured(
+ "UserMessageAdded",
+ {
+ "user_id": user_id,
+ "conversation_id": conversation_id,
+ "message": messages[-1],
+ }
+ )
else:
raise Exception("No user message found")
@@ -1122,9 +765,28 @@ async def add_conversation():
request_body = await request.get_json()
history_metadata["conversation_id"] = conversation_id
request_body["history_metadata"] = history_metadata
+ track_event_if_configured(
+ "SendingToChatCompletions",
+ {
+ "user_id": user_id,
+ "conversation_id": conversation_id
+ }
+ )
+
+ track_event_if_configured(
+ "HistoryGenerate_Completed",
+ {
+ "user_id": user_id,
+ "conversation_id": conversation_id
+ }
+ )
return await conversation_internal(request_body, request.headers)
except Exception as e:
+ span = trace.get_current_span()
+ if span is not None:
+ span.record_exception(e)
+ span.set_status(Status(StatusCode.ERROR, str(e)))
logging.exception("Exception in /history/generate")
return jsonify({"error": str(e)}), 500
@@ -1138,6 +800,11 @@ async def update_conversation():
request_json = await request.get_json()
conversation_id = request_json.get("conversation_id", None)
+ track_event_if_configured("UpdateConversation_Start", {
+ "user_id": user_id,
+ "conversation_id": conversation_id
+ })
+
try:
# make sure cosmos is configured
cosmos_conversation_client = init_cosmosdb_client()
@@ -1160,6 +827,10 @@ async def update_conversation():
user_id=user_id,
input_message=messages[-2],
)
+ track_event_if_configured("ToolMessageStored", {
+ "user_id": user_id,
+ "conversation_id": conversation_id
+ })
# write the assistant message
await cosmos_conversation_client.create_message(
uuid=messages[-1]["id"],
@@ -1167,16 +838,28 @@ async def update_conversation():
user_id=user_id,
input_message=messages[-1],
)
+ track_event_if_configured("AssistantMessageStored", {
+ "user_id": user_id,
+ "conversation_id": conversation_id,
+ "message": messages[-1]
+ })
else:
raise Exception("No bot messages found")
-
# Submit request to Chat Completions for response
await cosmos_conversation_client.cosmosdb_client.close()
+ track_event_if_configured("UpdateConversation_Success", {
+ "user_id": user_id,
+ "conversation_id": conversation_id
+ })
response = {"success": True}
return jsonify(response), 200
except Exception as e:
logging.exception("Exception in /history/update")
+ span = trace.get_current_span()
+ if span is not None:
+ span.record_exception(e)
+ span.set_status(Status(StatusCode.ERROR, str(e)))
return jsonify({"error": str(e)}), 500
@@ -1190,6 +873,11 @@ async def update_message():
request_json = await request.get_json()
message_id = request_json.get("message_id", None)
message_feedback = request_json.get("message_feedback", None)
+
+ track_event_if_configured("MessageFeedback_Start", {
+ "user_id": user_id,
+ "message_id": message_id
+ })
try:
if not message_id:
return jsonify({"error": "message_id is required"}), 400
@@ -1202,6 +890,11 @@ async def update_message():
user_id, message_id, message_feedback
)
if updated_message:
+ track_event_if_configured("MessageFeedback_Updated", {
+ "user_id": user_id,
+ "message_id": message_id,
+ "feedback": message_feedback
+ })
return (
jsonify(
{
@@ -1212,6 +905,10 @@ async def update_message():
200,
)
else:
+ track_event_if_configured("MessageFeedback_NotFound", {
+ "user_id": user_id,
+ "message_id": message_id
+ })
return (
jsonify(
{
@@ -1223,6 +920,10 @@ async def update_message():
except Exception as e:
logging.exception("Exception in /history/message_feedback")
+ span = trace.get_current_span()
+ if span is not None:
+ span.record_exception(e)
+ span.set_status(Status(StatusCode.ERROR, str(e)))
return jsonify({"error": str(e)}), 500
@@ -1236,6 +937,11 @@ async def delete_conversation():
request_json = await request.get_json()
conversation_id = request_json.get("conversation_id", None)
+ track_event_if_configured("DeleteConversation_Start", {
+ "user_id": user_id,
+ "conversation_id": conversation_id
+ })
+
try:
if not conversation_id:
return jsonify({"error": "conversation_id is required"}), 400
@@ -1253,6 +959,11 @@ async def delete_conversation():
await cosmos_conversation_client.cosmosdb_client.close()
+ track_event_if_configured("DeleteConversation_Success", {
+ "user_id": user_id,
+ "conversation_id": conversation_id
+ })
+
return (
jsonify(
{
@@ -1264,6 +975,10 @@ async def delete_conversation():
)
except Exception as e:
logging.exception("Exception in /history/delete")
+ span = trace.get_current_span()
+ if span is not None:
+ span.record_exception(e)
+ span.set_status(Status(StatusCode.ERROR, str(e)))
return jsonify({"error": str(e)}), 500
@@ -1273,6 +988,11 @@ async def list_conversations():
authenticated_user = get_authenticated_user_details(request_headers=request.headers)
user_id = authenticated_user["user_principal_id"]
+ track_event_if_configured("ListConversations_Start", {
+ "user_id": user_id,
+ "offset": offset
+ })
+
# make sure cosmos is configured
cosmos_conversation_client = init_cosmosdb_client()
if not cosmos_conversation_client:
@@ -1284,10 +1004,19 @@ async def list_conversations():
)
await cosmos_conversation_client.cosmosdb_client.close()
if not isinstance(conversations, list):
+ track_event_if_configured("ListConversations_Empty", {
+ "user_id": user_id,
+ "offset": offset
+ })
return jsonify({"error": f"No conversations for {user_id} were found"}), 404
# return the conversation ids
+ track_event_if_configured("ListConversations_Success", {
+ "user_id": user_id,
+ "conversation_count": len(conversations)
+ })
+
return jsonify(conversations), 200
@@ -1300,7 +1029,17 @@ async def get_conversation():
request_json = await request.get_json()
conversation_id = request_json.get("conversation_id", None)
+ track_event_if_configured("GetConversation_Start", {
+ "user_id": user_id,
+ "conversation_id": conversation_id,
+ })
+
if not conversation_id:
+ track_event_if_configured("GetConversation_Failed", {
+ "user_id": user_id,
+ "conversation_id": conversation_id,
+ "error": f"Conversation {conversation_id} not found",
+ })
return jsonify({"error": "conversation_id is required"}), 400
# make sure cosmos is configured
@@ -1341,6 +1080,11 @@ async def get_conversation():
]
await cosmos_conversation_client.cosmosdb_client.close()
+ track_event_if_configured("GetConversation_Success", {
+ "user_id": user_id,
+ "conversation_id": conversation_id,
+ "message_count": len(messages)
+ })
return jsonify({"conversation_id": conversation_id, "messages": messages}), 200
@@ -1353,7 +1097,17 @@ async def rename_conversation():
request_json = await request.get_json()
conversation_id = request_json.get("conversation_id", None)
+ track_event_if_configured("RenameConversation_Start", {
+ "user_id": user_id,
+ "conversation_id": conversation_id
+ })
+
if not conversation_id:
+ track_event_if_configured("RenameConversation_Failed", {
+ "user_id": user_id,
+ "conversation_id": conversation_id,
+ "error": f"Conversation {conversation_id} not found",
+ })
return jsonify({"error": "conversation_id is required"}), 400
# make sure cosmos is configured
@@ -1385,6 +1139,12 @@ async def rename_conversation():
)
await cosmos_conversation_client.cosmosdb_client.close()
+
+ track_event_if_configured("RenameConversation_Success", {
+ "user_id": user_id,
+ "conversation_id": conversation_id,
+ "new_title": title
+ })
return jsonify(updated_conversation), 200
@@ -1394,6 +1154,10 @@ async def delete_all_conversations():
authenticated_user = get_authenticated_user_details(request_headers=request.headers)
user_id = authenticated_user["user_principal_id"]
+ track_event_if_configured("DeleteAllConversations_Start", {
+ "user_id": user_id
+ })
+
# get conversations for user
try:
# make sure cosmos is configured
@@ -1405,6 +1169,9 @@ async def delete_all_conversations():
user_id, offset=0, limit=None
)
if not conversations:
+ track_event_if_configured("DeleteAllConversations_Empty", {
+ "user_id": user_id,
+ })
return jsonify({"error": f"No conversations for {user_id} were found"}), 404
# delete each conversation
@@ -1419,6 +1186,12 @@ async def delete_all_conversations():
user_id, conversation["id"]
)
await cosmos_conversation_client.cosmosdb_client.close()
+
+ track_event_if_configured("DeleteAllConversations_Success", {
+ "user_id": user_id,
+ "conversation_count": len(conversations)
+ })
+
return (
jsonify(
{
@@ -1430,6 +1203,10 @@ async def delete_all_conversations():
except Exception as e:
logging.exception("Exception in /history/delete_all")
+ span = trace.get_current_span()
+ if span is not None:
+ span.record_exception(e)
+ span.set_status(Status(StatusCode.ERROR, str(e)))
return jsonify({"error": str(e)}), 500
@@ -1443,8 +1220,18 @@ async def clear_messages():
request_json = await request.get_json()
conversation_id = request_json.get("conversation_id", None)
+ track_event_if_configured("ClearConversationMessages_Start", {
+ "user_id": user_id,
+ "conversation_id": conversation_id,
+ })
+
try:
if not conversation_id:
+ track_event_if_configured("ClearConversationMessages_Failed", {
+ "user_id": user_id,
+ "conversation_id": conversation_id,
+ "error": "conversation_id is required"
+ })
return jsonify({"error": "conversation_id is required"}), 400
# make sure cosmos is configured
@@ -1455,6 +1242,11 @@ async def clear_messages():
# delete the conversation messages from cosmos
await cosmos_conversation_client.delete_messages(conversation_id, user_id)
+ track_event_if_configured("ClearConversationMessages_Success", {
+ "user_id": user_id,
+ "conversation_id": conversation_id
+ })
+
return (
jsonify(
{
@@ -1466,12 +1258,19 @@ async def clear_messages():
)
except Exception as e:
logging.exception("Exception in /history/clear_messages")
+ span = trace.get_current_span()
+ if span is not None:
+ span.record_exception(e)
+ span.set_status(Status(StatusCode.ERROR, str(e)))
return jsonify({"error": str(e)}), 500
@bp.route("/history/ensure", methods=["GET"])
async def ensure_cosmos():
if not AZURE_COSMOSDB_ACCOUNT:
+ track_event_if_configured("EnsureCosmosDB_Failed", {
+ "error": "CosmosDB is not configured",
+ })
return jsonify({"error": "CosmosDB is not configured"}), 404
try:
@@ -1479,13 +1278,23 @@ async def ensure_cosmos():
success, err = await cosmos_conversation_client.ensure()
if not cosmos_conversation_client or not success:
if err:
+ track_event_if_configured("EnsureCosmosDB_Failed", {
+ "error": err,
+ })
return jsonify({"error": err}), 422
return jsonify({"error": "CosmosDB is not configured or not working"}), 500
await cosmos_conversation_client.cosmosdb_client.close()
+ track_event_if_configured("EnsureCosmosDB_Failed", {
+ "error": "CosmosDB is not configured or not working",
+ })
return jsonify({"message": "CosmosDB is configured and working"}), 200
except Exception as e:
logging.exception("Exception in /history/ensure")
+ span = trace.get_current_span()
+ if span is not None:
+ span.record_exception(e)
+ span.set_status(Status(StatusCode.ERROR, str(e)))
cosmos_exception = str(e)
if "Invalid credentials" in cosmos_exception:
return jsonify({"error": cosmos_exception}), 401
@@ -1512,6 +1321,7 @@ async def ensure_cosmos():
async def generate_title(conversation_messages):
+
# make sure the messages are sorted by _ts descending
title_prompt = 'Summarize the conversation so far into a 4-word or less title. Do not use any quotation marks or punctuation. Respond with a json object in the format {{"title": string}}. Do not include any other commentary or description.'
@@ -1540,6 +1350,8 @@ async def generate_title(conversation_messages):
@bp.route("/api/users", methods=["GET"])
def get_users():
+
+ track_event_if_configured("UserFetch_Start", {})
conn = None
try:
conn = get_connection()
@@ -1594,6 +1406,9 @@ def get_users():
rows = dict_cursor(cursor)
if len(rows) <= 6:
+ track_event_if_configured("UserFetch_SampleUpdate", {
+ "rows_count": len(rows),
+ })
# update ClientMeetings,Assets,Retirement tables sample data to current date
cursor = conn.cursor()
combined_stmt = """
@@ -1678,9 +1493,17 @@ def get_users():
}
users.append(user)
+ track_event_if_configured("UserFetch_Success", {
+ "user_count": len(users),
+ })
+
return jsonify(users)
except Exception as e:
+ span = trace.get_current_span()
+ if span is not None:
+ span.record_exception(e)
+ span.set_status(Status(StatusCode.ERROR, str(e)))
print("Exception occurred:", e)
return str(e), 500
finally:
diff --git a/src/App/backend/chat_logic_handler.py b/src/App/backend/chat_logic_handler.py
index f848a011f..8d04a2384 100644
--- a/src/App/backend/chat_logic_handler.py
+++ b/src/App/backend/chat_logic_handler.py
@@ -17,10 +17,10 @@
# --------------------------
endpoint = os.environ.get("AZURE_OPENAI_ENDPOINT")
api_key = os.environ.get("AZURE_OPENAI_KEY")
-api_version = os.environ.get("OPENAI_API_VERSION")
+api_version = os.environ.get("AZURE_OPENAI_PREVIEW_API_VERSION")
deployment = os.environ.get("AZURE_OPENAI_MODEL")
search_endpoint = os.environ.get("AZURE_AI_SEARCH_ENDPOINT")
-search_key = os.environ.get("AZURE_AI_SEARCH_API_KEY")
+search_key = os.environ.get("AZURE_SEARCH_KEY")
project_connection_string = os.environ.get("AZURE_AI_PROJECT_CONN_STRING")
use_ai_project_client = os.environ.get("USE_AI_PROJECT_CLIENT", "false").lower() == "true"
diff --git a/src/App/backend/event_utils.py b/src/App/backend/event_utils.py
new file mode 100644
index 000000000..c04214b64
--- /dev/null
+++ b/src/App/backend/event_utils.py
@@ -0,0 +1,29 @@
+import logging
+import os
+from azure.monitor.events.extension import track_event
+
+
+def track_event_if_configured(event_name: str, event_data: dict):
+ """Track an event if Application Insights is configured.
+
+ This function safely wraps the Azure Monitor track_event function
+ to handle potential errors with the ProxyLogger.
+
+ Args:
+ event_name: The name of the event to track
+ event_data: Dictionary of event data/dimensions
+ """
+ try:
+ instrumentation_key = os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING")
+ if instrumentation_key:
+ track_event(event_name, event_data)
+ else:
+ logging.warning(
+ f"Skipping track_event for {event_name} as Application Insights is not configured"
+ )
+ except AttributeError as e:
+ # Handle the 'ProxyLogger' object has no attribute 'resource' error
+ logging.warning(f"ProxyLogger error in track_event: {e}")
+ except Exception as e:
+ # Catch any other exceptions to prevent them from bubbling up
+ logging.warning(f"Error in track_event: {e}")
diff --git a/src/App/gunicorn.conf.py b/src/App/gunicorn.conf.py
deleted file mode 100644
index b1aded069..000000000
--- a/src/App/gunicorn.conf.py
+++ /dev/null
@@ -1,13 +0,0 @@
-import multiprocessing
-
-max_requests = 1000
-max_requests_jitter = 50
-log_file = "-"
-bind = "0.0.0.0"
-
-timeout = 230
-# https://learn.microsoft.com/en-us/troubleshoot/azure/app-service/web-apps-performance-faqs#why-does-my-request-time-out-after-230-seconds
-
-num_cpus = multiprocessing.cpu_count()
-workers = (num_cpus * 2) + 1
-worker_class = "uvicorn.workers.UvicornWorker"
diff --git a/src/App/requirements-dev.txt b/src/App/requirements-dev.txt
index aacba54f0..302b39b8b 100644
--- a/src/App/requirements-dev.txt
+++ b/src/App/requirements-dev.txt
@@ -7,7 +7,6 @@ python-dotenv==1.0.1
azure-cosmos==4.9.0
quart==0.20.0
uvicorn==0.34.0
-gunicorn==23.0.0
aiohttp==3.11.12
quart-session==3.0.0
pymssql==2.3.2
diff --git a/src/App/requirements.txt b/src/App/requirements.txt
index 1a87b8001..a02606dfd 100644
--- a/src/App/requirements.txt
+++ b/src/App/requirements.txt
@@ -8,7 +8,6 @@ python-dotenv==1.0.1
azure-cosmos==4.9.0
quart==0.20.0
uvicorn==0.34.0
-gunicorn==23.0.0
aiohttp==3.11.12
quart-session==3.0.0
pymssql==2.3.2
@@ -31,4 +30,14 @@ pyodbc==5.2.0
semantic_kernel==1.21.3
azure-search-documents==11.6.0b9
azure-ai-projects==1.0.0b9
-azure-ai-inference==1.0.0b9
\ No newline at end of file
+azure-ai-inference==1.0.0b9
+
+opentelemetry-exporter-otlp-proto-grpc
+opentelemetry-exporter-otlp-proto-http
+opentelemetry-exporter-otlp-proto-grpc
+azure-monitor-events-extension
+opentelemetry-sdk==1.31.1
+opentelemetry-api==1.31.1
+opentelemetry-semantic-conventions==0.52b1
+opentelemetry-instrumentation==0.52b1
+azure-monitor-opentelemetry==1.6.8
\ No newline at end of file
diff --git a/src/App/tests/backend/test_utils.py b/src/App/tests/backend/test_utils.py
index 1585cd7fb..cf6c293e3 100644
--- a/src/App/tests/backend/test_utils.py
+++ b/src/App/tests/backend/test_utils.py
@@ -37,7 +37,7 @@ def test_parse_multi_columns(input_str, expected):
assert parse_multi_columns(input_str) == expected
-@patch("app.requests.get")
+@patch("backend.utils.requests.get")
def test_fetch_user_groups(mock_get):
mock_response = MagicMock()
mock_response.status_code = 200
diff --git a/src/App/tests/test_app.py b/src/App/tests/test_app.py
index bf82ccf3a..ff0ef42c2 100644
--- a/src/App/tests/test_app.py
+++ b/src/App/tests/test_app.py
@@ -1218,15 +1218,12 @@ async def test_conversation_route(client):
with patch("app.stream_chat_request", new_callable=AsyncMock) as mock_stream:
mock_stream.return_value = ["chunk1", "chunk2"]
- with patch(
- "app.complete_chat_request", new_callable=AsyncMock
- ) as mock_complete:
- mock_complete.return_value = {"response": "test response"}
- response = await client.post(
- "/conversation", json=request_body, headers=request_headers
- )
- assert response.status_code == 200
+ response = await client.post(
+ "/conversation", json=request_body, headers=request_headers
+ )
+
+ assert response.status_code == 200
@pytest.mark.asyncio
diff --git a/tests/e2e-test/.gitignore b/tests/e2e-test/.gitignore
new file mode 100644
index 000000000..de16f2df0
--- /dev/null
+++ b/tests/e2e-test/.gitignore
@@ -0,0 +1,166 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+.pdm.toml
+.pdm-python
+.pdm-build/
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+microsoft/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+.idea/
+archive/
+report/
+screenshots/
diff --git a/tests/e2e-test/README.md b/tests/e2e-test/README.md
new file mode 100644
index 000000000..453eb273a
--- /dev/null
+++ b/tests/e2e-test/README.md
@@ -0,0 +1,41 @@
+# Automation Proof Of Concept for BYOc Client Advisor Accelerator
+
+
+
+Write end-to-end tests for your web apps with [Playwright](https://github.com/microsoft/playwright-python) and [pytest](https://docs.pytest.org/en/stable/).
+
+- Support for **all modern browsers** including Chromium, WebKit and Firefox.
+- Support for **headless and headed** execution.
+- **Built-in fixtures** that provide browser primitives to test functions.
+
+Pre-Requisites:
+- Install Visual Studio Code: Download and install Visual Studio Code (VSCode).
+- Install Node.js: Download and install Node.js.
+
+Create and Activate Python Virtual Environment
+- From your directory, open a command prompt and run: "python -m venv microsoft"
+This will create a virtual environment directory named microsoft inside your current directory.
+- To activate the virtual environment, copy the path to "microsoft\Scripts\activate.bat" and run it from the command prompt.
+
+
+Installing Playwright Pytest from Virtual Environment
+- To install libraries run "pip install -r requirements.txt"
+- Install the required browsers "playwright install"
+
+Run test cases
+- To run test cases from your 'tests' folder: "pytest --headed --html=report/report.html"
+
+Steps need to be followed to enable Access Token and Client Credentials
+- Go to App Service from the resource group and select the Access Tokens check box in 'Manage->Authentication' tab
+
+- Go to Manage->Certificates & secrets tab to generate Client Secret value
+
+- Go to Overview tab to get the client id and tenant id.
+
+Create .env file in project root level with web app url and client credentials
+- Create a .env file at the project root and add your user_name, pass_word, client_id, client_secret,
+  tenant_id and url for the resource group. Please refer to the 'sample_dotenv_file.txt' file.
+
+## Documentation
+
+See [playwright.dev](https://playwright.dev/python/docs/test-runners) for examples and more detailed information.
\ No newline at end of file
diff --git a/tests/e2e-test/base/__init__.py b/tests/e2e-test/base/__init__.py
new file mode 100644
index 000000000..cf50d1ccd
--- /dev/null
+++ b/tests/e2e-test/base/__init__.py
@@ -0,0 +1 @@
+from . import base
\ No newline at end of file
diff --git a/tests/e2e-test/base/base.py b/tests/e2e-test/base/base.py
new file mode 100644
index 000000000..e36c7534e
--- /dev/null
+++ b/tests/e2e-test/base/base.py
@@ -0,0 +1,140 @@
+from config.constants import *
+import requests
+import json
+from dotenv import load_dotenv
+import os
+import re
+from datetime import datetime
+import uuid
+
+
+class BasePage:
+ def __init__(self, page):
+ self.page = page
+
+ def scroll_into_view(self,locator,text):
+ elements = locator.all()
+ for element in elements:
+ client_e = element.text_content()
+ if client_e == text:
+ element.scroll_into_view_if_needed()
+ break
+
+ def select_an_element(self,locator,text):
+ elements = locator.all()
+ for element in elements:
+ clientele = element.text_content()
+ if clientele == text:
+ element.click()
+ break
+
+ def is_visible(self,locator):
+ locator.is_visible()
+
+ def validate_response_status(self):
+ load_dotenv()
+ # client_id = os.getenv('client_id')
+ # client_secret = os.getenv('client_secret')
+ # tenant_id = os.getenv('tenant_id')
+ # token_url = f"https://login.microsoft.com/{tenant_id}/oauth2/v2.0/token"
+ # The URL of the API endpoint you want to access
+ url = f"{URL}/history/update"
+
+ # Generate unique IDs for the messages
+ user_message_id = str(uuid.uuid4())
+ assistant_message_id = str(uuid.uuid4())
+ conversation_id = str(uuid.uuid4())
+
+ headers = {
+ "Content-Type": "application/json",
+ "Accept": "*/*"
+ }
+ payload = {
+ "conversation_id": conversation_id,
+ "messages": [
+ {
+ "id": user_message_id,
+ "role": "user",
+ "content":""
+ },
+ {
+ "id": assistant_message_id,
+ "role": "assistant",
+ "content":""
+ }
+ ]
+ }
+ # Make the POST request
+ response = self.page.request.post(url, headers=headers,data=json.dumps(payload))
+ # Check the response status code
+ assert response.status == 200, "response code is "+str(response.status)+" "+str(response.json())
+
+ # data = {
+ # 'grant_type': 'client_credentials',
+ # 'client_id': client_id,
+ # 'client_secret': client_secret,
+ # 'scope': f'api://{client_id}/.default'
+ # }
+ # response = requests.post(token_url, data=data)
+ # if response.status_code == 200:
+ # token_info = response.json()
+ # access_token = token_info['access_token']
+ # # Set the headers, including the access token
+ # headers = {
+ # "Content-Type": "application/json",
+ # "Authorization": f"Bearer {access_token}",
+ # "Accept": "*/*"
+ # }
+ # payload = {
+ # "conversation_id": conversation_id,
+ # "messages": [
+ # {
+ # "id": user_message_id,
+ # "role": "user",
+ # "content":""
+ # },
+ # {
+ # "id": assistant_message_id,
+ # "role": "assistant",
+ # "content":""
+ # }
+ # ]
+ # }
+ # # Make the POST request
+ # response = self.page.request.post(url, headers=headers,data=json.dumps(payload))
+ # # Check the response status code
+ # assert response.status == 200, "response code is "+str(response.status)+" "+str(response.json())
+ # else:
+ # assert response.status_code == 200,"Failed to get token "+response.text
+
+ def compare_raw_date_time(self,response_text,sidepanel_text):
+ # Extract date and time from response_text using regex
+ match = re.search(r"((\d{4}-\d{2}-\d{2}) from (\d{2}:\d{2}:\d{2}))|((\w+ \d{1,2}, \d{4}),? from (\d{2}:\d{2}))",response_text)
+ if match:
+ # check for YYYY-MM-DD format in response_text
+ if match.group(2) and match.group(3):
+ date1_str = match.group(2)
+ time1_str = match.group(3)
+ date_time1 = datetime.strptime(f"{date1_str} {time1_str}","%Y-%m-%d %H:%M:%S")
+
+ # check for 'Month DD, YYYY' format in response_text
+ elif match.group(5) and match.group(6):
+ date1_str = match.group(5)
+ time1_str = match.group(6)
+ date_time1 = datetime.strptime(f"{date1_str} {time1_str}", "%B %d, %Y %H:%M")
+
+ else:
+ raise ValueError("Date and time format not found in response_text: " + response_text)
+ # remove special chars in raw sidepanel_text
+ sidepanel_text_cleaned = re.sub(r"[\ue000-\uf8ff]", "",sidepanel_text)
+
+ # Extract date and time from sidepanel_text using regex
+ match2 = re.search(r"(\w+ \w+ \d{1,2}, \d{4})\s*(\d{2}:\d{2})",sidepanel_text_cleaned)
+ if match2:
+ date2_str = match2.group(1)
+ time2_str = match2.group(2)
+ date_time2 = datetime.strptime(f"{date2_str} {time2_str}", "%A %B %d, %Y %H:%M")
+ else:
+ raise ValueError("Date and time format not found in sidepanel_text: "+sidepanel_text)
+ # Compare the two datetime objects
+ assert date_time1 == date_time2
diff --git a/tests/e2e-test/config/constants.py b/tests/e2e-test/config/constants.py
new file mode 100644
index 000000000..004a55f5d
--- /dev/null
+++ b/tests/e2e-test/config/constants.py
@@ -0,0 +1,21 @@
+from dotenv import load_dotenv
+import os
+
+load_dotenv()
+URL = os.getenv('url')
+if URL.endswith('/'):
+ URL = URL[:-1]
+
+# HomePage input data
+homepage_title = "Woodgrove Bank"
+client_name = "Karen Berg"
+# next_meeting_question = "when is the next meeting scheduled with this client?"
+golden_path_question1 = "What were karen's concerns during our last meeting?"
+golden_path_question2 = "Did karen express any concerns over market fluctuation in prior meetings?"
+golden_path_question3 = "What type of asset does karen own ?"
+golden_path_question4 = "Show latest asset value by asset type?"
+golden_path_question5 = "How did equities asset value change in the last six months?"
+# golden_path_question6 = "Give summary of previous meetings?"
+golden_path_question7 = "Summarize Arun sharma previous meetings?"
+invalid_response = "No data found for that client."
+# invalid_response = "I cannot answer this question from the data available. Please rephrase or add more details."
diff --git a/tests/e2e-test/img.png b/tests/e2e-test/img.png
new file mode 100644
index 000000000..c7c891ad7
Binary files /dev/null and b/tests/e2e-test/img.png differ
diff --git a/tests/e2e-test/img_1.png b/tests/e2e-test/img_1.png
new file mode 100644
index 000000000..5dffd4eeb
Binary files /dev/null and b/tests/e2e-test/img_1.png differ
diff --git a/tests/e2e-test/pages/__init__.py b/tests/e2e-test/pages/__init__.py
new file mode 100644
index 000000000..ba8308cff
--- /dev/null
+++ b/tests/e2e-test/pages/__init__.py
@@ -0,0 +1,2 @@
+from. import loginPage
+from. import homePage
diff --git a/tests/e2e-test/pages/homePage.py b/tests/e2e-test/pages/homePage.py
new file mode 100644
index 000000000..7301a8eda
--- /dev/null
+++ b/tests/e2e-test/pages/homePage.py
@@ -0,0 +1,83 @@
+from base.base import BasePage
+
+
+class HomePage(BasePage):
+ HOME_PAGE_TITLE = "//h2[text()='Woodgrove Bank']"
+ SIDE_PANEL_CLIENT_NAMES ="//div[contains(@class,'cardContainer')]//div[contains(@class,'clientName')]"
+ SELECTED_CLIENT_NAME_LABEL = "//span[contains(@class,'selectedName')]"
+ MORE_DETAILS_LINKS = "//div[contains(@class,'cardContainer')]//div[text()='More details']"
+ LESS_DETAILS_LINK = "//div[contains(@class,'cardContainer')]//div[text()='Less details']"
+ SIDE_PANEL_NEXT_MEETING_DETAILS = "//div[contains(@class,'selected')]/div[contains(@class,'nextMeeting')]"
+ TYPE_QUESTION_TEXT_AREA = "//textarea[contains(@placeholder,'Type a new question')]"
+ SEND_BUTTON = "div[role='button'][aria-label='Ask question button']"
+ ANSWER_TEXT = "//div[contains(@class,'answerText')]/p"
+ SHOW_CHAT_HISTORY_BUTTON="//span[text()='Show chat history']"
+ SAVE_CHATHISTORY_PLUS_ICON="//i[@data-icon-name='Add']"
+ SAVE_CHAT_CONFIRMATION_POPUPTEXT= "//div[contains(@class,'headerText')]"
+ SHOW_CHAT_HISTORY_DELETE_ICON="//span/i[@data-icon-name='Delete']"
+ SAVED_CHAT_LABEL="(//div[contains(@class,'chatTitle')])[1]"
+ CLEAR_CHAT_ICON = "//i[@data-icon-name='Broom']"
+ HIDE_CHAT_HISTORY_BUTTON = "//span[text()='Hide chat history']"
+ USER_CHAT_MESSAGE = "(//div[contains(@class,'chatMessageUserMessage')])[1]"
+ STOP_GENERATING_LABEL = "//span[text()='Stop generating']"
+
+ def __init__(self, page):
+ self.page = page
+
+ def select_a_client(self,client_name):
+ # scroll to the client on Home page
+ BasePage.scroll_into_view(self,self.page.locator(self.SIDE_PANEL_CLIENT_NAMES),client_name)
+ self.page.wait_for_timeout(2000)
+ # click on desired client name
+ BasePage.select_an_element(self,self.page.locator(self.SIDE_PANEL_CLIENT_NAMES),client_name)
+ self.page.wait_for_timeout(5000)
+
+ def enter_a_question(self, text):
+ # Type a question in the text area
+ self.page.locator(self.TYPE_QUESTION_TEXT_AREA).fill(text)
+ self.page.wait_for_timeout(2000)
+
+ def click_send_button(self):
+ # Click on send button in question area
+ self.page.locator(self.SEND_BUTTON).click()
+ self.page.locator(self.STOP_GENERATING_LABEL).wait_for(state='hidden')
+
+ def validate_next_meeting_date_time(self):
+ # validate next meeting date and time in side panel with response data
+ date_times = self.page.locator(self.SIDE_PANEL_NEXT_MEETING_DETAILS)
+ sidepanel_raw_datetime =""
+ for i in range(date_times.count()):
+ date_time = date_times.nth(i)
+ text = date_time.inner_text()
+ sidepanel_raw_datetime = sidepanel_raw_datetime + " " + text
+
+ response_raw_datetime = self.page.locator(self.ANSWER_TEXT).text_content()
+ BasePage.compare_raw_date_time(self,response_raw_datetime,sidepanel_raw_datetime)
+
+ def click_on_save_chat_plus_icon(self):
+ self.page.wait_for_selector(self.SAVE_CHATHISTORY_PLUS_ICON)
+ self.page.locator(self.SAVE_CHATHISTORY_PLUS_ICON).click()
+ self.page.wait_for_timeout(1000)
+
+ def click_on_show_chat_history_button(self):
+ self.page.wait_for_selector(self.SHOW_CHAT_HISTORY_BUTTON)
+ self.page.locator(self.SHOW_CHAT_HISTORY_BUTTON).click()
+ self.page.wait_for_timeout(1000)
+
+ def click_send_button_for_chat_history_response(self):
+ # Click on send button in question area
+ self.page.locator(self.SEND_BUTTON).click()
+
+ def click_on_saved_chat(self):
+ #click on saved chat in the show chat history section
+ self.page.wait_for_selector(self.SAVED_CHAT_LABEL)
+ self.page.locator(self.SAVED_CHAT_LABEL).click()
+
+ def click_clear_chat_icon(self):
+ # Click on clear chat icon in question area
+ if self.page.locator(self.USER_CHAT_MESSAGE).is_visible():
+ self.page.locator(self.CLEAR_CHAT_ICON).click()
+
+ def click_hide_chat_history_button(self):
+ # Click on hide chat history button in question area
+ self.page.locator(self.HIDE_CHAT_HISTORY_BUTTON).click()
\ No newline at end of file
diff --git a/tests/e2e-test/pages/loginPage.py b/tests/e2e-test/pages/loginPage.py
new file mode 100644
index 000000000..52617d337
--- /dev/null
+++ b/tests/e2e-test/pages/loginPage.py
@@ -0,0 +1,43 @@
+from asyncio import timeout
+from playwright.sync_api import sync_playwright,TimeoutError as PlaywightTimeoutError
+from base.base import BasePage
+
+
+class LoginPage(BasePage):
+
+ EMAIL_TEXT_BOX = "//input[@type='email']"
+ NEXT_BUTTON = "//input[@type='submit']"
+ PASSWORD_TEXT_BOX = "//input[@type='password']"
+ SIGNIN_BUTTON = "//input[@id='idSIButton9']"
+ YES_BUTTON = "//input[@id='idSIButton9']"
+ PERMISSION_ACCEPT_BUTTON = "//input[@type='submit']"
+
+ def __init__(self, page):
+ self.page = page
+
+ def authenticate(self, username,password):
+ # login with username and password in web url
+ self.page.locator(self.EMAIL_TEXT_BOX).fill(username)
+ self.page.locator(self.NEXT_BUTTON).click()
+
+ # Wait for the password input field to be available and fill it
+ self.page.wait_for_load_state('networkidle')
+ # Enter password
+ self.page.locator(self.PASSWORD_TEXT_BOX).fill(password)
+ # Click on SignIn button
+ self.page.locator(self.SIGNIN_BUTTON).click()
+ self.page.wait_for_load_state('networkidle')
+ try:
+ self.page.locator(self.YES_BUTTON).wait_for(state='visible',timeout=30000)
+ # Click on YES button
+ self.page.locator(self.YES_BUTTON).click()
+ except PlaywightTimeoutError:
+ pass
+ self.page.wait_for_load_state('networkidle')
+ try:
+ self.page.locator(self.PERMISSION_ACCEPT_BUTTON).wait_for(state='visible',timeout=5000)
+ # Click on Permissions ACCEPT button
+ self.page.locator(self.PERMISSION_ACCEPT_BUTTON).click()
+ except PlaywightTimeoutError:
+ pass
+ self.page.wait_for_load_state('networkidle')
diff --git a/tests/e2e-test/requirements.txt b/tests/e2e-test/requirements.txt
new file mode 100644
index 000000000..0d70ceecc
--- /dev/null
+++ b/tests/e2e-test/requirements.txt
@@ -0,0 +1,3 @@
+pytest-playwright
+pytest-html
+python-dotenv
\ No newline at end of file
diff --git a/tests/e2e-test/sample_dotenv_file.txt b/tests/e2e-test/sample_dotenv_file.txt
new file mode 100644
index 000000000..bf415f8b1
--- /dev/null
+++ b/tests/e2e-test/sample_dotenv_file.txt
@@ -0,0 +1,6 @@
+user_name = 'Your login username'
+pass_word = 'Your login password'
+client_id = 'client id'
+client_secret = 'client secret'
+tenant_id = 'tenant id'
+url = 'web app url'
\ No newline at end of file
diff --git a/tests/e2e-test/tests/__init__.py b/tests/e2e-test/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/e2e-test/tests/conftest.py b/tests/e2e-test/tests/conftest.py
new file mode 100644
index 000000000..79d6f6387
--- /dev/null
+++ b/tests/e2e-test/tests/conftest.py
@@ -0,0 +1,59 @@
+from pathlib import Path
+import pytest
+from playwright.sync_api import sync_playwright
+from config.constants import *
+from slugify import slugify
+from pages.homePage import HomePage
+from pages.loginPage import LoginPage
+from dotenv import load_dotenv
+import os
+
+
+@pytest.fixture(scope="session")
+def login_logout():
+ # perform login and browser close once in a session
+ with sync_playwright() as p:
+ browser = p.chromium.launch(headless=False)
+ context = browser.new_context()
+ context.set_default_timeout(80000)
+ page = context.new_page()
+ # Navigate to the login URL
+ page.goto(URL)
+ # Wait for the login form to appear
+ page.wait_for_load_state('networkidle')
+ page.wait_for_timeout(5000)
+ # # login to web url with username and password
+ # login_page = LoginPage(page)
+ # load_dotenv()
+ # login_page.authenticate(os.getenv('user_name'), os.getenv('pass_word'))
+
+ yield page
+
+ # perform close the browser
+ browser.close()
+
+
+@pytest.hookimpl(tryfirst=True)
+def pytest_html_report_title(report):
+ report.title = "Automation_BYOc_ClientAdvisor"
+
+
+@pytest.hookimpl(hookwrapper=True)
+def pytest_runtest_makereport(item, call):
+ pytest_html = item.config.pluginmanager.getplugin("html")
+ outcome = yield
+ screen_file=""
+ report = outcome.get_result()
+ extra = getattr(report, "extra", [])
+ if report.when == "call":
+ if report.failed and "page" in item.funcargs:
+ page = item.funcargs["page"]
+ screenshot_dir = Path("screenshots")
+ screenshot_dir.mkdir(exist_ok=True)
+ screen_file = str(screenshot_dir / f"{slugify(item.nodeid)}.png")
+ page.screenshot(path=screen_file)
+ xfail = hasattr(report, "wasxfail")
+ if (report.skipped and xfail) or (report.failed and not xfail):
+ # add the screenshots to the html report
+ extra.append(pytest_html.extras.png(screen_file))
+ report.extras = extra
diff --git a/tests/e2e-test/tests/test_poc_byoc_client_advisor.py b/tests/e2e-test/tests/test_poc_byoc_client_advisor.py
new file mode 100644
index 000000000..18aa7cd08
--- /dev/null
+++ b/tests/e2e-test/tests/test_poc_byoc_client_advisor.py
@@ -0,0 +1,141 @@
+from config.constants import *
+from pages.homePage import HomePage
+
+
+# def test_chatbot_responds_with_upcoming_meeting_schedule_date(login_logout):
+# page = login_logout
+# home_page = HomePage(page)
+# # validate page title
+# assert homepage_title == page.locator(home_page.HOME_PAGE_TITLE).text_content()
+# # select a client
+# home_page.select_a_client(client_name)
+# # validate selected client name
+# assert client_name == page.locator(home_page.SELECTED_CLIENT_NAME_LABEL).text_content()
+# # ask a question
+# home_page.enter_a_question(next_meeting_question)
+# # click send button
+# home_page.click_send_button()
+# # Validate response status code
+# home_page.validate_response_status()
+# # validate the upcoming meeting date-time in both side panel and response
+# home_page.validate_next_meeting_date_time()
+
+def test_save_chat_confirmation_popup(login_logout):
+ page = login_logout
+ home_page = HomePage(page)
+ # validate page title
+ assert homepage_title == page.locator(home_page.HOME_PAGE_TITLE).text_content()
+ # select a client
+ home_page.select_a_client(client_name)
+ # validate selected client name
+ assert client_name == page.locator(home_page.SELECTED_CLIENT_NAME_LABEL).text_content()
+ # clear the chat if any
+ home_page.click_clear_chat_icon()
+ # ask a question
+ home_page.enter_a_question(golden_path_question1)
+ # click send button
+ home_page.click_send_button()
+ # Validate response status code
+ home_page.validate_response_status()
+ #click on the plus button
+ home_page.click_on_save_chat_plus_icon()
+ assert page.locator(home_page.SAVE_CHAT_CONFIRMATION_POPUPTEXT).is_visible()
+
+def test_delete_chat_history_during_response(login_logout):
+ page = login_logout
+ home_page = HomePage(page)
+ # validate page title
+ assert homepage_title == page.locator(home_page.HOME_PAGE_TITLE).text_content()
+ # select a client
+ home_page.select_a_client(client_name)
+ # validate selected client name
+ assert client_name == page.locator(home_page.SELECTED_CLIENT_NAME_LABEL).text_content()
+ # ask a question
+ home_page.enter_a_question(golden_path_question1)
+ # click send button
+ home_page.click_send_button()
+ #click on the plus button
+ home_page.click_on_save_chat_plus_icon()
+ assert page.locator(home_page.SAVE_CHAT_CONFIRMATION_POPUPTEXT).is_visible()
+ #click on show chat history button
+ home_page.click_on_show_chat_history_button()
+ #click on saved chat history
+ home_page.click_on_saved_chat()
+ #ask the question
+ home_page.enter_a_question(golden_path_question1)
+ #click on click_send_button_for_chat_history_response
+ home_page.click_send_button_for_chat_history_response()
+ # validate the delete icon disabled
+ assert page.locator(home_page.SHOW_CHAT_HISTORY_DELETE_ICON).is_disabled()
+ # click on hide chat history button
+ home_page.click_hide_chat_history_button()
+ # clear the chat
+ home_page.click_clear_chat_icon()
+
+def test_golden_path_demo_script(login_logout):
+ page = login_logout
+ home_page = HomePage(page)
+ # validate page title
+ assert homepage_title == page.locator(home_page.HOME_PAGE_TITLE).text_content()
+ # select a client
+ home_page.select_a_client(client_name)
+ # validate selected client name
+ assert client_name == page.locator(home_page.SELECTED_CLIENT_NAME_LABEL).text_content()
+ # ask a question
+ home_page.enter_a_question(golden_path_question1)
+ # click send button
+ home_page.click_send_button()
+ # Validate response status code
+ home_page.validate_response_status()
+ response_text = page.locator(home_page.ANSWER_TEXT)
+ # validate the response
+ assert response_text.nth(response_text.count()-1).text_content() != invalid_response,"Incorrect response for question: "+golden_path_question1
+ # ask a question
+ home_page.enter_a_question(golden_path_question2)
+ # click send button
+ home_page.click_send_button()
+ # Validate response status code
+ home_page.validate_response_status()
+ # validate the response
+ assert response_text.nth(response_text.count() - 1).text_content() != invalid_response,"Incorrect response for question: "+golden_path_question2
+ # ask a question
+ home_page.enter_a_question(golden_path_question3)
+ # click send button
+ home_page.click_send_button()
+ # Validate response status code
+ home_page.validate_response_status()
+ # validate the response
+ assert response_text.nth(response_text.count() - 1).text_content() != invalid_response,"Incorrect response for question: "+golden_path_question3
+ # ask a question
+ home_page.enter_a_question(golden_path_question4)
+ # click send button
+ home_page.click_send_button()
+ # Validate response status code
+ home_page.validate_response_status()
+ # validate the response
+ assert response_text.nth(response_text.count() - 1).text_content() != invalid_response,"Incorrect response for question: "+golden_path_question4
+ # ask a question
+ home_page.enter_a_question(golden_path_question5)
+ # click send button
+ home_page.click_send_button()
+ # Validate response status code
+ home_page.validate_response_status()
+ # validate the response
+ assert response_text.nth(response_text.count() - 1).text_content() != invalid_response,"Incorrect response for question: "+golden_path_question5
+ # # ask a question
+ # home_page.enter_a_question(golden_path_question6)
+ # # click send button
+ # home_page.click_send_button()
+ # # Validate response status code
+ # home_page.validate_response_status()
+ # # validate the response
+ # assert response_text.nth(response_text.count() - 1).text_content() != invalid_response,"Incorrect response for question: "+golden_path_question6
+ # ask a question
+ home_page.enter_a_question(golden_path_question7)
+ # click send button
+ home_page.click_send_button()
+ # Validate response status code
+ home_page.validate_response_status()
+ # validate the response
+ assert (response_text.nth(response_text.count() - 1).text_content().lower()).find("arun sharma") == -1,"Other client information in response for client: "+client_name
+ assert (response_text.nth(response_text.count() - 1).text_content().lower()).find(client_name) == -1,"Response is generated for selected client "+client_name+" even client name is different in question: "+golden_path_question7
\ No newline at end of file