Merged
27 commits
6d9f894
Added .dockerignore file to exclude unnecessary files from Docker bui…
Prasanjeet-Microsoft May 7, 2025
b4ba302
fixed date time response issue (#542)
AjitPadhi-Microsoft May 14, 2025
03417fe
Update main.bicepparam
Abdul-Microsoft May 19, 2025
bee82e8
Merge branch 'main' into dev
May 19, 2025
43a464b
Update src/App/backend/chat_logic_handler.py
Roopan-Microsoft May 21, 2025
cb49449
Update src/.dockerignore
Roopan-Microsoft May 21, 2025
72eb23a
feat: added opentelemetry log (#545)
Priyanka-Microsoft May 21, 2025
6d0f493
Merge branch 'main' into dev
Harsh-Microsoft May 27, 2025
58a3f42
Update main.json
Harsh-Microsoft May 27, 2025
99738aa
fix: Replace Gunicorn with Uvicorn for the backend server (#555)
Harsh-Microsoft May 29, 2025
2cc84de
EXP environment changes for Log Analytics workspace
Vamshi-Microsoft May 30, 2025
b88f111
refactor: Cleanup the unused variables in all the files (#557)
Abdul-Microsoft Jun 2, 2025
3053ffc
Merge branch 'dev' into exp-changes
Vamshi-Microsoft Jun 3, 2025
d15c956
Merge pull request #558 from microsoft/exp-changes
Prajwal-Microsoft Jun 3, 2025
7ae9718
EXP environment changes for Existing Fabric workspace
Vamshi-Microsoft Jun 3, 2025
17f48fa
Updated Heading
Vamshi-Microsoft Jun 3, 2025
afe869f
Merge pull request #559 from microsoft/exp-changes-fabric
Prajwal-Microsoft Jun 3, 2025
562533d
To reuse Log Analytics across subscriptions
Vamshi-Microsoft Jun 4, 2025
e6cbb5a
Merge pull request #560 from microsoft/exp-changes-sub
Vamshi-Microsoft Jun 5, 2025
1a996f1
automate
Vemarthula-Microsoft Jun 6, 2025
67d857c
Update test_automation.yml
Vemarthula-Microsoft Jun 6, 2025
8f99693
Update test_automation.yml
Vemarthula-Microsoft Jun 6, 2025
df678b6
Merge branch 'main' into dev
Roopan-Microsoft Jun 6, 2025
f80c49a
Update test_automation.yml
Vemarthula-Microsoft Jun 6, 2025
ea64dd1
Update test_automation.yml
Vemarthula-Microsoft Jun 6, 2025
2f5a18e
test: Migrate test automation scripts, pipeline for Client advisor
Avijit-Microsoft Jun 6, 2025
23d4e64
feat: Standardize Bicep Parameters for Client Advisor (#563)
VishalS-Microsoft Jun 6, 2025
22 changes: 4 additions & 18 deletions .github/dependabot.yml
@@ -15,7 +15,7 @@ updates:
patterns:
- "*"

# 2. Python dependencies App
# 2. Python dependencies – App
- package-ecosystem: "pip"
directory: "/src/App"
schedule:
@@ -28,22 +28,8 @@ updates:
backend-deps:
patterns:
- "*"

# 3. Python dependencies – Azure Function
- package-ecosystem: "pip"
directory: "/src/AzureFunction"
schedule:
interval: "monthly"
commit-message:
prefix: "build"
target-branch: "dependabotchanges"
open-pull-requests-limit: 10
groups:
backend-deps:
patterns:
- "*"

# 4. Python dependencies – Fabric Scripts

# 3. Python dependencies – Fabric Scripts
- package-ecosystem: "pip"
directory: "/src/infra/scripts/fabric_scripts"
schedule:
@@ -57,7 +43,7 @@ updates:
patterns:
- "*"

# 5. Python dependencies Index Scripts
# 4. Python dependencies – Index Scripts
- package-ecosystem: "pip"
directory: "/src/infra/scripts/index_scripts"
schedule:
111 changes: 111 additions & 0 deletions .github/workflows/test_automation.yml
@@ -0,0 +1,111 @@
name: Test Automation ClientAdvisor

on:
push:
branches:
- main
- dev

paths:
- 'tests/e2e-test/**'
schedule:
- cron: '0 13 * * *' # Runs at 1 PM UTC
workflow_dispatch:

env:
url: ${{ vars.CLIENT_ADVISOR_URL }}
accelerator_name: "Client Advisor"

jobs:
test:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4

- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: '3.13'

- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r tests/e2e-test/requirements.txt

- name: Ensure browsers are installed
run: python -m playwright install --with-deps chromium

- name: Run tests(1)
id: test1
run: |
xvfb-run pytest --headed --html=report/report.html --self-contained-html
working-directory: tests/e2e-test
continue-on-error: true

- name: Sleep for 30 seconds
if: ${{ steps.test1.outcome == 'failure' }}
run: sleep 30s
shell: bash

- name: Run tests(2)
if: ${{ steps.test1.outcome == 'failure' }}
id: test2
run: |
xvfb-run pytest --headed --html=report/report.html --self-contained-html
working-directory: tests/e2e-test
continue-on-error: true

- name: Sleep for 60 seconds
if: ${{ steps.test2.outcome == 'failure' }}
run: sleep 60s
shell: bash

- name: Run tests(3)
if: ${{ steps.test2.outcome == 'failure' }}
id: test3
run: |
xvfb-run pytest --headed --html=report/report.html --self-contained-html
working-directory: tests/e2e-test

- name: Upload test report
id: upload_report
uses: actions/upload-artifact@v4
if: ${{ !cancelled() }}
with:
name: test-report
path: tests/e2e-test/report/*

- name: Send Notification
if: always()
run: |
RUN_URL="https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"
REPORT_URL=${{ steps.upload_report.outputs.artifact-url }}
IS_SUCCESS=${{ steps.test1.outcome == 'success' || steps.test2.outcome == 'success' || steps.test3.outcome == 'success' }}
# Construct the email body
if [ "$IS_SUCCESS" = "true" ]; then
EMAIL_BODY=$(cat <<EOF
{
"body": "<p>Dear Team,</p><p>We would like to inform you that the ${{ env.accelerator_name }} Test Automation process has completed successfully.</p><p><strong>Run URL:</strong> <a href=\"${RUN_URL}\">${RUN_URL}</a>
</p><p><strong>Test Report:</strong> <a href=\"${REPORT_URL}\">${REPORT_URL}</a></p><p>Best regards,
Your Automation Team</p>",
"subject": "${{ env.accelerator_name }} Test Automation - Success"
}
EOF
)
else
EMAIL_BODY=$(cat <<EOF
{
"body": "<p>Dear Team,</p><p>We would like to inform you that the ${{ env.accelerator_name }} Test Automation process has encountered an issue and has failed to complete successfully.</p><p><strong>Run URL:</strong> <a href=\"${RUN_URL}\">${RUN_URL}</a>
${OUTPUT}</p><p><strong>Test Report:</strong> <a href=\"${REPORT_URL}\">${REPORT_URL}</a></p><p>Please investigate the matter at your earliest convenience.</p><p>Best regards,
Your Automation Team</p>",
"subject": "${{ env.accelerator_name }} Test Automation - Failure"
}
EOF
)
fi

# Send the notification
curl -X POST "${{ secrets.EMAILNOTIFICATION_LOGICAPP_URL_TA}}" \
-H "Content-Type: application/json" \
-d "$EMAIL_BODY" || echo "Failed to send notification"
60 changes: 27 additions & 33 deletions docs/CustomizingAzdParameters.md
@@ -3,41 +3,35 @@
By default this template will use the environment name as the prefix to prevent naming collisions within Azure. The parameters below show the default values. You only need to run the statements below if you need to change the values.


> To override any of the parameters, run `azd env set <key> <value>` before running `azd up`. On the first azd command, it will prompt you for the environment name. Be sure to choose 3-20 characters alphanumeric unique name.
> To override any of the parameters, run `azd env set <PARAMETER_NAME> <VALUE>` before running `azd up`. On the first azd command, it will prompt you for the environment name. Be sure to choose a unique alphanumeric name of 3-20 characters.

## Parameters

| Name | Type | Default Value | Purpose |
| -----------------------------| ------- | ------------------- | ---------------------------------------------------------------------------------------------------- |
| `AZURE_ENV_NAME` | string | `azdtemp` | Used as a prefix for all resource names to ensure uniqueness across environments. |
| `AZURE_ENV_COSMOS_LOCATION`  | string | `eastus2` | Location of the Cosmos DB instance (allowed values: `swedencentral`, `australiaeast`). |
| `AZURE_ENV_MODEL_DEPLOYMENT_TYPE` | string | `GlobalStandard` | Change the Model Deployment Type (allowed values: Standard, GlobalStandard). |
| `AZURE_ENV_MODEL_NAME` | string | `gpt-4o-mini` | Set the GPT model name (allowed values: `gpt-4o`). |
| `AZURE_ENV_MODEL_VERSION` | string | `2025-01-01-preview` | Set the Azure OpenAI API version (allowed values: 2024-08-06). |
| `AZURE_ENV_MODEL_CAPACITY` | integer | `30` | Set the model capacity for GPT deployment. Choose based on your Azure quota and usage needs. |
| `AZURE_ENV_EMBEDDING_MODEL_NAME` | string | `text-embedding-ada-002` | Set the model name used for embeddings. |
| `AZURE_ENV_EMBEDDING_MODEL_CAPACITY` | integer | `80` | Set the capacity for embedding model deployment. |
| `AZURE_ENV_IMAGETAG` | string | `latest` | Set the image tag (allowed values: `latest`, `dev`, `hotfix`). |
| `AZURE_ENV_OPENAI_LOCATION` | string | `eastus2` | Location of the Azure OpenAI resource (allowed values: `swedencentral`, `australiaeast`). |
| `AZURE_LOCATION` | string | `japaneast` | Sets the Azure region for resource deployment. |
| `AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID` | string | `<Existing Workspace Id>` | Reuses an existing Log Analytics Workspace instead of provisioning a new one. |

## How to Set a Parameter
To customize any of the above values, run the following command **before** `azd up`:

```bash
azd env set <PARAMETER_NAME> <VALUE>
```

Change the Secondary Location (example: eastus2, westus2, etc.)

```shell
azd env set AZURE_ENV_SECONDARY_LOCATION eastus2
```

Change the Model Deployment Type (allowed values: Standard, GlobalStandard)

```shell
azd env set AZURE_ENV_MODEL_DEPLOYMENT_TYPE Standard
```

Set the Model Name (allowed values: gpt-4, gpt-4o)

```shell
azd env set AZURE_ENV_MODEL_NAME gpt-4o
```

Change the Model Capacity (choose a number based on available GPT model capacity in your subscription)

```shell
azd env set AZURE_ENV_MODEL_CAPACITY 30
```

Change the Embedding Model

```shell
azd env set AZURE_ENV_EMBEDDING_MODEL_NAME text-embedding-ada-002
```

Change the Embedding Deployment Capacity (choose a number based on available embedding model capacity in your subscription)
**Example:**

```shell
azd env set AZURE_ENV_EMBEDDING_MODEL_CAPACITY 80
```

```bash
azd env set AZURE_LOCATION westus2
```
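
As a further illustration, several of the parameters from the table above can be overridden in one go before provisioning; a hedged sketch using values the table lists as allowed:

```bash
azd env set AZURE_ENV_MODEL_DEPLOYMENT_TYPE Standard      # allowed: Standard, GlobalStandard
azd env set AZURE_ENV_MODEL_CAPACITY 30                   # pick based on your Azure OpenAI quota
azd env set AZURE_ENV_EMBEDDING_MODEL_CAPACITY 80         # pick based on embedding quota
azd env set AZURE_ENV_OPENAI_LOCATION swedencentral       # allowed: swedencentral, australiaeast
azd up
```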
28 changes: 18 additions & 10 deletions docs/DeploymentGuide.md
@@ -104,16 +104,24 @@ Consider the following settings during your deployment to modify specific settin

When you start the deployment, most parameters will have **default values**, but you can update the settings below by following the steps [here](CustomizingAzdParameters.md):

| **Setting** | **Description** | **Default value** |
|------------|----------------| ------------|
| **Azure OpenAI Location** | The region where OpenAI deploys | eastus2 |
| **Environment Name** | A **3-20 character alphanumeric value** used to generate a unique ID to prefix the resources. | byocatemplate |
| **Cosmos Location** | A **less busy** region for **CosmosDB**, useful in case of availability constraints. | eastus2 |
| **Deployment Type** | Select from a drop-down list. | Global Standard |
| **GPT Model** | OpenAI GPT model | gpt-4o-mini |
| **GPT Model Deployment Capacity** | Configure capacity for **GPT models**. | 30k |
| **Embedding Model** | OpenAI embedding model | text-embedding-ada-002 |
| **Embedding Model Capacity** | Set the capacity for **embedding models**. | 80k |

| **Setting** | **Description** | **Default value** |
| ------------------------------------ | -------------------------------------------------------------------------------------------------- | ------------------------ |
| **Azure OpenAI Location** | The region where Azure OpenAI deploys. Choose from `swedencentral`, `australiaeast`, etc. | `eastus2` |
| **Environment Name** | A **3-20 character alphanumeric value** used to generate a unique ID to prefix the resources. | `azdtemp` |
| **Cosmos Location** | A **less busy** region for **CosmosDB**, useful in case of availability constraints. | `eastus2` |
| **Deployment Type** | Select from a drop-down list (`Standard`, `GlobalStandard`). | `GlobalStandard` |
| **GPT Model** | Azure OpenAI GPT model to deploy. | `gpt-4o-mini` |
| **GPT Model Deployment Capacity** | Configure capacity for **GPT models**. Choose based on Azure OpenAI quota. | `30` |
| **Embedding Model** | OpenAI embedding model used for vector similarity. | `text-embedding-ada-002` |
| **Embedding Model Capacity** | Set the capacity for **embedding models**. Choose based on usage and quota. | `80` |
| **Image Tag** | The version of the Docker image to use (e.g., `latest`, `dev`, `hotfix`). | `latest` |
| **Azure OpenAI API Version** | Set the API version for OpenAI model deployments. | `2025-01-01-preview` |
| **AZURE\_LOCATION** | Sets the Azure region for resource deployment. | `japaneast` |
| **Existing Log Analytics Workspace** | Resource ID of an existing Log Analytics Workspace to reuse instead of creating a new one.           | *(empty)*                |
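
For the new **Existing Log Analytics Workspace** row, the value is the workspace's full resource ID, supplied through the corresponding `azd` parameter documented in [CustomizingAzdParameters.md](CustomizingAzdParameters.md); a sketch with placeholder segments:

```bash
azd env set AZURE_ENV_LOG_ANALYTICS_WORKSPACE_ID "/subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/Microsoft.OperationalInsights/workspaces/<workspace-name>"
```

Leaving it empty (the default) provisions a new workspace.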




</details>

7 changes: 5 additions & 2 deletions docs/FabricDeployment.md
@@ -1,5 +1,8 @@
## Fabric Deployment
## Step 1: Create Fabric workspace
## Step 1: Create or Use an Existing Microsoft Fabric Workspace

ℹ️ Note: If you already have an existing Microsoft Fabric Workspace, you can **skip this step** and proceed to Step 2. To retrieve an existing Workspace ID, check **Point 5 below**.

1. Navigate to the [Fabric Workspace](https://app.fabric.microsoft.com/)
2. Click on Workspaces from left Navigation
3. Click on + New Workspace
@@ -19,7 +22,7 @@
- ```cd ./Build-your-own-copilot-Solution-Accelerator/infra/scripts/fabric_scripts```
- ```sh ./run_fabric_items_scripts.sh keyvault_param workspaceid_param solutionprefix_param```
1. keyvault_param - the name of the keyvault that was created in Step 1
2. workspaceid_param - the workspaceid created in Step 2
2. workspaceid_param - the ID of an existing workspace, or the workspace ID created in Step 2
3. solutionprefix_param - prefix appended to the lakehouse upon creation
4. Get Fabric Lakehouse connection details:
5. Once deployment is complete, navigate to Fabric Workspace
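
For illustration, a sketch of the step-3 invocation with hypothetical values (the key vault name, workspace ID, and prefix below are placeholders, not values from this repository):

```bash
cd ./Build-your-own-copilot-Solution-Accelerator/infra/scripts/fabric_scripts

# Arguments in order: keyvault_param, workspaceid_param (existing or newly created workspace), solutionprefix_param
sh ./run_fabric_items_scripts.sh kv-sample-001 00000000-0000-0000-0000-000000000000 sampleprefix
```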
8 changes: 5 additions & 3 deletions docs/LocalSetupAndDeploy.md
@@ -40,9 +40,11 @@ Follow these steps to deploy the application to Azure App Service:
If this is your first time deploying the app, use the `az webapp up` command. Run the following commands from the `App` folder, replacing the placeholders with your desired values:

```sh
az webapp up --runtime PYTHON:3.11 --sku B1 --name <new-app-name> --resource-group <resource-group-name> --location <azure-region> --subscription <subscription-name>
az webapp up --runtime PYTHON:3.11 --sku B1 --name <new-app-name> --resource-group <resource-group-name> --location <azure-region> --subscription <subscription-id>

az webapp config set --startup-file "python3 -m gunicorn app:app" --name <new-app-name> --resource-group <resource-group-name>
az webapp config set --startup-file "python3 -m uvicorn app:app --host 0.0.0.0 --port 8000" --name <new-app-name> --resource-group <resource-group-name>

az webapp config appsettings set --resource-group <resource-group-name> --name <new-app-name> --settings WEBSITES_PORT=8000
```

Next, configure the required environment variables in the deployed app to ensure it functions correctly.
@@ -83,7 +85,7 @@ az webapp up \
--resource-group <resource-group-name>

az webapp config set \
--startup-file "python3 -m gunicorn app:app" \
--startup-file "python3 -m uvicorn app:app --host 0.0.0.0 --port 8000" \
--name <existing-app-name> --resource-group <resource-group-name>
```
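
Either way, the switch from Gunicorn to Uvicorn can be sanity-checked after deployment; a sketch using read-only Azure CLI queries (placeholders as above):

```bash
# Confirm the configured startup command now invokes uvicorn
az webapp config show --name <app-name> --resource-group <resource-group-name> --query appCommandLine

# Confirm the WEBSITES_PORT app setting is 8000 (set in the first-time deployment command above)
az webapp config appsettings list --name <app-name> --resource-group <resource-group-name> --query "[?name=='WEBSITES_PORT']"
```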

22 changes: 19 additions & 3 deletions infra/deploy_ai_foundry.bicep
@@ -9,6 +9,7 @@ param gptDeploymentCapacity int
param embeddingModel string
param embeddingDeploymentCapacity int
param managedIdentityObjectId string
param existingLogAnalyticsWorkspaceId string = ''

// Load the abbreviations file required to name the Azure resources.
var abbrs = loadJsonContent('./abbreviations.json')
@@ -54,7 +55,17 @@ resource keyVault 'Microsoft.KeyVault/vaults@2022-07-01' existing = {
name: keyVaultName
}

resource logAnalytics 'Microsoft.OperationalInsights/workspaces@2023-09-01' = {
var useExisting = !empty(existingLogAnalyticsWorkspaceId)
var existingLawSubscription = useExisting ? split(existingLogAnalyticsWorkspaceId, '/')[2] : ''
var existingLawResourceGroup = useExisting ? split(existingLogAnalyticsWorkspaceId, '/')[4] : ''
var existingLawName = useExisting ? split(existingLogAnalyticsWorkspaceId, '/')[8] : ''

resource existingLogAnalyticsWorkspace 'Microsoft.OperationalInsights/workspaces@2023-09-01' existing = if (useExisting) {
name: existingLawName
scope: resourceGroup(existingLawSubscription, existingLawResourceGroup)
}

resource logAnalytics 'Microsoft.OperationalInsights/workspaces@2023-09-01' = if (!useExisting) {
name: workspaceName
location: location
tags: {}
@@ -93,7 +104,7 @@ resource applicationInsights 'Microsoft.Insights/components@2020-02-02' = {
Application_Type: 'web'
publicNetworkAccessForIngestion: 'Enabled'
publicNetworkAccessForQuery: 'Enabled'
WorkspaceResourceId: logAnalytics.id
WorkspaceResourceId: useExisting ? existingLogAnalyticsWorkspace.id : logAnalytics.id
}
}

@@ -490,5 +501,10 @@ output aiSearchService string = aiSearch.name
output aiProjectName string = aiHubProject.name

output applicationInsightsId string = applicationInsights.id
output logAnalyticsWorkspaceResourceName string = logAnalytics.name
output logAnalyticsWorkspaceResourceName string = useExisting ? existingLogAnalyticsWorkspace.name : logAnalytics.name
output logAnalyticsWorkspaceResourceGroup string = useExisting ? existingLawResourceGroup : resourceGroup().name


output storageAccountName string = storageNameCleaned
output applicationInsightsConnectionString string = applicationInsights.properties.ConnectionString
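
A note on the `split()` logic above: it assumes `existingLogAnalyticsWorkspaceId` is a full workspace resource ID, whose segments 2, 4, and 8 are the subscription, resource group, and workspace name. A sketch of fetching such an ID with the Azure CLI, assuming the workspace already exists (names are placeholders):

```bash
# Prints: /subscriptions/<sub-id>/resourceGroups/<rg>/providers/Microsoft.OperationalInsights/workspaces/<name>
az monitor log-analytics workspace show \
  --resource-group <existing-law-resource-group> \
  --workspace-name <existing-law-name> \
  --query id --output tsv
```

When the parameter is left empty, `useExisting` is false and the template provisions a new workspace exactly as before.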
