diff --git a/.github/workflows/CAdeploy.yml b/.github/workflows/CAdeploy.yml
new file mode 100644
index 000000000..9dc156edc
--- /dev/null
+++ b/.github/workflows/CAdeploy.yml
@@ -0,0 +1,128 @@
+name: CI-Validate Deployment-Client Advisor
+
+on:
+ push:
+ branches:
+ - main
+ paths:
+ - 'ClientAdvisor/**'
+
+jobs:
+ deploy:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout Code
+ uses: actions/checkout@v3
+
+ - name: Setup Azure CLI
+ run: |
+ curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
+ az --version # Verify installation
+
+ - name: Login to Azure
+ run: |
+ az login --service-principal -u ${{ secrets.AZURE_CLIENT_ID }} -p ${{ secrets.AZURE_CLIENT_SECRET }} --tenant ${{ secrets.AZURE_TENANT_ID }}
+
+ - name: Install Bicep CLI
+ run: az bicep install
+
+ - name: Generate Resource Group Name
+ id: generate_rg_name
+ run: |
+ echo "Generating a unique resource group name..."
+ TIMESTAMP=$(date +%Y%m%d%H%M%S)
+ COMMON_PART="pslautomationCli"
+ UNIQUE_RG_NAME="${COMMON_PART}${TIMESTAMP}"
+ echo "RESOURCE_GROUP_NAME=${UNIQUE_RG_NAME}" >> $GITHUB_ENV
+ echo "Generated RESOURCE_GROUP_PREFIX: ${UNIQUE_RG_NAME}"
+
+ - name: Check and Create Resource Group
+ id: check_create_rg
+ run: |
+ echo "RESOURCE_GROUP: ${{ env.RESOURCE_GROUP_NAME }}"
+ set -e
+ echo "Checking if resource group exists..."
+ rg_exists=$(az group exists --name ${{ env.RESOURCE_GROUP_NAME }})
+ if [ "$rg_exists" = "false" ]; then
+ echo "Resource group does not exist. Creating..."
+ az group create --name ${{ env.RESOURCE_GROUP_NAME }} --location uksouth || { echo "Error creating resource group"; exit 1; }
+ else
+ echo "Resource group already exists."
+ fi
+
+ - name: Generate Unique Solution Prefix
+ id: generate_solution_prefix
+ run: |
+ set -e
+ COMMON_PART="pslc"
+ TIMESTAMP=$(date +%s)
+ UPDATED_TIMESTAMP=$(echo $TIMESTAMP | tail -c 3)
+ UNIQUE_SOLUTION_PREFIX="${COMMON_PART}${UPDATED_TIMESTAMP}"
+ echo "SOLUTION_PREFIX=${UNIQUE_SOLUTION_PREFIX}" >> $GITHUB_ENV
+ echo "Generated SOLUTION_PREFIX: ${UNIQUE_SOLUTION_PREFIX}"
+
+ - name: Deploy Bicep Template
+ id: deploy
+ run: |
+ set -e
+ az deployment group create \
+ --resource-group ${{ env.RESOURCE_GROUP_NAME }} \
+ --template-file ClientAdvisor/Deployment/bicep/main.bicep \
+ --parameters solutionPrefix=${{ env.SOLUTION_PREFIX }} cosmosLocation=eastus2
+
+ - name: Update PowerBI URL
+ if: success()
+ run: |
+ set -e
+
+ COMMON_PART="-app-service"
+ application_name="${{ env.SOLUTION_PREFIX }}${COMMON_PART}"
+ echo "Updating application: $application_name"
+
+ # Log the Power BI URL being set
+ echo "Setting Power BI URL: ${{ vars.VITE_POWERBI_EMBED_URL }}"
+
+ # Update the application settings
+ az webapp config appsettings set --name "$application_name" --resource-group "${{ env.RESOURCE_GROUP_NAME }}" --settings VITE_POWERBI_EMBED_URL="${{ vars.VITE_POWERBI_EMBED_URL }}"
+
+ # Restart the web app
+ az webapp restart --resource-group "${{ env.RESOURCE_GROUP_NAME }}" --name "$application_name"
+
+ echo "Power BI URL updated successfully for application: $application_name."
+
+ - name: Delete Bicep Deployment
+ if: success()
+ run: |
+ set -e
+ echo "Checking if resource group exists..."
+ rg_exists=$(az group exists --name ${{ env.RESOURCE_GROUP_NAME }})
+ if [ "$rg_exists" = "true" ]; then
+ echo "Resource group exist. Cleaning..."
+ az group delete \
+ --name ${{ env.RESOURCE_GROUP_NAME }} \
+ --yes \
+ --no-wait
+ echo "Resource group deleted... ${{ env.RESOURCE_GROUP_NAME }}"
+ else
+ echo "Resource group does not exists."
+ fi
+
+ - name: Send Notification on Failure
+ if: failure()
+ run: |
+ RUN_URL="https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"
+
+ # Construct the email body
+          EMAIL_BODY=$(cat <<EOF
+          {
+            "body": "<p>Dear Team,</p><p>We would like to inform you that the Client Advisor Automation process has encountered an issue and has failed to complete successfully.</p><p><strong>Build URL:</strong> ${RUN_URL}<br> ${OUTPUT}</p><p>Please investigate the matter at your earliest convenience.</p><p>Best regards,<br>Your Automation Team</p>"
+          }
+          EOF
+          )
+
+ # Send the notification
+ curl -X POST "${{ secrets.LOGIC_APP_URL }}" \
+ -H "Content-Type: application/json" \
+ -d "$EMAIL_BODY" || echo "Failed to send notification"
+
diff --git a/.github/workflows/RAdeploy.yml b/.github/workflows/RAdeploy.yml
new file mode 100644
index 000000000..61bdf0e71
--- /dev/null
+++ b/.github/workflows/RAdeploy.yml
@@ -0,0 +1,105 @@
+name: CI-Validate Deployment-Research Assistant
+
+on:
+ push:
+ branches:
+ - main
+ paths:
+ - 'ResearchAssistant/**'
+
+jobs:
+ deploy:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout Code
+ uses: actions/checkout@v3
+
+ - name: Setup Azure CLI
+ run: |
+ curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
+ az --version # Verify installation
+
+ - name: Login to Azure
+ run: |
+ az login --service-principal -u ${{ secrets.AZURE_CLIENT_ID }} -p ${{ secrets.AZURE_CLIENT_SECRET }} --tenant ${{ secrets.AZURE_TENANT_ID }}
+
+ - name: Install Bicep CLI
+ run: az bicep install
+
+ - name: Generate Resource Group Name
+ id: generate_rg_name
+ run: |
+ echo "Generating a unique resource group name..."
+ TIMESTAMP=$(date +%Y%m%d%H%M%S)
+ COMMON_PART="pslautomationRes"
+ UNIQUE_RG_NAME="${COMMON_PART}${TIMESTAMP}"
+ echo "RESOURCE_GROUP_NAME=${UNIQUE_RG_NAME}" >> $GITHUB_ENV
+ echo "Generated Resource_GROUP_PREFIX: ${UNIQUE_RG_NAME}"
+
+ - name: Check and Create Resource Group
+ id: check_create_rg
+ run: |
+ set -e
+ echo "Checking if resource group exists..."
+ rg_exists=$(az group exists --name ${{ env.RESOURCE_GROUP_NAME }})
+ if [ "$rg_exists" = "false" ]; then
+ echo "Resource group does not exist. Creating..."
+ az group create --name ${{ env.RESOURCE_GROUP_NAME }} --location eastus2 || { echo "Error creating resource group"; exit 1; }
+ else
+ echo "Resource group already exists."
+ fi
+
+ - name: Generate Unique Solution Prefix
+ id: generate_solution_prefix
+ run: |
+ set -e
+ COMMON_PART="pslr"
+ TIMESTAMP=$(date +%s)
+ UPDATED_TIMESTAMP=$(echo $TIMESTAMP | tail -c 3)
+ UNIQUE_SOLUTION_PREFIX="${COMMON_PART}${UPDATED_TIMESTAMP}"
+ echo "SOLUTION_PREFIX=${UNIQUE_SOLUTION_PREFIX}" >> $GITHUB_ENV
+ echo "Generated SOLUTION_PREFIX: ${UNIQUE_SOLUTION_PREFIX}"
+
+ - name: Deploy Bicep Template
+ id: deploy
+ run: |
+ set -e
+ az deployment group create \
+ --resource-group ${{ env.RESOURCE_GROUP_NAME }} \
+ --template-file ResearchAssistant/Deployment/bicep/main.bicep \
+ --parameters solutionPrefix=${{ env.SOLUTION_PREFIX }}
+
+ - name: Delete Bicep Deployment
+ if: success()
+ run: |
+ set -e
+ echo "Checking if resource group exists..."
+ rg_exists=$(az group exists --name ${{ env.RESOURCE_GROUP_NAME }})
+ if [ "$rg_exists" = "true" ]; then
+ echo "Resource group exist. Cleaning..."
+ az group delete \
+ --name ${{ env.RESOURCE_GROUP_NAME }} \
+ --yes \
+ --no-wait
+ echo "Resource group deleted... ${{ env.RESOURCE_GROUP_NAME }}"
+ else
+ echo "Resource group does not exists."
+ fi
+
+ - name: Send Notification on Failure
+ if: failure()
+ run: |
+ RUN_URL="https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"
+
+ # Construct the email body
+          EMAIL_BODY=$(cat <<EOF
+          {
+            "body": "<p>Dear Team,</p><p>We would like to inform you that the Research Assistant Automation process has encountered an issue and has failed to complete successfully.</p><p><strong>Build URL:</strong> ${RUN_URL}<br> ${OUTPUT}</p><p>Please investigate the matter at your earliest convenience.</p><p>Best regards,<br>Your Automation Team</p>"
+          }
+          EOF
+          )
+
+ # Send the notification
+ curl -X POST "${{ secrets.LOGIC_APP_URL }}" \
+ -H "Content-Type: application/json" \
+ -d "$EMAIL_BODY" || echo "Failed to send notification"
\ No newline at end of file
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
new file mode 100644
index 000000000..5f6ba6220
--- /dev/null
+++ b/.github/workflows/codeql.yml
@@ -0,0 +1,94 @@
+# For most projects, this workflow file will not need changing; you simply need
+# to commit it to your repository.
+#
+# You may wish to alter this file to override the set of languages analyzed,
+# or to provide custom queries or build logic.
+#
+# ******** NOTE ********
+# We have attempted to detect the languages in your repository. Please check
+# the `language` matrix defined below to confirm you have the correct set of
+# supported CodeQL languages.
+#
+name: "CodeQL Advanced"
+
+on:
+ push:
+ branches: [ "main" ]
+ pull_request:
+ branches: [ "main" ]
+ schedule:
+ - cron: '22 13 * * 0'
+
+jobs:
+ analyze:
+ name: Analyze (${{ matrix.language }})
+ # Runner size impacts CodeQL analysis time. To learn more, please see:
+ # - https://gh.io/recommended-hardware-resources-for-running-codeql
+ # - https://gh.io/supported-runners-and-hardware-resources
+ # - https://gh.io/using-larger-runners (GitHub.com only)
+ # Consider using larger runners or machines with greater resources for possible analysis time improvements.
+ runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
+ permissions:
+ # required for all workflows
+ security-events: write
+
+ # required to fetch internal or private CodeQL packs
+ packages: read
+
+ # only required for workflows in private repositories
+ actions: read
+ contents: read
+
+ strategy:
+ fail-fast: false
+ matrix:
+ include:
+ - language: javascript-typescript
+ build-mode: none
+ - language: python
+ build-mode: none
+ # CodeQL supports the following values keywords for 'language': 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift'
+ # Use `c-cpp` to analyze code written in C, C++ or both
+ # Use 'java-kotlin' to analyze code written in Java, Kotlin or both
+ # Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both
+ # To learn more about changing the languages that are analyzed or customizing the build mode for your analysis,
+ # see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning.
+ # If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how
+ # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ # Initializes the CodeQL tools for scanning.
+ - name: Initialize CodeQL
+ uses: github/codeql-action/init@v3
+ with:
+ languages: ${{ matrix.language }}
+ build-mode: ${{ matrix.build-mode }}
+ # If you wish to specify custom queries, you can do so here or in a config file.
+ # By default, queries listed here will override any specified in a config file.
+ # Prefix the list here with "+" to use these queries and those in the config file.
+
+ # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
+ # queries: security-extended,security-and-quality
+
+ # If the analyze step fails for one of the languages you are analyzing with
+ # "We were unable to automatically build your code", modify the matrix above
+ # to set the build mode to "manual" for that language. Then modify this step
+ # to build your code.
+ # ℹ️ Command-line programs to run using the OS shell.
+ # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
+ - if: matrix.build-mode == 'manual'
+ shell: bash
+ run: |
+ echo 'If you are using a "manual" build mode for one or more of the' \
+ 'languages you are analyzing, replace this with the commands to build' \
+ 'your code, for example:'
+ echo ' make bootstrap'
+ echo ' make release'
+ exit 1
+
+ - name: Perform CodeQL Analysis
+ uses: github/codeql-action/analyze@v3
+ with:
+ category: "/language:${{matrix.language}}"
diff --git a/.github/workflows/pylint.yml b/.github/workflows/pylint.yml
new file mode 100644
index 000000000..989f73871
--- /dev/null
+++ b/.github/workflows/pylint.yml
@@ -0,0 +1,22 @@
+name: Pylint
+
+on: [push]
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ python-version: ["3.11"]
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v3
+ with:
+ python-version: ${{ matrix.python-version }}
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install -r ClientAdvisor/App/requirements.txt
+ - name: Run flake8
+ run: flake8 --config=ClientAdvisor/App/.flake8 ClientAdvisor/App
diff --git a/.github/workflows/test_client_advisor.yml b/.github/workflows/test_client_advisor.yml
new file mode 100644
index 000000000..f9a29716b
--- /dev/null
+++ b/.github/workflows/test_client_advisor.yml
@@ -0,0 +1,55 @@
+name: Tests
+
+on:
+ push:
+ branches: main
+ # Trigger on changes in these specific paths
+ paths:
+ - 'ClientAdvisor/**'
+ pull_request:
+ branches: main
+ types:
+ - opened
+ - ready_for_review
+ - reopened
+ - synchronize
+ paths:
+ - 'ClientAdvisor/**'
+
+jobs:
+ test_client_advisor:
+
+ name: Client Advisor Tests
+ runs-on: ubuntu-latest
+ # The if condition ensures that this job only runs if changes are in the ClientAdvisor folder
+
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.11"
+ - name: Install Backend Dependencies
+ run: |
+ cd ClientAdvisor/App
+ python -m pip install -r requirements.txt
+ python -m pip install coverage pytest-cov
+
+ - name: Set up Node.js
+ uses: actions/setup-node@v3
+ with:
+ node-version: '20'
+ - name: Install Frontend Dependencies
+ run: |
+ cd ClientAdvisor/App/frontend
+ npm install
+ - name: Run Frontend Tests with Coverage
+ run: |
+ cd ClientAdvisor/App/frontend
+ npm run test -- --coverage
+ - uses: actions/upload-artifact@v4
+ with:
+ name: client-advisor-frontend-coverage
+ path: |
+ ClientAdvisor/App/frontend/coverage/
+ ClientAdvisor/App/frontend/coverage/lcov-report/
\ No newline at end of file
diff --git a/.github/workflows/test_research_assistant.yml b/.github/workflows/test_research_assistant.yml
new file mode 100644
index 000000000..ec31819ba
--- /dev/null
+++ b/.github/workflows/test_research_assistant.yml
@@ -0,0 +1,54 @@
+name: Tests
+
+on:
+ push:
+ branches: main
+ # Trigger on changes in these specific paths
+ paths:
+ - 'ResearchAssistant/**'
+ pull_request:
+ branches: main
+ types:
+ - opened
+ - ready_for_review
+ - reopened
+ - synchronize
+ paths:
+ - 'ResearchAssistant/**'
+
+jobs:
+ test_research_assistant:
+ name: Research Assistant Tests
+ runs-on: ubuntu-latest
+ # The if condition ensures that this job only runs if changes are in the ResearchAssistant folder
+
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.11"
+ - name: Install Backend Dependencies
+ run: |
+ cd ResearchAssistant/App
+ python -m pip install -r requirements.txt
+ python -m pip install coverage pytest-cov
+
+ - name: Set up Node.js
+ uses: actions/setup-node@v3
+ with:
+ node-version: '20'
+ - name: Install Frontend Dependencies
+ run: |
+ cd ResearchAssistant/App/frontend
+ npm install
+ - name: Run Frontend Tests with Coverage
+ run: |
+ cd ResearchAssistant/App/frontend
+ npm run test -- --coverage
+ - uses: actions/upload-artifact@v4
+ with:
+ name: research-assistant-frontend-coverage
+ path: |
+ ResearchAssistant/App/frontend/coverage/
+ ResearchAssistant/App/frontend/coverage/lcov-report/
\ No newline at end of file
diff --git a/ClientAdvisor/App/.flake8 b/ClientAdvisor/App/.flake8
new file mode 100644
index 000000000..c462975ac
--- /dev/null
+++ b/ClientAdvisor/App/.flake8
@@ -0,0 +1,4 @@
+[flake8]
+max-line-length = 88
+extend-ignore = E501, E203
+exclude = .venv, frontend,
\ No newline at end of file
diff --git a/ClientAdvisor/App/.gitignore b/ClientAdvisor/App/.gitignore
index cf6d66c97..bb12c4b8b 100644
--- a/ClientAdvisor/App/.gitignore
+++ b/ClientAdvisor/App/.gitignore
@@ -17,6 +17,7 @@ lib/
.venv
frontend/node_modules
+frontend/coverage
.env
# static
.azure/
diff --git a/ClientAdvisor/App/app.py b/ClientAdvisor/App/app.py
index ff5647552..a32c72b27 100644
--- a/ClientAdvisor/App/app.py
+++ b/ClientAdvisor/App/app.py
@@ -7,7 +7,6 @@
import httpx
import time
import requests
-import pymssql
from types import SimpleNamespace
from db import get_connection
from quart import (
@@ -18,23 +17,20 @@
request,
send_from_directory,
render_template,
- session
)
+
# from quart.sessions import SecureCookieSessionInterface
from openai import AsyncAzureOpenAI
from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider
from backend.auth.auth_utils import get_authenticated_user_details, get_tenantid
from backend.history.cosmosdbservice import CosmosConversationClient
-# from flask import Flask
-# from flask_cors import CORS
-import secrets
+
from backend.utils import (
format_as_ndjson,
format_stream_response,
generateFilterString,
parse_multi_columns,
- format_non_streaming_response,
convert_to_pf_format,
format_pf_non_streaming_response,
)
@@ -297,6 +293,7 @@ async def assets(path):
VITE_POWERBI_EMBED_URL = os.environ.get("VITE_POWERBI_EMBED_URL")
+
def should_use_data():
global DATASOURCE_TYPE
if AZURE_SEARCH_SERVICE and AZURE_SEARCH_INDEX:
@@ -762,16 +759,18 @@ def prepare_model_args(request_body, request_headers):
messages.append({"role": message["role"], "content": message["content"]})
user_json = None
- if (MS_DEFENDER_ENABLED):
+ if MS_DEFENDER_ENABLED:
authenticated_user_details = get_authenticated_user_details(request_headers)
tenantId = get_tenantid(authenticated_user_details.get("client_principal_b64"))
- conversation_id = request_body.get("conversation_id", None)
+ conversation_id = request_body.get("conversation_id", None)
user_args = {
- "EndUserId": authenticated_user_details.get('user_principal_id'),
- "EndUserIdType": 'Entra',
+ "EndUserId": authenticated_user_details.get("user_principal_id"),
+ "EndUserIdType": "Entra",
"EndUserTenantId": tenantId,
"ConversationId": conversation_id,
- "SourceIp": request_headers.get('X-Forwarded-For', request_headers.get('Remote-Addr', '')),
+ "SourceIp": request_headers.get(
+ "X-Forwarded-For", request_headers.get("Remote-Addr", "")
+ ),
}
user_json = json.dumps(user_args)
@@ -831,6 +830,7 @@ def prepare_model_args(request_body, request_headers):
return model_args
+
async def promptflow_request(request):
try:
headers = {
@@ -864,70 +864,78 @@ async def promptflow_request(request):
logging.error(f"An error occurred while making promptflow_request: {e}")
-
async def send_chat_request(request_body, request_headers):
filtered_messages = []
messages = request_body.get("messages", [])
for message in messages:
- if message.get("role") != 'tool':
+ if message.get("role") != "tool":
filtered_messages.append(message)
-
- request_body['messages'] = filtered_messages
+
+ request_body["messages"] = filtered_messages
model_args = prepare_model_args(request_body, request_headers)
try:
azure_openai_client = init_openai_client()
- raw_response = await azure_openai_client.chat.completions.with_raw_response.create(**model_args)
+ raw_response = (
+ await azure_openai_client.chat.completions.with_raw_response.create(
+ **model_args
+ )
+ )
response = raw_response.parse()
- apim_request_id = raw_response.headers.get("apim-request-id")
+ apim_request_id = raw_response.headers.get("apim-request-id")
except Exception as e:
logging.exception("Exception in send_chat_request")
raise e
return response, apim_request_id
+
async def complete_chat_request(request_body, request_headers):
if USE_PROMPTFLOW and PROMPTFLOW_ENDPOINT and PROMPTFLOW_API_KEY:
response = await promptflow_request(request_body)
history_metadata = request_body.get("history_metadata", {})
return format_pf_non_streaming_response(
- response, history_metadata, PROMPTFLOW_RESPONSE_FIELD_NAME, PROMPTFLOW_CITATIONS_FIELD_NAME
+ response,
+ history_metadata,
+ PROMPTFLOW_RESPONSE_FIELD_NAME,
+ PROMPTFLOW_CITATIONS_FIELD_NAME,
)
elif USE_AZUREFUNCTION:
request_body = await request.get_json()
- client_id = request_body.get('client_id')
+ client_id = request_body.get("client_id")
print(request_body)
if client_id is None:
return jsonify({"error": "No client ID provided"}), 400
# client_id = '10005'
print("Client ID in complete_chat_request: ", client_id)
- answer = "Sample response from Azure Function"
- # Construct the URL of your Azure Function endpoint
- function_url = STREAMING_AZUREFUNCTION_ENDPOINT
-
- request_headers = {
- 'Content-Type': 'application/json',
- # 'Authorization': 'Bearer YOUR_TOKEN_HERE' # if applicable
- }
+ # answer = "Sample response from Azure Function"
+ # Construct the URL of your Azure Function endpoint
+ # function_url = STREAMING_AZUREFUNCTION_ENDPOINT
+
+ # request_headers = {
+ # "Content-Type": "application/json",
+ # # 'Authorization': 'Bearer YOUR_TOKEN_HERE' # if applicable
+ # }
# print(request_body.get("messages")[-1].get("content"))
# print(request_body)
query = request_body.get("messages")[-1].get("content")
-
print("Selected ClientId:", client_id)
# print("Selected ClientName:", selected_client_name)
# endpoint = STREAMING_AZUREFUNCTION_ENDPOINT + '?query=' + query + ' - for Client ' + selected_client_name + ':::' + selected_client_id
- endpoint = STREAMING_AZUREFUNCTION_ENDPOINT + '?query=' + query + ':::' + client_id
+ endpoint = (
+ STREAMING_AZUREFUNCTION_ENDPOINT + "?query=" + query + ":::" + client_id
+ )
print("Endpoint: ", endpoint)
- query_response = ''
+ query_response = ""
try:
- with requests.get(endpoint,stream=True) as r:
+ with requests.get(endpoint, stream=True) as r:
for line in r.iter_lines(chunk_size=10):
# query_response += line.decode('utf-8')
- query_response = query_response + '\n' + line.decode('utf-8')
+ query_response = query_response + "\n" + line.decode("utf-8")
# print(line.decode('utf-8'))
except Exception as e:
print(format_as_ndjson({"error" + str(e)}))
@@ -940,11 +948,9 @@ async def complete_chat_request(request_body, request_headers):
"model": "",
"created": 0,
"object": "",
- "choices": [{
- "messages": []
- }],
+ "choices": [{"messages": []}],
"apim-request-id": "",
- 'history_metadata': history_metadata
+ "history_metadata": history_metadata,
}
response["id"] = str(uuid.uuid4())
@@ -952,76 +958,84 @@ async def complete_chat_request(request_body, request_headers):
response["created"] = int(time.time())
response["object"] = "extensions.chat.completion.chunk"
# response["apim-request-id"] = headers.get("apim-request-id")
- response["choices"][0]["messages"].append({
- "role": "assistant",
- "content": query_response
- })
-
+ response["choices"][0]["messages"].append(
+ {"role": "assistant", "content": query_response}
+ )
return response
+
async def stream_chat_request(request_body, request_headers):
if USE_AZUREFUNCTION:
history_metadata = request_body.get("history_metadata", {})
function_url = STREAMING_AZUREFUNCTION_ENDPOINT
- apim_request_id = ''
-
- client_id = request_body.get('client_id')
+ apim_request_id = ""
+
+ client_id = request_body.get("client_id")
if client_id is None:
return jsonify({"error": "No client ID provided"}), 400
query = request_body.get("messages")[-1].get("content")
+ query = query.strip()
async def generate():
- deltaText = ''
- #async for completionChunk in response:
+ deltaText = ""
+ # async for completionChunk in response:
timeout = httpx.Timeout(10.0, read=None)
- async with httpx.AsyncClient(verify=False,timeout=timeout) as client: # verify=False for development purposes
- query_url = function_url + '?query=' + query + ':::' + client_id
- async with client.stream('GET', query_url) as response:
+ async with httpx.AsyncClient(
+ verify=False, timeout=timeout
+ ) as client: # verify=False for development purposes
+ query_url = function_url + "?query=" + query + ":::" + client_id
+ async with client.stream("GET", query_url) as response:
async for chunk in response.aiter_text():
- deltaText = ''
+ deltaText = ""
deltaText = chunk
completionChunk1 = {
"id": "",
"model": "",
"created": 0,
"object": "",
- "choices": [{
- "messages": [],
- "delta": {}
- }],
+ "choices": [{"messages": [], "delta": {}}],
"apim-request-id": "",
- 'history_metadata': history_metadata
+ "history_metadata": history_metadata,
}
completionChunk1["id"] = str(uuid.uuid4())
completionChunk1["model"] = AZURE_OPENAI_MODEL_NAME
completionChunk1["created"] = int(time.time())
completionChunk1["object"] = "extensions.chat.completion.chunk"
- completionChunk1["apim-request-id"] = request_headers.get("apim-request-id")
- completionChunk1["choices"][0]["messages"].append({
- "role": "assistant",
- "content": deltaText
- })
+ completionChunk1["apim-request-id"] = request_headers.get(
+ "apim-request-id"
+ )
+ completionChunk1["choices"][0]["messages"].append(
+ {"role": "assistant", "content": deltaText}
+ )
completionChunk1["choices"][0]["delta"] = {
"role": "assistant",
- "content": deltaText
+ "content": deltaText,
}
- completionChunk2 = json.loads(json.dumps(completionChunk1), object_hook=lambda d: SimpleNamespace(**d))
- yield format_stream_response(completionChunk2, history_metadata, apim_request_id)
+ completionChunk2 = json.loads(
+ json.dumps(completionChunk1),
+ object_hook=lambda d: SimpleNamespace(**d),
+ )
+ yield format_stream_response(
+ completionChunk2, history_metadata, apim_request_id
+ )
return generate()
-
+
else:
- response, apim_request_id = await send_chat_request(request_body, request_headers)
+ response, apim_request_id = await send_chat_request(
+ request_body, request_headers
+ )
history_metadata = request_body.get("history_metadata", {})
-
+
async def generate():
async for completionChunk in response:
- yield format_stream_response(completionChunk, history_metadata, apim_request_id)
+ yield format_stream_response(
+ completionChunk, history_metadata, apim_request_id
+ )
return generate()
-
async def conversation_internal(request_body, request_headers):
@@ -1060,15 +1074,15 @@ def get_frontend_settings():
except Exception as e:
logging.exception("Exception in /frontend_settings")
return jsonify({"error": str(e)}), 500
-
-## Conversation History API ##
+
+# Conversation History API #
@bp.route("/history/generate", methods=["POST"])
async def add_conversation():
authenticated_user = get_authenticated_user_details(request_headers=request.headers)
user_id = authenticated_user["user_principal_id"]
- ## check request for conversation_id
+ # check request for conversation_id
request_json = await request.get_json()
conversation_id = request_json.get("conversation_id", None)
@@ -1089,8 +1103,8 @@ async def add_conversation():
history_metadata["title"] = title
history_metadata["date"] = conversation_dict["createdAt"]
- ## Format the incoming message object in the "chat/completions" messages format
- ## then write it to the conversation history in cosmos
+ # Format the incoming message object in the "chat/completions" messages format
+ # then write it to the conversation history in cosmos
messages = request_json["messages"]
if len(messages) > 0 and messages[-1]["role"] == "user":
createdMessageValue = await cosmos_conversation_client.create_message(
@@ -1126,7 +1140,7 @@ async def update_conversation():
authenticated_user = get_authenticated_user_details(request_headers=request.headers)
user_id = authenticated_user["user_principal_id"]
- ## check request for conversation_id
+ # check request for conversation_id
request_json = await request.get_json()
conversation_id = request_json.get("conversation_id", None)
@@ -1140,8 +1154,8 @@ async def update_conversation():
if not conversation_id:
raise Exception("No conversation_id found")
- ## Format the incoming message object in the "chat/completions" messages format
- ## then write it to the conversation history in cosmos
+ # Format the incoming message object in the "chat/completions" messages format
+ # then write it to the conversation history in cosmos
messages = request_json["messages"]
if len(messages) > 0 and messages[-1]["role"] == "assistant":
if len(messages) > 1 and messages[-2].get("role", None) == "tool":
@@ -1178,7 +1192,7 @@ async def update_message():
user_id = authenticated_user["user_principal_id"]
cosmos_conversation_client = init_cosmosdb_client()
- ## check request for message_id
+ # check request for message_id
request_json = await request.get_json()
message_id = request_json.get("message_id", None)
message_feedback = request_json.get("message_feedback", None)
@@ -1189,7 +1203,7 @@ async def update_message():
if not message_feedback:
return jsonify({"error": "message_feedback is required"}), 400
- ## update the message in cosmos
+ # update the message in cosmos
updated_message = await cosmos_conversation_client.update_message_feedback(
user_id, message_id, message_feedback
)
@@ -1220,11 +1234,11 @@ async def update_message():
@bp.route("/history/delete", methods=["DELETE"])
async def delete_conversation():
- ## get the user id from the request headers
+ # get the user id from the request headers
authenticated_user = get_authenticated_user_details(request_headers=request.headers)
user_id = authenticated_user["user_principal_id"]
- ## check request for conversation_id
+ # check request for conversation_id
request_json = await request.get_json()
conversation_id = request_json.get("conversation_id", None)
@@ -1232,20 +1246,16 @@ async def delete_conversation():
if not conversation_id:
return jsonify({"error": "conversation_id is required"}), 400
- ## make sure cosmos is configured
+ # make sure cosmos is configured
cosmos_conversation_client = init_cosmosdb_client()
if not cosmos_conversation_client:
raise Exception("CosmosDB is not configured or not working")
- ## delete the conversation messages from cosmos first
- deleted_messages = await cosmos_conversation_client.delete_messages(
- conversation_id, user_id
- )
+ # delete the conversation messages from cosmos first
+ await cosmos_conversation_client.delete_messages(conversation_id, user_id)
- ## Now delete the conversation
- deleted_conversation = await cosmos_conversation_client.delete_conversation(
- user_id, conversation_id
- )
+ # Now delete the conversation
+ await cosmos_conversation_client.delete_conversation(user_id, conversation_id)
await cosmos_conversation_client.cosmosdb_client.close()
@@ -1269,12 +1279,12 @@ async def list_conversations():
authenticated_user = get_authenticated_user_details(request_headers=request.headers)
user_id = authenticated_user["user_principal_id"]
- ## make sure cosmos is configured
+ # make sure cosmos is configured
cosmos_conversation_client = init_cosmosdb_client()
if not cosmos_conversation_client:
raise Exception("CosmosDB is not configured or not working")
- ## get the conversations from cosmos
+ # get the conversations from cosmos
conversations = await cosmos_conversation_client.get_conversations(
user_id, offset=offset, limit=25
)
@@ -1282,7 +1292,7 @@ async def list_conversations():
if not isinstance(conversations, list):
return jsonify({"error": f"No conversations for {user_id} were found"}), 404
- ## return the conversation ids
+ # return the conversation ids
return jsonify(conversations), 200
@@ -1292,23 +1302,23 @@ async def get_conversation():
authenticated_user = get_authenticated_user_details(request_headers=request.headers)
user_id = authenticated_user["user_principal_id"]
- ## check request for conversation_id
+ # check request for conversation_id
request_json = await request.get_json()
conversation_id = request_json.get("conversation_id", None)
if not conversation_id:
return jsonify({"error": "conversation_id is required"}), 400
- ## make sure cosmos is configured
+ # make sure cosmos is configured
cosmos_conversation_client = init_cosmosdb_client()
if not cosmos_conversation_client:
raise Exception("CosmosDB is not configured or not working")
- ## get the conversation object and the related messages from cosmos
+ # get the conversation object and the related messages from cosmos
conversation = await cosmos_conversation_client.get_conversation(
user_id, conversation_id
)
- ## return the conversation id and the messages in the bot frontend format
+ # return the conversation id and the messages in the bot frontend format
if not conversation:
return (
jsonify(
@@ -1324,7 +1334,7 @@ async def get_conversation():
user_id, conversation_id
)
- ## format the messages in the bot frontend format
+ # format the messages in the bot frontend format
messages = [
{
"id": msg["id"],
@@ -1345,19 +1355,19 @@ async def rename_conversation():
authenticated_user = get_authenticated_user_details(request_headers=request.headers)
user_id = authenticated_user["user_principal_id"]
- ## check request for conversation_id
+ # check request for conversation_id
request_json = await request.get_json()
conversation_id = request_json.get("conversation_id", None)
if not conversation_id:
return jsonify({"error": "conversation_id is required"}), 400
- ## make sure cosmos is configured
+ # make sure cosmos is configured
cosmos_conversation_client = init_cosmosdb_client()
if not cosmos_conversation_client:
raise Exception("CosmosDB is not configured or not working")
- ## get the conversation from cosmos
+ # get the conversation from cosmos
conversation = await cosmos_conversation_client.get_conversation(
user_id, conversation_id
)
@@ -1371,7 +1381,7 @@ async def rename_conversation():
404,
)
- ## update the title
+ # update the title
title = request_json.get("title", None)
if not title:
return jsonify({"error": "title is required"}), 400
@@ -1386,13 +1396,13 @@ async def rename_conversation():
@bp.route("/history/delete_all", methods=["DELETE"])
async def delete_all_conversations():
- ## get the user id from the request headers
+ # get the user id from the request headers
authenticated_user = get_authenticated_user_details(request_headers=request.headers)
user_id = authenticated_user["user_principal_id"]
# get conversations for user
try:
- ## make sure cosmos is configured
+ # make sure cosmos is configured
cosmos_conversation_client = init_cosmosdb_client()
if not cosmos_conversation_client:
raise Exception("CosmosDB is not configured or not working")
@@ -1405,13 +1415,13 @@ async def delete_all_conversations():
# delete each conversation
for conversation in conversations:
- ## delete the conversation messages from cosmos first
- deleted_messages = await cosmos_conversation_client.delete_messages(
+ # delete the conversation messages from cosmos first
+ await cosmos_conversation_client.delete_messages(
conversation["id"], user_id
)
- ## Now delete the conversation
- deleted_conversation = await cosmos_conversation_client.delete_conversation(
+ # Now delete the conversation
+ await cosmos_conversation_client.delete_conversation(
user_id, conversation["id"]
)
await cosmos_conversation_client.cosmosdb_client.close()
@@ -1431,11 +1441,11 @@ async def delete_all_conversations():
@bp.route("/history/clear", methods=["POST"])
async def clear_messages():
- ## get the user id from the request headers
+ # get the user id from the request headers
authenticated_user = get_authenticated_user_details(request_headers=request.headers)
user_id = authenticated_user["user_principal_id"]
- ## check request for conversation_id
+ # check request for conversation_id
request_json = await request.get_json()
conversation_id = request_json.get("conversation_id", None)
@@ -1443,15 +1453,13 @@ async def clear_messages():
if not conversation_id:
return jsonify({"error": "conversation_id is required"}), 400
- ## make sure cosmos is configured
+ # make sure cosmos is configured
cosmos_conversation_client = init_cosmosdb_client()
if not cosmos_conversation_client:
raise Exception("CosmosDB is not configured or not working")
- ## delete the conversation messages from cosmos
- deleted_messages = await cosmos_conversation_client.delete_messages(
- conversation_id, user_id
- )
+ # delete the conversation messages from cosmos
+ await cosmos_conversation_client.delete_messages(conversation_id, user_id)
return (
jsonify(
@@ -1510,7 +1518,7 @@ async def ensure_cosmos():
async def generate_title(conversation_messages):
- ## make sure the messages are sorted by _ts descending
+ # make sure the messages are sorted by _ts descending
title_prompt = 'Summarize the conversation so far into a 4-word or less title. Do not use any quotation marks or punctuation. Respond with a json object in the format {{"title": string}}. Do not include any other commentary or description.'
messages = [
@@ -1527,34 +1535,36 @@ async def generate_title(conversation_messages):
title = json.loads(response.choices[0].message.content)["title"]
return title
- except Exception as e:
+ except Exception:
return messages[-2]["content"]
-
-@bp.route("/api/pbi", methods=['GET'])
+
+
+@bp.route("/api/pbi", methods=["GET"])
def get_pbiurl():
return VITE_POWERBI_EMBED_URL
-
-@bp.route("/api/users", methods=['GET'])
+
+
+@bp.route("/api/users", methods=["GET"])
def get_users():
- conn = None
+ conn = None
try:
conn = get_connection()
cursor = conn.cursor()
sql_stmt = """
- SELECT
- ClientId,
- Client,
- Email,
+ SELECT
+ ClientId,
+ Client,
+ Email,
FORMAT(AssetValue, 'N0') AS AssetValue,
ClientSummary,
CAST(LastMeeting AS DATE) AS LastMeetingDate,
FORMAT(CAST(LastMeeting AS DATE), 'dddd MMMM d, yyyy') AS LastMeetingDateFormatted,
- FORMAT(LastMeeting, 'hh:mm tt') AS LastMeetingStartTime,
- FORMAT(LastMeetingEnd, 'hh:mm tt') AS LastMeetingEndTime,
+ FORMAT(LastMeeting, 'HH:mm') AS LastMeetingStartTime,
+ FORMAT(LastMeetingEnd, 'HH:mm') AS LastMeetingEndTime,
CAST(NextMeeting AS DATE) AS NextMeetingDate,
FORMAT(CAST(NextMeeting AS DATE), 'dddd MMMM d, yyyy') AS NextMeetingFormatted,
- FORMAT(NextMeeting, 'hh:mm tt') AS NextMeetingStartTime,
- FORMAT(NextMeetingEnd, 'hh:mm tt') AS NextMeetingEndTime
+ FORMAT(NextMeeting, 'HH:mm') AS NextMeetingStartTime,
+ FORMAT(NextMeetingEnd, 'HH:mm') AS NextMeetingEndTime
FROM (
SELECT ca.ClientId, Client, Email, AssetValue, ClientSummary, LastMeeting, LastMeetingEnd, NextMeeting, NextMeetingEnd
FROM (
@@ -1573,7 +1583,7 @@ def get_users():
JOIN ClientSummaries cs ON c.ClientId = cs.ClientId
) ca
JOIN (
- SELECT cm.ClientId,
+ SELECT cm.ClientId,
MAX(CASE WHEN StartTime < GETDATE() THEN StartTime END) AS LastMeeting,
DATEADD(MINUTE, 30, MAX(CASE WHEN StartTime < GETDATE() THEN StartTime END)) AS LastMeetingEnd,
MIN(CASE WHEN StartTime > GETDATE() AND StartTime < GETDATE() + 7 THEN StartTime END) AS NextMeeting,
@@ -1588,23 +1598,28 @@ def get_users():
cursor.execute(sql_stmt)
rows = cursor.fetchall()
+
if len(rows) <= 6:
- #update ClientMeetings,Assets,Retirement tables sample data to current date
+ # update ClientMeetings, Assets, Retirement tables sample data to current date
cursor = conn.cursor()
- cursor.execute("""select DATEDIFF(d,CAST(max(StartTime) AS Date),CAST(GETDATE() AS Date)) + 3 as ndays from ClientMeetings""")
+ cursor.execute(
+ """select DATEDIFF(d,CAST(max(StartTime) AS Date),CAST(GETDATE() AS Date)) + 3 as ndays from ClientMeetings"""
+ )
rows = cursor.fetchall()
for row in rows:
- ndays = row['ndays']
- sql_stmt1 = f'UPDATE ClientMeetings SET StartTime = dateadd(day,{ndays},StartTime), EndTime = dateadd(day,{ndays},EndTime)'
+ ndays = row["ndays"]
+ sql_stmt1 = f"UPDATE ClientMeetings SET StartTime = dateadd(day,{ndays},StartTime), EndTime = dateadd(day,{ndays},EndTime)"
cursor.execute(sql_stmt1)
conn.commit()
- nmonths = int(ndays/30)
+ nmonths = int(ndays / 30)
if nmonths > 0:
- sql_stmt1 = f'UPDATE Assets SET AssetDate = dateadd(MONTH,{nmonths},AssetDate)'
+ sql_stmt1 = (
+ f"UPDATE Assets SET AssetDate = dateadd(MONTH,{nmonths},AssetDate)"
+ )
cursor.execute(sql_stmt1)
conn.commit()
-
- sql_stmt1 = f'UPDATE Retirement SET StatusDate = dateadd(MONTH,{nmonths},StatusDate)'
+
+ sql_stmt1 = f"UPDATE Retirement SET StatusDate = dateadd(MONTH,{nmonths},StatusDate)"
cursor.execute(sql_stmt1)
conn.commit()
@@ -1616,29 +1631,29 @@ def get_users():
for row in rows:
# print(row)
user = {
- 'ClientId': row['ClientId'],
- 'ClientName': row['Client'],
- 'ClientEmail': row['Email'],
- 'AssetValue': row['AssetValue'],
- 'NextMeeting': row['NextMeetingFormatted'],
- 'NextMeetingTime': row['NextMeetingStartTime'],
- 'NextMeetingEndTime': row['NextMeetingEndTime'],
- 'LastMeeting': row['LastMeetingDateFormatted'],
- 'LastMeetingStartTime': row['LastMeetingStartTime'],
- 'LastMeetingEndTime': row['LastMeetingEndTime'],
- 'ClientSummary': row['ClientSummary']
- }
+ "ClientId": row["ClientId"],
+ "ClientName": row["Client"],
+ "ClientEmail": row["Email"],
+ "AssetValue": row["AssetValue"],
+ "NextMeeting": row["NextMeetingFormatted"],
+ "NextMeetingTime": row["NextMeetingStartTime"],
+ "NextMeetingEndTime": row["NextMeetingEndTime"],
+ "LastMeeting": row["LastMeetingDateFormatted"],
+ "LastMeetingStartTime": row["LastMeetingStartTime"],
+ "LastMeetingEndTime": row["LastMeetingEndTime"],
+ "ClientSummary": row["ClientSummary"],
+ }
users.append(user)
# print(users)
-
+
return jsonify(users)
-
-
+
except Exception as e:
print("Exception occurred:", e)
return str(e), 500
finally:
if conn:
conn.close()
-
+
+
app = create_app()
diff --git a/ClientAdvisor/App/backend/auth/auth_utils.py b/ClientAdvisor/App/backend/auth/auth_utils.py
index 3a97e610a..31e01dff7 100644
--- a/ClientAdvisor/App/backend/auth/auth_utils.py
+++ b/ClientAdvisor/App/backend/auth/auth_utils.py
@@ -2,38 +2,41 @@
import json
import logging
+
def get_authenticated_user_details(request_headers):
user_object = {}
- ## check the headers for the Principal-Id (the guid of the signed in user)
+ # check the headers for the Principal-Id (the guid of the signed in user)
if "X-Ms-Client-Principal-Id" not in request_headers.keys():
- ## if it's not, assume we're in development mode and return a default user
+ # if it's not, assume we're in development mode and return a default user
from . import sample_user
+
raw_user_object = sample_user.sample_user
else:
- ## if it is, get the user details from the EasyAuth headers
- raw_user_object = {k:v for k,v in request_headers.items()}
+ # if it is, get the user details from the EasyAuth headers
+ raw_user_object = {k: v for k, v in request_headers.items()}
- user_object['user_principal_id'] = raw_user_object.get('X-Ms-Client-Principal-Id')
- user_object['user_name'] = raw_user_object.get('X-Ms-Client-Principal-Name')
- user_object['auth_provider'] = raw_user_object.get('X-Ms-Client-Principal-Idp')
- user_object['auth_token'] = raw_user_object.get('X-Ms-Token-Aad-Id-Token')
- user_object['client_principal_b64'] = raw_user_object.get('X-Ms-Client-Principal')
- user_object['aad_id_token'] = raw_user_object.get('X-Ms-Token-Aad-Id-Token')
+ user_object["user_principal_id"] = raw_user_object.get("X-Ms-Client-Principal-Id")
+ user_object["user_name"] = raw_user_object.get("X-Ms-Client-Principal-Name")
+ user_object["auth_provider"] = raw_user_object.get("X-Ms-Client-Principal-Idp")
+ user_object["auth_token"] = raw_user_object.get("X-Ms-Token-Aad-Id-Token")
+ user_object["client_principal_b64"] = raw_user_object.get("X-Ms-Client-Principal")
+ user_object["aad_id_token"] = raw_user_object.get("X-Ms-Token-Aad-Id-Token")
return user_object
+
def get_tenantid(client_principal_b64):
- tenant_id = ''
- if client_principal_b64:
+ tenant_id = ""
+ if client_principal_b64:
try:
# Decode the base64 header to get the JSON string
decoded_bytes = base64.b64decode(client_principal_b64)
- decoded_string = decoded_bytes.decode('utf-8')
+ decoded_string = decoded_bytes.decode("utf-8")
# Convert the JSON string1into a Python dictionary
user_info = json.loads(decoded_string)
# Extract the tenant ID
- tenant_id = user_info.get('tid') # 'tid' typically holds the tenant ID
+ tenant_id = user_info.get("tid") # 'tid' typically holds the tenant ID
except Exception as ex:
logging.exception(ex)
- return tenant_id
\ No newline at end of file
+ return tenant_id
diff --git a/ClientAdvisor/App/backend/auth/sample_user.py b/ClientAdvisor/App/backend/auth/sample_user.py
index 0b10d9ab5..9353bcc1b 100644
--- a/ClientAdvisor/App/backend/auth/sample_user.py
+++ b/ClientAdvisor/App/backend/auth/sample_user.py
@@ -1,39 +1,39 @@
sample_user = {
- "Accept": "*/*",
- "Accept-Encoding": "gzip, deflate, br",
- "Accept-Language": "en",
- "Client-Ip": "22.222.222.2222:64379",
- "Content-Length": "192",
- "Content-Type": "application/json",
- "Cookie": "AppServiceAuthSession=/AuR5ENU+pmpoN3jnymP8fzpmVBgphx9uPQrYLEWGcxjIITIeh8NZW7r3ePkG8yBcMaItlh1pX4nzg5TFD9o2mxC/5BNDRe/uuu0iDlLEdKecROZcVRY7QsFdHLjn9KB90Z3d9ZeLwfVIf0sZowWJt03BO5zKGB7vZgL+ofv3QY3AaYn1k1GtxSE9HQWJpWar7mOA64b7Lsy62eY3nxwg3AWDsP3/rAta+MnDCzpdlZMFXcJLj+rsCppW+w9OqGhKQ7uCs03BPeon3qZOdmE8cOJW3+i96iYlhneNQDItHyQqEi1CHbBTSkqwpeOwWP4vcwGM22ynxPp7YFyiRw/X361DGYy+YkgYBkXq1AEIDZ44BCBz9EEaEi0NU+m6yUOpNjEaUtrJKhQywcM2odojdT4XAY+HfTEfSqp0WiAkgAuE/ueCu2JDOfvxGjCgJ4DGWCoYdOdXAN1c+MenT4OSvkMO41YuPeah9qk9ixkJI5s80lv8rUu1J26QF6pstdDkYkAJAEra3RQiiO1eAH7UEb3xHXn0HW5lX8ZDX3LWiAFGOt5DIKxBKFymBKJGzbPFPYjfczegu0FD8/NQPLl2exAX3mI9oy/tFnATSyLO2E8DxwP5wnYVminZOQMjB/I4g3Go14betm0MlNXlUbU1fyS6Q6JxoCNLDZywCoU9Y65UzimWZbseKsXlOwYukCEpuQ5QPT55LuEAWhtYier8LSh+fvVUsrkqKS+bg0hzuoX53X6aqUr7YB31t0Z2zt5TT/V3qXpdyD8Xyd884PqysSkJYa553sYx93ETDKSsfDguanVfn2si9nvDpvUWf6/R02FmQgXiaaaykMgYyIuEmE77ptsivjH3hj/MN4VlePFWokcchF4ciqqzonmICmjEHEx5zpjU2Kwa+0y7J5ROzVVygcnO1jH6ZKDy9bGGYL547bXx/iiYBYqSIQzleOAkCeULrGN2KEHwckX5MpuRaqTpoxdZH9RJv0mIWxbDA0kwGsbMICQd0ZODBkPUnE84qhzvXInC+TL7MbutPEnGbzgxBAS1c2Ct4vxkkjykOeOxTPxqAhxoefwUfIwZZax6A9LbeYX2bsBpay0lScHcA==",
- "Disguised-Host": "your_app_service.azurewebsites.net",
- "Host": "your_app_service.azurewebsites.net",
- "Max-Forwards": "10",
- "Origin": "https://your_app_service.azurewebsites.net",
- "Referer": "https://your_app_service.azurewebsites.net/",
- "Sec-Ch-Ua": "\"Microsoft Edge\";v=\"113\", \"Chromium\";v=\"113\", \"Not-A.Brand\";v=\"24\"",
- "Sec-Ch-Ua-Mobile": "?0",
- "Sec-Ch-Ua-Platform": "\"Windows\"",
- "Sec-Fetch-Dest": "empty",
- "Sec-Fetch-Mode": "cors",
- "Sec-Fetch-Site": "same-origin",
- "Traceparent": "00-24e9a8d1b06f233a3f1714845ef971a9-3fac69f81ca5175c-00",
- "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.42",
- "Was-Default-Hostname": "your_app_service.azurewebsites.net",
- "X-Appservice-Proto": "https",
- "X-Arr-Log-Id": "4102b832-6c88-4c7c-8996-0edad9e4358f",
- "X-Arr-Ssl": "2048|256|CN=Microsoft Azure TLS Issuing CA 02, O=Microsoft Corporation, C=US|CN=*.azurewebsites.net, O=Microsoft Corporation, L=Redmond, S=WA, C=US",
- "X-Client-Ip": "22.222.222.222",
- "X-Client-Port": "64379",
- "X-Forwarded-For": "22.222.222.22:64379",
- "X-Forwarded-Proto": "https",
- "X-Forwarded-Tlsversion": "1.2",
- "X-Ms-Client-Principal": "your_base_64_encoded_token",
- "X-Ms-Client-Principal-Id": "00000000-0000-0000-0000-000000000000",
- "X-Ms-Client-Principal-Idp": "aad",
- "X-Ms-Client-Principal-Name": "testusername@constoso.com",
- "X-Ms-Token-Aad-Id-Token": "your_aad_id_token",
- "X-Original-Url": "/chatgpt",
- "X-Site-Deployment-Id": "your_app_service",
- "X-Waws-Unencoded-Url": "/chatgpt"
+ "Accept": "*/*",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Accept-Language": "en",
+ "Client-Ip": "22.222.222.2222:64379",
+ "Content-Length": "192",
+ "Content-Type": "application/json",
+ "Cookie": "AppServiceAuthSession=/AuR5ENU+pmpoN3jnymP8fzpmVBgphx9uPQrYLEWGcxjIITIeh8NZW7r3ePkG8yBcMaItlh1pX4nzg5TFD9o2mxC/5BNDRe/uuu0iDlLEdKecROZcVRY7QsFdHLjn9KB90Z3d9ZeLwfVIf0sZowWJt03BO5zKGB7vZgL+ofv3QY3AaYn1k1GtxSE9HQWJpWar7mOA64b7Lsy62eY3nxwg3AWDsP3/rAta+MnDCzpdlZMFXcJLj+rsCppW+w9OqGhKQ7uCs03BPeon3qZOdmE8cOJW3+i96iYlhneNQDItHyQqEi1CHbBTSkqwpeOwWP4vcwGM22ynxPp7YFyiRw/X361DGYy+YkgYBkXq1AEIDZ44BCBz9EEaEi0NU+m6yUOpNjEaUtrJKhQywcM2odojdT4XAY+HfTEfSqp0WiAkgAuE/ueCu2JDOfvxGjCgJ4DGWCoYdOdXAN1c+MenT4OSvkMO41YuPeah9qk9ixkJI5s80lv8rUu1J26QF6pstdDkYkAJAEra3RQiiO1eAH7UEb3xHXn0HW5lX8ZDX3LWiAFGOt5DIKxBKFymBKJGzbPFPYjfczegu0FD8/NQPLl2exAX3mI9oy/tFnATSyLO2E8DxwP5wnYVminZOQMjB/I4g3Go14betm0MlNXlUbU1fyS6Q6JxoCNLDZywCoU9Y65UzimWZbseKsXlOwYukCEpuQ5QPT55LuEAWhtYier8LSh+fvVUsrkqKS+bg0hzuoX53X6aqUr7YB31t0Z2zt5TT/V3qXpdyD8Xyd884PqysSkJYa553sYx93ETDKSsfDguanVfn2si9nvDpvUWf6/R02FmQgXiaaaykMgYyIuEmE77ptsivjH3hj/MN4VlePFWokcchF4ciqqzonmICmjEHEx5zpjU2Kwa+0y7J5ROzVVygcnO1jH6ZKDy9bGGYL547bXx/iiYBYqSIQzleOAkCeULrGN2KEHwckX5MpuRaqTpoxdZH9RJv0mIWxbDA0kwGsbMICQd0ZODBkPUnE84qhzvXInC+TL7MbutPEnGbzgxBAS1c2Ct4vxkkjykOeOxTPxqAhxoefwUfIwZZax6A9LbeYX2bsBpay0lScHcA==",
+ "Disguised-Host": "your_app_service.azurewebsites.net",
+ "Host": "your_app_service.azurewebsites.net",
+ "Max-Forwards": "10",
+ "Origin": "https://your_app_service.azurewebsites.net",
+ "Referer": "https://your_app_service.azurewebsites.net/",
+ "Sec-Ch-Ua": '"Microsoft Edge";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
+ "Sec-Ch-Ua-Mobile": "?0",
+ "Sec-Ch-Ua-Platform": '"Windows"',
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "Traceparent": "00-24e9a8d1b06f233a3f1714845ef971a9-3fac69f81ca5175c-00",
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.42",
+ "Was-Default-Hostname": "your_app_service.azurewebsites.net",
+ "X-Appservice-Proto": "https",
+ "X-Arr-Log-Id": "4102b832-6c88-4c7c-8996-0edad9e4358f",
+ "X-Arr-Ssl": "2048|256|CN=Microsoft Azure TLS Issuing CA 02, O=Microsoft Corporation, C=US|CN=*.azurewebsites.net, O=Microsoft Corporation, L=Redmond, S=WA, C=US",
+ "X-Client-Ip": "22.222.222.222",
+ "X-Client-Port": "64379",
+ "X-Forwarded-For": "22.222.222.22:64379",
+ "X-Forwarded-Proto": "https",
+ "X-Forwarded-Tlsversion": "1.2",
+ "X-Ms-Client-Principal": "your_base_64_encoded_token",
+ "X-Ms-Client-Principal-Id": "00000000-0000-0000-0000-000000000000",
+ "X-Ms-Client-Principal-Idp": "aad",
+ "X-Ms-Client-Principal-Name": "testusername@constoso.com",
+ "X-Ms-Token-Aad-Id-Token": "your_aad_id_token",
+ "X-Original-Url": "/chatgpt",
+ "X-Site-Deployment-Id": "your_app_service",
+ "X-Waws-Unencoded-Url": "/chatgpt",
}
diff --git a/ClientAdvisor/App/backend/history/cosmosdbservice.py b/ClientAdvisor/App/backend/history/cosmosdbservice.py
index 737c23d9a..e9fba5204 100644
--- a/ClientAdvisor/App/backend/history/cosmosdbservice.py
+++ b/ClientAdvisor/App/backend/history/cosmosdbservice.py
@@ -2,17 +2,27 @@
from datetime import datetime
from azure.cosmos.aio import CosmosClient
from azure.cosmos import exceptions
-
-class CosmosConversationClient():
-
- def __init__(self, cosmosdb_endpoint: str, credential: any, database_name: str, container_name: str, enable_message_feedback: bool = False):
+
+
+class CosmosConversationClient:
+
+ def __init__(
+ self,
+ cosmosdb_endpoint: str,
+ credential: any,
+ database_name: str,
+ container_name: str,
+ enable_message_feedback: bool = False,
+ ):
self.cosmosdb_endpoint = cosmosdb_endpoint
self.credential = credential
self.database_name = database_name
self.container_name = container_name
self.enable_message_feedback = enable_message_feedback
try:
- self.cosmosdb_client = CosmosClient(self.cosmosdb_endpoint, credential=credential)
+ self.cosmosdb_client = CosmosClient(
+ self.cosmosdb_endpoint, credential=credential
+ )
except exceptions.CosmosHttpResponseError as e:
if e.status_code == 401:
raise ValueError("Invalid credentials") from e
@@ -20,48 +30,58 @@ def __init__(self, cosmosdb_endpoint: str, credential: any, database_name: str,
raise ValueError("Invalid CosmosDB endpoint") from e
try:
- self.database_client = self.cosmosdb_client.get_database_client(database_name)
+ self.database_client = self.cosmosdb_client.get_database_client(
+ database_name
+ )
except exceptions.CosmosResourceNotFoundError:
- raise ValueError("Invalid CosmosDB database name")
-
+ raise ValueError("Invalid CosmosDB database name")
+
try:
- self.container_client = self.database_client.get_container_client(container_name)
+ self.container_client = self.database_client.get_container_client(
+ container_name
+ )
except exceptions.CosmosResourceNotFoundError:
- raise ValueError("Invalid CosmosDB container name")
-
+ raise ValueError("Invalid CosmosDB container name")
async def ensure(self):
- if not self.cosmosdb_client or not self.database_client or not self.container_client:
+ if (
+ not self.cosmosdb_client
+ or not self.database_client
+ or not self.container_client
+ ):
return False, "CosmosDB client not initialized correctly"
-
+
try:
- database_info = await self.database_client.read()
- except:
- return False, f"CosmosDB database {self.database_name} on account {self.cosmosdb_endpoint} not found"
-
+ await self.database_client.read()
+ except Exception:
+ return (
+ False,
+ f"CosmosDB database {self.database_name} on account {self.cosmosdb_endpoint} not found",
+ )
+
try:
- container_info = await self.container_client.read()
- except:
+ await self.container_client.read()
+ except Exception:
return False, f"CosmosDB container {self.container_name} not found"
-
+
return True, "CosmosDB client initialized successfully"
- async def create_conversation(self, user_id, title = ''):
+ async def create_conversation(self, user_id, title=""):
conversation = {
- 'id': str(uuid.uuid4()),
- 'type': 'conversation',
- 'createdAt': datetime.utcnow().isoformat(),
- 'updatedAt': datetime.utcnow().isoformat(),
- 'userId': user_id,
- 'title': title
+ "id": str(uuid.uuid4()),
+ "type": "conversation",
+ "createdAt": datetime.utcnow().isoformat(),
+ "updatedAt": datetime.utcnow().isoformat(),
+ "userId": user_id,
+ "title": title,
}
- ## TODO: add some error handling based on the output of the upsert_item call
- resp = await self.container_client.upsert_item(conversation)
+ # TODO: add some error handling based on the output of the upsert_item call
+ resp = await self.container_client.upsert_item(conversation)
if resp:
return resp
else:
return False
-
+
async def upsert_conversation(self, conversation):
resp = await self.container_client.upsert_item(conversation)
if resp:
@@ -70,95 +90,94 @@ async def upsert_conversation(self, conversation):
return False
async def delete_conversation(self, user_id, conversation_id):
- conversation = await self.container_client.read_item(item=conversation_id, partition_key=user_id)
+ conversation = await self.container_client.read_item(
+ item=conversation_id, partition_key=user_id
+ )
if conversation:
- resp = await self.container_client.delete_item(item=conversation_id, partition_key=user_id)
+ resp = await self.container_client.delete_item(
+ item=conversation_id, partition_key=user_id
+ )
return resp
else:
return True
-
async def delete_messages(self, conversation_id, user_id):
- ## get a list of all the messages in the conversation
+ # get a list of all the messages in the conversation
messages = await self.get_messages(user_id, conversation_id)
response_list = []
if messages:
for message in messages:
- resp = await self.container_client.delete_item(item=message['id'], partition_key=user_id)
+ resp = await self.container_client.delete_item(
+ item=message["id"], partition_key=user_id
+ )
response_list.append(resp)
return response_list
-
- async def get_conversations(self, user_id, limit, sort_order = 'DESC', offset = 0):
- parameters = [
- {
- 'name': '@userId',
- 'value': user_id
- }
- ]
+ async def get_conversations(self, user_id, limit, sort_order="DESC", offset=0):
+ parameters = [{"name": "@userId", "value": user_id}]
query = f"SELECT * FROM c where c.userId = @userId and c.type='conversation' order by c.updatedAt {sort_order}"
if limit is not None:
- query += f" offset {offset} limit {limit}"
-
+ query += f" offset {offset} limit {limit}"
+
conversations = []
- async for item in self.container_client.query_items(query=query, parameters=parameters):
+ async for item in self.container_client.query_items(
+ query=query, parameters=parameters
+ ):
conversations.append(item)
-
+
return conversations
async def get_conversation(self, user_id, conversation_id):
parameters = [
- {
- 'name': '@conversationId',
- 'value': conversation_id
- },
- {
- 'name': '@userId',
- 'value': user_id
- }
+ {"name": "@conversationId", "value": conversation_id},
+ {"name": "@userId", "value": user_id},
]
- query = f"SELECT * FROM c where c.id = @conversationId and c.type='conversation' and c.userId = @userId"
+ query = "SELECT * FROM c where c.id = @conversationId and c.type='conversation' and c.userId = @userId"
conversations = []
- async for item in self.container_client.query_items(query=query, parameters=parameters):
+ async for item in self.container_client.query_items(
+ query=query, parameters=parameters
+ ):
conversations.append(item)
- ## if no conversations are found, return None
+ # if no conversations are found, return None
if len(conversations) == 0:
return None
else:
return conversations[0]
-
+
async def create_message(self, uuid, conversation_id, user_id, input_message: dict):
message = {
- 'id': uuid,
- 'type': 'message',
- 'userId' : user_id,
- 'createdAt': datetime.utcnow().isoformat(),
- 'updatedAt': datetime.utcnow().isoformat(),
- 'conversationId' : conversation_id,
- 'role': input_message['role'],
- 'content': input_message['content']
+ "id": uuid,
+ "type": "message",
+ "userId": user_id,
+ "createdAt": datetime.utcnow().isoformat(),
+ "updatedAt": datetime.utcnow().isoformat(),
+ "conversationId": conversation_id,
+ "role": input_message["role"],
+ "content": input_message["content"],
}
if self.enable_message_feedback:
- message['feedback'] = ''
-
- resp = await self.container_client.upsert_item(message)
+ message["feedback"] = ""
+
+ resp = await self.container_client.upsert_item(message)
if resp:
- ## update the parent conversations's updatedAt field with the current message's createdAt datetime value
+ # update the parent conversation's updatedAt field with the current message's createdAt datetime value
conversation = await self.get_conversation(user_id, conversation_id)
if not conversation:
return "Conversation not found"
- conversation['updatedAt'] = message['createdAt']
+ conversation["updatedAt"] = message["createdAt"]
await self.upsert_conversation(conversation)
return resp
else:
return False
-
+
async def update_message_feedback(self, user_id, message_id, feedback):
- message = await self.container_client.read_item(item=message_id, partition_key=user_id)
+ message = await self.container_client.read_item(
+ item=message_id, partition_key=user_id
+ )
if message:
- message['feedback'] = feedback
+ message["feedback"] = feedback
resp = await self.container_client.upsert_item(message)
return resp
else:
@@ -166,19 +185,14 @@ async def update_message_feedback(self, user_id, message_id, feedback):
async def get_messages(self, user_id, conversation_id):
parameters = [
- {
- 'name': '@conversationId',
- 'value': conversation_id
- },
- {
- 'name': '@userId',
- 'value': user_id
- }
+ {"name": "@conversationId", "value": conversation_id},
+ {"name": "@userId", "value": user_id},
]
- query = f"SELECT * FROM c WHERE c.conversationId = @conversationId AND c.type='message' AND c.userId = @userId ORDER BY c.timestamp ASC"
+ query = "SELECT * FROM c WHERE c.conversationId = @conversationId AND c.type='message' AND c.userId = @userId ORDER BY c.timestamp ASC"
messages = []
- async for item in self.container_client.query_items(query=query, parameters=parameters):
+ async for item in self.container_client.query_items(
+ query=query, parameters=parameters
+ ):
messages.append(item)
return messages
-
diff --git a/ClientAdvisor/App/backend/utils.py b/ClientAdvisor/App/backend/utils.py
index 5c53bd001..ca7f325b0 100644
--- a/ClientAdvisor/App/backend/utils.py
+++ b/ClientAdvisor/App/backend/utils.py
@@ -104,6 +104,7 @@ def format_non_streaming_response(chatCompletion, history_metadata, apim_request
return {}
+
def format_stream_response(chatCompletionChunk, history_metadata, apim_request_id):
response_obj = {
"id": chatCompletionChunk.id,
@@ -142,7 +143,11 @@ def format_stream_response(chatCompletionChunk, history_metadata, apim_request_i
def format_pf_non_streaming_response(
- chatCompletion, history_metadata, response_field_name, citations_field_name, message_uuid=None
+ chatCompletion,
+ history_metadata,
+ response_field_name,
+ citations_field_name,
+ message_uuid=None,
):
if chatCompletion is None:
logging.error(
@@ -159,15 +164,13 @@ def format_pf_non_streaming_response(
try:
messages = []
if response_field_name in chatCompletion:
- messages.append({
- "role": "assistant",
- "content": chatCompletion[response_field_name]
- })
+ messages.append(
+ {"role": "assistant", "content": chatCompletion[response_field_name]}
+ )
if citations_field_name in chatCompletion:
- messages.append({
- "role": "tool",
- "content": chatCompletion[citations_field_name]
- })
+ messages.append(
+ {"role": "tool", "content": chatCompletion[citations_field_name]}
+ )
response_obj = {
"id": chatCompletion["id"],
"model": "",
@@ -178,7 +181,7 @@ def format_pf_non_streaming_response(
"messages": messages,
"history_metadata": history_metadata,
}
- ]
+ ],
}
return response_obj
except Exception as e:
diff --git a/ClientAdvisor/App/db.py b/ClientAdvisor/App/db.py
index 03de12ffa..ab7dc375e 100644
--- a/ClientAdvisor/App/db.py
+++ b/ClientAdvisor/App/db.py
@@ -5,19 +5,15 @@
load_dotenv()
-server = os.environ.get('SQLDB_SERVER')
-database = os.environ.get('SQLDB_DATABASE')
-username = os.environ.get('SQLDB_USERNAME')
-password = os.environ.get('SQLDB_PASSWORD')
+server = os.environ.get("SQLDB_SERVER")
+database = os.environ.get("SQLDB_DATABASE")
+username = os.environ.get("SQLDB_USERNAME")
+password = os.environ.get("SQLDB_PASSWORD")
+
def get_connection():
conn = pymssql.connect(
- server=server,
- user=username,
- password=password,
- database=database,
- as_dict=True
- )
+ server=server, user=username, password=password, database=database, as_dict=True
+ )
return conn
-
\ No newline at end of file
diff --git a/ClientAdvisor/App/frontend/__mocks__/dompurify.ts b/ClientAdvisor/App/frontend/__mocks__/dompurify.ts
new file mode 100644
index 000000000..02ccb1e8c
--- /dev/null
+++ b/ClientAdvisor/App/frontend/__mocks__/dompurify.ts
@@ -0,0 +1,5 @@
+const DOMPurify = {
+ sanitize: jest.fn((input: string) => input), // Mock implementation that returns the input
+};
+
+export default DOMPurify; // Use default export
diff --git a/ClientAdvisor/App/frontend/__mocks__/fileMock.ts b/ClientAdvisor/App/frontend/__mocks__/fileMock.ts
new file mode 100644
index 000000000..398045fc4
--- /dev/null
+++ b/ClientAdvisor/App/frontend/__mocks__/fileMock.ts
@@ -0,0 +1,4 @@
+// __mocks__/fileMock.ts
+const fileMock = 'test-file-stub';
+
+export default fileMock;
diff --git a/ClientAdvisor/App/frontend/__mocks__/mockAPIData.ts b/ClientAdvisor/App/frontend/__mocks__/mockAPIData.ts
new file mode 100644
index 000000000..721a9c922
--- /dev/null
+++ b/ClientAdvisor/App/frontend/__mocks__/mockAPIData.ts
@@ -0,0 +1,164 @@
+export const conversationResponseWithCitations = {
+ answer: {
+ answer:
+ "Microsoft AI encompasses a wide range of technologies and solutions that leverage artificial intelligence to empower individuals and organizations. Microsoft's AI platform, Azure AI, helps organizations transform by bringing intelligence and insights to solve their most pressing challenges[doc2]. Azure AI offers enterprise-level and responsible AI protections, enabling organizations to achieve more at scale[doc8]. Microsoft has a long-term partnership with OpenAI and deploys OpenAI's models across its consumer and enterprise products[doc5]. The company is committed to making the promise of AI real and doing it responsibly, guided by principles such as fairness, reliability and safety, privacy and security, inclusiveness, transparency, and accountability[doc1]. Microsoft's AI offerings span various domains, including productivity services, cloud computing, mixed reality, conversational AI, data analytics, and more[doc3][doc6][doc4]. These AI solutions aim to enhance productivity, improve customer experiences, optimize business functions, and drive innovation[doc9][doc7]. However, the adoption of AI also presents challenges and risks, such as biased datasets, ethical considerations, and potential legal and reputational harm[doc11]. Microsoft is committed to addressing these challenges and ensuring the responsible development and deployment of AI technologies[doc10].",
+ citations: [
+ {
+ content: "someContent",
+ id: "doc_7ff8f57d63e2eebb0a3372db05153822fdee65e6",
+ chunk_id: 7,
+ title: "/documents/MSFT_FY23Q4_10K.docx",
+ filepath: "MSFT_FY23Q4_10K.docx",
+ url: "document url",
+ metadata: null,
+ },
+ {
+ content: "someContent",
+ id: "doc_7ff8f57d63e2eebb0a3372db05153822fdee65e6",
+ chunk_id: 7,
+ title: "/documents/MSFT_FY23Q4_10K.docx",
+ filepath: "MSFT_FY23Q4_10K.docx",
+ url: "document url",
+
+ metadata: null,
+ },
+ {
+ content: "someContent",
+ id: "doc_7ff8f57d63e2eebb0a3372db05153822fdee65e6",
+ chunk_id: 7,
+ title: "/documents/MSFT_FY23Q4_10K.docx",
+ filepath: "MSFT_FY23Q4_10K.docx",
+ url: "document url",
+ metadata: null,
+ },
+ {
+ content: "someContent",
+ id: "doc_14b4ad620c24c5a472f0c4505019c5370b814e17",
+ chunk_id: 4,
+ title:
+ "/documents/MSFT_FY23Q4_10K_DOCUMENT_FOLDER_SRC_IMPORTANT_CHUNKS_LIST_VALID_CHUNKS_ACCESS_TO_MSFT_WINDOWS_BLOBS_CORE_WINDOWS.docx",
+ filepath:
+ "MSFT_FY23Q4_10K_DOCUMENT_FOLDER_SRC_IMPORTANT_CHUNKS_LIST_VALID_CHUNKS_ACCESS_TO_MSFT_WINDOWS_BLOBS_CORE_WINDOWS.docx",
+ url: "document url",
+ metadata: null,
+ },
+ {
+ content: "someContent",
+ id: "doc_7ff8f57d63e2eebb0a3372db05153822fdee65e6",
+ chunk_id: 7,
+ title: "/documents/MSFT_FY23Q4_10K.docx",
+ filepath: "MSFT_FY23Q4_10K.docx",
+ url: "document url",
+ metadata: null,
+ },
+ {
+ content: "someContent",
+ id: "doc_d85da45581d92f2ff59e261197d2c70c2b6f8802",
+ chunk_id: 8,
+ title: "/documents/MSFT_FY23Q4_10K.docx",
+ filepath: "MSFT_FY23Q4_10K.docx",
+ url: "document url",
+ metadata: null,
+ },
+ {
+ content: "someContent",
+ id: "doc_3a2261beeaf7820dfdcc3b0d51a58bd981555b92",
+ chunk_id: 6,
+ title: "/documents/MSFT_FY23Q4_10K.docx",
+ filepath: null,
+ url: "document url",
+ metadata: null,
+ },
+ {
+ content: "someContent",
+ id: "doc_7ff8f57d63e2eebb0a3372db05153822fdee65e6",
+ chunk_id: 7,
+ title: "/documents/MSFT_FY23Q4_10K.docx",
+ filepath: "MSFT_FY23Q4_10K.docx",
+ url: "document url",
+ metadata: null,
+ },
+ {
+ content: "someContent",
+ id: "doc_3a2261beeaf7820dfdcc3b0d51a58bd981555b92",
+ chunk_id: 6,
+ title: "/documents/MSFT_FY23Q4_10K.docx",
+ filepath: null,
+ url: "document url",
+ metadata: null,
+ },
+ {
+ content: "someContent",
+ id: "doc_0b803fe4ec1406115ee7f35a9dd9060ad5d905f5",
+ chunk_id: 57,
+ title: "/documents/MSFT_FY23Q4_10K.docx",
+ filepath: "MSFT_FY23Q4_10K.docx",
+ url: "document url",
+ metadata: null,
+ },
+ {
+ content: "someContent",
+ id: "doc_0b803fe4ec1406115ee7f35a9dd9060ad5d905f5",
+ chunk_id: 57,
+ title: "/documents/MSFT_FY23Q4_10K.docx",
+ filepath: "MSFT_FY23Q4_10K.docx",
+ url: "document url",
+ metadata: null,
+ },
+ ],
+ },
+ isActive: false,
+ index: 2,
+ };
+
+ export const decodedConversationResponseWithCitations = {
+ choices: [
+ {
+ messages: [
+ {
+ content:
+ '{"citations": [{"content": "[/documents/MSFT_FY23Q4_10K.docx](https://str5z43dncphzu3k.blob.core.windows.net/documents/MSFT_FY23Q4_10K.docx?se=2024-10-01T05%3A23%3A06Z&sp=r&sv=2024-05-04&sr=c&sig=cIyn1/%2Bk5pCX7Liy8PgDiytzArIx/9Vq7GA2eGkmyik%3D)\\n\\n\\n
Our AI platform, Azure AI, is helping organizations transform, bringing intelligence and insights to the hands of their employees and customers to solve their most pressing challenges. Organizations large and small are deploying Azure AI solutions to achieve more at scale, more easily, with the proper enterprise-level and responsible AI protections.
\\n
We have a long-term partnership with OpenAI, a leading AI research and deployment company. We deploy OpenAI\\u2019s models across our consumer and enterprise products. As OpenAI\\u2019s exclusive cloud provider, Azure powers all of OpenAI\'s workloads. We have also increased our investments in the development and deployment of specialized supercomputing systems to accelerate OpenAI\\u2019s research.
\\n
Our hybrid infrastructure offers integrated, end-to-end security, compliance, identity, and management capabilities to support the real-world needs and evolving regulatory requirements of commercial customers and enterprises. Our industry clouds bring together capabilities across the entire Microsoft Cloud, along with industry-specific customizations. Azure Arc simplifies governance and management by delivering a consistent multi-cloud and on-premises management platform.
\\n
Nuance, a leader in conversational AI and ambient intelligence across industries including healthcare, financial services, retail, and telecommunications, joined Microsoft in 2022. Microsoft and Nuance enable organizations to accelerate their business goals with security-focused, cloud-based solutions infused with AI.
\\n
We are accelerating our development of mixed reality solutions with new Azure services and devices. Microsoft Mesh enables organizations to create custom, immersive experiences for the workplace to help bring remote and hybrid workers and teams together.
\\n
The ability to convert data into AI drives our competitive advantage. The Microsoft Intelligent Data Platform is a leading cloud data platform that fully integrates databases, analytics, and governance. The platform empowers organizations to invest more time creating value rather than integrating and managing their data. Microsoft Fabric is an end-to-end, unified analytics platform that brings together all the data and analytics tools that organizations need.
Azure AI offerings provide a competitive advantage as companies seek ways to optimize and scale their business with machine learning. Azure\\u2019s purpose-built, AI-optimized infrastructure allows advanced models, including GPT-4 services designed for developers and data scientists, to do more with less. Customers can integrate large language models and develop the next generation of AI apps and services.
\\n
Our server products are designed to make IT professionals, developers, and their systems more productive and efficient. Server software is integrated server infrastructure and middleware designed to support software applications built on the Windows Server operating system. This includes the server platform, database, business intelligence, storage, management and operations, virtualization, service-oriented architecture platform, security, and identity software. We also license standalone and software development lifecycle tools for software architects, developers, testers, and project managers. Server products revenue is mainly affected by purchases through volume licensing programs, licenses sold to original equipment manufacturers (\\u201cOEM\\u201d), and retail packaged products. CALs provide access rights to certain server products, including SQL Server and Windows Server, and revenue is reported along with the associated server product.
\\n
Nuance and GitHub include both cloud and on-premises offerings. Nuance provides healthcare and enterprise AI solutions. GitHub provides a collaboration platform and code hosting service for developers.
\\n
Enterprise Services
\\n
Enterprise Services, including Enterprise Support Services, Industry Solutions, and Nuance Professional Services, assist customers in developing, deploying, and managing Microsoft server solutions, Microsoft desktop solutions, and Nuance conversational AI and ambient intelligent solutions, along with providing training and certification to developers and IT professionals on various Microsoft products.
Microsoft is a technology company whose mission is to empower every person and every organization on the planet to achieve more. We strive to create local opportunity, growth, and impact in every country around the world. We are creating the platforms and tools, powered by artificial intelligence (\\u201cAI\\u201d), that deliver better, faster, and more effective solutions to support small and large business competitiveness, improve educational and health outcomes, grow public-sector efficiency, and empower human ingenuity. From infrastructure and data, to business applications and collaboration, we provide unique, differentiated value to customers.
\\n
In a world of increasing economic complexity, AI has the power to revolutionize many types of work. Microsoft is now innovating and expanding our portfolio with AI capabilities to help people and organizations overcome today\\u2019s challenges and emerge stronger. Customers are looking to unlock value from their digital spend and innovate for this next generation of AI, while simplifying security and management. Those leveraging the Microsoft Cloud are best positioned to take advantage of technological advancements and drive innovation. Our investment in AI spans the entire company, from Microsoft Teams and Outlook, to Bing and Xbox, and we are infusing generative AI capability into our consumer and commercial offerings to deliver copilot capability for all services across the Microsoft Cloud.
\\n
We\\u2019re committed to making the promise of AI real \\u2013 and doing it responsibly. Our work is guided by a core set of principles: fairness, reliability and safety, privacy and security, inclusiveness, transparency, and accountability.
\\n
What We Offer
\\n
Founded in 1975, we develop and support software, services, devices, and solutions that deliver new value for customers and help people and businesses realize their full potential.
\\n
We offer an array of services, including cloud-based solutions that provide customers with software, services, platforms, and content, and we provide solution support and consulting services. We also deliver relevant online advertising to a global audience.
Our AI platform, Azure AI, is helping organizations transform, bringing intelligence and insights to the hands of their employees and customers to solve their most pressing challenges. Organizations large and small are deploying Azure AI solutions to achieve more at scale, more easily, with the proper enterprise-level and responsible AI protections.
\\n
We have a long-term partnership with OpenAI, a leading AI research and deployment company. We deploy OpenAI\\u2019s models across our consumer and enterprise products. As OpenAI\\u2019s exclusive cloud provider, Azure powers all of OpenAI\'s workloads. We have also increased our investments in the development and deployment of specialized supercomputing systems to accelerate OpenAI\\u2019s research.
\\n
Our hybrid infrastructure offers integrated, end-to-end security, compliance, identity, and management capabilities to support the real-world needs and evolving regulatory requirements of commercial customers and enterprises. Our industry clouds bring together capabilities across the entire Microsoft Cloud, along with industry-specific customizations. Azure Arc simplifies governance and management by delivering a consistent multi-cloud and on-premises management platform.
\\n
Nuance, a leader in conversational AI and ambient intelligence across industries including healthcare, financial services, retail, and telecommunications, joined Microsoft in 2022. Microsoft and Nuance enable organizations to accelerate their business goals with security-focused, cloud-based solutions infused with AI.
\\n
We are accelerating our development of mixed reality solutions with new Azure services and devices. Microsoft Mesh enables organizations to create custom, immersive experiences for the workplace to help bring remote and hybrid workers and teams together.
\\n
The ability to convert data into AI drives our competitive advantage. The Microsoft Intelligent Data Platform is a leading cloud data platform that fully integrates databases, analytics, and governance. The platform empowers organizations to invest more time creating value rather than integrating and managing their data. Microsoft Fabric is an end-to-end, unified analytics platform that brings together all the data and analytics tools that organizations need.
Microsoft is a technology company whose mission is to empower every person and every organization on the planet to achieve more. We strive to create local opportunity, growth, and impact in every country around the world. We are creating the platforms and tools, powered by artificial intelligence (\\u201cAI\\u201d), that deliver better, faster, and more effective solutions to support small and large business competitiveness, improve educational and health outcomes, grow public-sector efficiency, and empower human ingenuity. From infrastructure and data, to business applications and collaboration, we provide unique, differentiated value to customers.
\\n
In a world of increasing economic complexity, AI has the power to revolutionize many types of work. Microsoft is now innovating and expanding our portfolio with AI capabilities to help people and organizations overcome today\\u2019s challenges and emerge stronger. Customers are looking to unlock value from their digital spend and innovate for this next generation of AI, while simplifying security and management. Those leveraging the Microsoft Cloud are best positioned to take advantage of technological advancements and drive innovation. Our investment in AI spans the entire company, from Microsoft Teams and Outlook, to Bing and Xbox, and we are infusing generative AI capability into our consumer and commercial offerings to deliver copilot capability for all services across the Microsoft Cloud.
\\n
We\\u2019re committed to making the promise of AI real \\u2013 and doing it responsibly. Our work is guided by a core set of principles: fairness, reliability and safety, privacy and security, inclusiveness, transparency, and accountability.
\\n
What We Offer
\\n
Founded in 1975, we develop and support software, services, devices, and solutions that deliver new value for customers and help people and businesses realize their full potential.
\\n
We offer an array of services, including cloud-based solutions that provide customers with software, services, platforms, and content, and we provide solution support and consulting services. We also deliver relevant online advertising to a global audience.
Our AI platform, Azure AI, is helping organizations transform, bringing intelligence and insights to the hands of their employees and customers to solve their most pressing challenges. Organizations large and small are deploying Azure AI solutions to achieve more at scale, more easily, with the proper enterprise-level and responsible AI protections.
\\n
We have a long-term partnership with OpenAI, a leading AI research and deployment company. We deploy OpenAI\\u2019s models across our consumer and enterprise products. As OpenAI\\u2019s exclusive cloud provider, Azure powers all of OpenAI\'s workloads. We have also increased our investments in the development and deployment of specialized supercomputing systems to accelerate OpenAI\\u2019s research.
\\n
Our hybrid infrastructure offers integrated, end-to-end security, compliance, identity, and management capabilities to support the real-world needs and evolving regulatory requirements of commercial customers and enterprises. Our industry clouds bring together capabilities across the entire Microsoft Cloud, along with industry-specific customizations. Azure Arc simplifies governance and management by delivering a consistent multi-cloud and on-premises management platform.
\\n
Nuance, a leader in conversational AI and ambient intelligence across industries including healthcare, financial services, retail, and telecommunications, joined Microsoft in 2022. Microsoft and Nuance enable organizations to accelerate their business goals with security-focused, cloud-based solutions infused with AI.
\\n
We are accelerating our development of mixed reality solutions with new Azure services and devices. Microsoft Mesh enables organizations to create custom, immersive experiences for the workplace to help bring remote and hybrid workers and teams together.
\\n
The ability to convert data into AI drives our competitive advantage. The Microsoft Intelligent Data Platform is a leading cloud data platform that fully integrates databases, analytics, and governance. The platform empowers organizations to invest more time creating value rather than integrating and managing their data. Microsoft Fabric is an end-to-end, unified analytics platform that brings together all the data and analytics tools that organizations need.
", "id": "doc_7ff8f57d63e2eebb0a3372db05153822fdee65e6", "chunk_id": 7, "title": "/documents/MSFT_FY23Q4_10K.docx", "filepath": "MSFT_FY23Q4_10K.docx", "url": "[/documents/MSFT_FY23Q4_10K.docx](https://str5z43dncphzu3k.blob.core.windows.net/documents/MSFT_FY23Q4_10K.docx?se=2024-10-01T05%3A23%3A06Z&sp=r&sv=2024-05-04&sr=c&sig=cIyn1/%2Bk5pCX7Liy8PgDiytzArIx/9Vq7GA2eGkmyik%3D)", "metadata": {"offset": 13285, "source": "https://str5z43dncphzu3k.blob.core.windows.net/documents/MSFT_FY23Q4_10K.docx_SAS_TOKEN_PLACEHOLDER_", "markdown_url": "[/documents/MSFT_FY23Q4_10K.docx](https://str5z43dncphzu3k.blob.core.windows.net/documents/MSFT_FY23Q4_10K.docx?se=2024-10-01T05%3A23%3A06Z&sp=r&sv=2024-05-04&sr=c&sig=cIyn1/%2Bk5pCX7Liy8PgDiytzArIx/9Vq7GA2eGkmyik%3D)", "title": "/documents/MSFT_FY23Q4_10K.docx", "original_url": "https://str5z43dncphzu3k.blob.core.windows.net/documents/MSFT_FY23Q4_10K.docx_SAS_TOKEN_PLACEHOLDER_", "chunk": 7, "key": "doc_7ff8f57d63e2eebb0a3372db05153822fdee65e6", "filename": "MSFT_FY23Q4_10K"}}], "intent": "Explain Microsoft AI"}',
+ end_turn: false,
+ role: "tool",
+ },
+ {
+ content:
+ "Microsoft AI refers to the artificial intelligence capabilities and offerings provided by Microsoft. It encompasses a range of technologies and solutions that leverage AI to empower individuals and organizations to achieve more. Microsoft's AI platform, Azure AI, enables organizations to transform their operations by bringing intelligence and insights to employees and customers. It offers AI-optimized infrastructure, advanced models, and AI services designed for developers and data scientists[doc2][doc6]. Microsoft's AI capabilities are integrated into various products and services, including Microsoft Teams, Outlook, Bing, Xbox, and the Microsoft Cloud[doc1][doc4]. The company is committed to developing AI responsibly, guided by principles such as fairness, reliability, privacy, and transparency[doc5]. Additionally, Microsoft has a partnership with OpenAI and deploys OpenAI's models across its consumer and enterprise products[doc3]. Overall, Microsoft AI aims to drive innovation, improve productivity, and deliver value to customers across different industries and sectors.",
+ end_turn: true,
+ role: "assistant",
+ },
+ ],
+ },
+ ],
+ created: "response.created",
+ id: "response.id",
+ model: "gpt-35-turbo-16k",
+ object: "response.object",
+ };
+
+ export const citationObj = {
+ content:
+ "[/documents/MSFT_FY23Q4_10K.docx](https://str5z43dncphzu3k.blob.core.windows.net/documents/MSFT_FY23Q4_10K.docx?se=2024-10-01T05%3A38%3A07Z&sp=r&sv=2024-05-04&sr=c&sig=8fFfpNI/tv2rdTKAcunuWpW6zJkZuw%2BGvEGo2zQ1QSA%3D)\n\n\n
The ability to convert data into AI drives our competitive advantage. The Microsoft Intelligent Data Platform is a leading cloud data platform that fully integrates databases, analytics, and governance. The platform empowers organizations to invest more time creating value rather than integrating and managing their data. Microsoft Fabric is an end-to-end, unified analytics platform that brings together all the data and analytics tools that organizations need.
\n
GitHub Copilot is at the forefront of AI-powered software development, giving developers a new tool to write code easier and faster so they can focus on more creative problem-solving. From GitHub to Visual Studio, we provide a developer tool chain for everyone, no matter the technical experience, across all platforms, whether Azure, Windows, or any other cloud or client platform.
\n
Windows also plays a critical role in fueling our cloud business with Windows 365, a desktop operating system that’s also a cloud service. From another internet-connected device, including Android or macOS devices, users can run Windows 365, just like a virtual machine.
\n
Additionally, we are extending our infrastructure beyond the planet, bringing cloud computing to space. Azure Orbital is a fully managed ground station as a service for fast downlinking of data.
\n
Create More Personal Computing
\n
We strive to make computing more personal, enabling users to interact with technology in more intuitive, engaging, and dynamic ways.
\n
Windows 11 offers innovations focused on enhancing productivity, including Windows Copilot with centralized AI assistance and Dev Home to help developers become more productive. Windows 11 security and privacy features include operating system security, application security, and user and identity security.
\n
Through our Search, News, Mapping, and Browser services, Microsoft delivers unique trust, privacy, and safety features. In February 2023, we launched an all new, AI-powered Microsoft Edge browser and Bing search engine with Bing Chat to deliver better search, more complete answers, and the ability to generate content. Microsoft Edge is our fast and secure browser that helps protect users’ data. Quick access to AI-powered tools, apps, and more within Microsoft Edge’s sidebar enhance browsing capabilities.
",
+ id: "2",
+ chunk_id: 8,
+ title: "/documents/MSFT_FY23Q4_10K.docx",
+ filepath: "MSFT_FY23Q4_10K.docx",
+ url: "[/documents/MSFT_FY23Q4_10K.docx](https://str5z43dncphzu3k.blob.core.windows.net/documents/MSFT_FY23Q4_10K.docx?se=2024-10-01T05%3A38%3A07Z&sp=r&sv=2024-05-04&sr=c&sig=8fFfpNI/tv2rdTKAcunuWpW6zJkZuw%2BGvEGo2zQ1QSA%3D)",
+ metadata: {
+ offset: 15580,
+ source:
+ "https://str5z43dncphzu3k.blob.core.windows.net/documents/MSFT_FY23Q4_10K.docx_SAS_TOKEN_PLACEHOLDER_",
+ markdown_url:
+ "[/documents/MSFT_FY23Q4_10K.docx](https://str5z43dncphzu3k.blob.core.windows.net/documents/MSFT_FY23Q4_10K.docx?se=2024-10-01T05%3A38%3A07Z&sp=r&sv=2024-05-04&sr=c&sig=8fFfpNI/tv2rdTKAcunuWpW6zJkZuw%2BGvEGo2zQ1QSA%3D)",
+ title: "/documents/MSFT_FY23Q4_10K.docx",
+ original_url:
+ "https://str5z43dncphzu3k.blob.core.windows.net/documents/MSFT_FY23Q4_10K.docx_SAS_TOKEN_PLACEHOLDER_",
+ chunk: 8,
+ key: "doc_d85da45581d92f2ff59e261197d2c70c2b6f8802",
+ filename: "MSFT_FY23Q4_10K",
+ },
+ reindex_id: "1",
+ };
+
+ export const AIResponseContent =
+ "Microsoft AI refers to the artificial intelligence capabilities and offerings provided by Microsoft. It encompasses a range of technologies and solutions that leverage AI to empower individuals and organizations to achieve more. Microsoft's AI platform, Azure AI, enables organizations to transform their operations by bringing intelligence and insights to employees and customers. It offers AI-optimized infrastructure, advanced models, and AI services designed for developers and data scientists is an ";
\ No newline at end of file
diff --git a/ClientAdvisor/App/frontend/__mocks__/react-markdown.tsx b/ClientAdvisor/App/frontend/__mocks__/react-markdown.tsx
new file mode 100644
index 000000000..587310af8
--- /dev/null
+++ b/ClientAdvisor/App/frontend/__mocks__/react-markdown.tsx
@@ -0,0 +1,17 @@
+// __mocks__/react-markdown.tsx
+
+import React from 'react';
+
+// Mock implementation of react-markdown
+const mockNode = {
+ children: [{ value: 'console.log("Test Code");' }]
+};
+const mockProps = { className: 'language-javascript' };
+
+const ReactMarkdown: React.FC<{ children: React.ReactNode , components: any }> = ({ children,components }) => {
+ return
{
if (e.key === 'Enter' || e.key === ' ') {
e.preventDefault(); // Prevent the default action like scrolling.
- handleShowMoreClick(e); // Call the same function as onClick.
+ onCardClick(); // Call the same function as onClick.
}
}}>
{ClientName}
@@ -85,4 +85,3 @@ const UserCard: React.FC = ({
);
};
-export default UserCard;
diff --git a/ClientAdvisor/App/frontend/src/components/common/Button.module.css b/ClientAdvisor/App/frontend/src/components/common/Button.module.css
index 14c1ecb70..dc5df4d5b 100644
--- a/ClientAdvisor/App/frontend/src/components/common/Button.module.css
+++ b/ClientAdvisor/App/frontend/src/components/common/Button.module.css
@@ -25,6 +25,7 @@
.historyButtonRoot {
width: 180px;
border: 1px solid #d1d1d1;
+ border-radius: 5px;
}
.historyButtonRoot:hover {
diff --git a/ClientAdvisor/App/frontend/src/helpers/helpers.test.ts b/ClientAdvisor/App/frontend/src/helpers/helpers.test.ts
new file mode 100644
index 000000000..2ec74735b
--- /dev/null
+++ b/ClientAdvisor/App/frontend/src/helpers/helpers.test.ts
@@ -0,0 +1,200 @@
+import { groupByMonth, formatMonth, parseCitationFromMessage, parseErrorMessage, tryGetRaiPrettyError } from './helpers';
+import { ChatMessage, Conversation } from '../api/models';
+
+describe('groupByMonth', () => {
+
+ test('should group recent conversations into the "Recent" group when the difference is less than or equal to 7 days', () => {
+ const currentDate = new Date();
+ const recentDate = new Date(currentDate.getTime() - 3 * 24 * 60 * 60 * 1000); // 3 days ago
+ const entries: Conversation[] = [
+ {
+ id: '1',
+ title: 'Recent Conversation',
+ date: recentDate.toISOString(),
+ messages: [],
+ },
+ ];
+ const result = groupByMonth(entries);
+ expect(result[0].month).toBe('Recent');
+ expect(result[0].entries.length).toBe(1);
+ expect(result[0].entries[0].id).toBe('1');
+ });
+
+ test('should group conversations by month when the difference is more than 7 days', () => {
+ const entries: Conversation[] = [
+ {
+ id: '1',
+ title: 'Older Conversation',
+ date: '2024-09-01T10:26:03.844538',
+ messages: [],
+ },
+ {
+ id: '2',
+ title: 'Another Older Conversation',
+ date: '2024-08-01T10:26:03.844538',
+ messages: [],
+ },
+
+ {
+ id: '3',
+ title: 'Older Conversation',
+ date: '2024-10-08T10:26:03.844538',
+ messages: [],
+ },
+ ];
+
+ const result = groupByMonth(entries);
+ expect(result[1].month).toBe('September 2024');
+ expect(result[1].entries.length).toBe(1);
+ expect(result[2].month).toBe('August 2024');
+ expect(result[2].entries.length).toBe(1);
+ });
+
+ test('should push entries into an existing group if the group for that month already exists', () => {
+ const entries: Conversation[] = [
+ {
+ id: '1',
+ title: 'First Conversation',
+ date: '2024-09-08T10:26:03.844538',
+ messages: [],
+ },
+ {
+ id: '2',
+ title: 'Second Conversation',
+ date: '2024-09-10T10:26:03.844538',
+ messages: [],
+ },
+ ];
+
+ const result = groupByMonth(entries);
+
+ expect(result[0].month).toBe('September 2024');
+ expect(result[0].entries.length).toBe(2);
+ });
+
+});
+
+describe('formatMonth', () => {
+
+ it('should return the month name if the year is the current year', () => {
+ const currentYear = new Date().getFullYear();
+ const month = `${new Date().toLocaleString('default', { month: 'long' })} ${currentYear}`;
+
+ const result = formatMonth(month);
+
+ expect(result).toEqual(new Date().toLocaleString('default', { month: 'long' }));
+ });
+
+ it('should return the full month string if the year is not the current year', () => {
+ const month = 'January 2023'; // A past year, so formatMonth should return the full "Month Year" string
+ const result = formatMonth(month);
+
+ expect(result).toEqual(month);
+ });
+
+ it('should handle invalid month format gracefully', () => {
+ const month = 'Invalid Month Format';
+ const result = formatMonth(month);
+
+ expect(result).toEqual(month);
+ });
+
+ it('should return the full month string if the month is empty', () => {
+ const month = ' ';
+ const result = formatMonth(month);
+
+ expect(result).toEqual(month);
+ });
+
+});
+
+describe('parseCitationFromMessage', () => {
+
+ it('should return citations when the message role is "tool" and content is valid JSON', () => {
+ const message: ChatMessage = {
+ id: '1',
+ role: 'tool',
+ content: JSON.stringify({
+ citations: ['citation1', 'citation2'],
+ }),
+ date: new Date().toISOString(),
+ };
+
+ const result = parseCitationFromMessage(message);
+
+ expect(result).toEqual(['citation1', 'citation2']);
+ });
+
+ it('should return an empty array if the message role is not "tool"', () => {
+ const message: ChatMessage = {
+ id: '2',
+ role: 'user',
+ content: JSON.stringify({
+ citations: ['citation1', 'citation2'],
+ }),
+ date: new Date().toISOString(),
+ };
+
+ const result = parseCitationFromMessage(message);
+
+ expect(result).toEqual([]);
+ });
+
+ it('should return an empty array if the content is not valid JSON', () => {
+ const message: ChatMessage = {
+ id: '3',
+ role: 'tool',
+ content: 'invalid JSON content',
+ date: new Date().toISOString(),
+ };
+
+ const result = parseCitationFromMessage(message);
+
+ expect(result).toEqual([]);
+ });
+
+});
+
+describe('tryGetRaiPrettyError', () => {
+
+ it('should return prettified error message when inner error is filtered as jailbreak', () => {
+ const errorMessage = "Some error occurred, 'innererror': {'content_filter_result': {'jailbreak': {'filtered': True}}}}}";
+
+ // The backend error string embeds a Python-style dict with single quotes; tryGetRaiPrettyError parses the 'innererror' payload out of it
+ const result = tryGetRaiPrettyError(errorMessage);
+
+ expect(result).toEqual(
+ 'The prompt was filtered due to triggering Azure OpenAI’s content filtering system.\n' +
+ 'Reason: This prompt contains content flagged as Jailbreak\n\n' +
+ 'Please modify your prompt and retry. Learn more: https://go.microsoft.com/fwlink/?linkid=2198766'
+ );
+ });
+
+ it('should return the original error message if no inner error found', () => {
+ const errorMessage = "Error: some error message without inner error";
+ const result = tryGetRaiPrettyError(errorMessage);
+
+ expect(result).toEqual(errorMessage);
+ });
+
+ it('should return the original error message if inner error is malformed', () => {
+ const errorMessage = "Error: some error message, 'innererror': {'content_filter_result': {'jailbreak': {'filtered': true}}}";
+ const result = tryGetRaiPrettyError(errorMessage);
+
+ expect(result).toEqual(errorMessage);
+ });
+
+});
+
+describe('parseErrorMessage', () => {
+
+ it('should extract inner error message and call tryGetRaiPrettyError', () => {
+ const errorMessage = "Error occurred - {\\'error\\': {\\'message\\': 'Some inner error message'}}";
+ const result = parseErrorMessage(errorMessage);
+
+ expect(result).toEqual("Error occurred - {'error': {'message': 'Some inner error message");
+ });
+
+});
+
+
diff --git a/ClientAdvisor/App/frontend/src/helpers/helpers.ts b/ClientAdvisor/App/frontend/src/helpers/helpers.ts
new file mode 100644
index 000000000..3541110db
--- /dev/null
+++ b/ClientAdvisor/App/frontend/src/helpers/helpers.ts
@@ -0,0 +1,134 @@
+import { Conversation, GroupedChatHistory, ChatMessage, ToolMessageContent } from '../api/models'
+
+export const groupByMonth = (entries: Conversation[]) => {
+ const groups: GroupedChatHistory[] = [{ month: 'Recent', entries: [] }]
+ const currentDate = new Date()
+
+ entries.forEach(entry => {
+ const date = new Date(entry.date)
+ const daysDifference = (currentDate.getTime() - date.getTime()) / (1000 * 60 * 60 * 24)
+ const monthYear = date.toLocaleString('default', { month: 'long', year: 'numeric' })
+ const existingGroup = groups.find(group => group.month === monthYear)
+
+ if (daysDifference <= 7) {
+ groups[0].entries.push(entry)
+ } else {
+ if (existingGroup) {
+ existingGroup.entries.push(entry)
+ } else {
+ groups.push({ month: monthYear, entries: [entry] })
+ }
+ }
+ })
+
+ groups.sort((a, b) => {
+ // Check if either group has no entries and handle it
+ if (a.entries.length === 0 && b.entries.length === 0) {
+ return 0 // No change in order
+ } else if (a.entries.length === 0) {
+ return 1 // Move 'a' to a higher index (bottom)
+ } else if (b.entries.length === 0) {
+ return -1 // Move 'b' to a higher index (bottom)
+ }
+ const dateA = new Date(a.entries[0].date)
+ const dateB = new Date(b.entries[0].date)
+ return dateB.getTime() - dateA.getTime()
+ })
+
+ groups.forEach(group => {
+ group.entries.sort((a, b) => {
+ const dateA = new Date(a.date)
+ const dateB = new Date(b.date)
+ return dateB.getTime() - dateA.getTime()
+ })
+ })
+
+ return groups
+}
+
+export const formatMonth = (month: string) => {
+ const currentDate = new Date()
+ const currentYear = currentDate.getFullYear()
+
+ const [monthName, yearString] = month.split(' ')
+ const year = parseInt(yearString)
+
+ if (year === currentYear) {
+ return monthName
+ } else {
+ return month
+ }
+}
+
+
+// -------------Chat.tsx-------------
+export const parseCitationFromMessage = (message: ChatMessage) => {
+ if (message?.role && message?.role === 'tool') {
+ try {
+ const toolMessage = JSON.parse(message.content) as ToolMessageContent
+ return toolMessage.citations
+ } catch {
+ return []
+ }
+ }
+ return []
+}
+
+export const tryGetRaiPrettyError = (errorMessage: string) => {
+ try {
+ // Using a regex to extract the JSON part that contains "innererror"
+ const match = errorMessage.match(/'innererror': ({.*})\}\}/)
+ if (match) {
+ // Replacing single quotes with double quotes and converting Python-like booleans to JSON booleans
+ const fixedJson = match[1]
+ .replace(/'/g, '"')
+ .replace(/\bTrue\b/g, 'true')
+ .replace(/\bFalse\b/g, 'false')
+ const innerErrorJson = JSON.parse(fixedJson)
+ let reason = ''
+ // Check if the jailbreak content filter is the reason for the error
+ const jailbreak = innerErrorJson.content_filter_result.jailbreak
+ if (jailbreak.filtered === true) {
+ reason = 'Jailbreak'
+ }
+
+ // Returning the prettified error message
+ if (reason !== '') {
+ return (
+ 'The prompt was filtered due to triggering Azure OpenAI’s content filtering system.\n' +
+ 'Reason: This prompt contains content flagged as ' +
+ reason +
+ '\n\n' +
+ 'Please modify your prompt and retry. Learn more: https://go.microsoft.com/fwlink/?linkid=2198766'
+ )
+ }
+ }
+ } catch (e) {
+ console.error('Failed to parse the error:', e)
+ }
+ return errorMessage
+}
+
+
+export const parseErrorMessage = (errorMessage: string) => {
+ let errorCodeMessage = errorMessage.substring(0, errorMessage.indexOf('-') + 1)
+ const innerErrorCue = "{\\'error\\': {\\'message\\': "
+ if (errorMessage.includes(innerErrorCue)) {
+ try {
+ let innerErrorString = errorMessage.substring(errorMessage.indexOf(innerErrorCue))
+ if (innerErrorString.endsWith("'}}")) {
+ innerErrorString = innerErrorString.substring(0, innerErrorString.length - 3)
+ }
+ innerErrorString = innerErrorString.replaceAll("\\'", "'")
+ let newErrorMessage = errorCodeMessage + ' ' + innerErrorString
+ errorMessage = newErrorMessage
+ } catch (e) {
+ console.error('Error parsing inner error message: ', e)
+ }
+ }
+
+ return tryGetRaiPrettyError(errorMessage)
+}
+
+// -------------Chat.tsx-------------
+
diff --git a/ClientAdvisor/App/frontend/src/mocks/handlers.ts b/ClientAdvisor/App/frontend/src/mocks/handlers.ts
new file mode 100644
index 000000000..b60d86989
--- /dev/null
+++ b/ClientAdvisor/App/frontend/src/mocks/handlers.ts
@@ -0,0 +1,5 @@
+import { http, HttpResponse } from 'msw'
+
+export const handlers = [
+
+];
diff --git a/ClientAdvisor/App/frontend/src/mocks/server.ts b/ClientAdvisor/App/frontend/src/mocks/server.ts
new file mode 100644
index 000000000..5f8393d60
--- /dev/null
+++ b/ClientAdvisor/App/frontend/src/mocks/server.ts
@@ -0,0 +1,5 @@
+// src/mocks/server.ts
+import { setupServer } from 'msw/node';
+import { handlers } from './handlers';
+
+export const server = setupServer(...handlers);
diff --git a/ClientAdvisor/App/frontend/src/pages/chat/Chat.test.tsx b/ClientAdvisor/App/frontend/src/pages/chat/Chat.test.tsx
new file mode 100644
index 000000000..1621ef965
--- /dev/null
+++ b/ClientAdvisor/App/frontend/src/pages/chat/Chat.test.tsx
@@ -0,0 +1,1537 @@
+import { renderWithContext, screen, waitFor, fireEvent, act } from '../../test/test.utils'
+import Chat from './Chat'
+import { ChatHistoryLoadingState } from '../../api/models'
+
+import {
+ getUserInfo,
+ conversationApi,
+ historyGenerate,
+ historyClear,
+ ChatMessage,
+ Citation,
+ historyUpdate,
+ CosmosDBStatus
+} from '../../api'
+import userEvent from '@testing-library/user-event'
+
+import { AIResponseContent, decodedConversationResponseWithCitations } from '../../../__mocks__/mockAPIData'
+import { CitationPanel } from './Components/CitationPanel'
+// import { BuildingCheckmarkRegular } from '@fluentui/react-icons';
+
+// Mocking necessary modules and components
+jest.mock('../../api/api', () => ({
+ getUserInfo: jest.fn(),
+ historyClear: jest.fn(),
+ historyGenerate: jest.fn(),
+ historyUpdate: jest.fn(),
+ conversationApi: jest.fn()
+}))
+
+interface ChatMessageContainerProps {
+ messages: ChatMessage[]
+ isLoading: boolean
+ showLoadingMessage: boolean
+ onShowCitation: (citation: Citation) => void
+}
+
+const citationObj = {
+ id: '123',
+ content: 'This is a sample citation content.',
+ title: 'Test Citation with Blob URL',
+ url: 'https://test.core.example.com/resource',
+ filepath: 'path',
+ metadata: '',
+ chunk_id: '',
+ reindex_id: ''
+}
+jest.mock('./Components/ChatMessageContainer', () => ({
+ ChatMessageContainer: jest.fn((props: ChatMessageContainerProps) => {
+ return (
+
- This app does not have authentication configured. Please add an identity provider by finding your app in the{' '}
-
- Azure Portal
-
- and following{' '}
-
- these instructions
-
- .
-
-
- Authentication configuration takes a few minutes to apply.
-
-
- If you deployed in the last 10 minutes, please wait and reload the page after 10 minutes.
-
diff --git a/ClientAdvisor/App/frontend/src/pages/chat/Components/AuthNotConfigure.test.tsx b/ClientAdvisor/App/frontend/src/pages/chat/Components/AuthNotConfigure.test.tsx
new file mode 100644
index 000000000..a47a1e4d3
--- /dev/null
+++ b/ClientAdvisor/App/frontend/src/pages/chat/Components/AuthNotConfigure.test.tsx
@@ -0,0 +1,49 @@
+import React from 'react'
+import { render, screen } from '@testing-library/react'
+import '@testing-library/jest-dom'
+import { AuthNotConfigure } from './AuthNotConfigure'
+import styles from '../Chat.module.css'
+
+// Mock the Fluent UI icons
+jest.mock('@fluentui/react-icons', () => ({
+ ShieldLockRegular: () =>
+}))
+
+describe('AuthNotConfigure Component', () => {
+ it('renders without crashing', () => {
+ render()
+
+ // Check that the icon is rendered
+ const icon = screen.getByTestId('shield-lock-icon')
+ expect(icon).toBeInTheDocument()
+
+ // Check that the titles and subtitles are rendered
+ expect(screen.getByText('Authentication Not Configured')).toBeInTheDocument()
+ expect(screen.getByText(/This app does not have authentication configured./)).toBeInTheDocument()
+
+ // Check the strong text is rendered
+ expect(screen.getByText('Authentication configuration takes a few minutes to apply.')).toBeInTheDocument()
+ expect(screen.getByText(/please wait and reload the page after 10 minutes/i)).toBeInTheDocument()
+ })
+
+ it('renders the Azure portal and instructions links with correct href', () => {
+ render()
+
+ // Check the Azure Portal link
+ const azurePortalLink = screen.getByText('Azure Portal')
+ expect(azurePortalLink).toBeInTheDocument()
+ expect(azurePortalLink).toHaveAttribute('href', 'https://portal.azure.com/')
+ expect(azurePortalLink).toHaveAttribute('target', '_blank')
+
+ // Check the instructions link
+ const instructionsLink = screen.getByText('these instructions')
+ expect(instructionsLink).toBeInTheDocument()
+ expect(instructionsLink).toHaveAttribute(
+ 'href',
+ 'https://learn.microsoft.com/en-us/azure/app-service/scenario-secure-app-authentication-app-service#3-configure-authentication-and-authorization'
+ )
+ expect(instructionsLink).toHaveAttribute('target', '_blank')
+ })
+
+
+})
diff --git a/ClientAdvisor/App/frontend/src/pages/chat/Components/AuthNotConfigure.tsx b/ClientAdvisor/App/frontend/src/pages/chat/Components/AuthNotConfigure.tsx
new file mode 100644
index 000000000..ac5151182
--- /dev/null
+++ b/ClientAdvisor/App/frontend/src/pages/chat/Components/AuthNotConfigure.tsx
@@ -0,0 +1,36 @@
+import React from 'react'
+import { Stack } from '@fluentui/react'
+import { ShieldLockRegular } from '@fluentui/react-icons'
+
+import styles from '../Chat.module.css'
+
+export const AuthNotConfigure = ()=>{
+ return (
+
+
+
Authentication Not Configured
+
+ This app does not have authentication configured. Please add an identity provider by finding your app in the{' '}
+
+ Azure Portal
+
+ and following{' '}
+
+ these instructions
+
+ .
+
+
+ Authentication configuration takes a few minutes to apply.
+
+
+ If you deployed in the last 10 minutes, please wait and reload the page after 10 minutes.
+
+
+ )
+}
\ No newline at end of file
diff --git a/ClientAdvisor/App/frontend/src/pages/chat/Components/ChatMessageContainer.test.tsx b/ClientAdvisor/App/frontend/src/pages/chat/Components/ChatMessageContainer.test.tsx
new file mode 100644
index 000000000..bb470c29f
--- /dev/null
+++ b/ClientAdvisor/App/frontend/src/pages/chat/Components/ChatMessageContainer.test.tsx
@@ -0,0 +1,178 @@
+import { render, screen, fireEvent } from '@testing-library/react';
+import { ChatMessageContainer } from './ChatMessageContainer';
+import { ChatMessage, Citation } from '../../../api/models';
+import { Answer } from '../../../components/Answer';
+
+jest.mock('../../../components/Answer', () => ({
+ Answer: jest.fn((props: any) =>
)}
-
+
-
+
diff --git a/ClientAdvisor/App/frontend/src/state/AppProvider.tsx b/ClientAdvisor/App/frontend/src/state/AppProvider.tsx
index d0166462d..051db7224 100644
--- a/ClientAdvisor/App/frontend/src/state/AppProvider.tsx
+++ b/ClientAdvisor/App/frontend/src/state/AppProvider.tsx
@@ -1,6 +1,14 @@
import React, { createContext, ReactNode, useEffect,
useReducer } from 'react'
+import {
+ frontendSettings,
+ historyEnsure,
+ historyList,
+ // UserSelectRequest
+
+} from '../api'
+
import {
ChatHistoryLoadingState,
Conversation,
@@ -8,12 +16,9 @@ import {
CosmosDBStatus,
Feedback,
FrontendSettings,
- frontendSettings,
- historyEnsure,
- historyList,
// UserSelectRequest
-} from '../api'
+} from '../api/models'
import { appStateReducer } from './AppReducer'
@@ -51,7 +56,8 @@ export type Action =
| { type: 'GET_FEEDBACK_STATE'; payload: string }
| { type: 'UPDATE_CLIENT_ID'; payload: string }
| { type: 'SET_IS_REQUEST_INITIATED'; payload: boolean }
- | { type: 'TOGGLE_LOADER' };
+ | { type: 'TOGGLE_LOADER' }
+ | { type: 'RESET_CLIENT_ID'};
const initialState: AppState = {
isChatHistoryOpen: false,
diff --git a/ClientAdvisor/App/frontend/src/state/AppReducer.tsx b/ClientAdvisor/App/frontend/src/state/AppReducer.tsx
index 21a126dab..03a778cc2 100644
--- a/ClientAdvisor/App/frontend/src/state/AppReducer.tsx
+++ b/ClientAdvisor/App/frontend/src/state/AppReducer.tsx
@@ -80,6 +80,8 @@ export const appStateReducer = (state: AppState, action: Action): AppState => {
return {...state, isRequestInitiated : action.payload}
case 'TOGGLE_LOADER':
return {...state, isLoader : !state.isLoader}
+ case 'RESET_CLIENT_ID':
+ return {...state, clientId: ''}
default:
return state
}
diff --git a/ClientAdvisor/App/frontend/src/test/TestProvider.tsx b/ClientAdvisor/App/frontend/src/test/TestProvider.tsx
new file mode 100644
index 000000000..97a65cf68
--- /dev/null
+++ b/ClientAdvisor/App/frontend/src/test/TestProvider.tsx
@@ -0,0 +1,26 @@
+// TestProvider.tsx
+import React, { createContext, useReducer, ReactNode } from 'react';
+import { Conversation, ChatHistoryLoadingState } from '../api/models';
+// Define the AppState interface
+export interface AppState {
+ chatHistory: Conversation[];
+ isCosmosDBAvailable: { cosmosDB: boolean; status: string };
+ isChatHistoryOpen: boolean;
+ filteredChatHistory: Conversation[];
+ currentChat: Conversation | null;
+ frontendSettings: Record;
+ feedbackState: Record;
+ clientId: string;
+ isRequestInitiated: boolean;
+ isLoader: boolean;
+ chatHistoryLoadingState: ChatHistoryLoadingState;
+}
+
+// Define the context
+export const AppStateContext = createContext<{
+ state: AppState;
+ dispatch: React.Dispatch;
+}>({
+ state: {} as AppState,
+ dispatch: () => {},
+});
diff --git a/ClientAdvisor/App/frontend/src/test/setupTests.ts b/ClientAdvisor/App/frontend/src/test/setupTests.ts
new file mode 100644
index 000000000..3f517be72
--- /dev/null
+++ b/ClientAdvisor/App/frontend/src/test/setupTests.ts
@@ -0,0 +1,59 @@
+import '@testing-library/jest-dom'; // For jest-dom matchers like toBeInTheDocument
+
+import { initializeIcons } from '@fluentui/react/lib/Icons';
+initializeIcons();
+
+import { server } from '../mocks/server';
+
+// Establish API mocking before all tests
+beforeAll(() => server.listen());
+
+// Reset any request handlers that are declared in a test
+afterEach(() => server.resetHandlers());
+
+// Clean up after the tests are finished
+afterAll(() => server.close());
+
+// Mock IntersectionObserver
+class IntersectionObserverMock {
+ callback: IntersectionObserverCallback;
+ options: IntersectionObserverInit;
+
+ root: Element | null = null; // Required property
+ rootMargin: string = '0px'; // Required property
+ thresholds: number[] = [0]; // Required property
+
+ constructor(callback: IntersectionObserverCallback, options: IntersectionObserverInit) {
+ this.callback = callback;
+ this.options = options;
+ }
+
+ observe = jest.fn((target: Element) => {
+ // Simulate intersection with an observer instance
+ this.callback([{ isIntersecting: true }] as IntersectionObserverEntry[], this as IntersectionObserver);
+ });
+
+ unobserve = jest.fn();
+ disconnect = jest.fn(); // Required method
+ takeRecords = jest.fn(); // Required method
+}
+
+// Store the original IntersectionObserver
+const originalIntersectionObserver = window.IntersectionObserver;
+
+beforeAll(() => {
+ window.IntersectionObserver = IntersectionObserverMock as any;
+});
+
+afterAll(() => {
+ // Restore the original IntersectionObserver
+ window.IntersectionObserver = originalIntersectionObserver;
+});
+
+
+
+
+
+
+
+
diff --git a/ClientAdvisor/App/frontend/src/test/test.utils.tsx b/ClientAdvisor/App/frontend/src/test/test.utils.tsx
new file mode 100644
index 000000000..f980523aa
--- /dev/null
+++ b/ClientAdvisor/App/frontend/src/test/test.utils.tsx
@@ -0,0 +1,35 @@
+// test.utils.tsx
+import React from 'react';
+import { render, RenderResult } from '@testing-library/react';
+import { AppStateContext } from '../state/AppProvider';
+import { Conversation, ChatHistoryLoadingState } from '../api/models';
+// Default mock state
+const defaultMockState = {
+ chatHistory: [],
+ isCosmosDBAvailable: { cosmosDB: true, status: 'success' },
+ isChatHistoryOpen: true,
+ filteredChatHistory: [],
+ currentChat: null,
+ frontendSettings: {},
+ feedbackState: {},
+ clientId: '',
+ isRequestInitiated: false,
+ isLoader: false,
+ chatHistoryLoadingState: ChatHistoryLoadingState.Loading,
+};
+
+// Create a custom render function
+const renderWithContext = (
+ component: React.ReactElement,
+ contextState = {}
+): RenderResult => {
+ const state = { ...defaultMockState, ...contextState };
+ return render(
+
+ {component}
+
+ );
+};
+
+export * from '@testing-library/react';
+export { renderWithContext };
diff --git a/ClientAdvisor/App/frontend/tsconfig.json b/ClientAdvisor/App/frontend/tsconfig.json
index f117a3d18..962fb6e49 100644
--- a/ClientAdvisor/App/frontend/tsconfig.json
+++ b/ClientAdvisor/App/frontend/tsconfig.json
@@ -5,7 +5,7 @@
"lib": ["DOM", "DOM.Iterable", "ESNext"],
"allowJs": false,
"skipLibCheck": true,
- "esModuleInterop": false,
+ "esModuleInterop": true,
"allowSyntheticDefaultImports": true,
"strict": true,
"forceConsistentCasingInFileNames": true,
@@ -15,9 +15,16 @@
"isolatedModules": true,
"noEmit": true,
"jsx": "react-jsx",
+ "typeRoots": ["node_modules/@types"],
+ // "typeRoots": [
+ // "./node_modules/@types" // Ensure Jest types are found
+ // ],
"types": ["vite/client", "jest", "mocha", "node"],
"noUnusedLocals": false
},
- "include": ["src"],
+ "include": [
+ "src", // Your source files
+ "testMock", // Include your mocks if necessary
+ ],
"references": [{ "path": "./tsconfig.node.json" }]
}
diff --git a/ClientAdvisor/App/requirements-dev.txt b/ClientAdvisor/App/requirements-dev.txt
index b4eac12d8..9c8cdf4f7 100644
--- a/ClientAdvisor/App/requirements-dev.txt
+++ b/ClientAdvisor/App/requirements-dev.txt
@@ -12,3 +12,6 @@ gunicorn==20.1.0
quart-session==3.0.0
pymssql==2.3.0
httpx==0.27.0
+flake8==7.1.1
+black==24.8.0
+autoflake==2.3.1
diff --git a/ClientAdvisor/App/requirements.txt b/ClientAdvisor/App/requirements.txt
index a921be2a0..e97a6a961 100644
--- a/ClientAdvisor/App/requirements.txt
+++ b/ClientAdvisor/App/requirements.txt
@@ -12,3 +12,6 @@ gunicorn==20.1.0
quart-session==3.0.0
pymssql==2.3.0
httpx==0.27.0
+flake8==7.1.1
+black==24.8.0
+autoflake==2.3.1
diff --git a/ClientAdvisor/App/tools/data_collection.py b/ClientAdvisor/App/tools/data_collection.py
index 901b8be20..13cbed260 100644
--- a/ClientAdvisor/App/tools/data_collection.py
+++ b/ClientAdvisor/App/tools/data_collection.py
@@ -2,34 +2,33 @@
import sys
import asyncio
import json
+import app
from dotenv import load_dotenv
-#import the app.py module to gain access to the methods to construct payloads and
-#call the API through the sdk
+# import the app.py module to gain access to the methods to construct payloads and
+# call the API through the sdk
# Add parent directory to sys.path
-sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
-
-import app
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
-#function to enable loading of the .env file into the global variables of the app.py module
-def load_env_into_module(module_name, prefix=''):
+def load_env_into_module(module_name, prefix=""):
load_dotenv()
module = __import__(module_name)
for key, value in os.environ.items():
if key.startswith(prefix):
- setattr(module, key[len(prefix):], value)
+ setattr(module, key[len(prefix) :], value)
+
load_env_into_module("app")
-#some settings required in app.py
+# some settings required in app.py
app.SHOULD_STREAM = False
app.SHOULD_USE_DATA = app.should_use_data()
-#format:
+# format:
"""
[
{
@@ -40,71 +39,65 @@ def load_env_into_module(module_name, prefix=''):
generated_data_path = r"path/to/qa_input_file.json"
-with open(generated_data_path, 'r') as file:
+with open(generated_data_path, "r") as file:
data = json.load(file)
"""
Process a list of q(and a) pairs outputting to a file as we go.
"""
-async def process(data: list, file):
- for qa_pairs_obj in data:
- qa_pairs = qa_pairs_obj["qa_pairs"]
- for qa_pair in qa_pairs:
- question = qa_pair["question"]
- messages = [{"role":"user", "content":question}]
-
- print("processing question "+question)
-
- request = {"messages":messages, "id":"1"}
- response = await app.complete_chat_request(request)
- #print(json.dumps(response))
-
- messages = response["choices"][0]["messages"]
-
- tool_message = None
- assistant_message = None
-
- for message in messages:
- if message["role"] == "tool":
- tool_message = message["content"]
- elif message["role"] == "assistant":
- assistant_message = message["content"]
- else:
- raise ValueError("unknown message role")
-
- #construct data for ai studio evaluation
+async def process(data: list, file):
+ for qa_pairs_obj in data:
+ qa_pairs = qa_pairs_obj["qa_pairs"]
+ for qa_pair in qa_pairs:
+ question = qa_pair["question"]
+ messages = [{"role": "user", "content": question}]
- user_message = {"role":"user", "content":question}
- assistant_message = {"role":"assistant", "content":assistant_message}
+ print("processing question " + question)
- #prepare citations
- citations = json.loads(tool_message)
- assistant_message["context"] = citations
+ request = {"messages": messages, "id": "1"}
- #create output
- messages = []
- messages.append(user_message)
- messages.append(assistant_message)
+ response = await app.complete_chat_request(request)
- evaluation_data = {"messages":messages}
+ # print(json.dumps(response))
- #incrementally write out to the jsonl file
- file.write(json.dumps(evaluation_data)+"\n")
- file.flush()
+ messages = response["choices"][0]["messages"]
+ tool_message = None
+ assistant_message = None
-evaluation_data_file_path = r"path/to/output_file.jsonl"
+ for message in messages:
+ if message["role"] == "tool":
+ tool_message = message["content"]
+ elif message["role"] == "assistant":
+ assistant_message = message["content"]
+ else:
+ raise ValueError("unknown message role")
-with open(evaluation_data_file_path, "w") as file:
- asyncio.run(process(data, file))
+ # construct data for ai studio evaluation
+ user_message = {"role": "user", "content": question}
+ assistant_message = {"role": "assistant", "content": assistant_message}
+ # prepare citations
+ citations = json.loads(tool_message)
+ assistant_message["context"] = citations
+ # create output
+ messages = []
+ messages.append(user_message)
+ messages.append(assistant_message)
+ evaluation_data = {"messages": messages}
+ # incrementally write out to the jsonl file
+ file.write(json.dumps(evaluation_data) + "\n")
+ file.flush()
+evaluation_data_file_path = r"path/to/output_file.jsonl"
+with open(evaluation_data_file_path, "w") as file:
+ asyncio.run(process(data, file))
diff --git a/ClientAdvisor/AzureFunction/function_app.py b/ClientAdvisor/AzureFunction/function_app.py
index f9bfd8dc8..9f6368cdd 100644
--- a/ClientAdvisor/AzureFunction/function_app.py
+++ b/ClientAdvisor/AzureFunction/function_app.py
@@ -18,7 +18,6 @@
from semantic_kernel.functions.kernel_function_decorator import kernel_function
from semantic_kernel.kernel import Kernel
import pymssql
-
# Azure Function App
app = func.FunctionApp(http_auth_level=func.AuthLevel.ANONYMOUS)
@@ -40,7 +39,7 @@ def greeting(self, input: Annotated[str, "the question"]) -> Annotated[str, "The
client = openai.AzureOpenAI(
azure_endpoint=endpoint,
api_key=api_key,
- api_version="2023-09-01-preview"
+ api_version=api_version
)
deployment = os.environ.get("AZURE_OPEN_AI_DEPLOYMENT_MODEL")
try:
@@ -75,7 +74,7 @@ def get_SQL_Response(
client = openai.AzureOpenAI(
azure_endpoint=endpoint,
api_key=api_key,
- api_version="2023-09-01-preview"
+ api_version=api_version
)
deployment = os.environ.get("AZURE_OPEN_AI_DEPLOYMENT_MODEL")
@@ -100,6 +99,17 @@ def get_SQL_Response(
Do not include assets values unless asked for.
Always use ClientId = {clientid} in the query filter.
Always return client name in the query.
+ If a question involves date and time, always use FORMAT(YourDateTimeColumn, 'yyyy-MM-dd HH:mm:ss') in the query.
+ If asked, provide information about client meetings according to the requested timeframe: give details about upcoming meetings if asked for "next" or "upcoming" meetings, and provide details about past meetings if asked for "previous" or "last" meetings including the scheduled time and don't filter with "LIMIT 1" in the query.
+ If asked about the number of past meetings with this client, provide the count of records where the ConversationId is neither null nor an empty string and the EndTime is before the current date in the query.
+ If asked, provide information on the client's investment risk tolerance level in the query.
+ If asked, provide information on the client's portfolio performance in the query.
+ If asked, provide information about the client's top-performing investments in the query.
+ If asked, provide information about any recent changes in the client's investment allocations in the query.
+ If asked about the client's portfolio performance over the last quarter, calculate the total investment by summing the investment amounts where AssetDate is greater than or equal to the date from one quarter ago using DATEADD(QUARTER, -1, GETDATE()) in the query.
+ If asked about upcoming important dates or deadlines for the client, always ensure that StartTime is greater than the current date. Do not convert the formats of StartTime and EndTime and consistently provide the upcoming dates along with the scheduled times in the query.
+ To determine the asset value, sum the investment values for the most recent available date. If asked for the asset types in the portfolio and the present of each, provide a list of each asset type with its most recent investment value.
+ If the user inquires about asset on a specific date ,sum the investment values for the specific date avoid summing values from all dates prior to the requested date.If asked for the asset types in the portfolio and the value of each for specific date , provide a list of each asset type with specific date investment value avoid summing values from all dates prior to the requested date.
Only return the generated sql query. do not return anything else'''
try:
@@ -152,13 +162,16 @@ def get_answers_from_calltranscripts(
client = openai.AzureOpenAI(
azure_endpoint= endpoint, #f"{endpoint}/openai/deployments/{deployment}/extensions",
api_key=apikey,
- api_version="2024-02-01"
+ api_version=api_version
)
query = question
- system_message = '''You are an assistant who provides wealth advisors with helpful information to prepare for client meetings.
- You have access to the client’s meeting call transcripts.
- You can use this information to answer questions about the clients'''
+ system_message = '''You are an assistant who provides wealth advisors with helpful information to prepare for client meetings and provide details on the call transcripts.
+ You have access to the client’s meetings and call transcripts
+ When asked about action items from previous meetings with the client, **ALWAYS provide information only for the most recent dates**.
+ Always return time in "HH:mm" format for the client in response.
+ If requested for call transcript(s), the response for each transcript should be summarized separately and Ensure all transcripts for the specified client are retrieved and format **must** follow as First Call Summary,Second Call Summary etc.
+ Your answer must **not** include any client identifiers or ids or numbers or ClientId in the final response.'''
completion = client.chat.completions.create(
model = deployment,
@@ -182,7 +195,6 @@ def get_answers_from_calltranscripts(
"parameters": {
"endpoint": search_endpoint,
"index_name": index_name,
- "semantic_configuration": "default",
"query_type": "vector_simple_hybrid", #"vector_semantic_hybrid"
"fields_mapping": {
"content_fields_separator": "\n",
@@ -259,14 +271,20 @@ async def stream_openai_text(req: Request) -> StreamingResponse:
settings.max_tokens = 800
settings.temperature = 0
+ # Read the HTML file
+ with open("table.html", "r") as file:
+ html_content = file.read()
+
system_message = '''you are a helpful assistant to a wealth advisor.
Do not answer any questions not related to wealth advisors queries.
- If the client name and client id do not match, only return - Please only ask questions about the selected client or select another client to inquire about their details. do not return any other information.
- Only use the client name returned from database in the response.
+ **If the client name in the question does not match the selected client's name**, always return: "Please ask questions only about the selected client." Do not provide any other information.
+ Always consider to give selected client full name only in response and do not use other example names also consider my client means currently selected client.
If you cannot answer the question, always return - I cannot answer this question from the data available. Please rephrase or add more details.
** Remove any client identifiers or ids or numbers or ClientId in the final response.
+ Client name **must be** same as retrieved from database.
'''
-
+ system_message += html_content
+
user_query = query.replace('?',' ')
user_query_prompt = f'''{user_query}. Always send clientId as {user_query.split(':::')[-1]} '''
@@ -280,4 +298,4 @@ async def stream_openai_text(req: Request) -> StreamingResponse:
settings=settings
)
- return StreamingResponse(stream_processor(sk_response), media_type="text/event-stream")
\ No newline at end of file
+ return StreamingResponse(stream_processor(sk_response), media_type="text/event-stream")
diff --git a/ClientAdvisor/AzureFunction/table.html b/ClientAdvisor/AzureFunction/table.html
new file mode 100644
index 000000000..51ded0bea
--- /dev/null
+++ b/ClientAdvisor/AzureFunction/table.html
@@ -0,0 +1,11 @@
+
+