From 923c6a922234f67c0669d0337a25e9999c4455cd Mon Sep 17 00:00:00 2001 From: Rohini-Microsoft Date: Thu, 12 Jun 2025 10:26:14 +0530 Subject: [PATCH 1/6] Implemented Log execution time per prompt in Report --- tests/e2e-test/requirements.txt | 6 +- tests/e2e-test/tests/conftest.py | 93 +++++--- .../tests/test_poc_byoc_client_advisor.py | 211 +++++++----------- 3 files changed, 149 insertions(+), 161 deletions(-) diff --git a/tests/e2e-test/requirements.txt b/tests/e2e-test/requirements.txt index 0d70ceecc..596cdaeeb 100644 --- a/tests/e2e-test/requirements.txt +++ b/tests/e2e-test/requirements.txt @@ -1,3 +1,7 @@ pytest-playwright pytest-html -python-dotenv \ No newline at end of file +python-dotenv +pytest-check +pytest-reporter-html1 +py +beautifulsoup4 \ No newline at end of file diff --git a/tests/e2e-test/tests/conftest.py b/tests/e2e-test/tests/conftest.py index 79d6f6387..328a1929f 100644 --- a/tests/e2e-test/tests/conftest.py +++ b/tests/e2e-test/tests/conftest.py @@ -1,59 +1,98 @@ -from pathlib import Path +from bs4 import BeautifulSoup import pytest from playwright.sync_api import sync_playwright from config.constants import * -from slugify import slugify -from pages.homePage import HomePage -from pages.loginPage import LoginPage -from dotenv import load_dotenv +import logging +import atexit import os +import io - +# Playwright session-scoped login/logout fixture @pytest.fixture(scope="session") def login_logout(): - # perform login and browser close once in a session with sync_playwright() as p: browser = p.chromium.launch(headless=False) context = browser.new_context() context.set_default_timeout(80000) page = context.new_page() - # Navigate to the login URL page.goto(URL) - # Wait for the login form to appear page.wait_for_load_state('networkidle') page.wait_for_timeout(5000) - # # login to web url with username and password + # Optional login steps # login_page = LoginPage(page) # load_dotenv() # login_page.authenticate(os.getenv('user_name'), 
os.getenv('pass_word')) yield page - # perform close the browser browser.close() - +# Change HTML report title @pytest.hookimpl(tryfirst=True) def pytest_html_report_title(report): report.title = "Automation_BYOc_ClientAdvisor" +log_streams = {} + +# Capture logs per test +@pytest.hookimpl(tryfirst=True) +def pytest_runtest_setup(item): + stream = io.StringIO() + handler = logging.StreamHandler(stream) + handler.setLevel(logging.INFO) + logger = logging.getLogger() + logger.addHandler(handler) + log_streams[item.nodeid] = (handler, stream) +# Add captured logs to report @pytest.hookimpl(hookwrapper=True) def pytest_runtest_makereport(item, call): - pytest_html = item.config.pluginmanager.getplugin("html") outcome = yield - screen_file="" report = outcome.get_result() - extra = getattr(report, "extra", []) - if report.when == "call": - if report.failed and "page" in item.funcargs: - page = item.funcargs["page"] - screenshot_dir = Path("screenshots") - screenshot_dir.mkdir(exist_ok=True) - screen_file = str(screenshot_dir / f"{slugify(item.nodeid)}.png") - page.screenshot(path=screen_file) - xfail = hasattr(report, "wasxfail") - if (report.skipped and xfail) or (report.failed and not xfail): - # add the screenshots to the html report - extra.append(pytest_html.extras.png(screen_file)) - report.extras = extra + + handler, stream = log_streams.get(item.nodeid, (None, None)) + if handler and stream: + handler.flush() + log_output = stream.getvalue() + logger = logging.getLogger() + logger.removeHandler(handler) + report.description = f"
{log_output.strip()}
" + log_streams.pop(item.nodeid, None) + else: + report.description = "" + +# Optional: simplify test display names if using `prompt` +def pytest_collection_modifyitems(items): + for item in items: + # Retain only the readable part after the last `[` and before the closing `]` + if "[" in item.nodeid and "]" in item.nodeid: + pretty_name = item.nodeid.split("[", 1)[1].rsplit("]", 1)[0] + item._nodeid = pretty_name + else: + # Use function name as fallback + item._nodeid = item.name + + +# Rename 'Duration' column in HTML report +def rename_duration_column(): + report_path = os.path.abspath("report.html") + if not os.path.exists(report_path): + print("Report file not found, skipping column rename.") + return + + with open(report_path, 'r', encoding='utf-8') as f: + soup = BeautifulSoup(f, 'html.parser') + + headers = soup.select('table#results-table thead th') + for th in headers: + if th.text.strip() == 'Duration': + th.string = 'Execution Time' + break + else: + print("'Duration' column not found in report.") + + with open(report_path, 'w', encoding='utf-8') as f: + f.write(str(soup)) + +# Run after tests complete +atexit.register(rename_duration_column) diff --git a/tests/e2e-test/tests/test_poc_byoc_client_advisor.py b/tests/e2e-test/tests/test_poc_byoc_client_advisor.py index 18aa7cd08..e1c99612c 100644 --- a/tests/e2e-test/tests/test_poc_byoc_client_advisor.py +++ b/tests/e2e-test/tests/test_poc_byoc_client_advisor.py @@ -1,141 +1,86 @@ +import logging +import time +import pytest from config.constants import * from pages.homePage import HomePage +logger = logging.getLogger(__name__) -# def test_chatbot_responds_with_upcoming_meeting_schedule_date(login_logout): -# page = login_logout -# home_page = HomePage(page) -# # validate page title -# assert homepage_title == page.locator(home_page.HOME_PAGE_TITLE).text_content() -# # select a client -# home_page.select_a_client(client_name) -# # validate selected client name -# assert client_name == 
page.locator(home_page.SELECTED_CLIENT_NAME_LABEL).text_content() -# # ask a question -# home_page.enter_a_question(next_meeting_question) -# # click send button -# home_page.click_send_button() -# # Validate response status code -# home_page.validate_response_status() -# # validate the upcoming meeting date-time in both side panel and response -# home_page.validate_next_meeting_date_time() +def validate_home_and_client(home): + assert homepage_title == home.page.locator(home.HOME_PAGE_TITLE).text_content() + home.select_a_client(client_name) + assert client_name == home.page.locator(home.SELECTED_CLIENT_NAME_LABEL).text_content() -def test_save_chat_confirmation_popup(login_logout): - page = login_logout - home_page = HomePage(page) - # validate page title - assert homepage_title == page.locator(home_page.HOME_PAGE_TITLE).text_content() - # select a client - home_page.select_a_client(client_name) - # validate selected client name - assert client_name == page.locator(home_page.SELECTED_CLIENT_NAME_LABEL).text_content() - # clear the chat if any - home_page.click_clear_chat_icon() - # ask a question - home_page.enter_a_question(golden_path_question1) - # click send button - home_page.click_send_button() - # Validate response status code - home_page.validate_response_status() - #click on the plus button - home_page.click_on_save_chat_plus_icon() - assert page.locator(home_page.SAVE_CHAT_CONFIRMATION_POPUPTEXT).is_visible() +def save_chat_confirmation_popup(home): + home.click_clear_chat_icon() + home.enter_a_question(golden_path_question1) + home.click_send_button() + home.validate_response_status() + home.click_on_save_chat_plus_icon() + assert home.page.locator(home.SAVE_CHAT_CONFIRMATION_POPUPTEXT).is_visible() -def test_delete_chat_history_during_response(login_logout): - page = login_logout - home_page = HomePage(page) - # validate page title - assert homepage_title == page.locator(home_page.HOME_PAGE_TITLE).text_content() - # select a client - 
home_page.select_a_client(client_name) - # validate selected client name - assert client_name == page.locator(home_page.SELECTED_CLIENT_NAME_LABEL).text_content() - # ask a question - home_page.enter_a_question(golden_path_question1) - # click send button - home_page.click_send_button() - #click on the plus button - home_page.click_on_save_chat_plus_icon() - assert page.locator(home_page.SAVE_CHAT_CONFIRMATION_POPUPTEXT).is_visible() - #click on show chat history button - home_page.click_on_show_chat_history_button() - #click on saved chat history - home_page.click_on_saved_chat() - #ask the question - home_page.enter_a_question(golden_path_question1) - #click on click_send_button_for_chat_history_response - home_page.click_send_button_for_chat_history_response() - # validate the delete icon disabled - assert page.locator(home_page.SHOW_CHAT_HISTORY_DELETE_ICON).is_disabled() - # click on hide chat history button - home_page.click_hide_chat_history_button() - # clear the chat - home_page.click_clear_chat_icon() - -def test_golden_path_demo_script(login_logout): +def delete_chat_history_during_response(home): + home.enter_a_question(golden_path_question1) + home.click_send_button() + home.click_on_save_chat_plus_icon() + assert home.page.locator(home.SAVE_CHAT_CONFIRMATION_POPUPTEXT).is_visible() + home.click_on_show_chat_history_button() + home.click_on_saved_chat() + home.enter_a_question(golden_path_question1) + home.click_send_button_for_chat_history_response() + assert home.page.locator(home.SHOW_CHAT_HISTORY_DELETE_ICON).is_disabled() + home.click_hide_chat_history_button() + home.click_clear_chat_icon() + +def golden_path_full_demo(home): + _validate_golden_path_response(home, golden_path_question1) + _validate_golden_path_response(home, golden_path_question2) + _validate_golden_path_response(home, golden_path_question3) + _validate_golden_path_response(home, golden_path_question4) + _validate_golden_path_response(home, golden_path_question5) + 
_validate_client_info_absence(home, golden_path_question7) + +# Define test steps and actions +test_cases = [ + ("Validate homepage and select client", validate_home_and_client), + ("Save chat confirmation popup", save_chat_confirmation_popup), + ("Delete chat history during response", delete_chat_history_during_response), + ("Golden path full demo", golden_path_full_demo), +] + +# Create readable test IDs +test_ids = [f"{i+1:02d}. {desc}" for i, (desc, _) in enumerate(test_cases)] + +def _validate_golden_path_response(home, question): + home.enter_a_question(question) + home.click_send_button() + home.validate_response_status() + response_text = home.page.locator(home.ANSWER_TEXT) + assert response_text.nth(response_text.count() - 1).text_content() != invalid_response, \ + f"Incorrect response for question: {question}" + +def _validate_client_info_absence(home, question): + home.enter_a_question(question) + home.click_send_button() + home.validate_response_status() + response_text = home.page.locator(home.ANSWER_TEXT).nth(home.page.locator(home.ANSWER_TEXT).count() - 1).text_content().lower() + assert "arun sharma" not in response_text, "Other client information appeared in response." + assert client_name.lower() not in response_text, f"Client name '{client_name}' should not be in response for question: {question}" + +@pytest.mark.parametrize("desc, action", test_cases, ids=test_ids) +def test_home_page_cases(login_logout, desc, action, request): + """ + Parametrized test for home page scenarios including chat flows and validations. 
+ """ page = login_logout home_page = HomePage(page) - # validate page title - assert homepage_title == page.locator(home_page.HOME_PAGE_TITLE).text_content() - # select a client - home_page.select_a_client(client_name) - # validate selected client name - assert client_name == page.locator(home_page.SELECTED_CLIENT_NAME_LABEL).text_content() - # ask a question - home_page.enter_a_question(golden_path_question1) - # click send button - home_page.click_send_button() - # Validate response status code - home_page.validate_response_status() - response_text = page.locator(home_page.ANSWER_TEXT) - # validate the response - assert response_text.nth(response_text.count()-1).text_content() != invalid_response,"Incorrect response for question: "+golden_path_question1 - # ask a question - home_page.enter_a_question(golden_path_question2) - # click send button - home_page.click_send_button() - # Validate response status code - home_page.validate_response_status() - # validate the response - assert response_text.nth(response_text.count() - 1).text_content() != invalid_response,"Incorrect response for question: "+golden_path_question2 - # ask a question - home_page.enter_a_question(golden_path_question3) - # click send button - home_page.click_send_button() - # Validate response status code - home_page.validate_response_status() - # validate the response - assert response_text.nth(response_text.count() - 1).text_content() != invalid_response,"Incorrect response for question: "+golden_path_question3 - # ask a question - home_page.enter_a_question(golden_path_question4) - # click send button - home_page.click_send_button() - # Validate response status code - home_page.validate_response_status() - # validate the response - assert response_text.nth(response_text.count() - 1).text_content() != invalid_response,"Incorrect response for question: "+golden_path_question4 - # ask a question - home_page.enter_a_question(golden_path_question5) - # click send button - 
home_page.click_send_button() - # Validate response status code - home_page.validate_response_status() - # validate the response - assert response_text.nth(response_text.count() - 1).text_content() != invalid_response,"Incorrect response for question: "+golden_path_question5 - # # ask a question - # home_page.enter_a_question(golden_path_question6) - # # click send button - # home_page.click_send_button() - # # Validate response status code - # home_page.validate_response_status() - # # validate the response - # assert response_text.nth(response_text.count() - 1).text_content() != invalid_response,"Incorrect response for question: "+golden_path_question6 - # ask a question - home_page.enter_a_question(golden_path_question7) - # click send button - home_page.click_send_button() - # Validate response status code - home_page.validate_response_status() - # validate the response - assert (response_text.nth(response_text.count() - 1).text_content().lower()).find("arun sharma") == -1,"Other client information in response for client: "+client_name - assert (response_text.nth(response_text.count() - 1).text_content().lower()).find(client_name) == -1,"Response is generated for selected client "+client_name+" even client name is different in question: "+golden_path_question7 \ No newline at end of file + home_page.page = page # Required for locator access in helper functions + logger.info(f"Running step: {desc}") + + start = time.time() + action(home_page) + end = time.time() + + duration = end - start + logger.info(f"Execution Time for '{desc}': {duration:.2f}s") + request.node._report_sections.append(("call", "log", f"Execution time: {duration:.2f}s")) From 4bb533ec1d0d35dfa76313193740e6acb7542995 Mon Sep 17 00:00:00 2001 From: Rohini-Microsoft Date: Fri, 13 Jun 2025 15:53:06 +0530 Subject: [PATCH 2/6] updated readme --- tests/e2e-test/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/e2e-test/README.md b/tests/e2e-test/README.md index 
453eb273a..a899aa783 100644 --- a/tests/e2e-test/README.md +++ b/tests/e2e-test/README.md @@ -23,7 +23,7 @@ Installing Playwright Pytest from Virtual Environment - Install the required browsers "playwright install" Run test cases -- To run test cases from your 'tests' folder : "pytest --headed --html=report/report.html" +- To run test cases from your 'tests/e2e-test' folder : "pytest --headed --html=report/report.html" Steps need to be followed to enable Access Token and Client Credentials - Go to App Service from the resource group and select the Access Tokens check box in 'Manage->Authentication' tab From 2b73b28eda3c89be1d8e6d838bf4c55a12b7054e Mon Sep 17 00:00:00 2001 From: Rohini-Microsoft Date: Wed, 18 Jun 2025 18:10:29 +0530 Subject: [PATCH 3/6] updated the code --- tests/e2e-test/pages/homePage.py | 57 +++++--- .../tests/test_poc_byoc_client_advisor.py | 129 +++++++++++------- 2 files changed, 114 insertions(+), 72 deletions(-) diff --git a/tests/e2e-test/pages/homePage.py b/tests/e2e-test/pages/homePage.py index 7301a8eda..f563326ed 100644 --- a/tests/e2e-test/pages/homePage.py +++ b/tests/e2e-test/pages/homePage.py @@ -20,6 +20,11 @@ class HomePage(BasePage): HIDE_CHAT_HISTORY_BUTTON = "//span[text()='Hide chat history']" USER_CHAT_MESSAGE = "(//div[contains(@class,'chatMessageUserMessage')])[1]" STOP_GENERATING_LABEL = "//span[text()='Stop generating']" + CHAT_HISTORY_NAME = "//div[contains(@class, 'ChatHistoryListItemCell_chatTitle')]" + CLEAR_CHAT_HISTORY_MENU = "//button[@id='moreButton']" + CLEAR_CHAT_HISTORY = "//button[@role='menuitem']" + REFERENCE_LINKS_IN_RESPONSE = "//span[@role='button' and contains(@class, 'citationContainer')]" + CLOSE_BUTTON = "svg[role='button'][tabindex='0']" def __init__(self, page): self.page = page @@ -37,6 +42,31 @@ def enter_a_question(self, text): self.page.locator(self.TYPE_QUESTION_TEXT_AREA).fill(text) self.page.wait_for_timeout(2000) + def delete_chat_history(self): + 
self.page.locator(self.SHOW_CHAT_HISTORY_BUTTON).click() + chat_history = self.page.locator("//span[contains(text(),'No chat history.')]") + if chat_history.is_visible(): + self.page.wait_for_load_state('networkidle') + self.page.wait_for_timeout(2000) + self.page.locator(self.HIDE_CHAT_HISTORY_BUTTON).click() + + + else: + self.page.locator(self.CLEAR_CHAT_HISTORY_MENU).click() + self.page.locator(self.CLEAR_CHAT_HISTORY).click() + self.page.get_by_role("button", name="Clear All").click() + self.page.wait_for_timeout(10000) + self.page.locator(self.HIDE_CHAT_HISTORY_BUTTON).click() + self.page.wait_for_load_state('networkidle') + self.page.wait_for_timeout(2000) + + def close_chat_history(self): + self.page.locator(self.HIDE_CHAT_HISTORY_BUTTON).click() + self.page.wait_for_load_state('networkidle') + self.page.wait_for_timeout(2000) + + + def click_send_button(self): # Click on send button in question area self.page.locator(self.SEND_BUTTON).click() @@ -54,30 +84,17 @@ def validate_next_meeting_date_time(self): response_raw_datetime = self.page.locator(self.ANSWER_TEXT).text_content() BasePage.compare_raw_date_time(self,response_raw_datetime,sidepanel_raw_datetime) - def click_on_save_chat_plus_icon(self): - self.page.wait_for_selector(self.SAVE_CHATHISTORY_PLUS_ICON) - self.page.locator(self.SAVE_CHATHISTORY_PLUS_ICON).click() - self.page.wait_for_timeout(1000) def click_on_show_chat_history_button(self): self.page.wait_for_selector(self.SHOW_CHAT_HISTORY_BUTTON) self.page.locator(self.SHOW_CHAT_HISTORY_BUTTON).click() - self.page.wait_for_timeout(1000) - - def click_send_button_for_chat_history_response(self): - # Click on send button in question area - self.page.locator(self.SEND_BUTTON).click() - def click_on_saved_chat(self): - #click on saved chat in the show chat history section - self.page.wait_for_selector(self.SAVED_CHAT_LABEL) - self.page.locator(self.SAVED_CHAT_LABEL).click() - def click_clear_chat_icon(self): - # Click on clear chat icon in question 
area - if self.page.locator(self.USER_CHAT_MESSAGE).is_visible(): - self.page.locator(self.CLEAR_CHAT_ICON).click() + def has_reference_link(self): + # Get all assistant messages + assistant_messages = self.page.locator("div.chat-message.assistant") + last_assistant = assistant_messages.nth(assistant_messages.count() - 1) - def click_hide_chat_history_button(self): - # Click on hide chat history button in question area - self.page.locator(self.HIDE_CHAT_HISTORY_BUTTON).click() \ No newline at end of file + # Use XPath properly by prefixing with 'xpath=' + reference_links = last_assistant.locator("xpath=.//span[@role='button' and contains(@class, 'citationContainer')]") + return reference_links.count() > 0 \ No newline at end of file diff --git a/tests/e2e-test/tests/test_poc_byoc_client_advisor.py b/tests/e2e-test/tests/test_poc_byoc_client_advisor.py index e1c99612c..bc21866e8 100644 --- a/tests/e2e-test/tests/test_poc_byoc_client_advisor.py +++ b/tests/e2e-test/tests/test_poc_byoc_client_advisor.py @@ -3,84 +3,109 @@ import pytest from config.constants import * from pages.homePage import HomePage +import io logger = logging.getLogger(__name__) +# ----------------- Part A: Functional Tests ----------------- + def validate_home_and_client(home): assert homepage_title == home.page.locator(home.HOME_PAGE_TITLE).text_content() home.select_a_client(client_name) assert client_name == home.page.locator(home.SELECTED_CLIENT_NAME_LABEL).text_content() -def save_chat_confirmation_popup(home): - home.click_clear_chat_icon() - home.enter_a_question(golden_path_question1) - home.click_send_button() - home.validate_response_status() - home.click_on_save_chat_plus_icon() - assert home.page.locator(home.SAVE_CHAT_CONFIRMATION_POPUPTEXT).is_visible() - def delete_chat_history_during_response(home): - home.enter_a_question(golden_path_question1) - home.click_send_button() - home.click_on_save_chat_plus_icon() - assert 
home.page.locator(home.SAVE_CHAT_CONFIRMATION_POPUPTEXT).is_visible() - home.click_on_show_chat_history_button() - home.click_on_saved_chat() - home.enter_a_question(golden_path_question1) - home.click_send_button_for_chat_history_response() - assert home.page.locator(home.SHOW_CHAT_HISTORY_DELETE_ICON).is_disabled() - home.click_hide_chat_history_button() - home.click_clear_chat_icon() - -def golden_path_full_demo(home): - _validate_golden_path_response(home, golden_path_question1) - _validate_golden_path_response(home, golden_path_question2) - _validate_golden_path_response(home, golden_path_question3) - _validate_golden_path_response(home, golden_path_question4) - _validate_golden_path_response(home, golden_path_question5) + home.delete_chat_history() + # home.close_chat_history() + + +def validate_client_absence(home): _validate_client_info_absence(home, golden_path_question7) -# Define test steps and actions -test_cases = [ - ("Validate homepage and select client", validate_home_and_client), - ("Save chat confirmation popup", save_chat_confirmation_popup), - ("Delete chat history during response", delete_chat_history_during_response), - ("Golden path full demo", golden_path_full_demo), +functional_test_cases = [ + ("Validate homepage is loaded and select client", validate_home_and_client), + ("Validate delete chat history", delete_chat_history_during_response), +] + +@pytest.mark.parametrize("desc, action", functional_test_cases, ids=[x[0] for x in functional_test_cases]) +def test_functional_flows(login_logout, desc, action, request): + page = login_logout + home_page = HomePage(page) + home_page.page = page + + log_capture = io.StringIO() + handler = logging.StreamHandler(log_capture) + logger.addHandler(handler) + + logger.info(f"Running step: {desc}") + start = time.time() + try: + action(home_page) + finally: + duration = time.time() - start + logger.info(f"Execution Time for '{desc}': {duration:.2f}s") + logger.removeHandler(handler) + 
request.node._report_sections.append(("call", "log", log_capture.getvalue())) + +# ----------------- Part B: GP Question Tests ----------------- + +# GP Questions List +gp_questions = [ + golden_path_question1, + golden_path_question2, + golden_path_question3, + golden_path_question4, + golden_path_question5 ] -# Create readable test IDs -test_ids = [f"{i+1:02d}. {desc}" for i, (desc, _) in enumerate(test_cases)] +# Custom readable test IDs +gp_test_ids = [f"Validate response for prompt: {q[:60]}... " for i, q in enumerate(gp_questions)] def _validate_golden_path_response(home, question): home.enter_a_question(question) home.click_send_button() home.validate_response_status() response_text = home.page.locator(home.ANSWER_TEXT) - assert response_text.nth(response_text.count() - 1).text_content() != invalid_response, \ - f"Incorrect response for question: {question}" + last_response = response_text.nth(response_text.count() - 1).text_content() + assert last_response != invalid_response, f"Incorrect response for: {question}" + assert last_response != "Chart cannot be generated.", f"Chart error for: {question}" + + if home.has_reference_link(): + logger.info("Citation link found. Opening citation.") + home.click_reference_link_in_response() + logger.info("Closing citation.") + home.close_citation() + + home.click_on_show_chat_history_button() + home.close_chat_history() + def _validate_client_info_absence(home, question): home.enter_a_question(question) home.click_send_button() home.validate_response_status() - response_text = home.page.locator(home.ANSWER_TEXT).nth(home.page.locator(home.ANSWER_TEXT).count() - 1).text_content().lower() + response_text = home.page.locator(home.ANSWER_TEXT).nth( + home.page.locator(home.ANSWER_TEXT).count() - 1 + ).text_content().lower() assert "arun sharma" not in response_text, "Other client information appeared in response." 
- assert client_name.lower() not in response_text, f"Client name '{client_name}' should not be in response for question: {question}" + assert client_name.lower() not in response_text, f"Client name '{client_name}' appeared in response." -@pytest.mark.parametrize("desc, action", test_cases, ids=test_ids) -def test_home_page_cases(login_logout, desc, action, request): - """ - Parametrized test for home page scenarios including chat flows and validations. - """ +@pytest.mark.parametrize("question", gp_questions, ids=gp_test_ids) +def test_gp_questions_individual(login_logout, question, request): page = login_logout - home_page = HomePage(page) - home_page.page = page # Required for locator access in helper functions - logger.info(f"Running step: {desc}") + home = HomePage(page) + home.page = page - start = time.time() - action(home_page) - end = time.time() + log_capture = io.StringIO() + handler = logging.StreamHandler(log_capture) + logger.addHandler(handler) - duration = end - start - logger.info(f"Execution Time for '{desc}': {duration:.2f}s") - request.node._report_sections.append(("call", "log", f"Execution time: {duration:.2f}s")) + logger.info(f"Running Golden Path test for: {question}") + start = time.time() + try: + _validate_golden_path_response(home, question) + finally: + duration = time.time() - start + logger.info(f"Execution Time for GP Question: {duration:.2f}s") + logger.removeHandler(handler) + request.node._report_sections.append(("call", "log", log_capture.getvalue())) From abc5f09adbcdd217929b6978e0069e5427b89489 Mon Sep 17 00:00:00 2001 From: Rohini-Microsoft Date: Wed, 18 Jun 2025 23:56:50 +0530 Subject: [PATCH 4/6] added functions --- tests/e2e-test/pages/homePage.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tests/e2e-test/pages/homePage.py b/tests/e2e-test/pages/homePage.py index f563326ed..2b354f01e 100644 --- a/tests/e2e-test/pages/homePage.py +++ b/tests/e2e-test/pages/homePage.py @@ -20,6 +20,8 @@ class 
HomePage(BasePage): HIDE_CHAT_HISTORY_BUTTON = "//span[text()='Hide chat history']" USER_CHAT_MESSAGE = "(//div[contains(@class,'chatMessageUserMessage')])[1]" STOP_GENERATING_LABEL = "//span[text()='Stop generating']" + # # SHOW_CHAT_HISTORY_BUTTON = "//button[normalize-space()='Show Chat History']" + # HIDE_CHAT_HISTORY_BUTTON = "//button[.//span[text()='Hide chat history']]" CHAT_HISTORY_NAME = "//div[contains(@class, 'ChatHistoryListItemCell_chatTitle')]" CLEAR_CHAT_HISTORY_MENU = "//button[@id='moreButton']" CLEAR_CHAT_HISTORY = "//button[@role='menuitem']" @@ -88,7 +90,21 @@ def validate_next_meeting_date_time(self): def click_on_show_chat_history_button(self): self.page.wait_for_selector(self.SHOW_CHAT_HISTORY_BUTTON) self.page.locator(self.SHOW_CHAT_HISTORY_BUTTON).click() + self.page.wait_for_timeout(1000) + def click_send_button_for_chat_history_response(self): + # Click on send button in question area + self.page.locator(self.SEND_BUTTON).click() + + + def click_clear_chat_icon(self): + # Click on clear chat icon in question area + if self.page.locator(self.USER_CHAT_MESSAGE).is_visible(): + self.page.locator(self.CLEAR_CHAT_ICON).click() + + def click_hide_chat_history_button(self): + # Click on hide chat history button in question area + self.page.locator(self.HIDE_CHAT_HISTORY_BUTTON).click() def has_reference_link(self): # Get all assistant messages From 779694c2a36f725397b8b1c69dfc7812361e9391 Mon Sep 17 00:00:00 2001 From: Harsh-Microsoft Date: Thu, 19 Jun 2025 11:13:40 +0530 Subject: [PATCH 5/6] feat: added fdp changes, updated to use ai agents instead of openai assistants (#566) * initial bicep changes for fdp * update role assignments in bicep * feat: initial fdp changes for client advisor * updated post deployment scripts to use keyless authentication * rebuilt main.json * fix configuration handling and error checking in backend services * updated unit tests * Refactor code for improved readability and maintainability by organizing imports and 
formatting code blocks consistently across multiple files. --- docs/DeploymentGuide.md | 4 +- infra/abbreviations.json | 2 + infra/deploy_ai_foundry.bicep | 384 +++------ infra/deploy_app_service.bicep | 136 ++- infra/deploy_cosmos_db.bicep | 45 - infra/deploy_sql_db.bicep | 24 - infra/deploy_storage_account.bicep | 9 - infra/main.bicep | 33 +- infra/main.bicepparam | 2 +- infra/main.json | 744 +++++----------- .../index_scripts/create_search_index.py | 265 +++--- .../index_scripts/create_sql_tables.py | 256 ++++-- .../index_scripts/create_update_sql_dates.py | 54 +- infra/scripts/process_sample_data.sh | 15 +- infra/scripts/run_create_index_scripts.sh | 48 ++ src/App/.env.sample | 9 +- src/App/app.py | 813 +++++++----------- src/App/backend/agents/agent_factory.py | 63 ++ src/App/backend/chat_logic_handler.py | 381 -------- src/App/backend/common/config.py | 154 ++++ src/App/backend/{ => common}/event_utils.py | 6 +- src/App/backend/{ => common}/utils.py | 9 +- .../backend/plugins/chat_with_data_plugin.py | 258 ++++++ src/App/backend/services/chat_service.py | 64 ++ .../cosmosdb_service.py} | 0 src/App/backend/services/sqldb_service.py | 244 ++++++ src/App/db.py | 60 -- src/App/requirements.txt | 15 +- .../backend/agents/test_agent_factory.py | 106 +++ src/App/tests/backend/auth/test_auth.py | 3 +- .../tests/backend/common/test_event_utils.py | 81 ++ .../tests/backend/{ => common}/test_utils.py | 22 +- .../plugins/test_chat_with_data_plugin.py | 276 ++++++ .../backend/services/test_chat_service.py | 196 +++++ .../test_cosmosdb_service.py | 2 +- .../backend/services/test_sqldb_service.py | 443 ++++++++++ src/App/tests/test_app.py | 166 ++-- src/App/tests/test_db.py | 92 -- 38 files changed, 3120 insertions(+), 2364 deletions(-) create mode 100644 src/App/backend/agents/agent_factory.py delete mode 100644 src/App/backend/chat_logic_handler.py create mode 100644 src/App/backend/common/config.py rename src/App/backend/{ => common}/event_utils.py (89%) rename 
src/App/backend/{ => common}/utils.py (97%) create mode 100644 src/App/backend/plugins/chat_with_data_plugin.py create mode 100644 src/App/backend/services/chat_service.py rename src/App/backend/{history/cosmosdbservice.py => services/cosmosdb_service.py} (100%) create mode 100644 src/App/backend/services/sqldb_service.py delete mode 100644 src/App/db.py create mode 100644 src/App/tests/backend/agents/test_agent_factory.py create mode 100644 src/App/tests/backend/common/test_event_utils.py rename src/App/tests/backend/{ => common}/test_utils.py (90%) create mode 100644 src/App/tests/backend/plugins/test_chat_with_data_plugin.py create mode 100644 src/App/tests/backend/services/test_chat_service.py rename src/App/tests/backend/{history => services}/test_cosmosdb_service.py (98%) create mode 100644 src/App/tests/backend/services/test_sqldb_service.py delete mode 100644 src/App/tests/test_db.py diff --git a/docs/DeploymentGuide.md b/docs/DeploymentGuide.md index 9fe502c90..823452049 100644 --- a/docs/DeploymentGuide.md +++ b/docs/DeploymentGuide.md @@ -116,7 +116,7 @@ When you start the deployment, most parameters will have **default values**, but | **Embedding Model** | OpenAI embedding model used for vector similarity. | `text-embedding-ada-002` | | **Embedding Model Capacity** | Set the capacity for **embedding models**. Choose based on usage and quota. | `80` | | **Image Tag** | The version of the Docker image to use (e.g., `latest`, `dev`, `hotfix`). | `latest` | -| **Azure OpenAI API Version** | Set the API version for OpenAI model deployments. | `2025-01-01-preview` | +| **Azure OpenAI API Version** | Set the API version for OpenAI model deployments. | `2025-04-01-preview` | | **AZURE\_LOCATION** | Sets the Azure region for resource deployment. | `japaneast` | | **Existing Log Analytics Workspace** | To reuse an existing Log Analytics Workspace ID instead of creating a new one. 
| *(empty)* | @@ -211,7 +211,7 @@ This will rebuild the source code, package it into a container, and push it to t ``` if you don't have azd env then you need to pass parameters along with the command. Then the command will look like the following: ```shell - bash ./infra/scripts/process_sample_data.sh + bash ./infra/scripts/process_sample_data.sh ``` 2. **Add Authentication Provider** diff --git a/infra/abbreviations.json b/infra/abbreviations.json index d28fd8252..6859d0acf 100644 --- a/infra/abbreviations.json +++ b/infra/abbreviations.json @@ -1,6 +1,8 @@ { "ai": { "aiSearch": "srch-", + "aiFoundry": "aif-", + "aiFoundryProject": "aifp-", "aiServices": "aisa-", "aiVideoIndexer": "avi-", "machineLearningWorkspace": "mlw-", diff --git a/infra/deploy_ai_foundry.bicep b/infra/deploy_ai_foundry.bicep index 4ba89548e..43a713c71 100644 --- a/infra/deploy_ai_foundry.bicep +++ b/infra/deploy_ai_foundry.bicep @@ -8,24 +8,18 @@ param azureOpenaiAPIVersion string param gptDeploymentCapacity int param embeddingModel string param embeddingDeploymentCapacity int -param managedIdentityObjectId string param existingLogAnalyticsWorkspaceId string = '' // Load the abbrevations file required to name the azure resources. 
var abbrs = loadJsonContent('./abbreviations.json') -var storageName = '${abbrs.storage.storageAccount}${solutionName}hub' -var storageSkuName = 'Standard_LRS' -var aiServicesName = '${abbrs.ai.aiServices}${solutionName}' +var aiFoundryName = '${abbrs.ai.aiFoundry}${solutionName}' var applicationInsightsName = '${abbrs.managementGovernance.applicationInsights}${solutionName}' -var containerRegistryName = '${abbrs.containers.containerRegistry}${solutionName}' var keyvaultName = keyVaultName var location = solutionLocation //'eastus2' -var aiHubName = '${abbrs.ai.aiHub}${solutionName}-hub' -var aiHubFriendlyName = aiHubName -var aiHubDescription = 'AI Hub' -var aiProjectName = '${abbrs.ai.aiHubProject}${solutionName}' +var aiProjectName = '${abbrs.ai.aiFoundryProject}${solutionName}' var aiProjectFriendlyName = aiProjectName +var aiProjectDescription = 'AI Foundry Project' var aiSearchName = '${abbrs.ai.aiSearch}${solutionName}' var workspaceName = '${abbrs.managementGovernance.logAnalyticsWorkspace}${solutionName}' var aiModelDeployments = [ @@ -49,8 +43,6 @@ var aiModelDeployments = [ } ] -var containerRegistryNameCleaned = replace(containerRegistryName, '-', '') - resource keyVault 'Microsoft.KeyVault/vaults@2022-07-01' existing = { name: keyVaultName } @@ -108,75 +100,46 @@ resource applicationInsights 'Microsoft.Insights/components@2020-02-02' = { } } -resource containerRegistry 'Microsoft.ContainerRegistry/registries@2021-09-01' = { - name: containerRegistryNameCleaned +resource aiFoundry 'Microsoft.CognitiveServices/accounts@2025-04-01-preview' = { + name: aiFoundryName location: location sku: { - name: 'Premium' + name: 'S0' + } + kind: 'AIServices' + identity: { + type: 'SystemAssigned' } properties: { - adminUserEnabled: true - dataEndpointEnabled: false - networkRuleBypassOptions: 'AzureServices' - networkRuleSet: { - defaultAction: 'Deny' - } - policies: { - quarantinePolicy: { - status: 'enabled' - } - retentionPolicy: { - status: 'enabled' - days: 7 - } 
- trustPolicy: { - status: 'disabled' - type: 'Notary' - } + allowProjectManagement: true + customSubDomainName: aiFoundryName + networkAcls: { + defaultAction: 'Allow' + virtualNetworkRules: [] + ipRules: [] } - publicNetworkAccess: 'Disabled' - zoneRedundancy: 'Disabled' + publicNetworkAccess: 'Enabled' + disableLocalAuth: false } } -var storageNameCleaned = replace(storageName, '-', '') - -resource aiServices 'Microsoft.CognitiveServices/accounts@2024-04-01-preview' = { - name: aiServicesName +resource aiFoundryProject 'Microsoft.CognitiveServices/accounts/projects@2025-04-01-preview' = { + parent: aiFoundry + name: aiProjectName location: location - sku: { - name: 'S0' + identity: { + type: 'SystemAssigned' } - kind: 'AIServices' properties: { - customSubDomainName: aiServicesName - // apiProperties: { - // statisticsEnabled: false - // } - publicNetworkAccess: 'Enabled' + description: aiProjectDescription + displayName: aiProjectFriendlyName } } -// resource aiServices 'Microsoft.CognitiveServices/accounts@2021-10-01' = { -// name: aiServicesName -// location: location -// sku: { -// name: 'S0' -// } -// kind: 'AIServices' -// properties: { -// customSubDomainName: aiServicesName -// // apiProperties: { -// // statisticsEnabled: false -// // } -// publicNetworkAccess: 'Enabled' -// } -// } - @batchSize(1) -resource aiServicesDeployments 'Microsoft.CognitiveServices/accounts/deployments@2023-05-01' = [ +resource aiFModelDeployments 'Microsoft.CognitiveServices/accounts/deployments@2023-05-01' = [ for aiModeldeployment in aiModelDeployments: { - parent: aiServices //aiServices_m + parent: aiFoundry name: aiModeldeployment.name properties: { model: { @@ -192,12 +155,15 @@ resource aiServicesDeployments 'Microsoft.CognitiveServices/accounts/deployments } ] -resource aiSearch 'Microsoft.Search/searchServices@2023-11-01' = { +resource aiSearch 'Microsoft.Search/searchServices@2025-02-01-preview' = { name: aiSearchName location: solutionLocation sku: { name: 'basic' 
} + identity: { + type: 'SystemAssigned' + } properties: { replicaCount: 1 partitionCount: 1 @@ -211,177 +177,87 @@ resource aiSearch 'Microsoft.Search/searchServices@2023-11-01' = { } disableLocalAuth: false authOptions: { - apiKeyOnly: {} + aadOrApiKey: { + aadAuthFailureMode: 'http403' + } } semanticSearch: 'free' } } -resource storage 'Microsoft.Storage/storageAccounts@2022-09-01' = { - name: storageNameCleaned - location: location - sku: { - name: storageSkuName - } - kind: 'StorageV2' +resource aiSearchFoundryConnection 'Microsoft.CognitiveServices/accounts/connections@2025-04-01-preview' ={ + name: 'foundry-search-connection' + parent: aiFoundry properties: { - accessTier: 'Hot' - allowBlobPublicAccess: false - allowCrossTenantReplication: false - allowSharedKeyAccess: false - encryption: { - keySource: 'Microsoft.Storage' - requireInfrastructureEncryption: false - services: { - blob: { - enabled: true - keyType: 'Account' - } - file: { - enabled: true - keyType: 'Account' - } - queue: { - enabled: true - keyType: 'Service' - } - table: { - enabled: true - keyType: 'Service' - } - } - } - isHnsEnabled: false - isNfsV3Enabled: false - keyPolicy: { - keyExpirationPeriodInDays: 7 - } - largeFileSharesState: 'Disabled' - minimumTlsVersion: 'TLS1_2' - networkAcls: { - bypass: 'AzureServices' - defaultAction: 'Allow' + category: 'CognitiveSearch' + target: aiSearch.properties.endpoint + authType: 'AAD' + isSharedToAll: true + metadata: { + ApiType: 'Azure' + ResourceId: aiSearch.id + location: aiSearch.location } - supportsHttpsTrafficOnly: true } } -@description('This is the built-in Storage Blob Data Contributor.') -resource blobDataContributor 'Microsoft.Authorization/roleDefinitions@2018-01-01-preview' existing = { - scope: resourceGroup() - name: 'ba92f5b4-2d11-453d-a403-e96b0029c9fe' +@description('This is the built-in Search Index Data Reader role.') +resource searchIndexDataReaderRoleDefinition 'Microsoft.Authorization/roleDefinitions@2022-04-01' existing 
= { + scope: aiSearch + name: '1407120a-92aa-4202-b7e9-c0e197c71c8f' } -resource storageroleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = { - name: guid(resourceGroup().id, managedIdentityObjectId, blobDataContributor.id) +resource searchIndexDataReaderRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = { + name: guid(aiSearch.id, aiFoundry.id, searchIndexDataReaderRoleDefinition.id) + scope: aiSearch properties: { - principalId: managedIdentityObjectId - roleDefinitionId: blobDataContributor.id + roleDefinitionId: searchIndexDataReaderRoleDefinition.id + principalId: aiFoundry.identity.principalId principalType: 'ServicePrincipal' } } -resource aiHub 'Microsoft.MachineLearningServices/workspaces@2023-08-01-preview' = { - name: aiHubName - location: location - identity: { - type: 'SystemAssigned' - } - properties: { - // organization - friendlyName: aiHubFriendlyName - description: aiHubDescription - - // dependent resources - keyVault: keyVault.id - storageAccount: storage.id - applicationInsights: applicationInsights.id - containerRegistry: containerRegistry.id - } - kind: 'hub' - - resource aiServicesConnection 'connections@2024-07-01-preview' = { - name: '${aiHubName}-connection-AzureOpenAI' - properties: { - category: 'AIServices' - target: aiServices.properties.endpoint - authType: 'ApiKey' - isSharedToAll: true - credentials: { - key: aiServices.listKeys().key1 - } - metadata: { - ApiType: 'Azure' - ResourceId: aiServices.id - } - } - dependsOn: [ - aiServicesDeployments - aiSearch - ] - } - - resource aiSearchConnection 'connections@2024-07-01-preview' = { - name: '${aiHubName}-connection-AzureAISearch' - properties: { - category: 'CognitiveSearch' - target: 'https://${aiSearch.name}.search.windows.net' - authType: 'ApiKey' - isSharedToAll: true - credentials: { - key: aiSearch.listAdminKeys().primaryKey - } - metadata: { - type: 'azure_ai_search' - ApiType: 'Azure' - ResourceId: aiSearch.id - ApiVersion: 
'2024-05-01-preview' - DeploymentApiVersion: '2023-11-01' - } - } - } - dependsOn: [ - aiServicesDeployments - aiSearch - ] +@description('This is the built-in Search Service Contributor role.') +resource searchServiceContributorRoleDefinition 'Microsoft.Authorization/roleDefinitions@2022-04-01' existing = { + scope: aiSearch + name: '7ca78c08-252a-4471-8644-bb5ff32d4ba0' } -resource aiHubProject 'Microsoft.MachineLearningServices/workspaces@2024-01-01-preview' = { - name: aiProjectName - location: location - kind: 'Project' - identity: { - type: 'SystemAssigned' - } - properties: { - friendlyName: aiProjectFriendlyName - hubResourceId: aiHub.id - } -} - -resource tenantIdEntry 'Microsoft.KeyVault/vaults/secrets@2021-11-01-preview' = { - parent: keyVault - name: 'TENANT-ID' +resource searchServiceContributorRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = { + name: guid(aiSearch.id, aiFoundry.id, searchServiceContributorRoleDefinition.id) + scope: aiSearch properties: { - value: subscription().tenantId + roleDefinitionId: searchServiceContributorRoleDefinition.id + principalId: aiFoundry.identity.principalId + principalType: 'ServicePrincipal' } } -resource azureOpenAIApiKeyEntry 'Microsoft.KeyVault/vaults/secrets@2021-11-01-preview' = { - parent: keyVault - name: 'AZURE-OPENAI-KEY' +resource appInsightsFoundryConnection 'Microsoft.CognitiveServices/accounts/connections@2025-04-01-preview' = { + name: 'foundry-app-insights-connection' + parent: aiFoundry properties: { - value: aiServices.listKeys().key1 //aiServices_m.listKeys().key1 + category: 'AppInsights' + target: applicationInsights.id + authType: 'ApiKey' + isSharedToAll: true + credentials: { + key: applicationInsights.properties.ConnectionString + } + metadata: { + ApiType: 'Azure' + ResourceId: applicationInsights.id + } } } -resource azureOpenAIDeploymentModel 'Microsoft.KeyVault/vaults/secrets@2021-11-01-preview' = { - parent: keyVault - name: 'AZURE-OPEN-AI-DEPLOYMENT-MODEL' - 
properties: { - value: gptModelName - } -} +// resource azureOpenAIApiKeyEntry 'Microsoft.KeyVault/vaults/secrets@2021-11-01-preview' = { +// parent: keyVault +// name: 'AZURE-OPENAI-KEY' +// properties: { +// value: aiFoundry.listKeys().key1 //aiServices_m.listKeys().key1 +// } +// } resource azureOpenAIApiVersionEntry 'Microsoft.KeyVault/vaults/secrets@2021-11-01-preview' = { parent: keyVault @@ -395,25 +271,25 @@ resource azureOpenAIEndpointEntry 'Microsoft.KeyVault/vaults/secrets@2021-11-01- parent: keyVault name: 'AZURE-OPENAI-ENDPOINT' properties: { - value: aiServices.properties.endpoint //aiServices_m.properties.endpoint + value: aiFoundry.properties.endpoints['OpenAI Language Model Instance API'] //aiServices_m.properties.endpoint } } -resource azureAIProjectConnectionStringEntry 'Microsoft.KeyVault/vaults/secrets@2021-11-01-preview' = { +resource azureOpenAIEmbeddingModelEntry 'Microsoft.KeyVault/vaults/secrets@2021-11-01-preview' = { parent: keyVault - name: 'AZURE-AI-PROJECT-CONN-STRING' + name: 'AZURE-OPENAI-EMBEDDING-MODEL' properties: { - value: '${split(aiHubProject.properties.discoveryUrl, '/')[2]};${subscription().subscriptionId};${resourceGroup().name};${aiHubProject.name}' + value: embeddingModel } } -resource azureSearchAdminKeyEntry 'Microsoft.KeyVault/vaults/secrets@2021-11-01-preview' = { - parent: keyVault - name: 'AZURE-SEARCH-KEY' - properties: { - value: aiSearch.listAdminKeys().primaryKey - } -} +// resource azureSearchAdminKeyEntry 'Microsoft.KeyVault/vaults/secrets@2021-11-01-preview' = { +// parent: keyVault +// name: 'AZURE-SEARCH-KEY' +// properties: { +// value: aiSearch.listAdminKeys().primaryKey +// } +// } resource azureSearchServiceEndpointEntry 'Microsoft.KeyVault/vaults/secrets@2021-11-01-preview' = { parent: keyVault @@ -423,14 +299,6 @@ resource azureSearchServiceEndpointEntry 'Microsoft.KeyVault/vaults/secrets@2021 } } -resource azureSearchServiceEntry 'Microsoft.KeyVault/vaults/secrets@2021-11-01-preview' = { - parent: 
keyVault - name: 'AZURE-SEARCH-SERVICE' - properties: { - value: aiSearch.name - } -} - resource azureSearchIndexEntry 'Microsoft.KeyVault/vaults/secrets@2021-11-01-preview' = { parent: keyVault name: 'AZURE-SEARCH-INDEX' @@ -439,72 +307,24 @@ resource azureSearchIndexEntry 'Microsoft.KeyVault/vaults/secrets@2021-11-01-pre } } -resource cogServiceEndpointEntry 'Microsoft.KeyVault/vaults/secrets@2021-11-01-preview' = { - parent: keyVault - name: 'COG-SERVICES-ENDPOINT' - properties: { - value: aiServices.properties.endpoint - } -} - -resource cogServiceKeyEntry 'Microsoft.KeyVault/vaults/secrets@2021-11-01-preview' = { - parent: keyVault - name: 'COG-SERVICES-KEY' - properties: { - value: aiServices.listKeys().key1 - } -} - -resource cogServiceNameEntry 'Microsoft.KeyVault/vaults/secrets@2021-11-01-preview' = { - parent: keyVault - name: 'COG-SERVICES-NAME' - properties: { - value: aiServicesName - } -} - -resource azureSubscriptionIdEntry 'Microsoft.KeyVault/vaults/secrets@2021-11-01-preview' = { - parent: keyVault - name: 'AZURE-SUBSCRIPTION-ID' - properties: { - value: subscription().subscriptionId - } -} - -resource resourceGroupNameEntry 'Microsoft.KeyVault/vaults/secrets@2021-11-01-preview' = { - parent: keyVault - name: 'AZURE-RESOURCE-GROUP' - properties: { - value: resourceGroup().name - } -} - -resource azureLocatioEntry 'Microsoft.KeyVault/vaults/secrets@2021-11-01-preview' = { - parent: keyVault - name: 'AZURE-LOCATION' - properties: { - value: solutionLocation - } -} - output keyvaultName string = keyvaultName output keyvaultId string = keyVault.id -output aiServicesTarget string = aiServices.properties.endpoint //aiServices_m.properties.endpoint -output aiServicesName string = aiServicesName //aiServicesName_m -output aiServicesId string = aiServices.id //aiServices_m.id +output aiFoundryProjectEndpoint string = aiFoundryProject.properties.endpoints['AI Foundry API'] +output aiServicesTarget string = aiFoundry.properties.endpoint 
//aiServices_m.properties.endpoint +output aoaiEndpoint string = aiFoundry.properties.endpoints['OpenAI Language Model Instance API'] //aiServices_m.properties.endpoint +output aiFoundryName string = aiFoundryName //aiServicesName_m +output aiFoundryId string = aiFoundry.id //aiServices_m.id output aiSearchName string = aiSearchName output aiSearchId string = aiSearch.id output aiSearchTarget string = 'https://${aiSearch.name}.search.windows.net' output aiSearchService string = aiSearch.name -output aiProjectName string = aiHubProject.name +output aiFoundryProjectName string = aiFoundryProject.name output applicationInsightsId string = applicationInsights.id output logAnalyticsWorkspaceResourceName string = useExisting ? existingLogAnalyticsWorkspace.name : logAnalytics.name output logAnalyticsWorkspaceResourceGroup string = useExisting ? existingLawResourceGroup : resourceGroup().name -output storageAccountName string = storageNameCleaned output applicationInsightsConnectionString string = applicationInsights.properties.ConnectionString - diff --git a/infra/deploy_app_service.bicep b/infra/deploy_app_service.bicep index d06cf2f74..3ad3b0ff2 100644 --- a/infra/deploy_app_service.bicep +++ b/infra/deploy_app_service.bicep @@ -22,10 +22,6 @@ param AzureSearchService string = '' @description('Name of Azure Search Index') param AzureSearchIndex string = '' -@description('Azure Search Admin Key') -@secure() -param AzureSearchKey string = '' - @description('Use semantic search') param AzureSearchUseSemanticSearch string = 'False' @@ -59,10 +55,6 @@ param AzureOpenAIModel string @description('Azure Open AI Endpoint') param AzureOpenAIEndpoint string = '' -@description('Azure OpenAI Key') -@secure() -param AzureOpenAIKey string - @description('Azure OpenAI Temperature') param AzureOpenAITemperature string = '0' @@ -103,10 +95,6 @@ param AzureSearchStrictness string = '3' @description('Azure OpenAI Embedding Deployment Name') param AzureOpenAIEmbeddingName string = '' 
-@description('Azure Open AI Embedding Key') -@secure() -param AzureOpenAIEmbeddingkey string = '' - @description('Azure Open AI Embedding Endpoint') param AzureOpenAIEmbeddingEndpoint string = '' @@ -119,20 +107,9 @@ param SQLDB_SERVER string = '' @description('SQL Database Name') param SQLDB_DATABASE string = '' -@description('SQL Database Username') -param SQLDB_USERNAME string = '' - -@description('SQL Database Password') -@secure() -param SQLDB_PASSWORD string = '' - @description('Azure Cosmos DB Account') param AZURE_COSMOSDB_ACCOUNT string = '' -// @description('Azure Cosmos DB Account Key') -// @secure() -// param AZURE_COSMOSDB_ACCOUNT_KEY string = '' - @description('Azure Cosmos DB Conversations Container') param AZURE_COSMOSDB_CONVERSATIONS_CONTAINER string = '' @@ -160,10 +137,10 @@ param callTranscriptSystemPrompt string @description('Azure Function App Stream Text System Prompt') param streamTextSystemPrompt string -@secure() -param aiProjectConnectionString string +param aiFoundryProjectEndpoint string param useAIProjectClientFlag string = 'false' -param aiProjectName string +param aiFoundryProjectName string +param aiFoundryName string param applicationInsightsConnectionString string // var WebAppImageName = 'DOCKER|byoaiacontainer.azurecr.io/byoaia-app:latest' @@ -214,10 +191,6 @@ resource Website 'Microsoft.Web/sites@2020-06-01' = { name: 'AZURE_SEARCH_INDEX' value: AzureSearchIndex } - { - name: 'AZURE_SEARCH_KEY' - value: AzureSearchKey - } { name: 'AZURE_SEARCH_USE_SEMANTIC_SEARCH' value: AzureSearchUseSemanticSearch @@ -262,10 +235,6 @@ resource Website 'Microsoft.Web/sites@2020-06-01' = { name: 'AZURE_OPENAI_ENDPOINT' value: AzureOpenAIEndpoint } - { - name: 'AZURE_OPENAI_KEY' - value: AzureOpenAIKey - } { name: 'AZURE_OPENAI_TEMPERATURE' value: AzureOpenAITemperature @@ -314,47 +283,36 @@ resource Website 'Microsoft.Web/sites@2020-06-01' = { name: 'AZURE_OPENAI_EMBEDDING_NAME' value: AzureOpenAIEmbeddingName } - - { - name: 
'AZURE_OPENAI_EMBEDDING_KEY' - value: AzureOpenAIEmbeddingkey - } - { name: 'AZURE_OPENAI_EMBEDDING_ENDPOINT' value: AzureOpenAIEmbeddingEndpoint } - - {name: 'SQLDB_SERVER' + { + name: 'SQLDB_SERVER' value: SQLDB_SERVER } - - {name: 'SQLDB_DATABASE' + { + name: 'SQLDB_DATABASE' value: SQLDB_DATABASE } - - {name: 'SQLDB_USERNAME' - value: SQLDB_USERNAME - } - - {name: 'SQLDB_PASSWORD' - value: SQLDB_PASSWORD - } - - {name: 'USE_INTERNAL_STREAM' + { + name: 'USE_INTERNAL_STREAM' value: USE_INTERNAL_STREAM } - - {name: 'AZURE_COSMOSDB_ACCOUNT' + { + name: 'AZURE_COSMOSDB_ACCOUNT' value: AZURE_COSMOSDB_ACCOUNT } - {name: 'AZURE_COSMOSDB_CONVERSATIONS_CONTAINER' + { + name: 'AZURE_COSMOSDB_CONVERSATIONS_CONTAINER' value: AZURE_COSMOSDB_CONVERSATIONS_CONTAINER } - {name: 'AZURE_COSMOSDB_DATABASE' + { + name: 'AZURE_COSMOSDB_DATABASE' value: AZURE_COSMOSDB_DATABASE } - {name: 'AZURE_COSMOSDB_ENABLE_FEEDBACK' + { + name: 'AZURE_COSMOSDB_ENABLE_FEEDBACK' value: AZURE_COSMOSDB_ENABLE_FEEDBACK } //{name: 'VITE_POWERBI_EMBED_URL' @@ -368,10 +326,6 @@ resource Website 'Microsoft.Web/sites@2020-06-01' = { name: 'AZURE_AI_SEARCH_ENDPOINT' value: azureSearchServiceEndpoint } - { - name: 'SQLDB_CONNECTION_STRING' - value: 'TBD' - } { name: 'AZURE_SQL_SYSTEM_PROMPT' value: sqlSystemPrompt @@ -384,14 +338,22 @@ resource Website 'Microsoft.Web/sites@2020-06-01' = { name: 'AZURE_OPENAI_STREAM_TEXT_SYSTEM_PROMPT' value: streamTextSystemPrompt } - { - name: 'AZURE_AI_PROJECT_CONN_STRING' - value: aiProjectConnectionString - } { name: 'USE_AI_PROJECT_CLIENT' value: useAIProjectClientFlag } + { + name: 'AZURE_AI_AGENT_ENDPOINT' + value: aiFoundryProjectEndpoint + } + { + name: 'AZURE_AI_AGENT_MODEL_DEPLOYMENT_NAME' + value: AzureOpenAIModel + } + { + name: 'AZURE_AI_AGENT_API_VERSION' + value: AzureOpenAIApiVersion + } ] linuxFxVersion: WebAppImageName } @@ -428,20 +390,44 @@ module cosmosUserRole 'core/database/cosmos/cosmos-role-assign.bicep' = { ] } -resource aiHubProject 
'Microsoft.MachineLearningServices/workspaces@2024-01-01-preview' existing = { - name: aiProjectName +resource aiFoundry 'Microsoft.CognitiveServices/accounts@2025-04-01-preview' existing = { + name: aiFoundryName +} + +resource aiFoundryProject 'Microsoft.CognitiveServices/accounts/projects@2025-04-01-preview' existing = { + parent: aiFoundry + name: aiFoundryProjectName +} + +@description('This is the built-in Azure AI User role.') +resource aiUserRoleDefinitionFoundry 'Microsoft.Authorization/roleDefinitions@2022-04-01' existing = { + scope: aiFoundry + name: '53ca6127-db72-4b80-b1b0-d745d6d5456d' +} + +resource aiUserRoleAssignmentFoundry 'Microsoft.Authorization/roleAssignments@2022-04-01' = { + name: guid(Website.id, aiFoundry.id, aiUserRoleDefinitionFoundry.id) + scope: aiFoundry + properties: { + roleDefinitionId: aiUserRoleDefinitionFoundry.id + principalId: Website.identity.principalId + principalType: 'ServicePrincipal' + } } -resource aiDeveloper 'Microsoft.Authorization/roleDefinitions@2022-04-01' existing = { - name: '64702f94-c441-49e6-a78b-ef80e0188fee' +@description('This is the built-in Azure AI User role.') +resource aiUserRoleDefinitionFoundryProject 'Microsoft.Authorization/roleDefinitions@2022-04-01' existing = { + scope: aiFoundryProject + name: '53ca6127-db72-4b80-b1b0-d745d6d5456d' } -resource aiDeveloperAccessProj 'Microsoft.Authorization/roleAssignments@2022-04-01' = { - name: guid(Website.name, aiHubProject.id, aiDeveloper.id) - scope: aiHubProject +resource aiUserRoleAssignmentFoundryProject 'Microsoft.Authorization/roleAssignments@2022-04-01' = { + name: guid(Website.id, aiFoundryProject.id, aiUserRoleDefinitionFoundryProject.id) + scope: aiFoundryProject properties: { - roleDefinitionId: aiDeveloper.id + roleDefinitionId: aiUserRoleDefinitionFoundryProject.id principalId: Website.identity.principalId + principalType: 'ServicePrincipal' } } diff --git a/infra/deploy_cosmos_db.bicep b/infra/deploy_cosmos_db.bicep index 
6b26f820a..4a3f29198 100644 --- a/infra/deploy_cosmos_db.bicep +++ b/infra/deploy_cosmos_db.bicep @@ -2,7 +2,6 @@ param solutionLocation string @description('Name') param cosmosDBName string -param kvName string param databaseName string = 'db_conversation_history' param collectionName string = 'conversations' @@ -65,50 +64,6 @@ resource database 'Microsoft.DocumentDB/databaseAccounts/sqlDatabases@2022-05-15 ] } -resource keyVault 'Microsoft.KeyVault/vaults@2022-07-01' existing = { - name: kvName -} - -resource AZURE_COSMOSDB_ACCOUNT 'Microsoft.KeyVault/vaults/secrets@2021-11-01-preview' = { - parent: keyVault - name: 'AZURE-COSMOSDB-ACCOUNT' - properties: { - value: cosmos.name - } -} - -resource AZURE_COSMOSDB_ACCOUNT_KEY 'Microsoft.KeyVault/vaults/secrets@2021-11-01-preview' = { - parent: keyVault - name: 'AZURE-COSMOSDB-ACCOUNT-KEY' - properties: { - value: cosmos.listKeys().primaryMasterKey - } -} - -resource AZURE_COSMOSDB_DATABASE 'Microsoft.KeyVault/vaults/secrets@2021-11-01-preview' = { - parent: keyVault - name: 'AZURE-COSMOSDB-DATABASE' - properties: { - value: databaseName - } -} - -resource AZURE_COSMOSDB_CONVERSATIONS_CONTAINER 'Microsoft.KeyVault/vaults/secrets@2021-11-01-preview' = { - parent: keyVault - name: 'AZURE-COSMOSDB-CONVERSATIONS-CONTAINER' - properties: { - value: collectionName - } -} - -resource AZURE_COSMOSDB_ENABLE_FEEDBACK 'Microsoft.KeyVault/vaults/secrets@2021-11-01-preview' = { - parent: keyVault - name: 'AZURE-COSMOSDB-ENABLE-FEEDBACK' - properties: { - value: 'True' - } -} - output cosmosAccountName string = cosmos.name output cosmosDatabaseName string = databaseName output cosmosContainerName string = collectionName diff --git a/infra/deploy_sql_db.bicep b/infra/deploy_sql_db.bicep index f81957ade..669ddb31c 100644 --- a/infra/deploy_sql_db.bicep +++ b/infra/deploy_sql_db.bicep @@ -12,14 +12,6 @@ param sqlDBName string @description('Location for all resources.') param location string = solutionLocation -@description('The 
administrator username of the SQL logical server.') -@secure() -param administratorLogin string = 'sqladmin' - -@description('The administrator password of the SQL logical server.') -@secure() -param administratorLoginPassword string = 'TestPassword_1234' - resource sqlServer 'Microsoft.Sql/servers@2023-08-01-preview' = { name: serverName @@ -98,22 +90,6 @@ resource sqldbDatabaseEntry 'Microsoft.KeyVault/vaults/secrets@2021-11-01-previe } } -resource sqldbDatabaseUsername 'Microsoft.KeyVault/vaults/secrets@2021-11-01-preview' = { - parent: keyVault - name: 'SQLDB-USERNAME' - properties: { - value: administratorLogin - } -} - -resource sqldbDatabasePwd 'Microsoft.KeyVault/vaults/secrets@2021-11-01-preview' = { - parent: keyVault - name: 'SQLDB-PASSWORD' - properties: { - value: administratorLoginPassword - } -} - output sqlServerName string = serverName output sqlDbName string = sqlDBName // output sqlDbUser string = administratorLogin diff --git a/infra/deploy_storage_account.bicep b/infra/deploy_storage_account.bicep index 05d834dfe..f9f8f9f1a 100644 --- a/infra/deploy_storage_account.bicep +++ b/infra/deploy_storage_account.bicep @@ -91,7 +91,6 @@ resource roleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = { } -var storageAccountKeys = listKeys(storageAccounts_resource.id, '2021-04-01') //var storageAccountString = 'DefaultEndpointsProtocol=https;AccountName=${storageAccounts_resource.name};AccountKey=${storageAccounts_resource.listKeys().keys[0].value};EndpointSuffix=${environment().suffixes.storage}' resource keyVault 'Microsoft.KeyVault/vaults@2022-07-01' existing = { @@ -114,13 +113,5 @@ resource adlsAccountContainerEntry 'Microsoft.KeyVault/vaults/secrets@2021-11-01 } } -resource adlsAccountKeyEntry 'Microsoft.KeyVault/vaults/secrets@2021-11-01-preview' = { - parent: keyVault - name: 'ADLS-ACCOUNT-KEY' - properties: { - value: storageAccountKeys.keys[0].value - } -} - output storageName string = saName output storageContainer string = 
'data' diff --git a/infra/main.bicep b/infra/main.bicep index a11faf2cc..4e7f4de11 100644 --- a/infra/main.bicep +++ b/infra/main.bicep @@ -27,7 +27,7 @@ param deploymentType string = 'GlobalStandard' ]) param gptModelName string = 'gpt-4o-mini' -param azureOpenaiAPIVersion string = '2025-01-01-preview' +param azureOpenaiAPIVersion string = '2025-04-01-preview' @minValue(10) @description('Capacity of the GPT deployment:') @@ -99,12 +99,11 @@ var functionAppCallTranscriptSystemPrompt = '''You are an assistant who supports When answering questions, especially summary requests, provide a detailed and structured response that includes key topics, concerns, decisions, and trends. If no data is available, state 'No relevant data found for previous meetings.''' -var functionAppStreamTextSystemPrompt = '''You are a helpful assistant to a Wealth Advisor. - The currently selected client's name is '{SelectedClientName}', and any case-insensitive or partial mention should be understood as referring to this client. - If no name is provided, assume the question is about '{SelectedClientName}'. - If the query references a different client or includes comparative terms like 'compare' or 'other client', please respond with: 'Please only ask questions about the selected client or select another client.' - Otherwise, provide thorough answers using only data from SQL or call transcripts. - If no data is found, please respond with 'No data found for that client.' Remove any client identifiers from the final response.''' +var functionAppStreamTextSystemPrompt = '''The currently selected client's name is '{SelectedClientName}'. Treat any case-insensitive or partial mention as referring to this client. + If the user mentions no name, assume they are asking about '{SelectedClientName}'.. + If the user references a name that clearly differs from '{SelectedClientName}', respond only with: 'Please only ask questions about the selected client or select another client.' 
Otherwise, provide thorough answers for every question using only data from SQL or call transcripts.' + If no data is found, respond with 'No data found for that client.' Remove any client identifiers from the final response. + Always send clientId as '{client_id}'.''' // ========== Managed Identity ========== // module managedIdentityModule 'deploy_managed_identity.bicep' = { @@ -142,7 +141,6 @@ module aifoundry 'deploy_ai_foundry.bicep' = { gptDeploymentCapacity: gptDeploymentCapacity embeddingModel: embeddingModel embeddingDeploymentCapacity: embeddingDeploymentCapacity - managedIdentityObjectId:managedIdentityModule.outputs.managedIdentityOutput.objectId existingLogAnalyticsWorkspaceId: existingLogAnalyticsWorkspaceId } scope: resourceGroup(resourceGroup().name) @@ -154,7 +152,6 @@ module cosmosDBModule 'deploy_cosmos_db.bicep' = { params: { solutionLocation: cosmosLocation cosmosDBName:'${abbrs.databases.cosmosDBDatabase}${solutionPrefix}' - kvName: keyvaultModule.outputs.keyvaultName } scope: resourceGroup(resourceGroup().name) } @@ -201,7 +198,6 @@ module appserviceModule 'deploy_app_service.bicep' = { WebsiteName: '${abbrs.compute.webApp}${solutionPrefix}' AzureSearchService:aifoundry.outputs.aiSearchService AzureSearchIndex:'transcripts_index' - AzureSearchKey:keyVault.getSecret('AZURE-SEARCH-KEY') AzureSearchUseSemanticSearch:'True' AzureSearchSemanticSearchConfig:'my-semantic-config' AzureSearchTopK:'5' @@ -209,10 +205,9 @@ module appserviceModule 'deploy_app_service.bicep' = { AzureSearchFilenameColumn:'chunk_id' AzureSearchTitleColumn:'client_id' AzureSearchUrlColumn:'sourceurl' - AzureOpenAIResource:aifoundry.outputs.aiServicesName - AzureOpenAIEndpoint:aifoundry.outputs.aiServicesTarget + AzureOpenAIResource:aifoundry.outputs.aiFoundryName + AzureOpenAIEndpoint:aifoundry.outputs.aoaiEndpoint AzureOpenAIModel:gptModelName - AzureOpenAIKey:keyVault.getSecret('AZURE-OPENAI-KEY') AzureOpenAITemperature:'0' AzureOpenAITopP:'1' AzureOpenAIMaxTokens:'1000' 
@@ -225,13 +220,10 @@ module appserviceModule 'deploy_app_service.bicep' = { AzureSearchPermittedGroupsField:'' AzureSearchStrictness:'3' AzureOpenAIEmbeddingName:embeddingModel - AzureOpenAIEmbeddingkey:keyVault.getSecret('AZURE-OPENAI-KEY') - AzureOpenAIEmbeddingEndpoint:aifoundry.outputs.aiServicesTarget + AzureOpenAIEmbeddingEndpoint:aifoundry.outputs.aoaiEndpoint USE_INTERNAL_STREAM:'True' SQLDB_SERVER:'${sqlDBModule.outputs.sqlServerName}.database.windows.net' SQLDB_DATABASE:sqlDBModule.outputs.sqlDbName - SQLDB_USERNAME:'sqladmin' - SQLDB_PASSWORD:keyVault.getSecret('SQLDB-PASSWORD') AZURE_COSMOSDB_ACCOUNT: cosmosDBModule.outputs.cosmosAccountName AZURE_COSMOSDB_CONVERSATIONS_CONTAINER: cosmosDBModule.outputs.cosmosContainerName AZURE_COSMOSDB_DATABASE: cosmosDBModule.outputs.cosmosDatabaseName @@ -245,8 +237,9 @@ module appserviceModule 'deploy_app_service.bicep' = { sqlSystemPrompt: functionAppSqlPrompt callTranscriptSystemPrompt: functionAppCallTranscriptSystemPrompt streamTextSystemPrompt: functionAppStreamTextSystemPrompt - aiProjectConnectionString:keyVault.getSecret('AZURE-AI-PROJECT-CONN-STRING') - aiProjectName:aifoundry.outputs.aiProjectName + aiFoundryProjectName:aifoundry.outputs.aiFoundryProjectName + aiFoundryProjectEndpoint: aifoundry.outputs.aiFoundryProjectEndpoint + aiFoundryName: aifoundry.outputs.aiFoundryName applicationInsightsConnectionString:aifoundry.outputs.applicationInsightsConnectionString } scope: resourceGroup(resourceGroup().name) @@ -262,3 +255,5 @@ output SQLDB_SERVER string = sqlDBModule.outputs.sqlServerName output SQLDB_DATABASE string = sqlDBModule.outputs.sqlDbName output MANAGEDINDENTITY_WEBAPP_NAME string = managedIdentityModule.outputs.managedIdentityWebAppOutput.name output MANAGEDINDENTITY_WEBAPP_CLIENTID string = managedIdentityModule.outputs.managedIdentityWebAppOutput.clientId +output AI_FOUNDARY_NAME string = aifoundry.outputs.aiFoundryName +output AI_SEARCH_SERVICE_NAME string = 
aifoundry.outputs.aiSearchService diff --git a/infra/main.bicepparam b/infra/main.bicepparam index d61275246..f0ed4b2ca 100644 --- a/infra/main.bicepparam +++ b/infra/main.bicepparam @@ -4,7 +4,7 @@ param environmentName = readEnvironmentVariable('AZURE_ENV_NAME', 'byocatemplate param cosmosLocation = readEnvironmentVariable('AZURE_ENV_COSMOS_LOCATION', 'eastus2') param deploymentType = readEnvironmentVariable('AZURE_ENV_MODEL_DEPLOYMENT_TYPE', 'GlobalStandard') param gptModelName = readEnvironmentVariable('AZURE_ENV_MODEL_NAME', 'gpt-4o-mini') -param azureOpenaiAPIVersion = readEnvironmentVariable('AZURE_ENV_MODEL_VERSION', '2025-01-01-preview') +param azureOpenaiAPIVersion = readEnvironmentVariable('AZURE_ENV_MODEL_VERSION', '2025-04-01-preview') param gptDeploymentCapacity = int(readEnvironmentVariable('AZURE_ENV_MODEL_CAPACITY', '30')) param embeddingModel = readEnvironmentVariable('AZURE_ENV_EMBEDDING_MODEL_NAME', 'text-embedding-ada-002') param embeddingDeploymentCapacity = int(readEnvironmentVariable('AZURE_ENV_EMBEDDING_MODEL_CAPACITY', '80')) diff --git a/infra/main.json b/infra/main.json index fee4c39e0..fe41cf42c 100644 --- a/infra/main.json +++ b/infra/main.json @@ -4,8 +4,8 @@ "metadata": { "_generator": { "name": "bicep", - "version": "0.35.1.17967", - "templateHash": "10579732773480527563" + "version": "0.36.1.42791", + "templateHash": "8950753165543697743" } }, "parameters": { @@ -17,6 +17,13 @@ "description": "A unique prefix for all resources in this deployment. 
This should be 3-20 characters long:" } }, + "existingLogAnalyticsWorkspaceId": { + "type": "string", + "defaultValue": "", + "metadata": { + "description": "Optional: Existing Log Analytics Workspace Resource ID" + } + }, "cosmosLocation": { "type": "string", "metadata": { @@ -48,7 +55,7 @@ }, "azureOpenaiAPIVersion": { "type": "string", - "defaultValue": "2025-01-01-preview" + "defaultValue": "2025-04-01-preview" }, "gptDeploymentCapacity": { "type": "int", @@ -111,6 +118,8 @@ "$fxv#0": { "ai": { "aiSearch": "srch-", + "aiFoundry": "aif-", + "aiFoundryProject": "aifp-", "aiServices": "aisa-", "aiVideoIndexer": "avi-", "machineLearningWorkspace": "mlw-", @@ -341,7 +350,7 @@ "abbrs": "[variables('$fxv#0')]", "functionAppSqlPrompt": "Generate a valid T-SQL query to find {query} for tables and columns provided below:\n 1. Table: Clients\n Columns: ClientId, Client, Email, Occupation, MaritalStatus, Dependents\n 2. Table: InvestmentGoals\n Columns: ClientId, InvestmentGoal\n 3. Table: Assets\n Columns: ClientId, AssetDate, Investment, ROI, Revenue, AssetType\n 4. Table: ClientSummaries\n Columns: ClientId, ClientSummary\n 5. Table: InvestmentGoalsDetails\n Columns: ClientId, InvestmentGoal, TargetAmount, Contribution\n 6. Table: Retirement\n Columns: ClientId, StatusDate, RetirementGoalProgress, EducationGoalProgress\n 7. Table: ClientMeetings\n Columns: ClientId, ConversationId, Title, StartTime, EndTime, Advisor, ClientEmail\n Always use the Investment column from the Assets table as the value.\n Assets table has snapshots of values by date. Do not add numbers across different dates for total values.\n Do not use client name in filters.\n Do not include assets values unless asked for.\n ALWAYS use ClientId = {clientid} in the query filter.\n ALWAYS select Client Name (Column: Client) in the query.\n Query filters are IMPORTANT. Add filters like AssetType, AssetDate, etc. if needed.\n Only return the generated SQL query. 
Do not return anything else.", "functionAppCallTranscriptSystemPrompt": "You are an assistant who supports wealth advisors in preparing for client meetings. \n You have access to the client’s past meeting call transcripts. \n When answering questions, especially summary requests, provide a detailed and structured response that includes key topics, concerns, decisions, and trends. \n If no data is available, state 'No relevant data found for previous meetings.", - "functionAppStreamTextSystemPrompt": "You are a helpful assistant to a Wealth Advisor. \n The currently selected client's name is '{SelectedClientName}', and any case-insensitive or partial mention should be understood as referring to this client.\n If no name is provided, assume the question is about '{SelectedClientName}'.\n If the query references a different client or includes comparative terms like 'compare' or 'other client', please respond with: 'Please only ask questions about the selected client or select another client.'\n Otherwise, provide thorough answers using only data from SQL or call transcripts. \n If no data is found, please respond with 'No data found for that client.' Remove any client identifiers from the final response." + "functionAppStreamTextSystemPrompt": "The currently selected client's name is '{SelectedClientName}'. Treat any case-insensitive or partial mention as referring to this client.\n If the user mentions no name, assume they are asking about '{SelectedClientName}'..\n If the user references a name that clearly differs from '{SelectedClientName}', respond only with: 'Please only ask questions about the selected client or select another client.' Otherwise, provide thorough answers for every question using only data from SQL or call transcripts.'\n If no data is found, respond with 'No data found for that client.' Remove any client identifiers from the final response.\n Always send clientId as '{client_id}'." 
}, "resources": [ { @@ -371,8 +380,8 @@ "metadata": { "_generator": { "name": "bicep", - "version": "0.35.1.17967", - "templateHash": "13884114971020005637" + "version": "0.36.1.42791", + "templateHash": "1287895326947269968" } }, "parameters": { @@ -485,8 +494,8 @@ "metadata": { "_generator": { "name": "bicep", - "version": "0.35.1.17967", - "templateHash": "13533490792012888554" + "version": "0.36.1.42791", + "templateHash": "2457137526968921597" } }, "parameters": { @@ -697,8 +706,8 @@ "embeddingDeploymentCapacity": { "value": "[parameters('embeddingDeploymentCapacity')]" }, - "managedIdentityObjectId": { - "value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_managed_identity'), '2022-09-01').outputs.managedIdentityOutput.value.objectId]" + "existingLogAnalyticsWorkspaceId": { + "value": "[parameters('existingLogAnalyticsWorkspaceId')]" } }, "template": { @@ -707,8 +716,8 @@ "metadata": { "_generator": { "name": "bicep", - "version": "0.35.1.17967", - "templateHash": "16963364971780216238" + "version": "0.36.1.42791", + "templateHash": "15647067587936233417" } }, "parameters": { @@ -739,14 +748,17 @@ "embeddingDeploymentCapacity": { "type": "int" }, - "managedIdentityObjectId": { - "type": "string" + "existingLogAnalyticsWorkspaceId": { + "type": "string", + "defaultValue": "" } }, "variables": { "$fxv#0": { "ai": { "aiSearch": "srch-", + "aiFoundry": "aif-", + "aiFoundryProject": "aifp-", "aiServices": "aisa-", "aiVideoIndexer": "avi-", "machineLearningWorkspace": "mlw-", @@ -972,18 +984,13 @@ } }, "abbrs": "[variables('$fxv#0')]", - "storageName": "[format('{0}{1}hub', variables('abbrs').storage.storageAccount, parameters('solutionName'))]", - "storageSkuName": "Standard_LRS", - "aiServicesName": "[format('{0}{1}', variables('abbrs').ai.aiServices, parameters('solutionName'))]", + "aiFoundryName": "[format('{0}{1}', 
variables('abbrs').ai.aiFoundry, parameters('solutionName'))]", "applicationInsightsName": "[format('{0}{1}', variables('abbrs').managementGovernance.applicationInsights, parameters('solutionName'))]", - "containerRegistryName": "[format('{0}{1}', variables('abbrs').containers.containerRegistry, parameters('solutionName'))]", "keyvaultName": "[parameters('keyVaultName')]", "location": "[parameters('solutionLocation')]", - "aiHubName": "[format('{0}{1}-hub', variables('abbrs').ai.aiHub, parameters('solutionName'))]", - "aiHubFriendlyName": "[variables('aiHubName')]", - "aiHubDescription": "AI Hub", - "aiProjectName": "[format('{0}{1}', variables('abbrs').ai.aiHubProject, parameters('solutionName'))]", + "aiProjectName": "[format('{0}{1}', variables('abbrs').ai.aiFoundryProject, parameters('solutionName'))]", "aiProjectFriendlyName": "[variables('aiProjectName')]", + "aiProjectDescription": "AI Foundry Project", "aiSearchName": "[format('{0}{1}', variables('abbrs').ai.aiSearch, parameters('solutionName'))]", "workspaceName": "[format('{0}{1}', variables('abbrs').managementGovernance.logAnalyticsWorkspace, parameters('solutionName'))]", "aiModelDeployments": [ @@ -1006,60 +1013,14 @@ "raiPolicyName": "Microsoft.Default" } ], - "containerRegistryNameCleaned": "[replace(variables('containerRegistryName'), '-', '')]", - "storageNameCleaned": "[replace(variables('storageName'), '-', '')]" + "useExisting": "[not(empty(parameters('existingLogAnalyticsWorkspaceId')))]", + "existingLawSubscription": "[if(variables('useExisting'), split(parameters('existingLogAnalyticsWorkspaceId'), '/')[2], '')]", + "existingLawResourceGroup": "[if(variables('useExisting'), split(parameters('existingLogAnalyticsWorkspaceId'), '/')[4], '')]", + "existingLawName": "[if(variables('useExisting'), split(parameters('existingLogAnalyticsWorkspaceId'), '/')[8], '')]" }, "resources": [ { - "type": "Microsoft.MachineLearningServices/workspaces/connections", - "apiVersion": "2024-07-01-preview", - 
"name": "[format('{0}/{1}', variables('aiHubName'), format('{0}-connection-AzureOpenAI', variables('aiHubName')))]", - "properties": { - "category": "AIServices", - "target": "[reference(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2024-04-01-preview').endpoint]", - "authType": "ApiKey", - "isSharedToAll": true, - "credentials": { - "key": "[listKeys(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2024-04-01-preview').key1]" - }, - "metadata": { - "ApiType": "Azure", - "ResourceId": "[resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName'))]" - } - }, - "dependsOn": [ - "[resourceId('Microsoft.MachineLearningServices/workspaces', variables('aiHubName'))]", - "[resourceId('Microsoft.Search/searchServices', variables('aiSearchName'))]", - "[resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName'))]", - "aiServicesDeployments" - ] - }, - { - "type": "Microsoft.MachineLearningServices/workspaces/connections", - "apiVersion": "2024-07-01-preview", - "name": "[format('{0}/{1}', variables('aiHubName'), format('{0}-connection-AzureAISearch', variables('aiHubName')))]", - "properties": { - "category": "CognitiveSearch", - "target": "[format('https://{0}.search.windows.net', variables('aiSearchName'))]", - "authType": "ApiKey", - "isSharedToAll": true, - "credentials": { - "key": "[listAdminKeys(resourceId('Microsoft.Search/searchServices', variables('aiSearchName')), '2023-11-01').primaryKey]" - }, - "metadata": { - "type": "azure_ai_search", - "ApiType": "Azure", - "ResourceId": "[resourceId('Microsoft.Search/searchServices', variables('aiSearchName'))]", - "ApiVersion": "2024-05-01-preview", - "DeploymentApiVersion": "2023-11-01" - } - }, - "dependsOn": [ - "[resourceId('Microsoft.MachineLearningServices/workspaces', variables('aiHubName'))]", - "[resourceId('Microsoft.Search/searchServices', variables('aiSearchName'))]" - ] - }, - { + "condition": 
"[not(variables('useExisting'))]", "type": "Microsoft.OperationalInsights/workspaces", "apiVersion": "2023-09-01", "name": "[variables('workspaceName')]", @@ -1082,68 +1043,62 @@ "Application_Type": "web", "publicNetworkAccessForIngestion": "Enabled", "publicNetworkAccessForQuery": "Enabled", - "WorkspaceResourceId": "[resourceId('Microsoft.OperationalInsights/workspaces', variables('workspaceName'))]" + "WorkspaceResourceId": "[if(variables('useExisting'), extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', variables('existingLawSubscription'), variables('existingLawResourceGroup')), 'Microsoft.OperationalInsights/workspaces', variables('existingLawName')), resourceId('Microsoft.OperationalInsights/workspaces', variables('workspaceName')))]" }, "dependsOn": [ "[resourceId('Microsoft.OperationalInsights/workspaces', variables('workspaceName'))]" ] }, { - "type": "Microsoft.ContainerRegistry/registries", - "apiVersion": "2021-09-01", - "name": "[variables('containerRegistryNameCleaned')]", + "type": "Microsoft.CognitiveServices/accounts", + "apiVersion": "2025-04-01-preview", + "name": "[variables('aiFoundryName')]", "location": "[variables('location')]", "sku": { - "name": "Premium" + "name": "S0" + }, + "kind": "AIServices", + "identity": { + "type": "SystemAssigned" }, "properties": { - "adminUserEnabled": true, - "dataEndpointEnabled": false, - "networkRuleBypassOptions": "AzureServices", - "networkRuleSet": { - "defaultAction": "Deny" - }, - "policies": { - "quarantinePolicy": { - "status": "enabled" - }, - "retentionPolicy": { - "status": "enabled", - "days": 7 - }, - "trustPolicy": { - "status": "disabled", - "type": "Notary" - } + "allowProjectManagement": true, + "customSubDomainName": "[variables('aiFoundryName')]", + "networkAcls": { + "defaultAction": "Allow", + "virtualNetworkRules": [], + "ipRules": [] }, - "publicNetworkAccess": "Disabled", - "zoneRedundancy": "Disabled" + "publicNetworkAccess": "Enabled", + "disableLocalAuth": false } 
}, { - "type": "Microsoft.CognitiveServices/accounts", - "apiVersion": "2024-04-01-preview", - "name": "[variables('aiServicesName')]", + "type": "Microsoft.CognitiveServices/accounts/projects", + "apiVersion": "2025-04-01-preview", + "name": "[format('{0}/{1}', variables('aiFoundryName'), variables('aiProjectName'))]", "location": "[variables('location')]", - "sku": { - "name": "S0" + "identity": { + "type": "SystemAssigned" }, - "kind": "AIServices", "properties": { - "customSubDomainName": "[variables('aiServicesName')]", - "publicNetworkAccess": "Enabled" - } + "description": "[variables('aiProjectDescription')]", + "displayName": "[variables('aiProjectFriendlyName')]" + }, + "dependsOn": [ + "[resourceId('Microsoft.CognitiveServices/accounts', variables('aiFoundryName'))]" + ] }, { "copy": { - "name": "aiServicesDeployments", + "name": "aiFModelDeployments", "count": "[length(variables('aiModelDeployments'))]", "mode": "serial", "batchSize": 1 }, "type": "Microsoft.CognitiveServices/accounts/deployments", "apiVersion": "2023-05-01", - "name": "[format('{0}/{1}', variables('aiServicesName'), variables('aiModelDeployments')[copyIndex()].name)]", + "name": "[format('{0}/{1}', variables('aiFoundryName'), variables('aiModelDeployments')[copyIndex()].name)]", "properties": { "model": { "format": "OpenAI", @@ -1156,17 +1111,20 @@ "capacity": "[variables('aiModelDeployments')[copyIndex()].sku.capacity]" }, "dependsOn": [ - "[resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName'))]" + "[resourceId('Microsoft.CognitiveServices/accounts', variables('aiFoundryName'))]" ] }, { "type": "Microsoft.Search/searchServices", - "apiVersion": "2023-11-01", + "apiVersion": "2025-02-01-preview", "name": "[variables('aiSearchName')]", "location": "[parameters('solutionLocation')]", "sku": { "name": "basic" }, + "identity": { + "type": "SystemAssigned" + }, "properties": { "replicaCount": 1, "partitionCount": 1, @@ -1180,140 +1138,85 @@ }, "disableLocalAuth": 
false, "authOptions": { - "apiKeyOnly": {} + "aadOrApiKey": { + "aadAuthFailureMode": "http403" + } }, "semanticSearch": "free" } }, { - "type": "Microsoft.Storage/storageAccounts", - "apiVersion": "2022-09-01", - "name": "[variables('storageNameCleaned')]", - "location": "[variables('location')]", - "sku": { - "name": "[variables('storageSkuName')]" - }, - "kind": "StorageV2", + "type": "Microsoft.CognitiveServices/accounts/connections", + "apiVersion": "2025-04-01-preview", + "name": "[format('{0}/{1}', variables('aiFoundryName'), 'foundry-search-connection')]", "properties": { - "accessTier": "Hot", - "allowBlobPublicAccess": false, - "allowCrossTenantReplication": false, - "allowSharedKeyAccess": false, - "encryption": { - "keySource": "Microsoft.Storage", - "requireInfrastructureEncryption": false, - "services": { - "blob": { - "enabled": true, - "keyType": "Account" - }, - "file": { - "enabled": true, - "keyType": "Account" - }, - "queue": { - "enabled": true, - "keyType": "Service" - }, - "table": { - "enabled": true, - "keyType": "Service" - } - } - }, - "isHnsEnabled": false, - "isNfsV3Enabled": false, - "keyPolicy": { - "keyExpirationPeriodInDays": 7 - }, - "largeFileSharesState": "Disabled", - "minimumTlsVersion": "TLS1_2", - "networkAcls": { - "bypass": "AzureServices", - "defaultAction": "Allow" - }, - "supportsHttpsTrafficOnly": true - } + "category": "CognitiveSearch", + "target": "[reference(resourceId('Microsoft.Search/searchServices', variables('aiSearchName')), '2025-02-01-preview').endpoint]", + "authType": "AAD", + "isSharedToAll": true, + "metadata": { + "ApiType": "Azure", + "ResourceId": "[resourceId('Microsoft.Search/searchServices', variables('aiSearchName'))]", + "location": "[reference(resourceId('Microsoft.Search/searchServices', variables('aiSearchName')), '2025-02-01-preview', 'full').location]" + } + }, + "dependsOn": [ + "[resourceId('Microsoft.CognitiveServices/accounts', variables('aiFoundryName'))]", + 
"[resourceId('Microsoft.Search/searchServices', variables('aiSearchName'))]" + ] }, { "type": "Microsoft.Authorization/roleAssignments", "apiVersion": "2022-04-01", - "name": "[guid(resourceGroup().id, parameters('managedIdentityObjectId'), resourceId('Microsoft.Authorization/roleDefinitions', 'ba92f5b4-2d11-453d-a403-e96b0029c9fe'))]", + "scope": "[format('Microsoft.Search/searchServices/{0}', variables('aiSearchName'))]", + "name": "[guid(resourceId('Microsoft.Search/searchServices', variables('aiSearchName')), resourceId('Microsoft.CognitiveServices/accounts', variables('aiFoundryName')), extensionResourceId(resourceId('Microsoft.Search/searchServices', variables('aiSearchName')), 'Microsoft.Authorization/roleDefinitions', '1407120a-92aa-4202-b7e9-c0e197c71c8f'))]", "properties": { - "principalId": "[parameters('managedIdentityObjectId')]", - "roleDefinitionId": "[resourceId('Microsoft.Authorization/roleDefinitions', 'ba92f5b4-2d11-453d-a403-e96b0029c9fe')]", + "roleDefinitionId": "[extensionResourceId(resourceId('Microsoft.Search/searchServices', variables('aiSearchName')), 'Microsoft.Authorization/roleDefinitions', '1407120a-92aa-4202-b7e9-c0e197c71c8f')]", + "principalId": "[reference(resourceId('Microsoft.CognitiveServices/accounts', variables('aiFoundryName')), '2025-04-01-preview', 'full').identity.principalId]", "principalType": "ServicePrincipal" - } - }, - { - "type": "Microsoft.MachineLearningServices/workspaces", - "apiVersion": "2023-08-01-preview", - "name": "[variables('aiHubName')]", - "location": "[variables('location')]", - "identity": { - "type": "SystemAssigned" - }, - "properties": { - "friendlyName": "[variables('aiHubFriendlyName')]", - "description": "[variables('aiHubDescription')]", - "keyVault": "[resourceId('Microsoft.KeyVault/vaults', parameters('keyVaultName'))]", - "storageAccount": "[resourceId('Microsoft.Storage/storageAccounts', variables('storageNameCleaned'))]", - "applicationInsights": 
"[resourceId('Microsoft.Insights/components', variables('applicationInsightsName'))]", - "containerRegistry": "[resourceId('Microsoft.ContainerRegistry/registries', variables('containerRegistryNameCleaned'))]" }, - "kind": "hub", "dependsOn": [ - "[resourceId('Microsoft.Search/searchServices', variables('aiSearchName'))]", - "aiServicesDeployments", - "[resourceId('Microsoft.Insights/components', variables('applicationInsightsName'))]", - "[resourceId('Microsoft.ContainerRegistry/registries', variables('containerRegistryNameCleaned'))]", - "[resourceId('Microsoft.Storage/storageAccounts', variables('storageNameCleaned'))]" + "[resourceId('Microsoft.CognitiveServices/accounts', variables('aiFoundryName'))]", + "[resourceId('Microsoft.Search/searchServices', variables('aiSearchName'))]" ] }, { - "type": "Microsoft.MachineLearningServices/workspaces", - "apiVersion": "2024-01-01-preview", - "name": "[variables('aiProjectName')]", - "location": "[variables('location')]", - "kind": "Project", - "identity": { - "type": "SystemAssigned" - }, + "type": "Microsoft.Authorization/roleAssignments", + "apiVersion": "2022-04-01", + "scope": "[format('Microsoft.Search/searchServices/{0}', variables('aiSearchName'))]", + "name": "[guid(resourceId('Microsoft.Search/searchServices', variables('aiSearchName')), resourceId('Microsoft.CognitiveServices/accounts', variables('aiFoundryName')), extensionResourceId(resourceId('Microsoft.Search/searchServices', variables('aiSearchName')), 'Microsoft.Authorization/roleDefinitions', '7ca78c08-252a-4471-8644-bb5ff32d4ba0'))]", "properties": { - "friendlyName": "[variables('aiProjectFriendlyName')]", - "hubResourceId": "[resourceId('Microsoft.MachineLearningServices/workspaces', variables('aiHubName'))]" + "roleDefinitionId": "[extensionResourceId(resourceId('Microsoft.Search/searchServices', variables('aiSearchName')), 'Microsoft.Authorization/roleDefinitions', '7ca78c08-252a-4471-8644-bb5ff32d4ba0')]", + "principalId": 
"[reference(resourceId('Microsoft.CognitiveServices/accounts', variables('aiFoundryName')), '2025-04-01-preview', 'full').identity.principalId]", + "principalType": "ServicePrincipal" }, "dependsOn": [ - "[resourceId('Microsoft.MachineLearningServices/workspaces', variables('aiHubName'))]" + "[resourceId('Microsoft.CognitiveServices/accounts', variables('aiFoundryName'))]", + "[resourceId('Microsoft.Search/searchServices', variables('aiSearchName'))]" ] }, { - "type": "Microsoft.KeyVault/vaults/secrets", - "apiVersion": "2021-11-01-preview", - "name": "[format('{0}/{1}', parameters('keyVaultName'), 'TENANT-ID')]", + "type": "Microsoft.CognitiveServices/accounts/connections", + "apiVersion": "2025-04-01-preview", + "name": "[format('{0}/{1}', variables('aiFoundryName'), 'foundry-app-insights-connection')]", "properties": { - "value": "[subscription().tenantId]" - } - }, - { - "type": "Microsoft.KeyVault/vaults/secrets", - "apiVersion": "2021-11-01-preview", - "name": "[format('{0}/{1}', parameters('keyVaultName'), 'AZURE-OPENAI-KEY')]", - "properties": { - "value": "[listKeys(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2024-04-01-preview').key1]" + "category": "AppInsights", + "target": "[resourceId('Microsoft.Insights/components', variables('applicationInsightsName'))]", + "authType": "ApiKey", + "isSharedToAll": true, + "credentials": { + "key": "[reference(resourceId('Microsoft.Insights/components', variables('applicationInsightsName')), '2020-02-02').ConnectionString]" + }, + "metadata": { + "ApiType": "Azure", + "ResourceId": "[resourceId('Microsoft.Insights/components', variables('applicationInsightsName'))]" + } }, "dependsOn": [ - "[resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName'))]" + "[resourceId('Microsoft.CognitiveServices/accounts', variables('aiFoundryName'))]", + "[resourceId('Microsoft.Insights/components', variables('applicationInsightsName'))]" ] }, - { - "type": 
"Microsoft.KeyVault/vaults/secrets", - "apiVersion": "2021-11-01-preview", - "name": "[format('{0}/{1}', parameters('keyVaultName'), 'AZURE-OPEN-AI-DEPLOYMENT-MODEL')]", - "properties": { - "value": "[parameters('gptModelName')]" - } - }, { "type": "Microsoft.KeyVault/vaults/secrets", "apiVersion": "2021-11-01-preview", @@ -1327,33 +1230,19 @@ "apiVersion": "2021-11-01-preview", "name": "[format('{0}/{1}', parameters('keyVaultName'), 'AZURE-OPENAI-ENDPOINT')]", "properties": { - "value": "[reference(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2024-04-01-preview').endpoint]" + "value": "[reference(resourceId('Microsoft.CognitiveServices/accounts', variables('aiFoundryName')), '2025-04-01-preview').endpoints['OpenAI Language Model Instance API']]" }, "dependsOn": [ - "[resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName'))]" + "[resourceId('Microsoft.CognitiveServices/accounts', variables('aiFoundryName'))]" ] }, { "type": "Microsoft.KeyVault/vaults/secrets", "apiVersion": "2021-11-01-preview", - "name": "[format('{0}/{1}', parameters('keyVaultName'), 'AZURE-AI-PROJECT-CONN-STRING')]", + "name": "[format('{0}/{1}', parameters('keyVaultName'), 'AZURE-OPENAI-EMBEDDING-MODEL')]", "properties": { - "value": "[format('{0};{1};{2};{3}', split(reference(resourceId('Microsoft.MachineLearningServices/workspaces', variables('aiProjectName')), '2024-01-01-preview').discoveryUrl, '/')[2], subscription().subscriptionId, resourceGroup().name, variables('aiProjectName'))]" - }, - "dependsOn": [ - "[resourceId('Microsoft.MachineLearningServices/workspaces', variables('aiProjectName'))]" - ] - }, - { - "type": "Microsoft.KeyVault/vaults/secrets", - "apiVersion": "2021-11-01-preview", - "name": "[format('{0}/{1}', parameters('keyVaultName'), 'AZURE-SEARCH-KEY')]", - "properties": { - "value": "[listAdminKeys(resourceId('Microsoft.Search/searchServices', variables('aiSearchName')), '2023-11-01').primaryKey]" - }, - 
"dependsOn": [ - "[resourceId('Microsoft.Search/searchServices', variables('aiSearchName'))]" - ] + "value": "[parameters('embeddingModel')]" + } }, { "type": "Microsoft.KeyVault/vaults/secrets", @@ -1366,17 +1255,6 @@ "[resourceId('Microsoft.Search/searchServices', variables('aiSearchName'))]" ] }, - { - "type": "Microsoft.KeyVault/vaults/secrets", - "apiVersion": "2021-11-01-preview", - "name": "[format('{0}/{1}', parameters('keyVaultName'), 'AZURE-SEARCH-SERVICE')]", - "properties": { - "value": "[variables('aiSearchName')]" - }, - "dependsOn": [ - "[resourceId('Microsoft.Search/searchServices', variables('aiSearchName'))]" - ] - }, { "type": "Microsoft.KeyVault/vaults/secrets", "apiVersion": "2021-11-01-preview", @@ -1384,60 +1262,6 @@ "properties": { "value": "transcripts_index" } - }, - { - "type": "Microsoft.KeyVault/vaults/secrets", - "apiVersion": "2021-11-01-preview", - "name": "[format('{0}/{1}', parameters('keyVaultName'), 'COG-SERVICES-ENDPOINT')]", - "properties": { - "value": "[reference(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2024-04-01-preview').endpoint]" - }, - "dependsOn": [ - "[resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName'))]" - ] - }, - { - "type": "Microsoft.KeyVault/vaults/secrets", - "apiVersion": "2021-11-01-preview", - "name": "[format('{0}/{1}', parameters('keyVaultName'), 'COG-SERVICES-KEY')]", - "properties": { - "value": "[listKeys(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2024-04-01-preview').key1]" - }, - "dependsOn": [ - "[resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName'))]" - ] - }, - { - "type": "Microsoft.KeyVault/vaults/secrets", - "apiVersion": "2021-11-01-preview", - "name": "[format('{0}/{1}', parameters('keyVaultName'), 'COG-SERVICES-NAME')]", - "properties": { - "value": "[variables('aiServicesName')]" - } - }, - { - "type": "Microsoft.KeyVault/vaults/secrets", - "apiVersion": 
"2021-11-01-preview", - "name": "[format('{0}/{1}', parameters('keyVaultName'), 'AZURE-SUBSCRIPTION-ID')]", - "properties": { - "value": "[subscription().subscriptionId]" - } - }, - { - "type": "Microsoft.KeyVault/vaults/secrets", - "apiVersion": "2021-11-01-preview", - "name": "[format('{0}/{1}', parameters('keyVaultName'), 'AZURE-RESOURCE-GROUP')]", - "properties": { - "value": "[resourceGroup().name]" - } - }, - { - "type": "Microsoft.KeyVault/vaults/secrets", - "apiVersion": "2021-11-01-preview", - "name": "[format('{0}/{1}', parameters('keyVaultName'), 'AZURE-LOCATION')]", - "properties": { - "value": "[parameters('solutionLocation')]" - } } ], "outputs": { @@ -1449,17 +1273,25 @@ "type": "string", "value": "[resourceId('Microsoft.KeyVault/vaults', parameters('keyVaultName'))]" }, + "aiFoundryProjectEndpoint": { + "type": "string", + "value": "[reference(resourceId('Microsoft.CognitiveServices/accounts/projects', variables('aiFoundryName'), variables('aiProjectName')), '2025-04-01-preview').endpoints['AI Foundry API']]" + }, "aiServicesTarget": { "type": "string", - "value": "[reference(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2024-04-01-preview').endpoint]" + "value": "[reference(resourceId('Microsoft.CognitiveServices/accounts', variables('aiFoundryName')), '2025-04-01-preview').endpoint]" + }, + "aoaiEndpoint": { + "type": "string", + "value": "[reference(resourceId('Microsoft.CognitiveServices/accounts', variables('aiFoundryName')), '2025-04-01-preview').endpoints['OpenAI Language Model Instance API']]" }, - "aiServicesName": { + "aiFoundryName": { "type": "string", - "value": "[variables('aiServicesName')]" + "value": "[variables('aiFoundryName')]" }, - "aiServicesId": { + "aiFoundryId": { "type": "string", - "value": "[resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName'))]" + "value": "[resourceId('Microsoft.CognitiveServices/accounts', variables('aiFoundryName'))]" }, "aiSearchName": { 
"type": "string", @@ -1477,7 +1309,7 @@ "type": "string", "value": "[variables('aiSearchName')]" }, - "aiProjectName": { + "aiFoundryProjectName": { "type": "string", "value": "[variables('aiProjectName')]" }, @@ -1487,11 +1319,11 @@ }, "logAnalyticsWorkspaceResourceName": { "type": "string", - "value": "[variables('workspaceName')]" + "value": "[if(variables('useExisting'), variables('existingLawName'), variables('workspaceName'))]" }, - "storageAccountName": { + "logAnalyticsWorkspaceResourceGroup": { "type": "string", - "value": "[variables('storageNameCleaned')]" + "value": "[if(variables('useExisting'), variables('existingLawResourceGroup'), resourceGroup().name)]" }, "applicationInsightsConnectionString": { "type": "string", @@ -1501,8 +1333,7 @@ } }, "dependsOn": [ - "[extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_keyvault')]", - "[extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_managed_identity')]" + "[extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_keyvault')]" ] }, { @@ -1521,9 +1352,6 @@ }, "cosmosDBName": { "value": "[format('{0}{1}', variables('abbrs').databases.cosmosDBDatabase, variables('solutionPrefix'))]" - }, - "kvName": { - "value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_keyvault'), '2022-09-01').outputs.keyvaultName.value]" } }, "template": { @@ -1532,8 +1360,8 @@ "metadata": { "_generator": { "name": "bicep", - "version": "0.35.1.17967", - "templateHash": "9117647475512750570" + "version": "0.36.1.42791", + "templateHash": "12179523327793839969" } }, 
"parameters": { @@ -1546,9 +1374,6 @@ "description": "Name" } }, - "kvName": { - "type": "string" - }, "databaseName": { "type": "string", "defaultValue": "db_conversation_history" @@ -1647,52 +1472,6 @@ "dependsOn": [ "[resourceId('Microsoft.DocumentDB/databaseAccounts', parameters('cosmosDBName'))]" ] - }, - { - "type": "Microsoft.KeyVault/vaults/secrets", - "apiVersion": "2021-11-01-preview", - "name": "[format('{0}/{1}', parameters('kvName'), 'AZURE-COSMOSDB-ACCOUNT')]", - "properties": { - "value": "[parameters('cosmosDBName')]" - }, - "dependsOn": [ - "[resourceId('Microsoft.DocumentDB/databaseAccounts', parameters('cosmosDBName'))]" - ] - }, - { - "type": "Microsoft.KeyVault/vaults/secrets", - "apiVersion": "2021-11-01-preview", - "name": "[format('{0}/{1}', parameters('kvName'), 'AZURE-COSMOSDB-ACCOUNT-KEY')]", - "properties": { - "value": "[listKeys(resourceId('Microsoft.DocumentDB/databaseAccounts', parameters('cosmosDBName')), '2022-08-15').primaryMasterKey]" - }, - "dependsOn": [ - "[resourceId('Microsoft.DocumentDB/databaseAccounts', parameters('cosmosDBName'))]" - ] - }, - { - "type": "Microsoft.KeyVault/vaults/secrets", - "apiVersion": "2021-11-01-preview", - "name": "[format('{0}/{1}', parameters('kvName'), 'AZURE-COSMOSDB-DATABASE')]", - "properties": { - "value": "[parameters('databaseName')]" - } - }, - { - "type": "Microsoft.KeyVault/vaults/secrets", - "apiVersion": "2021-11-01-preview", - "name": "[format('{0}/{1}', parameters('kvName'), 'AZURE-COSMOSDB-CONVERSATIONS-CONTAINER')]", - "properties": { - "value": "[parameters('collectionName')]" - } - }, - { - "type": "Microsoft.KeyVault/vaults/secrets", - "apiVersion": "2021-11-01-preview", - "name": "[format('{0}/{1}', parameters('kvName'), 'AZURE-COSMOSDB-ENABLE-FEEDBACK')]", - "properties": { - "value": "True" - } } ], "outputs": { @@ -1710,10 +1489,7 @@ } } } - }, - "dependsOn": [ - "[extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, 
resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_keyvault')]" - ] + } }, { "type": "Microsoft.Resources/deployments", @@ -1745,8 +1521,8 @@ "metadata": { "_generator": { "name": "bicep", - "version": "0.35.1.17967", - "templateHash": "1117979962296827839" + "version": "0.36.1.42791", + "templateHash": "9019656445963157268" } }, "parameters": { @@ -1864,17 +1640,6 @@ "properties": { "value": "data" } - }, - { - "type": "Microsoft.KeyVault/vaults/secrets", - "apiVersion": "2021-11-01-preview", - "name": "[format('{0}/{1}', parameters('keyVaultName'), 'ADLS-ACCOUNT-KEY')]", - "properties": { - "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', parameters('saName')), '2021-04-01').keys[0].value]" - }, - "dependsOn": [ - "[resourceId('Microsoft.Storage/storageAccounts', parameters('saName'))]" - ] } ], "outputs": { @@ -1930,8 +1695,8 @@ "metadata": { "_generator": { "name": "bicep", - "version": "0.35.1.17967", - "templateHash": "13918782005857949552" + "version": "0.36.1.42791", + "templateHash": "6152102507143828636" } }, "parameters": { @@ -1965,20 +1730,6 @@ "metadata": { "description": "Location for all resources." } - }, - "administratorLogin": { - "type": "securestring", - "defaultValue": "sqladmin", - "metadata": { - "description": "The administrator username of the SQL logical server." - } - }, - "administratorLoginPassword": { - "type": "securestring", - "defaultValue": "TestPassword_1234", - "metadata": { - "description": "The administrator password of the SQL logical server." 
- } } }, "resources": [ @@ -2064,22 +1815,6 @@ "properties": { "value": "[parameters('sqlDBName')]" } - }, - { - "type": "Microsoft.KeyVault/vaults/secrets", - "apiVersion": "2021-11-01-preview", - "name": "[format('{0}/{1}', parameters('keyVaultName'), 'SQLDB-USERNAME')]", - "properties": { - "value": "[parameters('administratorLogin')]" - } - }, - { - "type": "Microsoft.KeyVault/vaults/secrets", - "apiVersion": "2021-11-01-preview", - "name": "[format('{0}/{1}', parameters('keyVaultName'), 'SQLDB-PASSWORD')]", - "properties": { - "value": "[parameters('administratorLoginPassword')]" - } } ], "outputs": { @@ -2125,14 +1860,6 @@ "AzureSearchIndex": { "value": "transcripts_index" }, - "AzureSearchKey": { - "reference": { - "keyVault": { - "id": "[extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.KeyVault/vaults', reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.keyvaultName.value)]" - }, - "secretName": "AZURE-SEARCH-KEY" - } - }, "AzureSearchUseSemanticSearch": { "value": "True" }, @@ -2155,22 +1882,14 @@ "value": "sourceurl" }, "AzureOpenAIResource": { - "value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.aiServicesName.value]" + "value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.aiFoundryName.value]" }, "AzureOpenAIEndpoint": { - "value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 
'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.aiServicesTarget.value]" + "value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.aoaiEndpoint.value]" }, "AzureOpenAIModel": { "value": "[parameters('gptModelName')]" }, - "AzureOpenAIKey": { - "reference": { - "keyVault": { - "id": "[extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.KeyVault/vaults', reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.keyvaultName.value)]" - }, - "secretName": "AZURE-OPENAI-KEY" - } - }, "AzureOpenAITemperature": { "value": "0" }, @@ -2207,16 +1926,8 @@ "AzureOpenAIEmbeddingName": { "value": "[parameters('embeddingModel')]" }, - "AzureOpenAIEmbeddingkey": { - "reference": { - "keyVault": { - "id": "[extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.KeyVault/vaults', reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.keyvaultName.value)]" - }, - "secretName": "AZURE-OPENAI-KEY" - } - }, "AzureOpenAIEmbeddingEndpoint": { - "value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.aiServicesTarget.value]" + "value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 
'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.aoaiEndpoint.value]" }, "USE_INTERNAL_STREAM": { "value": "True" @@ -2227,17 +1938,6 @@ "SQLDB_DATABASE": { "value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_sql_db'), '2022-09-01').outputs.sqlDbName.value]" }, - "SQLDB_USERNAME": { - "value": "sqladmin" - }, - "SQLDB_PASSWORD": { - "reference": { - "keyVault": { - "id": "[extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.KeyVault/vaults', reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.keyvaultName.value)]" - }, - "secretName": "SQLDB-PASSWORD" - } - }, "AZURE_COSMOSDB_ACCOUNT": { "value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_cosmos_db'), '2022-09-01').outputs.cosmosAccountName.value]" }, @@ -2274,16 +1974,14 @@ "streamTextSystemPrompt": { "value": "[variables('functionAppStreamTextSystemPrompt')]" }, - "aiProjectConnectionString": { - "reference": { - "keyVault": { - "id": "[extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.KeyVault/vaults', reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.keyvaultName.value)]" - }, - "secretName": "AZURE-AI-PROJECT-CONN-STRING" - } + "aiFoundryProjectName": { + "value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', 
subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.aiFoundryProjectName.value]" + }, + "aiFoundryProjectEndpoint": { + "value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.aiFoundryProjectEndpoint.value]" }, - "aiProjectName": { - "value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.aiProjectName.value]" + "aiFoundryName": { + "value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.aiFoundryName.value]" }, "applicationInsightsConnectionString": { "value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.applicationInsightsConnectionString.value]" @@ -2295,8 +1993,8 @@ "metadata": { "_generator": { "name": "bicep", - "version": "0.35.1.17967", - "templateHash": "18358947382114771550" + "version": "0.36.1.42791", + "templateHash": "6657678385477724168" } }, "parameters": { @@ -2348,13 +2046,6 @@ "description": "Name of Azure Search Index" } }, - "AzureSearchKey": { - "type": "securestring", - "defaultValue": "", - "metadata": { - "description": "Azure Search Admin Key" - } - }, "AzureSearchUseSemanticSearch": { "type": "string", "defaultValue": "False", @@ -2430,12 +2121,6 @@ "description": "Azure Open AI Endpoint" } }, - "AzureOpenAIKey": { - "type": "securestring", - "metadata": { - "description": "Azure OpenAI Key" - } - }, 
"AzureOpenAITemperature": { "type": "string", "defaultValue": "0", @@ -2534,13 +2219,6 @@ "description": "Azure OpenAI Embedding Deployment Name" } }, - "AzureOpenAIEmbeddingkey": { - "type": "securestring", - "defaultValue": "", - "metadata": { - "description": "Azure Open AI Embedding Key" - } - }, "AzureOpenAIEmbeddingEndpoint": { "type": "string", "defaultValue": "", @@ -2569,20 +2247,6 @@ "description": "SQL Database Name" } }, - "SQLDB_USERNAME": { - "type": "string", - "defaultValue": "", - "metadata": { - "description": "SQL Database Username" - } - }, - "SQLDB_PASSWORD": { - "type": "securestring", - "defaultValue": "", - "metadata": { - "description": "SQL Database Password" - } - }, "AZURE_COSMOSDB_ACCOUNT": { "type": "string", "defaultValue": "", @@ -2644,14 +2308,17 @@ "description": "Azure Function App Stream Text System Prompt" } }, - "aiProjectConnectionString": { - "type": "securestring" + "aiFoundryProjectEndpoint": { + "type": "string" }, "useAIProjectClientFlag": { "type": "string", "defaultValue": "false" }, - "aiProjectName": { + "aiFoundryProjectName": { + "type": "string" + }, + "aiFoundryName": { "type": "string" }, "applicationInsightsConnectionString": { @@ -2707,10 +2374,6 @@ "name": "AZURE_SEARCH_INDEX", "value": "[parameters('AzureSearchIndex')]" }, - { - "name": "AZURE_SEARCH_KEY", - "value": "[parameters('AzureSearchKey')]" - }, { "name": "AZURE_SEARCH_USE_SEMANTIC_SEARCH", "value": "[parameters('AzureSearchUseSemanticSearch')]" @@ -2755,10 +2418,6 @@ "name": "AZURE_OPENAI_ENDPOINT", "value": "[parameters('AzureOpenAIEndpoint')]" }, - { - "name": "AZURE_OPENAI_KEY", - "value": "[parameters('AzureOpenAIKey')]" - }, { "name": "AZURE_OPENAI_TEMPERATURE", "value": "[parameters('AzureOpenAITemperature')]" @@ -2807,10 +2466,6 @@ "name": "AZURE_OPENAI_EMBEDDING_NAME", "value": "[parameters('AzureOpenAIEmbeddingName')]" }, - { - "name": "AZURE_OPENAI_EMBEDDING_KEY", - "value": "[parameters('AzureOpenAIEmbeddingkey')]" - }, { "name": 
"AZURE_OPENAI_EMBEDDING_ENDPOINT", "value": "[parameters('AzureOpenAIEmbeddingEndpoint')]" @@ -2823,14 +2478,6 @@ "name": "SQLDB_DATABASE", "value": "[parameters('SQLDB_DATABASE')]" }, - { - "name": "SQLDB_USERNAME", - "value": "[parameters('SQLDB_USERNAME')]" - }, - { - "name": "SQLDB_PASSWORD", - "value": "[parameters('SQLDB_PASSWORD')]" - }, { "name": "USE_INTERNAL_STREAM", "value": "[parameters('USE_INTERNAL_STREAM')]" @@ -2859,10 +2506,6 @@ "name": "AZURE_AI_SEARCH_ENDPOINT", "value": "[parameters('azureSearchServiceEndpoint')]" }, - { - "name": "SQLDB_CONNECTION_STRING", - "value": "TBD" - }, { "name": "AZURE_SQL_SYSTEM_PROMPT", "value": "[parameters('sqlSystemPrompt')]" @@ -2875,13 +2518,21 @@ "name": "AZURE_OPENAI_STREAM_TEXT_SYSTEM_PROMPT", "value": "[parameters('streamTextSystemPrompt')]" }, - { - "name": "AZURE_AI_PROJECT_CONN_STRING", - "value": "[parameters('aiProjectConnectionString')]" - }, { "name": "USE_AI_PROJECT_CLIENT", "value": "[parameters('useAIProjectClientFlag')]" + }, + { + "name": "AZURE_AI_AGENT_ENDPOINT", + "value": "[parameters('aiFoundryProjectEndpoint')]" + }, + { + "name": "AZURE_AI_AGENT_MODEL_DEPLOYMENT_NAME", + "value": "[parameters('AzureOpenAIModel')]" + }, + { + "name": "AZURE_AI_AGENT_API_VERSION", + "value": "[parameters('AzureOpenAIApiVersion')]" } ], "linuxFxVersion": "[variables('WebAppImageName')]" @@ -2894,11 +2545,26 @@ { "type": "Microsoft.Authorization/roleAssignments", "apiVersion": "2022-04-01", - "scope": "[format('Microsoft.MachineLearningServices/workspaces/{0}', parameters('aiProjectName'))]", - "name": "[guid(parameters('WebsiteName'), resourceId('Microsoft.MachineLearningServices/workspaces', parameters('aiProjectName')), resourceId('Microsoft.Authorization/roleDefinitions', '64702f94-c441-49e6-a78b-ef80e0188fee'))]", + "scope": "[format('Microsoft.CognitiveServices/accounts/{0}', parameters('aiFoundryName'))]", + "name": "[guid(resourceId('Microsoft.Web/sites', parameters('WebsiteName')), 
resourceId('Microsoft.CognitiveServices/accounts', parameters('aiFoundryName')), extensionResourceId(resourceId('Microsoft.CognitiveServices/accounts', parameters('aiFoundryName')), 'Microsoft.Authorization/roleDefinitions', '53ca6127-db72-4b80-b1b0-d745d6d5456d'))]", + "properties": { + "roleDefinitionId": "[extensionResourceId(resourceId('Microsoft.CognitiveServices/accounts', parameters('aiFoundryName')), 'Microsoft.Authorization/roleDefinitions', '53ca6127-db72-4b80-b1b0-d745d6d5456d')]", + "principalId": "[reference(resourceId('Microsoft.Web/sites', parameters('WebsiteName')), '2020-06-01', 'full').identity.principalId]", + "principalType": "ServicePrincipal" + }, + "dependsOn": [ + "[resourceId('Microsoft.Web/sites', parameters('WebsiteName'))]" + ] + }, + { + "type": "Microsoft.Authorization/roleAssignments", + "apiVersion": "2022-04-01", + "scope": "[format('Microsoft.CognitiveServices/accounts/{0}/projects/{1}', parameters('aiFoundryName'), parameters('aiFoundryProjectName'))]", + "name": "[guid(resourceId('Microsoft.Web/sites', parameters('WebsiteName')), resourceId('Microsoft.CognitiveServices/accounts/projects', parameters('aiFoundryName'), parameters('aiFoundryProjectName')), extensionResourceId(resourceId('Microsoft.CognitiveServices/accounts/projects', parameters('aiFoundryName'), parameters('aiFoundryProjectName')), 'Microsoft.Authorization/roleDefinitions', '53ca6127-db72-4b80-b1b0-d745d6d5456d'))]", "properties": { - "roleDefinitionId": "[resourceId('Microsoft.Authorization/roleDefinitions', '64702f94-c441-49e6-a78b-ef80e0188fee')]", - "principalId": "[reference(resourceId('Microsoft.Web/sites', parameters('WebsiteName')), '2020-06-01', 'full').identity.principalId]" + "roleDefinitionId": "[extensionResourceId(resourceId('Microsoft.CognitiveServices/accounts/projects', parameters('aiFoundryName'), parameters('aiFoundryProjectName')), 'Microsoft.Authorization/roleDefinitions', '53ca6127-db72-4b80-b1b0-d745d6d5456d')]", + "principalId": 
"[reference(resourceId('Microsoft.Web/sites', parameters('WebsiteName')), '2020-06-01', 'full').identity.principalId]", + "principalType": "ServicePrincipal" }, "dependsOn": [ "[resourceId('Microsoft.Web/sites', parameters('WebsiteName'))]" @@ -2930,8 +2596,8 @@ "metadata": { "_generator": { "name": "bicep", - "version": "0.35.1.17967", - "templateHash": "629726085607478347" + "version": "0.36.1.42791", + "templateHash": "399023243105742355" }, "description": "Creates a SQL role assignment under an Azure Cosmos DB account." }, @@ -3022,6 +2688,14 @@ "MANAGEDINDENTITY_WEBAPP_CLIENTID": { "type": "string", "value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_managed_identity'), '2022-09-01').outputs.managedIdentityWebAppOutput.value.clientId]" + }, + "AI_FOUNDARY_NAME": { + "type": "string", + "value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.aiFoundryName.value]" + }, + "AI_SEARCH_SERVICE_NAME": { + "type": "string", + "value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.aiSearchService.value]" } } } \ No newline at end of file diff --git a/infra/scripts/index_scripts/create_search_index.py b/infra/scripts/index_scripts/create_search_index.py index a9901b025..42316feff 100644 --- a/infra/scripts/index_scripts/create_search_index.py +++ b/infra/scripts/index_scripts/create_search_index.py @@ -1,148 +1,169 @@ -#Get Azure Key Vault Client -key_vault_name = 'kv_to-be-replaced' #'nc6262-kv-2fpeafsylfd2e' -managed_identity_client_id = 'mici_to-be-replaced' - -index_name = "transcripts_index" - 
-file_system_client_name = "data" -directory = 'clienttranscripts/meeting_transcripts' -csv_file_name = 'clienttranscripts/meeting_transcripts_metadata/transcripts_metadata.csv' - -from azure.keyvault.secrets import SecretClient -from azure.identity import DefaultAzureCredential - -def get_secrets_from_kv(kv_name, secret_name): - - # Set the name of the Azure Key Vault - key_vault_name = kv_name - credential = DefaultAzureCredential(managed_identity_client_id=managed_identity_client_id) - - # Create a secret client object using the credential and Key Vault name - secret_client = SecretClient(vault_url=f"https://{key_vault_name}.vault.azure.net/", credential=credential) - - # Retrieve the secret value - return(secret_client.get_secret(secret_name).value) - -search_endpoint = get_secrets_from_kv(key_vault_name,"AZURE-SEARCH-ENDPOINT") -search_key = get_secrets_from_kv(key_vault_name,"AZURE-SEARCH-KEY") - -# openai_api_type = get_secrets_from_kv(key_vault_name,"OPENAI-API-TYPE") -openai_api_key = get_secrets_from_kv(key_vault_name,"AZURE-OPENAI-KEY") -openai_api_base = get_secrets_from_kv(key_vault_name,"AZURE-OPENAI-ENDPOINT") -openai_api_version = get_secrets_from_kv(key_vault_name,"AZURE-OPENAI-PREVIEW-API-VERSION") - - -# Create the search index -from azure.core.credentials import AzureKeyCredential -search_credential = AzureKeyCredential(search_key) +import base64 +import json +import os +import re +import time +import pandas as pd +from azure.identity import DefaultAzureCredential, get_bearer_token_provider +from azure.keyvault.secrets import SecretClient +from azure.search.documents import SearchClient from azure.search.documents.indexes import SearchIndexClient from azure.search.documents.indexes.models import ( - SimpleField, - SearchFieldDataType, + HnswAlgorithmConfiguration, SearchableField, SearchField, - VectorSearch, - HnswAlgorithmConfiguration, - VectorSearchProfile, + SearchFieldDataType, + SearchIndex, SemanticConfiguration, - 
SemanticPrioritizedFields, SemanticField, + SemanticPrioritizedFields, SemanticSearch, - SearchIndex + SimpleField, + VectorSearch, + VectorSearchProfile, +) +from azure.storage.filedatalake import ( + DataLakeDirectoryClient, + DataLakeServiceClient, + FileSystemClient, +) +from openai import AzureOpenAI + +# Get Azure Key Vault Client +key_vault_name = "kv_to-be-replaced" #'nc6262-kv-2fpeafsylfd2e' +managed_identity_client_id = "mici_to-be-replaced" + +index_name = "transcripts_index" + +file_system_client_name = "data" +directory = "clienttranscripts/meeting_transcripts" +csv_file_name = ( + "clienttranscripts/meeting_transcripts_metadata/transcripts_metadata.csv" +) + +credential = DefaultAzureCredential( + managed_identity_client_id=managed_identity_client_id +) +token_provider = get_bearer_token_provider( + credential, + "https://cognitiveservices.azure.com/.default" +) + +# Create a secret client object using the credential and Key Vault name +secret_client = SecretClient( + vault_url=f"https://{key_vault_name}.vault.azure.net/", credential=credential ) +search_endpoint = secret_client.get_secret("AZURE-SEARCH-ENDPOINT").value +openai_api_base = secret_client.get_secret("AZURE-OPENAI-ENDPOINT").value +openai_api_version = secret_client.get_secret("AZURE-OPENAI-PREVIEW-API-VERSION").value +openai_embedding_model = secret_client.get_secret("AZURE-OPENAI-EMBEDDING-MODEL").value +account_name = secret_client.get_secret("ADLS-ACCOUNT-NAME").value + # Create a search index -index_client = SearchIndexClient(endpoint=search_endpoint, credential=search_credential) +index_client = SearchIndexClient(endpoint=search_endpoint, credential=credential) fields = [ - SimpleField(name="id", type=SearchFieldDataType.String, key=True, sortable=True, filterable=True, facetable=True), + SimpleField( + name="id", + type=SearchFieldDataType.String, + key=True, + sortable=True, + filterable=True, + facetable=True, + ), SearchableField(name="chunk_id", 
type=SearchFieldDataType.String), SearchableField(name="content", type=SearchFieldDataType.String), SearchableField(name="sourceurl", type=SearchFieldDataType.String), - SearchableField(name="client_id", type=SearchFieldDataType.String,filterable=True), - SearchField(name="contentVector", type=SearchFieldDataType.Collection(SearchFieldDataType.Single), - searchable=True, vector_search_dimensions=1536, vector_search_profile_name="myHnswProfile"), + SearchableField(name="client_id", type=SearchFieldDataType.String, filterable=True), + SearchField( + name="contentVector", + type=SearchFieldDataType.Collection(SearchFieldDataType.Single), + searchable=True, + vector_search_dimensions=1536, + vector_search_profile_name="myHnswProfile", + ), ] -# Configure the vector search configuration +# Configure the vector search configuration vector_search = VectorSearch( - algorithms=[ - HnswAlgorithmConfiguration( - name="myHnsw" - ) - ], + algorithms=[HnswAlgorithmConfiguration(name="myHnsw")], profiles=[ VectorSearchProfile( name="myHnswProfile", algorithm_configuration_name="myHnsw", ) - ] + ], ) semantic_config = SemanticConfiguration( name="my-semantic-config", prioritized_fields=SemanticPrioritizedFields( keywords_fields=[SemanticField(field_name="client_id")], - content_fields=[SemanticField(field_name="content")] - ) + content_fields=[SemanticField(field_name="content")], + ), ) # Create the semantic settings with the configuration semantic_search = SemanticSearch(configurations=[semantic_config]) # Create the search index with the semantic settings -index = SearchIndex(name=index_name, fields=fields, - vector_search=vector_search, semantic_search=semantic_search) +index = SearchIndex( + name=index_name, + fields=fields, + vector_search=vector_search, + semantic_search=semantic_search, +) result = index_client.create_or_update_index(index) -print(f' {result.name} created') +print(f" {result.name} created") -from openai import AzureOpenAI - # Function: Get Embeddings -def 
get_embeddings(text: str,openai_api_base,openai_api_version,openai_api_key): - model_id = "text-embedding-ada-002" +def get_embeddings(text: str, openai_api_base, openai_api_version, azure_token_provider): + model_id = ( + openai_embedding_model if openai_embedding_model else "text-embedding-ada-002" + ) client = AzureOpenAI( api_version=openai_api_version, azure_endpoint=openai_api_base, - api_key = openai_api_key + azure_ad_token_provider=azure_token_provider, ) - + embedding = client.embeddings.create(input=text, model=model_id).data[0].embedding return embedding -import re def clean_spaces_with_regex(text): # Use a regular expression to replace multiple spaces with a single space - cleaned_text = re.sub(r'\s+', ' ', text) + cleaned_text = re.sub(r"\s+", " ", text) # Use a regular expression to replace consecutive dots with a single dot - cleaned_text = re.sub(r'\.{2,}', '.', cleaned_text) + cleaned_text = re.sub(r"\.{2,}", ".", cleaned_text) return cleaned_text + def chunk_data(text): - tokens_per_chunk = 1024 #500 + tokens_per_chunk = 1024 # 500 text = clean_spaces_with_regex(text) SENTENCE_ENDINGS = [".", "!", "?"] - WORDS_BREAKS = ['\n', '\t', '}', '{', ']', '[', ')', '(', ' ', ':', ';', ','] + WORDS_BREAKS = ["\n", "\t", "}", "{", "]", "[", ")", "(", " ", ":", ";", ","] - sentences = text.split('. ') # Split text into sentences + sentences = text.split(". ") # Split text into sentences chunks = [] - current_chunk = '' + current_chunk = "" current_chunk_token_count = 0 - + # Iterate through each sentence for sentence in sentences: # Split sentence into tokens tokens = sentence.split() - + # Check if adding the current sentence exceeds tokens_per_chunk if current_chunk_token_count + len(tokens) <= tokens_per_chunk: # Add the sentence to the current chunk if current_chunk: - current_chunk += '. ' + sentence + current_chunk += ". 
" + sentence else: current_chunk += sentence current_chunk_token_count += len(tokens) @@ -151,21 +172,15 @@ def chunk_data(text): chunks.append(current_chunk) current_chunk = sentence current_chunk_token_count = len(tokens) - + # Add the last chunk if current_chunk: chunks.append(current_chunk) - + return chunks -#add documents to the index -import json -import base64 -import time -import pandas as pd -from azure.search.documents import SearchClient -import os +# add documents to the index # foldername = 'clienttranscripts' # path_name = f'Data/{foldername}/meeting_transcripts' @@ -173,40 +188,32 @@ def chunk_data(text): # paths = os.listdir(path_name) -from azure.storage.filedatalake import ( - DataLakeServiceClient, - DataLakeDirectoryClient, - FileSystemClient -) - -account_name = get_secrets_from_kv(key_vault_name, "ADLS-ACCOUNT-NAME") -credential = DefaultAzureCredential(managed_identity_client_id=managed_identity_client_id) account_url = f"https://{account_name}.dfs.core.windows.net" -service_client = DataLakeServiceClient(account_url, credential=credential,api_version='2023-01-03') +service_client = DataLakeServiceClient( + account_url, credential=credential, api_version="2023-01-03" +) -file_system_client = service_client.get_file_system_client(file_system_client_name) +file_system_client = service_client.get_file_system_client(file_system_client_name) directory_name = directory paths = file_system_client.get_paths(path=directory_name) print(paths) -search_credential = AzureKeyCredential(search_key) -search_client = SearchClient(search_endpoint, index_name, search_credential) -index_client = SearchIndexClient(endpoint=search_endpoint, credential=search_credential) +search_client = SearchClient(search_endpoint, index_name, credential) +index_client = SearchIndexClient(endpoint=search_endpoint, credential=credential) # metadata_filepath = f'Data/{foldername}/meeting_transcripts_metadata/transcripts_metadata.csv' # # df_metadata = 
spark.read.format("csv").option("header","true").option("multiLine", "true").option("quote", "\"").option("escape", "\"").load(metadata_filepath).toPandas() # df_metadata = pd.read_csv(metadata_filepath) # # display(df_metadata) -import pandas as pd # Read the CSV file into a Pandas DataFrame file_path = csv_file_name print(file_path) file_client = file_system_client.get_file_client(file_path) csv_file = file_client.download_file() -df_metadata = pd.read_csv(csv_file, encoding='utf-8') +df_metadata = pd.read_csv(csv_file, encoding="utf-8") docs = [] counter = 0 @@ -217,48 +224,58 @@ def chunk_data(text): file_client = file_system_client.get_file_client(path.name) data_file = file_client.download_file() data = json.load(data_file) - text = data['Content'] + text = data["Content"] - filename = path.name.split('/')[-1] - document_id = filename.replace('.json','').replace('convo_','') + filename = path.name.split("/")[-1] + document_id = filename.replace(".json", "").replace("convo_", "") # print(document_id) - df_file_metadata = df_metadata[df_metadata['ConversationId']==str(document_id)].iloc[0] - + df_file_metadata = df_metadata[ + df_metadata["ConversationId"] == str(document_id) + ].iloc[0] + chunks = chunk_data(text) chunk_num = 0 for chunk in chunks: chunk_num += 1 d = { - "chunk_id" : document_id + '_' + str(chunk_num).zfill(2), - "client_id": str(df_file_metadata['ClientId']), - "content": 'ClientId is ' + str(df_file_metadata['ClientId']) + ' . ' + chunk, - } + "chunk_id": document_id + "_" + str(chunk_num).zfill(2), + "client_id": str(df_file_metadata["ClientId"]), + "content": "ClientId is " + + str(df_file_metadata["ClientId"]) + + " . 
" + + chunk, + } counter += 1 try: - v_contentVector = get_embeddings(d["content"],openai_api_base,openai_api_version,openai_api_key) + v_contentVector = get_embeddings( + d["content"], openai_api_base, openai_api_version, token_provider + ) except: time.sleep(30) - v_contentVector = get_embeddings(d["content"],openai_api_base,openai_api_version,openai_api_key) - + v_contentVector = get_embeddings( + d["content"], openai_api_base, openai_api_version, token_provider + ) docs.append( { - "id": base64.urlsafe_b64encode(bytes(d["chunk_id"], encoding='utf-8')).decode('utf-8'), - "chunk_id": d["chunk_id"], - "client_id": d["client_id"], - "content": d["content"], - "sourceurl": path.name.split('/')[-1], - "contentVector": v_contentVector + "id": base64.urlsafe_b64encode( + bytes(d["chunk_id"], encoding="utf-8") + ).decode("utf-8"), + "chunk_id": d["chunk_id"], + "client_id": d["client_id"], + "content": d["content"], + "sourceurl": path.name.split("/")[-1], + "contentVector": v_contentVector, } ) - + if counter % 10 == 0: result = search_client.upload_documents(documents=docs) docs = [] - print(f' {str(counter)} uploaded') - -#upload the last batch + print(f" {str(counter)} uploaded") + +# upload the last batch if docs != []: - search_client.upload_documents(documents=docs) \ No newline at end of file + search_client.upload_documents(documents=docs) diff --git a/infra/scripts/index_scripts/create_sql_tables.py b/infra/scripts/index_scripts/create_sql_tables.py index 322023f71..be04dbc7a 100644 --- a/infra/scripts/index_scripts/create_sql_tables.py +++ b/infra/scripts/index_scripts/create_sql_tables.py @@ -1,38 +1,42 @@ -key_vault_name = 'kv_to-be-replaced' -managed_identity_client_id = 'mici_to-be-replaced' +key_vault_name = "kv_to-be-replaced" +managed_identity_client_id = "mici_to-be-replaced" -import pandas as pd import os +import struct from datetime import datetime -from azure.keyvault.secrets import SecretClient -from azure.identity import DefaultAzureCredential 
-from azure.identity import DefaultAzureCredential
+import pandas as pd
 import pyodbc
-import struct
+from azure.identity import DefaultAzureCredential
+from azure.keyvault.secrets import SecretClient
+
 def get_secrets_from_kv(kv_name, secret_name):
-    key_vault_name = kv_name # Set the name of the Azure Key Vault
-    credential = DefaultAzureCredential(managed_identity_client_id=managed_identity_client_id)
-    secret_client = SecretClient(vault_url=f"https://{key_vault_name}.vault.azure.net/", credential=credential) # Create a secret client object using the credential and Key Vault name
-    return(secret_client.get_secret(secret_name).value) # Retrieve the secret value
-
-server = get_secrets_from_kv(key_vault_name,"SQLDB-SERVER")
-database = get_secrets_from_kv(key_vault_name,"SQLDB-DATABASE")
-username = get_secrets_from_kv(key_vault_name,"SQLDB-USERNAME")
-password = get_secrets_from_kv(key_vault_name,"SQLDB-PASSWORD")
+    key_vault_name = kv_name  # Set the name of the Azure Key Vault
+    credential = DefaultAzureCredential(
+        managed_identity_client_id=managed_identity_client_id
+    )
+    secret_client = SecretClient(
+        vault_url=f"https://{key_vault_name}.vault.azure.net/", credential=credential
+    )  # Create a secret client object using the credential and Key Vault name
+    return secret_client.get_secret(secret_name).value  # Retrieve the secret value
+
+
+server = get_secrets_from_kv(key_vault_name, "SQLDB-SERVER")
+database = get_secrets_from_kv(key_vault_name, "SQLDB-DATABASE")
 driver = "{ODBC Driver 18 for SQL Server}"
-#conn = pymssql.connect(server, username, password, database)
-credential = DefaultAzureCredential(managed_identity_client_id=managed_identity_client_id)
+credential = DefaultAzureCredential(
+    managed_identity_client_id=managed_identity_client_id
+)
 token_bytes = credential.get_token(
-    "https://database.windows.net/.default"
-    ).token.encode("utf-16-LE")
+    "https://database.windows.net/.default"
+).token.encode("utf-16-LE")
 token_struct = struct.pack(f"<I{len(token_bytes)}s", len(token_bytes), token_bytes)
" +if [ -z "$resourceGroupName" ] || [ -z "$cosmosDbAccountName" ] || [ -z "$storageAccount" ] || [ -z "$fileSystem" ] || [ -z "$keyvaultName" ] || [ -z "$sqlServerName" ] || [ -z "$SqlDatabaseName" ] || [ -z "$webAppManagedIdentityClientId" ] || [ -z "$webAppManagedIdentityDisplayName" ] || [ -z "$aiFoundryName" ] || [ -z "$aiSearchName" ]; then + echo "Usage: $0 " exit 1 fi @@ -75,7 +84,7 @@ echo "copy_kb_files.sh completed successfully." # Call run_create_index_scripts.sh echo "Running run_create_index_scripts.sh" -bash infra/scripts/run_create_index_scripts.sh "$keyvaultName" "" "" "$resourceGroupName" "$sqlServerName" +bash infra/scripts/run_create_index_scripts.sh "$keyvaultName" "" "" "$resourceGroupName" "$sqlServerName" "$aiFoundryName" "$aiSearchName" if [ $? -ne 0 ]; then echo "Error: run_create_index_scripts.sh failed." exit 1 diff --git a/infra/scripts/run_create_index_scripts.sh b/infra/scripts/run_create_index_scripts.sh index 0f7da6294..dbe33af00 100644 --- a/infra/scripts/run_create_index_scripts.sh +++ b/infra/scripts/run_create_index_scripts.sh @@ -7,6 +7,8 @@ baseUrl="$2" managedIdentityClientId="$3" resourceGroupName="$4" sqlServerName="$5" +aiFoundryName="$6" +aiSearchName="$7" echo "Script Started" @@ -41,6 +43,8 @@ else # echo "Getting signed in user id" # signed_user_id=$(az ad signed-in-user show --query id -o tsv) + ### Assign Key Vault Administrator role to the signed in user ### + echo "Getting key vault resource id" key_vault_resource_id=$(az keyvault show --name $keyvaultName --query id --output tsv) @@ -60,6 +64,50 @@ else echo "User already has the Key Vault Administrator role." 
fi + ### Assign Azure AI User role to the signed in user ### + + echo "Getting Azure AI resource id" + aif_resource_id=$(az cognitiveservices account show --name $aiFoundryName --resource-group $resourceGroupName --query id --output tsv) + + # Check if the user has the Azure AI User role + echo "Checking if user has the Azure AI User role" + role_assignment=$(MSYS_NO_PATHCONV=1 az role assignment list --role 53ca6127-db72-4b80-b1b0-d745d6d5456d --scope $aif_resource_id --assignee $signed_user_id --query "[].roleDefinitionId" -o tsv) + if [ -z "$role_assignment" ]; then + echo "User does not have the Azure AI User role. Assigning the role." + MSYS_NO_PATHCONV=1 az role assignment create --assignee $signed_user_id --role 53ca6127-db72-4b80-b1b0-d745d6d5456d --scope $aif_resource_id --output none + if [ $? -eq 0 ]; then + echo "Azure AI User role assigned successfully." + else + echo "Failed to assign Azure AI User role." + exit 1 + fi + else + echo "User already has the Azure AI User role." + fi + + ### Assign Search Index Data Contributor role to the signed in user ### + + echo "Getting Azure Search resource id" + search_resource_id=$(az search service show --name $aiSearchName --resource-group $resourceGroupName --query id --output tsv) + + # Check if the user has the Search Index Data Contributor role + echo "Checking if user has the Search Index Data Contributor role" + role_assignment=$(MSYS_NO_PATHCONV=1 az role assignment list --assignee $signed_user_id --role 8ebe5a00-799e-43f5-93ac-243d3dce84a7 --scope $search_resource_id --query "[].roleDefinitionId" -o tsv) + if [ -z "$role_assignment" ]; then + echo "User does not have the Search Index Data Contributor role. Assigning the role." + MSYS_NO_PATHCONV=1 az role assignment create --assignee $signed_user_id --role 8ebe5a00-799e-43f5-93ac-243d3dce84a7 --scope $search_resource_id --output none + if [ $? -eq 0 ]; then + echo "Search Index Data Contributor role assigned successfully." 
+ else + echo "Failed to assign Search Index Data Contributor role." + exit 1 + fi + else + echo "User already has the Search Index Data Contributor role." + fi + + ### Assign signed in user as SQL Server Admin ### + echo "Getting Azure SQL Server resource id" sql_server_resource_id=$(az sql server show --name $sqlServerName --resource-group $resourceGroupName --query id --output tsv) diff --git a/src/App/.env.sample b/src/App/.env.sample index 7dc66e86e..0f69d5442 100644 --- a/src/App/.env.sample +++ b/src/App/.env.sample @@ -1,7 +1,6 @@ # Azure OpenAI settings AZURE_OPENAI_RESOURCE= AZURE_OPENAI_MODEL="gpt-4o-mini" -AZURE_OPENAI_KEY= AZURE_OPENAI_TEMPERATURE="0" AZURE_OPENAI_TOP_P="1" AZURE_OPENAI_MAX_TOKENS="1000" @@ -12,7 +11,6 @@ AZURE_OPENAI_STREAM="True" AZURE_OPENAI_ENDPOINT= AZURE_OPENAI_EMBEDDING_NAME="text-embedding-ada-002" AZURE_OPENAI_EMBEDDING_ENDPOINT= -AZURE_OPENAI_EMBEDDING_KEY= # User Interface UI_TITLE= @@ -31,7 +29,6 @@ AZURE_COSMOSDB_ENABLE_FEEDBACK="True" # Azure Search settings AZURE_SEARCH_SERVICE= AZURE_SEARCH_INDEX="transcripts_index" -AZURE_SEARCH_KEY= AZURE_SEARCH_SEMANTIC_SEARCH_CONFIG="my-semantic-config" AZURE_SEARCH_TOP_K="5" AZURE_SEARCH_ENABLE_IN_DOMAIN="False" @@ -55,9 +52,13 @@ SQLDB_PASSWORD= SQLDB_USER_MID= # AI Project -AZURE_AI_PROJECT_CONN_STRING= USE_AI_PROJECT_CLIENT="false" +# Azure AI Agent settings +AZURE_AI_AGENT_API_VERSION= +AZURE_AI_AGENT_ENDPOINT= +AZURE_AI_AGENT_MODEL_DEPLOYMENT_NAME= + # Prompts AZURE_CALL_TRANSCRIPT_SYSTEM_PROMPT="You are an assistant who supports wealth advisors in preparing for client meetings. \n You have access to the client’s past meeting call transcripts. \n When answering questions, especially summary requests, provide a detailed and structured response that includes key topics, concerns, decisions, and trends. \n If no data is available, state 'No relevant data found for previous meetings." AZURE_OPENAI_STREAM_TEXT_SYSTEM_PROMPT="You are a helpful assistant to a Wealth Advisor. 
\n The currently selected client's name is '{SelectedClientName}', and any case-insensitive or partial mention should be understood as referring to this client.\n If no name is provided, assume the question is about '{SelectedClientName}'.\n If the query references a different client or includes comparative terms like 'compare' or 'other client', please respond with: 'Please only ask questions about the selected client or select another client.'\n Otherwise, provide thorough answers using only data from SQL or call transcripts. \n If no data is found, please respond with 'No data found for that client.' Remove any client identifiers from the final response." diff --git a/src/App/app.py b/src/App/app.py index b1559eb25..6127e0268 100644 --- a/src/App/app.py +++ b/src/App/app.py @@ -7,68 +7,53 @@ from types import SimpleNamespace from azure.identity import DefaultAzureCredential, get_bearer_token_provider -from dotenv import load_dotenv +from azure.monitor.opentelemetry import configure_azure_monitor # from quart.sessions import SecureCookieSessionInterface from openai import AsyncAzureOpenAI +from opentelemetry import trace +from opentelemetry.trace import Status, StatusCode from quart import ( Blueprint, Quart, + Response, jsonify, render_template, request, send_from_directory, - Response ) +from backend.agents.agent_factory import AgentFactory from backend.auth.auth_utils import get_authenticated_user_details, get_tenantid -from backend.history.cosmosdbservice import CosmosConversationClient -from backend.utils import ( +from backend.common.config import config +from backend.common.event_utils import track_event_if_configured +from backend.common.utils import ( format_stream_response, generateFilterString, - parse_multi_columns + parse_multi_columns, ) -from db import get_connection -from db import dict_cursor - -from backend.chat_logic_handler import stream_response_from_wealth_assistant -from backend.event_utils import track_event_if_configured -from 
azure.monitor.opentelemetry import configure_azure_monitor -from opentelemetry import trace -from opentelemetry.trace import Status, StatusCode +from backend.services import sqldb_service +from backend.services.chat_service import stream_response_from_wealth_assistant +from backend.services.cosmosdb_service import CosmosConversationClient bp = Blueprint("routes", __name__, static_folder="static", template_folder="static") -# Current minimum Azure OpenAI version supported -MINIMUM_SUPPORTED_AZURE_OPENAI_PREVIEW_API_VERSION = "2024-02-15-preview" - -load_dotenv() - # app = Flask(__name__) # CORS(app) - -# UI configuration (optional) -UI_TITLE = os.environ.get("UI_TITLE") or "Woodgrove Bank" -UI_LOGO = os.environ.get("UI_LOGO") -UI_CHAT_LOGO = os.environ.get("UI_CHAT_LOGO") -UI_CHAT_TITLE = os.environ.get("UI_CHAT_TITLE") or "Start chatting" -UI_CHAT_DESCRIPTION = ( - os.environ.get("UI_CHAT_DESCRIPTION") - or "This chatbot is configured to answer your questions" -) -UI_FAVICON = os.environ.get("UI_FAVICON") or "/favicon.ico" -UI_SHOW_SHARE_BUTTON = os.environ.get("UI_SHOW_SHARE_BUTTON", "true").lower() == "true" - # Check if the Application Insights Instrumentation Key is set in the environment variables -instrumentation_key = os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING") +instrumentation_key = config.INSTRUMENTATION_KEY if instrumentation_key: # Configure Application Insights if the Instrumentation Key is found configure_azure_monitor(connection_string=instrumentation_key) - logging.info("Application Insights configured with the provided Instrumentation Key") + logging.info( + "Application Insights configured with the provided Instrumentation Key" + ) else: # Log a warning if the Instrumentation Key is not found - logging.warning("No Application Insights Instrumentation Key found. Skipping configuration") + logging.warning( + "No Application Insights Instrumentation Key found. 
Skipping configuration" + ) # Configure logging logging.basicConfig(level=logging.INFO) @@ -89,6 +74,19 @@ def create_app(): app = Quart(__name__) app.register_blueprint(bp) app.config["TEMPLATES_AUTO_RELOAD"] = True + + # Setup agent initialization and cleanup + @app.before_serving + async def startup(): + app.agent = await AgentFactory.get_instance() + logging.info("Agent initialized during application startup") + + @app.after_serving + async def shutdown(): + await AgentFactory.delete_instance() + app.agent = None + logging.info("Agent cleaned up during application shutdown") + # app.secret_key = secrets.token_hex(16) # app.session_interface = SecureCookieSessionInterface() return app @@ -96,7 +94,9 @@ def create_app(): @bp.route("/") async def index(): - return await render_template("index.html", title=UI_TITLE, favicon=UI_FAVICON) + return await render_template( + "index.html", title=config.UI_TITLE, favicon=config.UI_FAVICON + ) @bp.route("/favicon.ico") @@ -116,89 +116,19 @@ async def assets(path): USER_AGENT = "GitHubSampleWebApp/AsyncAzureOpenAI/1.0.0" -# On Your Data Settings -DATASOURCE_TYPE = os.environ.get("DATASOURCE_TYPE", "AzureCognitiveSearch") - -# ACS Integration Settings -AZURE_SEARCH_SERVICE = os.environ.get("AZURE_SEARCH_SERVICE") -AZURE_SEARCH_INDEX = os.environ.get("AZURE_SEARCH_INDEX") -AZURE_SEARCH_KEY = os.environ.get("AZURE_SEARCH_KEY", None) -AZURE_SEARCH_USE_SEMANTIC_SEARCH = os.environ.get( - "AZURE_SEARCH_USE_SEMANTIC_SEARCH", "false" -) -AZURE_SEARCH_SEMANTIC_SEARCH_CONFIG = os.environ.get( - "AZURE_SEARCH_SEMANTIC_SEARCH_CONFIG", "default" -) -AZURE_SEARCH_TOP_K = os.environ.get("AZURE_SEARCH_TOP_K", 5) -AZURE_SEARCH_ENABLE_IN_DOMAIN = os.environ.get( - "AZURE_SEARCH_ENABLE_IN_DOMAIN", "true" -) -AZURE_SEARCH_CONTENT_COLUMNS = os.environ.get("AZURE_SEARCH_CONTENT_COLUMNS") -AZURE_SEARCH_FILENAME_COLUMN = os.environ.get("AZURE_SEARCH_FILENAME_COLUMN") -AZURE_SEARCH_TITLE_COLUMN = os.environ.get("AZURE_SEARCH_TITLE_COLUMN") 
-AZURE_SEARCH_URL_COLUMN = os.environ.get("AZURE_SEARCH_URL_COLUMN") -AZURE_SEARCH_VECTOR_COLUMNS = os.environ.get("AZURE_SEARCH_VECTOR_COLUMNS") -AZURE_SEARCH_QUERY_TYPE = os.environ.get("AZURE_SEARCH_QUERY_TYPE") -AZURE_SEARCH_PERMITTED_GROUPS_COLUMN = os.environ.get( - "AZURE_SEARCH_PERMITTED_GROUPS_COLUMN" -) -AZURE_SEARCH_STRICTNESS = os.environ.get("AZURE_SEARCH_STRICTNESS", 3) - -# AOAI Integration Settings -AZURE_OPENAI_RESOURCE = os.environ.get("AZURE_OPENAI_RESOURCE") -AZURE_OPENAI_MODEL = os.environ.get("AZURE_OPENAI_MODEL") -AZURE_OPENAI_ENDPOINT = os.environ.get("AZURE_OPENAI_ENDPOINT") -AZURE_OPENAI_KEY = os.environ.get("AZURE_OPENAI_KEY") -AZURE_OPENAI_TEMPERATURE = os.environ.get("AZURE_OPENAI_TEMPERATURE", 0) -AZURE_OPENAI_TOP_P = os.environ.get("AZURE_OPENAI_TOP_P", 1.0) -AZURE_OPENAI_MAX_TOKENS = os.environ.get("AZURE_OPENAI_MAX_TOKENS", 1000) -AZURE_OPENAI_STOP_SEQUENCE = os.environ.get("AZURE_OPENAI_STOP_SEQUENCE") -AZURE_OPENAI_SYSTEM_MESSAGE = os.environ.get( - "AZURE_OPENAI_SYSTEM_MESSAGE", - "You are an AI assistant that helps people find information.", -) -AZURE_OPENAI_PREVIEW_API_VERSION = os.environ.get( - "AZURE_OPENAI_PREVIEW_API_VERSION", - MINIMUM_SUPPORTED_AZURE_OPENAI_PREVIEW_API_VERSION, -) -AZURE_OPENAI_STREAM = os.environ.get("AZURE_OPENAI_STREAM", "true") -AZURE_OPENAI_EMBEDDING_ENDPOINT = os.environ.get("AZURE_OPENAI_EMBEDDING_ENDPOINT") -AZURE_OPENAI_EMBEDDING_KEY = os.environ.get("AZURE_OPENAI_EMBEDDING_KEY") -AZURE_OPENAI_EMBEDDING_NAME = os.environ.get("AZURE_OPENAI_EMBEDDING_NAME", "") - -SHOULD_STREAM = True if AZURE_OPENAI_STREAM.lower() == "true" else False - -# Chat History CosmosDB Integration Settings -AZURE_COSMOSDB_DATABASE = os.environ.get("AZURE_COSMOSDB_DATABASE") -AZURE_COSMOSDB_ACCOUNT = os.environ.get("AZURE_COSMOSDB_ACCOUNT") -AZURE_COSMOSDB_CONVERSATIONS_CONTAINER = os.environ.get( - "AZURE_COSMOSDB_CONVERSATIONS_CONTAINER" -) -AZURE_COSMOSDB_ACCOUNT_KEY = os.environ.get("AZURE_COSMOSDB_ACCOUNT_KEY") 
-AZURE_COSMOSDB_ENABLE_FEEDBACK = ( - os.environ.get("AZURE_COSMOSDB_ENABLE_FEEDBACK", "false").lower() == "true" -) -USE_INTERNAL_STREAM = os.environ.get("USE_INTERNAL_STREAM", "false").lower() == "true" -# Frontend Settings via Environment Variables -AUTH_ENABLED = os.environ.get("AUTH_ENABLED", "true").lower() == "true" -CHAT_HISTORY_ENABLED = ( - AZURE_COSMOSDB_ACCOUNT - and AZURE_COSMOSDB_DATABASE - and AZURE_COSMOSDB_CONVERSATIONS_CONTAINER -) -SANITIZE_ANSWER = os.environ.get("SANITIZE_ANSWER", "false").lower() == "true" frontend_settings = { - "auth_enabled": AUTH_ENABLED, - "feedback_enabled": AZURE_COSMOSDB_ENABLE_FEEDBACK and CHAT_HISTORY_ENABLED, + "auth_enabled": config.AUTH_ENABLED, + "feedback_enabled": config.AZURE_COSMOSDB_ENABLE_FEEDBACK + and config.CHAT_HISTORY_ENABLED, "ui": { - "title": UI_TITLE, - "logo": UI_LOGO, - "chat_logo": UI_CHAT_LOGO or UI_LOGO, - "chat_title": UI_CHAT_TITLE, - "chat_description": UI_CHAT_DESCRIPTION, - "show_share_button": UI_SHOW_SHARE_BUTTON, + "title": config.UI_TITLE, + "logo": config.UI_LOGO, + "chat_logo": config.UI_CHAT_LOGO or config.UI_LOGO, + "chat_title": config.UI_CHAT_TITLE, + "chat_description": config.UI_CHAT_DESCRIPTION, + "show_share_button": config.UI_SHOW_SHARE_BUTTON, }, - "sanitize_answer": SANITIZE_ANSWER, + "sanitize_answer": config.SANITIZE_ANSWER, } # Enable Microsoft Defender for Cloud Integration MS_DEFENDER_ENABLED = os.environ.get("MS_DEFENDER_ENABLED", "false").lower() == "true" @@ -208,7 +138,7 @@ async def assets(path): def should_use_data(): global DATASOURCE_TYPE - if AZURE_SEARCH_SERVICE and AZURE_SEARCH_INDEX: + if config.AZURE_SEARCH_SERVICE and config.AZURE_SEARCH_INDEX: DATASOURCE_TYPE = "AzureCognitiveSearch" logging.debug("Using Azure Cognitive Search") return True @@ -225,27 +155,27 @@ def init_openai_client(use_data=SHOULD_USE_DATA): try: # API version check if ( - AZURE_OPENAI_PREVIEW_API_VERSION - < MINIMUM_SUPPORTED_AZURE_OPENAI_PREVIEW_API_VERSION + 
config.AZURE_OPENAI_PREVIEW_API_VERSION + < config.MINIMUM_SUPPORTED_AZURE_OPENAI_PREVIEW_API_VERSION ): raise Exception( - f"The minimum supported Azure OpenAI preview API version is '{MINIMUM_SUPPORTED_AZURE_OPENAI_PREVIEW_API_VERSION}'" + f"The minimum supported Azure OpenAI preview API version is '{config.MINIMUM_SUPPORTED_AZURE_OPENAI_PREVIEW_API_VERSION}'" ) # Endpoint - if not AZURE_OPENAI_ENDPOINT and not AZURE_OPENAI_RESOURCE: + if not config.AZURE_OPENAI_ENDPOINT and not config.AZURE_OPENAI_RESOURCE: raise Exception( "AZURE_OPENAI_ENDPOINT or AZURE_OPENAI_RESOURCE is required" ) endpoint = ( - AZURE_OPENAI_ENDPOINT - if AZURE_OPENAI_ENDPOINT - else f"https://{AZURE_OPENAI_RESOURCE}.openai.azure.com/" + config.AZURE_OPENAI_ENDPOINT + if config.AZURE_OPENAI_ENDPOINT + else f"https://{config.AZURE_OPENAI_RESOURCE}.openai.azure.com/" ) # Authentication - aoai_api_key = AZURE_OPENAI_KEY + aoai_api_key = config.AZURE_OPENAI_KEY ad_token_provider = None if not aoai_api_key: logging.debug("No AZURE_OPENAI_KEY found, using Azure AD auth") @@ -254,7 +184,7 @@ def init_openai_client(use_data=SHOULD_USE_DATA): ) # Deployment - deployment = AZURE_OPENAI_MODEL + deployment = config.AZURE_OPENAI_MODEL if not deployment: raise Exception("AZURE_OPENAI_MODEL is required") @@ -262,18 +192,21 @@ def init_openai_client(use_data=SHOULD_USE_DATA): default_headers = {"x-ms-useragent": USER_AGENT} azure_openai_client = AsyncAzureOpenAI( - api_version=AZURE_OPENAI_PREVIEW_API_VERSION, + api_version=config.AZURE_OPENAI_PREVIEW_API_VERSION, api_key=aoai_api_key, azure_ad_token_provider=ad_token_provider, default_headers=default_headers, azure_endpoint=endpoint, ) - track_event_if_configured("AzureOpenAIClientInitialized", { - "status": "success", - "endpoint": endpoint, - "use_api_key": bool(aoai_api_key), - }) + track_event_if_configured( + "AzureOpenAIClientInitialized", + { + "status": "success", + "endpoint": endpoint, + "use_api_key": bool(aoai_api_key), + }, + ) return 
azure_openai_client except Exception as e: @@ -288,32 +221,35 @@ def init_openai_client(use_data=SHOULD_USE_DATA): def init_cosmosdb_client(): cosmos_conversation_client = None - if CHAT_HISTORY_ENABLED: + if config.CHAT_HISTORY_ENABLED: try: cosmos_endpoint = ( - f"https://{AZURE_COSMOSDB_ACCOUNT}.documents.azure.com:443/" + f"https://{config.AZURE_COSMOSDB_ACCOUNT}.documents.azure.com:443/" ) - if not AZURE_COSMOSDB_ACCOUNT_KEY: + if not config.AZURE_COSMOSDB_ACCOUNT_KEY: credential = DefaultAzureCredential() else: - credential = AZURE_COSMOSDB_ACCOUNT_KEY + credential = config.AZURE_COSMOSDB_ACCOUNT_KEY cosmos_conversation_client = CosmosConversationClient( cosmosdb_endpoint=cosmos_endpoint, credential=credential, - database_name=AZURE_COSMOSDB_DATABASE, - container_name=AZURE_COSMOSDB_CONVERSATIONS_CONTAINER, - enable_message_feedback=AZURE_COSMOSDB_ENABLE_FEEDBACK, + database_name=config.AZURE_COSMOSDB_DATABASE, + container_name=config.AZURE_COSMOSDB_CONVERSATIONS_CONTAINER, + enable_message_feedback=config.AZURE_COSMOSDB_ENABLE_FEEDBACK, ) - track_event_if_configured("CosmosDBClientInitialized", { - "status": "success", - "endpoint": cosmos_endpoint, - "database": AZURE_COSMOSDB_DATABASE, - "container": AZURE_COSMOSDB_CONVERSATIONS_CONTAINER, - "feedback_enabled": AZURE_COSMOSDB_ENABLE_FEEDBACK, - }) + track_event_if_configured( + "CosmosDBClientInitialized", + { + "status": "success", + "endpoint": cosmos_endpoint, + "database": config.AZURE_COSMOSDB_DATABASE, + "container": config.AZURE_COSMOSDB_CONVERSATIONS_CONTAINER, + "feedback_enabled": config.AZURE_COSMOSDB_ENABLE_FEEDBACK, + }, + ) except Exception as e: logging.exception("Exception in CosmosDB initialization", e) span = trace.get_current_span() @@ -332,13 +268,15 @@ def get_configured_data_source(): data_source = {} query_type = "simple" if DATASOURCE_TYPE == "AzureCognitiveSearch": - track_event_if_configured("datasource_selected", {"type": "AzureCognitiveSearch"}) + track_event_if_configured( + 
"datasource_selected", {"type": "AzureCognitiveSearch"} + ) # Set query type - if AZURE_SEARCH_QUERY_TYPE: - query_type = AZURE_SEARCH_QUERY_TYPE + if config.AZURE_SEARCH_QUERY_TYPE: + query_type = config.AZURE_SEARCH_QUERY_TYPE elif ( - AZURE_SEARCH_USE_SEMANTIC_SEARCH.lower() == "true" - and AZURE_SEARCH_SEMANTIC_SEARCH_CONFIG + config.AZURE_SEARCH_USE_SEMANTIC_SEARCH.lower() == "true" + and config.AZURE_SEARCH_SEMANTIC_SEARCH_CONFIG ): query_type = "semantic" track_event_if_configured("query_type_determined", {"query_type": query_type}) @@ -346,7 +284,7 @@ def get_configured_data_source(): # Set filter filter = None userToken = None - if AZURE_SEARCH_PERMITTED_GROUPS_COLUMN: + if config.AZURE_SEARCH_PERMITTED_GROUPS_COLUMN: userToken = request.headers.get("X-MS-TOKEN-AAD-ACCESS-TOKEN", "") logging.debug(f"USER TOKEN is {'present' if userToken else 'not present'}") if not userToken: @@ -361,59 +299,63 @@ def get_configured_data_source(): # Set authentication authentication = {} - if AZURE_SEARCH_KEY: - authentication = {"type": "api_key", "api_key": AZURE_SEARCH_KEY} + if config.AZURE_SEARCH_KEY: + authentication = {"type": "api_key", "api_key": config.AZURE_SEARCH_KEY} else: # If key is not provided, assume AOAI resource identity has been granted access to the search service authentication = {"type": "system_assigned_managed_identity"} - track_event_if_configured("authentication_set", {"auth_type": authentication["type"]}) + track_event_if_configured( + "authentication_set", {"auth_type": authentication["type"]} + ) data_source = { "type": "azure_search", "parameters": { - "endpoint": f"https://{AZURE_SEARCH_SERVICE}.search.windows.net", + "endpoint": f"https://{config.AZURE_SEARCH_SERVICE}.search.windows.net", "authentication": authentication, - "index_name": AZURE_SEARCH_INDEX, + "index_name": config.AZURE_SEARCH_INDEX, "fields_mapping": { "content_fields": ( - parse_multi_columns(AZURE_SEARCH_CONTENT_COLUMNS) - if AZURE_SEARCH_CONTENT_COLUMNS + 
parse_multi_columns(config.AZURE_SEARCH_CONTENT_COLUMNS) + if config.AZURE_SEARCH_CONTENT_COLUMNS else [] ), "title_field": ( - AZURE_SEARCH_TITLE_COLUMN if AZURE_SEARCH_TITLE_COLUMN else None + config.AZURE_SEARCH_TITLE_COLUMN + if config.AZURE_SEARCH_TITLE_COLUMN + else None ), "url_field": ( - AZURE_SEARCH_URL_COLUMN if AZURE_SEARCH_URL_COLUMN else None + config.AZURE_SEARCH_URL_COLUMN + if config.AZURE_SEARCH_URL_COLUMN + else None ), "filepath_field": ( - AZURE_SEARCH_FILENAME_COLUMN - if AZURE_SEARCH_FILENAME_COLUMN + config.AZURE_SEARCH_FILENAME_COLUMN + if config.AZURE_SEARCH_FILENAME_COLUMN else None ), "vector_fields": ( - parse_multi_columns(AZURE_SEARCH_VECTOR_COLUMNS) - if AZURE_SEARCH_VECTOR_COLUMNS + parse_multi_columns(config.AZURE_SEARCH_VECTOR_COLUMNS) + if config.AZURE_SEARCH_VECTOR_COLUMNS else [] ), }, "in_scope": ( - True if AZURE_SEARCH_ENABLE_IN_DOMAIN.lower() == "true" else False - ), - "top_n_documents": ( - int(AZURE_SEARCH_TOP_K) + True + if config.AZURE_SEARCH_ENABLE_IN_DOMAIN.lower() == "true" + else False ), + "top_n_documents": (int(config.AZURE_SEARCH_TOP_K)), "query_type": query_type, "semantic_configuration": ( - AZURE_SEARCH_SEMANTIC_SEARCH_CONFIG - if AZURE_SEARCH_SEMANTIC_SEARCH_CONFIG + config.AZURE_SEARCH_SEMANTIC_SEARCH_CONFIG + if config.AZURE_SEARCH_SEMANTIC_SEARCH_CONFIG else "" ), - "role_information": AZURE_OPENAI_SYSTEM_MESSAGE, + "role_information": config.AZURE_OPENAI_SYSTEM_MESSAGE, "filter": filter, - "strictness": ( - int(AZURE_SEARCH_STRICTNESS) - ), + "strictness": (int(config.AZURE_SEARCH_STRICTNESS)), }, } else: @@ -424,36 +366,39 @@ def get_configured_data_source(): if "vector" in query_type.lower() and DATASOURCE_TYPE != "AzureMLIndex": embeddingDependency = {} - if AZURE_OPENAI_EMBEDDING_NAME: + if config.AZURE_OPENAI_EMBEDDING_NAME: embeddingDependency = { "type": "deployment_name", - "deployment_name": AZURE_OPENAI_EMBEDDING_NAME, + "deployment_name": config.AZURE_OPENAI_EMBEDDING_NAME, } - elif 
AZURE_OPENAI_EMBEDDING_ENDPOINT and AZURE_OPENAI_EMBEDDING_KEY: + elif ( + config.AZURE_OPENAI_EMBEDDING_ENDPOINT and config.AZURE_OPENAI_EMBEDDING_KEY + ): embeddingDependency = { "type": "endpoint", - "endpoint": AZURE_OPENAI_EMBEDDING_ENDPOINT, + "endpoint": config.AZURE_OPENAI_EMBEDDING_ENDPOINT, "authentication": { "type": "api_key", - "key": AZURE_OPENAI_EMBEDDING_KEY, + "key": config.AZURE_OPENAI_EMBEDDING_KEY, }, } else: - track_event_if_configured("embedding_dependency_missing", { - "datasource_type": DATASOURCE_TYPE, - "query_type": query_type - }) + track_event_if_configured( + "embedding_dependency_missing", + {"datasource_type": DATASOURCE_TYPE, "query_type": query_type}, + ) raise Exception( f"Vector query type ({query_type}) is selected for data source type {DATASOURCE_TYPE} but no embedding dependency is configured" ) - track_event_if_configured("embedding_dependency_set", { - "embedding_type": embeddingDependency.get("type") - }) + track_event_if_configured( + "embedding_dependency_set", + {"embedding_type": embeddingDependency.get("type")}, + ) data_source["parameters"]["embedding_dependency"] = embeddingDependency - track_event_if_configured("get_configured_data_source_complete", { - "datasource_type": DATASOURCE_TYPE, - "query_type": query_type - }) + track_event_if_configured( + "get_configured_data_source_complete", + {"datasource_type": DATASOURCE_TYPE, "query_type": query_type}, + ) return data_source @@ -462,7 +407,7 @@ def prepare_model_args(request_body, request_headers): request_messages = request_body.get("messages", []) messages = [] if not SHOULD_USE_DATA: - messages = [{"role": "system", "content": AZURE_OPENAI_SYSTEM_MESSAGE}] + messages = [{"role": "system", "content": config.AZURE_OPENAI_SYSTEM_MESSAGE}] for message in request_messages: if message: @@ -483,25 +428,29 @@ def prepare_model_args(request_body, request_headers): ), } user_json = json.dumps(user_args) - track_event_if_configured("ms_defender_user_info_added", 
{"user_id": user_args["EndUserId"]}) + track_event_if_configured( + "ms_defender_user_info_added", {"user_id": user_args["EndUserId"]} + ) model_args = { "messages": messages, - "temperature": float(AZURE_OPENAI_TEMPERATURE), - "max_tokens": int(AZURE_OPENAI_MAX_TOKENS), - "top_p": float(AZURE_OPENAI_TOP_P), + "temperature": float(config.AZURE_OPENAI_TEMPERATURE), + "max_tokens": int(config.AZURE_OPENAI_MAX_TOKENS), + "top_p": float(config.AZURE_OPENAI_TOP_P), "stop": ( - parse_multi_columns(AZURE_OPENAI_STOP_SEQUENCE) - if AZURE_OPENAI_STOP_SEQUENCE + parse_multi_columns(config.AZURE_OPENAI_STOP_SEQUENCE) + if config.AZURE_OPENAI_STOP_SEQUENCE else None ), - "stream": SHOULD_STREAM, - "model": AZURE_OPENAI_MODEL, + "stream": config.SHOULD_STREAM, + "model": config.AZURE_OPENAI_MODEL, "user": user_json, } - if SHOULD_USE_DATA: - track_event_if_configured("ms_defender_user_info_added", {"user_id": user_args["EndUserId"]}) + if config.SHOULD_USE_DATA: + track_event_if_configured( + "ms_defender_user_info_added", {"user_id": user_args["EndUserId"]} + ) model_args["extra_body"] = {"data_sources": [get_configured_data_source()]} model_args_clean = copy.deepcopy(model_args) @@ -539,7 +488,9 @@ def prepare_model_args(request_body, request_headers): ]["authentication"][field] = "*****" logging.debug(f"REQUEST BODY: {json.dumps(model_args_clean, indent=4)}") - track_event_if_configured("prepare_model_args_complete", {"model": AZURE_OPENAI_MODEL}) + track_event_if_configured( + "prepare_model_args_complete", {"model": config.AZURE_OPENAI_MODEL} + ) return model_args @@ -565,7 +516,9 @@ async def send_chat_request(request_body, request_headers): response = raw_response.parse() apim_request_id = raw_response.headers.get("apim-request-id") - track_event_if_configured("send_chat_request_success", {"model": model_args.get("model")}) + track_event_if_configured( + "send_chat_request_success", {"model": model_args.get("model")} + ) except Exception as e: span = 
trace.get_current_span() if span is not None: @@ -578,7 +531,7 @@ async def send_chat_request(request_body, request_headers): async def stream_chat_request(request_body, request_headers): track_event_if_configured("stream_chat_request_start", {}) - if USE_INTERNAL_STREAM: + if config.USE_INTERNAL_STREAM: history_metadata = request_body.get("history_metadata", {}) apim_request_id = "" @@ -597,11 +550,11 @@ async def generate(): async for chunk in sk_response(): deltaText = "" - deltaText = chunk + deltaText = chunk.content completionChunk = { "id": chunk_id, - "model": AZURE_OPENAI_MODEL, + "model": config.AZURE_OPENAI_MODEL, "created": created_time, "object": "extensions.chat.completion.chunk", "choices": [ @@ -641,16 +594,20 @@ async def generate(): completionChunk, history_metadata, apim_request_id ) track_event_if_configured("stream_openai_selected", {}) + return generate() async def conversation_internal(request_body, request_headers): - track_event_if_configured("conversation_internal_start", { - "streaming": SHOULD_STREAM, - "internal_stream": USE_INTERNAL_STREAM - }) + track_event_if_configured( + "conversation_internal_start", + { + "streaming": config.SHOULD_STREAM, + "internal_stream": config.USE_INTERNAL_STREAM, + }, + ) try: - if SHOULD_STREAM: + if config.SHOULD_STREAM: return await stream_chat_request(request_body, request_headers) # response = await make_response(format_as_ndjson(result)) # response.timeout = None @@ -697,10 +654,7 @@ def get_frontend_settings(): async def add_conversation(): authenticated_user = get_authenticated_user_details(request_headers=request.headers) user_id = authenticated_user["user_principal_id"] - track_event_if_configured( - "HistoryGenerate_Start", - {"user_id": user_id} - ) + track_event_if_configured("HistoryGenerate_Start", {"user_id": user_id}) # check request for conversation_id request_json = await request.get_json() @@ -728,8 +682,8 @@ async def add_conversation(): { "user_id": user_id, "conversation_id": 
conversation_id, - "title": title - } + "title": title, + }, ) # Format the incoming message object in the "chat/completions" messages format @@ -754,7 +708,7 @@ async def add_conversation(): "user_id": user_id, "conversation_id": conversation_id, "message": messages[-1], - } + }, ) else: raise Exception("No user message found") @@ -767,18 +721,12 @@ async def add_conversation(): request_body["history_metadata"] = history_metadata track_event_if_configured( "SendingToChatCompletions", - { - "user_id": user_id, - "conversation_id": conversation_id - } + {"user_id": user_id, "conversation_id": conversation_id}, ) track_event_if_configured( "HistoryGenerate_Completed", - { - "user_id": user_id, - "conversation_id": conversation_id - } + {"user_id": user_id, "conversation_id": conversation_id}, ) return await conversation_internal(request_body, request.headers) @@ -800,10 +748,10 @@ async def update_conversation(): request_json = await request.get_json() conversation_id = request_json.get("conversation_id", None) - track_event_if_configured("UpdateConversation_Start", { - "user_id": user_id, - "conversation_id": conversation_id - }) + track_event_if_configured( + "UpdateConversation_Start", + {"user_id": user_id, "conversation_id": conversation_id}, + ) try: # make sure cosmos is configured @@ -827,10 +775,10 @@ async def update_conversation(): user_id=user_id, input_message=messages[-2], ) - track_event_if_configured("ToolMessageStored", { - "user_id": user_id, - "conversation_id": conversation_id - }) + track_event_if_configured( + "ToolMessageStored", + {"user_id": user_id, "conversation_id": conversation_id}, + ) # write the assistant message await cosmos_conversation_client.create_message( uuid=messages[-1]["id"], @@ -838,19 +786,22 @@ async def update_conversation(): user_id=user_id, input_message=messages[-1], ) - track_event_if_configured("AssistantMessageStored", { - "user_id": user_id, - "conversation_id": conversation_id, - "message": messages[-1] - }) + 
track_event_if_configured( + "AssistantMessageStored", + { + "user_id": user_id, + "conversation_id": conversation_id, + "message": messages[-1], + }, + ) else: raise Exception("No bot messages found") # Submit request to Chat Completions for response await cosmos_conversation_client.cosmosdb_client.close() - track_event_if_configured("UpdateConversation_Success", { - "user_id": user_id, - "conversation_id": conversation_id - }) + track_event_if_configured( + "UpdateConversation_Success", + {"user_id": user_id, "conversation_id": conversation_id}, + ) response = {"success": True} return jsonify(response), 200 @@ -874,10 +825,9 @@ async def update_message(): message_id = request_json.get("message_id", None) message_feedback = request_json.get("message_feedback", None) - track_event_if_configured("MessageFeedback_Start", { - "user_id": user_id, - "message_id": message_id - }) + track_event_if_configured( + "MessageFeedback_Start", {"user_id": user_id, "message_id": message_id} + ) try: if not message_id: return jsonify({"error": "message_id is required"}), 400 @@ -890,11 +840,14 @@ async def update_message(): user_id, message_id, message_feedback ) if updated_message: - track_event_if_configured("MessageFeedback_Updated", { - "user_id": user_id, - "message_id": message_id, - "feedback": message_feedback - }) + track_event_if_configured( + "MessageFeedback_Updated", + { + "user_id": user_id, + "message_id": message_id, + "feedback": message_feedback, + }, + ) return ( jsonify( { @@ -905,10 +858,10 @@ async def update_message(): 200, ) else: - track_event_if_configured("MessageFeedback_NotFound", { - "user_id": user_id, - "message_id": message_id - }) + track_event_if_configured( + "MessageFeedback_NotFound", + {"user_id": user_id, "message_id": message_id}, + ) return ( jsonify( { @@ -937,10 +890,10 @@ async def delete_conversation(): request_json = await request.get_json() conversation_id = request_json.get("conversation_id", None) - 
track_event_if_configured("DeleteConversation_Start", { - "user_id": user_id, - "conversation_id": conversation_id - }) + track_event_if_configured( + "DeleteConversation_Start", + {"user_id": user_id, "conversation_id": conversation_id}, + ) try: if not conversation_id: @@ -959,10 +912,10 @@ async def delete_conversation(): await cosmos_conversation_client.cosmosdb_client.close() - track_event_if_configured("DeleteConversation_Success", { - "user_id": user_id, - "conversation_id": conversation_id - }) + track_event_if_configured( + "DeleteConversation_Success", + {"user_id": user_id, "conversation_id": conversation_id}, + ) return ( jsonify( @@ -988,10 +941,9 @@ async def list_conversations(): authenticated_user = get_authenticated_user_details(request_headers=request.headers) user_id = authenticated_user["user_principal_id"] - track_event_if_configured("ListConversations_Start", { - "user_id": user_id, - "offset": offset - }) + track_event_if_configured( + "ListConversations_Start", {"user_id": user_id, "offset": offset} + ) # make sure cosmos is configured cosmos_conversation_client = init_cosmosdb_client() @@ -1004,18 +956,17 @@ async def list_conversations(): ) await cosmos_conversation_client.cosmosdb_client.close() if not isinstance(conversations, list): - track_event_if_configured("ListConversations_Empty", { - "user_id": user_id, - "offset": offset - }) + track_event_if_configured( + "ListConversations_Empty", {"user_id": user_id, "offset": offset} + ) return jsonify({"error": f"No conversations for {user_id} were found"}), 404 # return the conversation ids - track_event_if_configured("ListConversations_Success", { - "user_id": user_id, - "conversation_count": len(conversations) - }) + track_event_if_configured( + "ListConversations_Success", + {"user_id": user_id, "conversation_count": len(conversations)}, + ) return jsonify(conversations), 200 @@ -1029,17 +980,23 @@ async def get_conversation(): request_json = await request.get_json() conversation_id = 
request_json.get("conversation_id", None) - track_event_if_configured("GetConversation_Start", { - "user_id": user_id, - "conversation_id": conversation_id, - }) - - if not conversation_id: - track_event_if_configured("GetConversation_Failed", { + track_event_if_configured( + "GetConversation_Start", + { "user_id": user_id, "conversation_id": conversation_id, - "error": f"Conversation {conversation_id} not found", - }) + }, + ) + + if not conversation_id: + track_event_if_configured( + "GetConversation_Failed", + { + "user_id": user_id, + "conversation_id": conversation_id, + "error": f"Conversation {conversation_id} not found", + }, + ) return jsonify({"error": "conversation_id is required"}), 400 # make sure cosmos is configured @@ -1080,11 +1037,14 @@ async def get_conversation(): ] await cosmos_conversation_client.cosmosdb_client.close() - track_event_if_configured("GetConversation_Success", { - "user_id": user_id, - "conversation_id": conversation_id, - "message_count": len(messages) - }) + track_event_if_configured( + "GetConversation_Success", + { + "user_id": user_id, + "conversation_id": conversation_id, + "message_count": len(messages), + }, + ) return jsonify({"conversation_id": conversation_id, "messages": messages}), 200 @@ -1097,17 +1057,20 @@ async def rename_conversation(): request_json = await request.get_json() conversation_id = request_json.get("conversation_id", None) - track_event_if_configured("RenameConversation_Start", { - "user_id": user_id, - "conversation_id": conversation_id - }) + track_event_if_configured( + "RenameConversation_Start", + {"user_id": user_id, "conversation_id": conversation_id}, + ) if not conversation_id: - track_event_if_configured("RenameConversation_Failed", { - "user_id": user_id, - "conversation_id": conversation_id, - "error": f"Conversation {conversation_id} not found", - }) + track_event_if_configured( + "RenameConversation_Failed", + { + "user_id": user_id, + "conversation_id": conversation_id, + "error": 
f"Conversation {conversation_id} not found", + }, + ) return jsonify({"error": "conversation_id is required"}), 400 # make sure cosmos is configured @@ -1140,11 +1103,10 @@ async def rename_conversation(): await cosmos_conversation_client.cosmosdb_client.close() - track_event_if_configured("RenameConversation_Success", { - "user_id": user_id, - "conversation_id": conversation_id, - "new_title": title - }) + track_event_if_configured( + "RenameConversation_Success", + {"user_id": user_id, "conversation_id": conversation_id, "new_title": title}, + ) return jsonify(updated_conversation), 200 @@ -1154,9 +1116,7 @@ async def delete_all_conversations(): authenticated_user = get_authenticated_user_details(request_headers=request.headers) user_id = authenticated_user["user_principal_id"] - track_event_if_configured("DeleteAllConversations_Start", { - "user_id": user_id - }) + track_event_if_configured("DeleteAllConversations_Start", {"user_id": user_id}) # get conversations for user try: @@ -1169,9 +1129,12 @@ async def delete_all_conversations(): user_id, offset=0, limit=None ) if not conversations: - track_event_if_configured("DeleteAllConversations_Empty", { - "user_id": user_id, - }) + track_event_if_configured( + "DeleteAllConversations_Empty", + { + "user_id": user_id, + }, + ) return jsonify({"error": f"No conversations for {user_id} were found"}), 404 # delete each conversation @@ -1187,10 +1150,10 @@ async def delete_all_conversations(): ) await cosmos_conversation_client.cosmosdb_client.close() - track_event_if_configured("DeleteAllConversations_Success", { - "user_id": user_id, - "conversation_count": len(conversations) - }) + track_event_if_configured( + "DeleteAllConversations_Success", + {"user_id": user_id, "conversation_count": len(conversations)}, + ) return ( jsonify( @@ -1220,18 +1183,24 @@ async def clear_messages(): request_json = await request.get_json() conversation_id = request_json.get("conversation_id", None) - 
track_event_if_configured("ClearConversationMessages_Start", { - "user_id": user_id, - "conversation_id": conversation_id, - }) + track_event_if_configured( + "ClearConversationMessages_Start", + { + "user_id": user_id, + "conversation_id": conversation_id, + }, + ) try: if not conversation_id: - track_event_if_configured("ClearConversationMessages_Failed", { - "user_id": user_id, - "conversation_id": conversation_id, - "error": "conversation_id is required" - }) + track_event_if_configured( + "ClearConversationMessages_Failed", + { + "user_id": user_id, + "conversation_id": conversation_id, + "error": "conversation_id is required", + }, + ) return jsonify({"error": "conversation_id is required"}), 400 # make sure cosmos is configured @@ -1242,10 +1211,10 @@ async def clear_messages(): # delete the conversation messages from cosmos await cosmos_conversation_client.delete_messages(conversation_id, user_id) - track_event_if_configured("ClearConversationMessages_Success", { - "user_id": user_id, - "conversation_id": conversation_id - }) + track_event_if_configured( + "ClearConversationMessages_Success", + {"user_id": user_id, "conversation_id": conversation_id}, + ) return ( jsonify( @@ -1267,10 +1236,13 @@ async def clear_messages(): @bp.route("/history/ensure", methods=["GET"]) async def ensure_cosmos(): - if not AZURE_COSMOSDB_ACCOUNT: - track_event_if_configured("EnsureCosmosDB_Failed", { - "error": "CosmosDB is not configured", - }) + if not config.AZURE_COSMOSDB_ACCOUNT: + track_event_if_configured( + "EnsureCosmosDB_Failed", + { + "error": "CosmosDB is not configured", + }, + ) return jsonify({"error": "CosmosDB is not configured"}), 404 try: @@ -1278,16 +1250,22 @@ async def ensure_cosmos(): success, err = await cosmos_conversation_client.ensure() if not cosmos_conversation_client or not success: if err: - track_event_if_configured("EnsureCosmosDB_Failed", { - "error": err, - }) + track_event_if_configured( + "EnsureCosmosDB_Failed", + { + "error": err, + }, + 
) return jsonify({"error": err}), 422 return jsonify({"error": "CosmosDB is not configured or not working"}), 500 await cosmos_conversation_client.cosmosdb_client.close() - track_event_if_configured("EnsureCosmosDB_Failed", { - "error": "CosmosDB is not configured or not working", - }) + track_event_if_configured( + "EnsureCosmosDB_Failed", + { + "error": "CosmosDB is not configured or not working", + }, + ) return jsonify({"message": "CosmosDB is configured and working"}), 200 except Exception as e: logging.exception("Exception in /history/ensure") @@ -1302,7 +1280,7 @@ async def ensure_cosmos(): return ( jsonify( { - "error": f"{cosmos_exception} {AZURE_COSMOSDB_DATABASE} for account {AZURE_COSMOSDB_ACCOUNT}" + "error": f"{cosmos_exception} {config.AZURE_COSMOSDB_DATABASE} for account {config.AZURE_COSMOSDB_ACCOUNT}" } ), 422, @@ -1311,7 +1289,7 @@ async def ensure_cosmos(): return ( jsonify( { - "error": f"{cosmos_exception}: {AZURE_COSMOSDB_CONVERSATIONS_CONTAINER}" + "error": f"{cosmos_exception}: {config.AZURE_COSMOSDB_CONVERSATIONS_CONTAINER}" } ), 422, @@ -1334,7 +1312,10 @@ async def generate_title(conversation_messages): try: azure_openai_client = init_openai_client(use_data=False) response = await azure_openai_client.chat.completions.create( - model=AZURE_OPENAI_MODEL, messages=messages, temperature=1, max_tokens=64 + model=config.AZURE_OPENAI_MODEL, + messages=messages, + temperature=1, + max_tokens=64, ) title = json.loads(response.choices[0].message.content)["title"] @@ -1350,152 +1331,17 @@ async def generate_title(conversation_messages): @bp.route("/api/users", methods=["GET"]) def get_users(): - track_event_if_configured("UserFetch_Start", {}) - conn = None - try: - conn = get_connection() - cursor = conn.cursor() - sql_stmt = """ - SELECT - ClientId, - Client, - Email, - FORMAT(AssetValue, 'N0') AS AssetValue, - ClientSummary, - CAST(LastMeeting AS DATE) AS LastMeetingDate, - FORMAT(CAST(LastMeeting AS DATE), 'dddd MMMM d, yyyy') AS 
LastMeetingDateFormatted, - FORMAT(LastMeeting, 'hh:mm tt') AS LastMeetingStartTime, - FORMAT(LastMeetingEnd, 'hh:mm tt') AS LastMeetingEndTime, - CAST(NextMeeting AS DATE) AS NextMeetingDate, - FORMAT(CAST(NextMeeting AS DATE), 'dddd MMMM d, yyyy') AS NextMeetingFormatted, - FORMAT(NextMeeting, 'hh:mm tt') AS NextMeetingStartTime, - FORMAT(NextMeetingEnd, 'hh:mm tt') AS NextMeetingEndTime - FROM ( - SELECT ca.ClientId, Client, Email, AssetValue, ClientSummary, LastMeeting, LastMeetingEnd, NextMeeting, NextMeetingEnd - FROM ( - SELECT c.ClientId, c.Client, c.Email, a.AssetValue, cs.ClientSummary - FROM Clients c - JOIN ( - SELECT a.ClientId, a.Investment AS AssetValue - FROM ( - SELECT ClientId, sum(Investment) as Investment, - ROW_NUMBER() OVER (PARTITION BY ClientId ORDER BY AssetDate DESC) AS RowNum - FROM Assets -         group by ClientId,AssetDate - ) a - WHERE a.RowNum = 1 - ) a ON c.ClientId = a.ClientId - JOIN ClientSummaries cs ON c.ClientId = cs.ClientId - ) ca - JOIN ( - SELECT cm.ClientId, - MAX(CASE WHEN StartTime < GETDATE() THEN StartTime END) AS LastMeeting, - DATEADD(MINUTE, 30, MAX(CASE WHEN StartTime < GETDATE() THEN StartTime END)) AS LastMeetingEnd, - MIN(CASE WHEN StartTime > GETDATE() AND StartTime < GETDATE() + 7 THEN StartTime END) AS NextMeeting, - DATEADD(MINUTE, 30, MIN(CASE WHEN StartTime > GETDATE() AND StartTime < GETDATE() + 7 THEN StartTime END)) AS NextMeetingEnd - FROM ClientMeetings cm - GROUP BY cm.ClientId - ) cm ON ca.ClientId = cm.ClientId - ) x - WHERE NextMeeting IS NOT NULL - ORDER BY NextMeeting ASC; - """ - cursor.execute(sql_stmt) - # Since pyodbc returns query results as a list of tuples, using `dict_cursor` function to convert these tuples into a list of dictionaries - rows = dict_cursor(cursor) - - if len(rows) <= 6: - track_event_if_configured("UserFetch_SampleUpdate", { - "rows_count": len(rows), - }) - # update ClientMeetings,Assets,Retirement tables sample data to current date - cursor = conn.cursor() - 
combined_stmt = """ - WITH MaxDates AS ( - SELECT - MAX(CAST(StartTime AS Date)) AS MaxClientMeetingDate, - MAX(AssetDate) AS MaxAssetDate, - MAX(StatusDate) AS MaxStatusDate - FROM - (SELECT StartTime, NULL AS AssetDate, NULL AS StatusDate FROM ClientMeetings - UNION ALL - SELECT NULL AS StartTime, AssetDate, NULL AS StatusDate FROM Assets - UNION ALL - SELECT NULL AS StartTime, NULL AS AssetDate, StatusDate FROM Retirement) AS Combined - ), - Today AS ( - SELECT GETDATE() AS TodayDate - ), - DaysDifference AS ( - SELECT - DATEDIFF(DAY, MaxClientMeetingDate, TodayDate) + 3 AS ClientMeetingDaysDifference, - DATEDIFF(DAY, MaxAssetDate, TodayDate) - 30 AS AssetDaysDifference, - DATEDIFF(DAY, MaxStatusDate, TodayDate) - 30 AS StatusDaysDifference - FROM MaxDates, Today - ) - SELECT - ClientMeetingDaysDifference, - AssetDaysDifference / 30 AS AssetMonthsDifference, - StatusDaysDifference / 30 AS StatusMonthsDifference - FROM DaysDifference - """ - cursor.execute(combined_stmt) - # Since pyodbc returns query results as a list of tuples, using `dict_cursor` function to convert these tuples into a list of dictionaries - date_diff_rows = dict_cursor(cursor) - - client_days = ( - date_diff_rows[0]["ClientMeetingDaysDifference"] - if date_diff_rows - else 0 - ) - asset_months = ( - int(date_diff_rows[0]["AssetMonthsDifference"]) if date_diff_rows else 0 - ) - status_months = ( - int(date_diff_rows[0]["StatusMonthsDifference"]) - if date_diff_rows - else 0 - ) - # Update ClientMeetings - if client_days > 0: - client_update_stmt = f"UPDATE ClientMeetings SET StartTime = DATEADD(day, {client_days}, StartTime), EndTime = DATEADD(day, {client_days}, EndTime)" - cursor.execute(client_update_stmt) - conn.commit() - - # Update Assets - if asset_months > 0: - asset_update_stmt = f"UPDATE Assets SET AssetDate = DATEADD(month, {asset_months}, AssetDate)" - cursor.execute(asset_update_stmt) - conn.commit() - - # Update Retirement - if status_months > 0: - retire_update_stmt = f"UPDATE 
Retirement SET StatusDate = DATEADD(month, {status_months}, StatusDate)" - cursor.execute(retire_update_stmt) - conn.commit() - - users = [] - for row in rows: - user = { - "ClientId": row["ClientId"], - "ClientName": row["Client"], - "ClientEmail": row["Email"], - "AssetValue": row["AssetValue"], - "NextMeeting": row["NextMeetingFormatted"], - "NextMeetingTime": row["NextMeetingStartTime"], - "NextMeetingEndTime": row["NextMeetingEndTime"], - "LastMeeting": row["LastMeetingDateFormatted"], - "LastMeetingStartTime": row["LastMeetingStartTime"], - "LastMeetingEndTime": row["LastMeetingEndTime"], - "ClientSummary": row["ClientSummary"], - } - users.append(user) + try: + users = sqldb_service.get_client_data() - track_event_if_configured("UserFetch_Success", { + track_event_if_configured( + "UserFetch_Success", + { "user_count": len(users), - }) + }, + ) return jsonify(users) @@ -1506,9 +1352,6 @@ def get_users(): span.set_status(Status(StatusCode.ERROR, str(e))) print("Exception occurred:", e) return str(e), 500 - finally: - if conn: - conn.close() app = create_app() diff --git a/src/App/backend/agents/agent_factory.py b/src/App/backend/agents/agent_factory.py new file mode 100644 index 000000000..604f38f05 --- /dev/null +++ b/src/App/backend/agents/agent_factory.py @@ -0,0 +1,63 @@ +""" +Factory module for creating and managing a singleton AzureAIAgent instance. + +This module provides asynchronous methods to get or delete the singleton agent, +ensuring only one instance exists at a time. The agent is configured for Azure AI +and supports plugin integration. +""" + +import asyncio + +from azure.identity.aio import DefaultAzureCredential +from semantic_kernel.agents import AzureAIAgent, AzureAIAgentSettings + +from backend.plugins.chat_with_data_plugin import ChatWithDataPlugin + + +class AgentFactory: + """ + Singleton factory for creating and managing an AzureAIAgent instance. 
+ """ + + _instance = None + _lock = asyncio.Lock() + + @classmethod + async def get_instance(cls): + """ + Get or create the singleton AzureAIAgent instance. + """ + async with cls._lock: + if cls._instance is None: + ai_agent_settings = AzureAIAgentSettings() + creds = DefaultAzureCredential() + client = AzureAIAgent.create_client( + credential=creds, endpoint=ai_agent_settings.endpoint + ) + + agent_name = "WealthAdvisor" + agent_instructions = "You are a helpful assistant to a Wealth Advisor." + + agent_definition = await client.agents.create_agent( + model=ai_agent_settings.model_deployment_name, + name=agent_name, + instructions=agent_instructions, + ) + agent = AzureAIAgent( + client=client, + definition=agent_definition, + plugins=[ChatWithDataPlugin()], + ) + cls._instance = agent + return cls._instance + + @classmethod + async def delete_instance(cls): + """ + Delete the singleton AzureAIAgent instance if it exists. + Also deletes all threads in ChatService.thread_cache. + """ + async with cls._lock: + if cls._instance is not None: + await cls._instance.client.agents.delete_agent(cls._instance.id) + cls._instance = None diff --git a/src/App/backend/chat_logic_handler.py b/src/App/backend/chat_logic_handler.py deleted file mode 100644 index 8d04a2384..000000000 --- a/src/App/backend/chat_logic_handler.py +++ /dev/null @@ -1,381 +0,0 @@ -import os -import openai -import struct -import logging -import pyodbc -from azure.identity import DefaultAzureCredential -from azure.ai.projects import AIProjectClient -from semantic_kernel.agents.open_ai import AzureAssistantAgent -from semantic_kernel.kernel import Kernel -from semantic_kernel.contents.chat_message_content import ChatMessageContent -from semantic_kernel.contents.utils.author_role import AuthorRole -from semantic_kernel.functions.kernel_function_decorator import kernel_function -from typing import Annotated - -# -------------------------- -# Environment Variables -# -------------------------- -endpoint = 
os.environ.get("AZURE_OPENAI_ENDPOINT") -api_key = os.environ.get("AZURE_OPENAI_KEY") -api_version = os.environ.get("AZURE_OPENAI_PREVIEW_API_VERSION") -deployment = os.environ.get("AZURE_OPENAI_MODEL") -search_endpoint = os.environ.get("AZURE_AI_SEARCH_ENDPOINT") -search_key = os.environ.get("AZURE_SEARCH_KEY") -project_connection_string = os.environ.get("AZURE_AI_PROJECT_CONN_STRING") -use_ai_project_client = os.environ.get("USE_AI_PROJECT_CLIENT", "false").lower() == "true" - -# -------------------------- -# ChatWithDataPlugin Class -# -------------------------- - - -class ChatWithDataPlugin: - - @kernel_function(name="GreetingsResponse", description="Respond to any greeting or general questions") - def greeting(self, input: Annotated[str, "the question"]) -> Annotated[str, "The output is a string"]: - """ - Simple greeting handler using Azure OpenAI. - """ - try: - if self.use_ai_project_client: - project = AIProjectClient.from_connection_string( - conn_str=self.azure_ai_project_conn_string, - credential=DefaultAzureCredential() - ) - client = project.inference.get_chat_completions_client() - - completion = client.complete( - model=self.azure_openai_deployment_model, - messages=[ - { - "role": "system", - "content": "You are a helpful assistant to respond to greetings or general questions." - }, - { - "role": "user", - "content": input - }, - ], - temperature=0, - ) - else: - client = openai.AzureOpenAI( - azure_endpoint=endpoint, - api_key=api_key, - api_version=api_version - ) - completion = client.chat.completions.create( - model=deployment, - messages=[ - { - "role": "system", - "content": "You are a helpful assistant to respond to greetings or general questions." 
- }, - { - "role": "user", - "content": input - }, - ], - temperature=0, - top_p=1, - n=1 - ) - - answer = completion.choices[0].message.content - except Exception as e: - answer = f"Error retrieving greeting response: {str(e)}" - return answer - - @kernel_function(name="ChatWithSQLDatabase", description="Given a query about client assets, investments and scheduled meetings (including upcoming or next meeting dates/times), get details from the database based on the provided question and client id") - def get_SQL_Response( - self, - input: Annotated[str, "the question"], - ClientId: Annotated[str, "the ClientId"] - ) -> Annotated[str, "The output is a string"]: - """ - Dynamically generates a T-SQL query using the Azure OpenAI chat endpoint - and then executes it against the SQL database. - """ - clientid = ClientId - query = input - - # Retrieve the SQL prompt from environment variables (if available) - sql_prompt = os.environ.get("AZURE_SQL_SYSTEM_PROMPT") - if sql_prompt: - sql_prompt = sql_prompt.replace("{query}", query).replace("{clientid}", clientid) - else: - # Fallback prompt if not set in environment - sql_prompt = f'''Generate a valid T-SQL query to find {query} for tables and columns provided below: - 1. Table: Clients - Columns: ClientId, Client, Email, Occupation, MaritalStatus, Dependents - 2. Table: InvestmentGoals - Columns: ClientId, InvestmentGoal - 3. Table: Assets - Columns: ClientId, AssetDate, Investment, ROI, Revenue, AssetType - 4. Table: ClientSummaries - Columns: ClientId, ClientSummary - 5. Table: InvestmentGoalsDetails - Columns: ClientId, InvestmentGoal, TargetAmount, Contribution - 6. Table: Retirement - Columns: ClientId, StatusDate, RetirementGoalProgress, EducationGoalProgress - 7. Table: ClientMeetings - Columns: ClientId, ConversationId, Title, StartTime, EndTime, Advisor, ClientEmail - Always use the Investment column from the Assets table as the value. - Assets table has snapshots of values by date. 
Do not add numbers across different dates for total values. - Do not use client name in filters. - Do not include assets values unless asked for. - ALWAYS use ClientId = {clientid} in the query filter. - ALWAYS select Client Name (Column: Client) in the query. - Query filters are IMPORTANT. Add filters like AssetType, AssetDate, etc. if needed. - Only return the generated SQL query. Do not return anything else.''' - - try: - if use_ai_project_client: - project = AIProjectClient.from_connection_string( - conn_str=project_connection_string, - credential=DefaultAzureCredential() - ) - client = project.inference.get_chat_completions_client() - completion = client.complete( - model=deployment, - messages=[ - {"role": "system", "content": "You are a helpful assistant."}, - {"role": "user", "content": sql_prompt}, - ], - temperature=0, - ) - - else: - # Initialize the Azure OpenAI client - client = openai.AzureOpenAI( - azure_endpoint=endpoint, - api_key=api_key, - api_version=api_version - ) - completion = client.chat.completions.create( - model=deployment, - messages=[ - {"role": "system", "content": "You are a helpful assistant."}, - {"role": "user", "content": sql_prompt}, - ], - temperature=0, - top_p=1, - n=1 - ) - - sql_query = completion.choices[0].message.content - - # Remove any triple backticks if present - sql_query = sql_query.replace("```sql", "").replace("```", "") - - print("Generated SQL:", sql_query) - - conn = get_connection() - # conn = pyodbc.connect(connectionString) - cursor = conn.cursor() - cursor.execute(sql_query) - - rows = cursor.fetchall() - if not rows: - answer = "No data found for that client." 
- else: - answer = "" - for row in rows: - answer += str(row) + "\n" - - conn.close() - answer = answer[:20000] if len(answer) > 20000 else answer - - except Exception as e: - answer = f"Error retrieving data from SQL: {str(e)}" - return answer - - @kernel_function(name="ChatWithCallTranscripts", description="given a query about meetings summary or actions or notes, get answer from search index for a given ClientId") - def get_answers_from_calltranscripts( - self, - question: Annotated[str, "the question"], - ClientId: Annotated[str, "the ClientId"] - ) -> Annotated[str, "The output is a string"]: - """ - Uses Azure Cognitive Search (via the Azure OpenAI extension) to find relevant call transcripts. - """ - try: - client = openai.AzureOpenAI( - azure_endpoint=endpoint, - api_key=api_key, - api_version=api_version - ) - - system_message = os.environ.get("AZURE_CALL_TRANSCRIPT_SYSTEM_PROMPT") - if not system_message: - system_message = ( - "You are an assistant who supports wealth advisors in preparing for client meetings. " - "You have access to the client’s past meeting call transcripts. " - "When answering questions, especially summary requests, provide a detailed and structured response that includes key topics, concerns, decisions, and trends. 
" - "If no data is available, state 'No relevant data found for previous meetings.'" - ) - - completion = client.chat.completions.create( - model=deployment, - messages=[ - {"role": "system", "content": system_message}, - {"role": "user", "content": question} - ], - seed=42, - temperature=0, - top_p=1, - n=1, - max_tokens=800, - extra_body={ - "data_sources": [ - { - "type": "azure_search", - "parameters": { - "endpoint": search_endpoint, - "index_name": os.environ.get("AZURE_SEARCH_INDEX"), - "query_type": "vector_simple_hybrid", - "fields_mapping": { - "content_fields_separator": "\n", - "content_fields": ["content"], - "filepath_field": "chunk_id", - "title_field": "", - "url_field": "sourceurl", - "vector_fields": ["contentVector"] - }, - "semantic_configuration": 'my-semantic-config', - "in_scope": "true", - # "role_information": system_message, - "filter": f"client_id eq '{ClientId}'", - "strictness": 3, - "top_n_documents": 5, - "authentication": { - "type": "api_key", - "key": search_key - }, - "embedding_dependency": { - "type": "deployment_name", - "deployment_name": "text-embedding-ada-002" - }, - } - } - ] - } - ) - - if not completion.choices: - return "No data found for that client." - - response_text = completion.choices[0].message.content - if not response_text.strip(): - return "No data found for that client." - return response_text - - except Exception as e: - return f"Error retrieving data from call transcripts: {str(e)}" - - -# -------------------------- -# Streaming Response Logic -# -------------------------- - - -async def stream_response_from_wealth_assistant(query: str, client_id: str): - """ - Streams real-time chat response from the Wealth Assistant. - Uses Semantic Kernel agent with SQL and Azure Cognitive Search based on the client ID. 
- """ - - # Dynamically get the name from the database - selected_client_name = get_client_name_from_db(client_id) # Optionally fetch from DB - - # Prepare fallback instructions with the single-line prompt - host_instructions = os.environ.get("AZURE_OPENAI_STREAM_TEXT_SYSTEM_PROMPT") - if not host_instructions: - # Insert the name in the prompt: - host_instructions = ( - "You are a helpful assistant to a Wealth Advisor." - "The currently selected client's name is '{SelectedClientName}'. Treat any case-insensitive or partial mention as referring to this client." - "If the user mentions no name, assume they are asking about '{SelectedClientName}'." - "If the user references a name that clearly differs from '{SelectedClientName}', respond only with: 'Please only ask questions about the selected client or select another client.' Otherwise, provide thorough answers for every question using only data from SQL or call transcripts." - "If no data is found, respond with 'No data found for that client.' Remove any client identifiers from the final response." 
- ) - host_instructions = host_instructions.replace("{SelectedClientName}", selected_client_name) - - # Create the agent using the Semantic Kernel Assistant Agent - kernel = Kernel() - kernel.add_plugin(ChatWithDataPlugin(), plugin_name="ChatWithData") - - agent = await AzureAssistantAgent.create( - kernel=kernel, - service_id="agent", - name="WealthAdvisor", - instructions=host_instructions, - api_key=api_key, - deployment_name=deployment, - endpoint=endpoint, - api_version=api_version, - ) - - # Create a conversation thread and add the user's message - thread_id = await agent.create_thread() - message = ChatMessageContent(role=AuthorRole.USER, content=query) - await agent.add_chat_message(thread_id=thread_id, message=message) - - # Additional instructions: pass the clientId - additional_instructions = f"Always send clientId as {client_id}" - sk_response = agent.invoke_stream(thread_id=thread_id, additional_instructions=additional_instructions) - - async def generate(): - # yields deltaText strings one-by-one - async for chunk in sk_response: - if not chunk or not chunk.content: - continue - yield chunk.content # just the deltaText - - return generate - - -# -------------------------- -# Get SQL Connection -# -------------------------- -def get_connection(): - driver = "{ODBC Driver 18 for SQL Server}" - server = os.environ.get("SQLDB_SERVER") - database = os.environ.get("SQLDB_DATABASE") - username = os.environ.get("SQLDB_USERNAME") - password = os.environ.get("SQLDB_PASSWORD") - mid_id = os.environ.get("SQLDB_USER_MID") - - try: - credential = DefaultAzureCredential(managed_identity_client_id=mid_id) - token_bytes = credential.get_token("https://database.windows.net/.default").token.encode("utf-16-LE") - token_struct = struct.pack(f" str: - """ - Connects to your SQL database and returns the client name for the given client_id. - """ - - conn = get_connection() - cursor = conn.cursor() - sql = "SELECT Client FROM Clients WHERE ClientId = ?" 
- cursor.execute(sql, (client_id,)) - row = cursor.fetchone() - conn.close() - if row: - return row[0] # The 'Client' column - else: - return "" diff --git a/src/App/backend/common/config.py b/src/App/backend/common/config.py new file mode 100644 index 000000000..38afe161b --- /dev/null +++ b/src/App/backend/common/config.py @@ -0,0 +1,154 @@ +"""Configuration module for environment variables and Azure service settings. + +This module defines the Config class, which loads configuration values from +environment variables for SQL Database, Azure OpenAI, Azure AI Search, and +other related services. +""" + +import os + +from dotenv import load_dotenv + +load_dotenv() + + +class Config: + def __init__(self): + + # UI configuration (optional) + self.UI_TITLE = os.environ.get("UI_TITLE") or "Woodgrove Bank" + self.UI_LOGO = os.environ.get("UI_LOGO") + self.UI_CHAT_LOGO = os.environ.get("UI_CHAT_LOGO") + self.UI_CHAT_TITLE = os.environ.get("UI_CHAT_TITLE") or "Start chatting" + self.UI_CHAT_DESCRIPTION = ( + os.environ.get("UI_CHAT_DESCRIPTION") + or "This chatbot is configured to answer your questions" + ) + self.UI_FAVICON = os.environ.get("UI_FAVICON") or "/favicon.ico" + self.UI_SHOW_SHARE_BUTTON = ( + os.environ.get("UI_SHOW_SHARE_BUTTON", "true").lower() == "true" + ) + + # Application Insights Instrumentation Key + self.INSTRUMENTATION_KEY = os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING") + self.APPLICATIONINSIGHTS_CONNECTION_STRING = os.getenv( + "APPLICATIONINSIGHTS_CONNECTION_STRING" + ) + + self.DEBUG = os.environ.get("DEBUG", "false") + + # Current minimum Azure OpenAI version supported + self.MINIMUM_SUPPORTED_AZURE_OPENAI_PREVIEW_API_VERSION = "2024-02-15-preview" + + # On Your Data Settings + self.DATASOURCE_TYPE = os.environ.get("DATASOURCE_TYPE", "AzureCognitiveSearch") + + # ACS Integration Settings + self.AZURE_SEARCH_ENDPOINT = os.environ.get("AZURE_AI_SEARCH_ENDPOINT") + self.AZURE_SEARCH_SERVICE = os.environ.get("AZURE_SEARCH_SERVICE") + 
self.AZURE_SEARCH_INDEX = os.environ.get("AZURE_SEARCH_INDEX") + self.AZURE_SEARCH_KEY = os.environ.get("AZURE_SEARCH_KEY", None) + self.AZURE_SEARCH_USE_SEMANTIC_SEARCH = os.environ.get( + "AZURE_SEARCH_USE_SEMANTIC_SEARCH", "false" + ) + self.AZURE_SEARCH_SEMANTIC_SEARCH_CONFIG = os.environ.get( + "AZURE_SEARCH_SEMANTIC_SEARCH_CONFIG", "default" + ) + self.AZURE_SEARCH_TOP_K = os.environ.get("AZURE_SEARCH_TOP_K", 5) + self.AZURE_SEARCH_ENABLE_IN_DOMAIN = os.environ.get( + "AZURE_SEARCH_ENABLE_IN_DOMAIN", "true" + ) + self.AZURE_SEARCH_CONTENT_COLUMNS = os.environ.get( + "AZURE_SEARCH_CONTENT_COLUMNS" + ) + self.AZURE_SEARCH_FILENAME_COLUMN = os.environ.get( + "AZURE_SEARCH_FILENAME_COLUMN" + ) + self.AZURE_SEARCH_TITLE_COLUMN = os.environ.get("AZURE_SEARCH_TITLE_COLUMN") + self.AZURE_SEARCH_URL_COLUMN = os.environ.get("AZURE_SEARCH_URL_COLUMN") + self.AZURE_SEARCH_VECTOR_COLUMNS = os.environ.get("AZURE_SEARCH_VECTOR_COLUMNS") + self.AZURE_SEARCH_QUERY_TYPE = os.environ.get("AZURE_SEARCH_QUERY_TYPE") + self.AZURE_SEARCH_PERMITTED_GROUPS_COLUMN = os.environ.get( + "AZURE_SEARCH_PERMITTED_GROUPS_COLUMN" + ) + self.AZURE_SEARCH_STRICTNESS = os.environ.get("AZURE_SEARCH_STRICTNESS", 3) + + # AOAI Integration Settings + self.AZURE_OPENAI_RESOURCE = os.environ.get("AZURE_OPENAI_RESOURCE") + self.AZURE_OPENAI_MODEL = os.environ.get("AZURE_OPENAI_MODEL") + self.AZURE_OPENAI_ENDPOINT = os.environ.get("AZURE_OPENAI_ENDPOINT") + self.AZURE_OPENAI_KEY = os.environ.get("AZURE_OPENAI_KEY") + self.AZURE_OPENAI_TEMPERATURE = os.environ.get("AZURE_OPENAI_TEMPERATURE", 0) + self.AZURE_OPENAI_TOP_P = os.environ.get("AZURE_OPENAI_TOP_P", 1.0) + self.AZURE_OPENAI_MAX_TOKENS = os.environ.get("AZURE_OPENAI_MAX_TOKENS", 1000) + self.AZURE_OPENAI_STOP_SEQUENCE = os.environ.get("AZURE_OPENAI_STOP_SEQUENCE") + self.AZURE_OPENAI_SYSTEM_MESSAGE = os.environ.get( + "AZURE_OPENAI_SYSTEM_MESSAGE", + "You are an AI assistant that helps people find information.", + ) + 
self.AZURE_OPENAI_PREVIEW_API_VERSION = os.environ.get( + "AZURE_OPENAI_PREVIEW_API_VERSION", + self.MINIMUM_SUPPORTED_AZURE_OPENAI_PREVIEW_API_VERSION, + ) + self.AZURE_OPENAI_STREAM = os.environ.get("AZURE_OPENAI_STREAM", "true") + self.AZURE_OPENAI_EMBEDDING_ENDPOINT = os.environ.get( + "AZURE_OPENAI_EMBEDDING_ENDPOINT" + ) + self.AZURE_OPENAI_EMBEDDING_KEY = os.environ.get("AZURE_OPENAI_EMBEDDING_KEY") + self.AZURE_OPENAI_EMBEDDING_NAME = os.environ.get( + "AZURE_OPENAI_EMBEDDING_NAME", "" + ) + + self.SHOULD_STREAM = ( + True if self.AZURE_OPENAI_STREAM.lower() == "true" else False + ) + + # Chat History CosmosDB Integration Settings + self.AZURE_COSMOSDB_DATABASE = os.environ.get("AZURE_COSMOSDB_DATABASE") + self.AZURE_COSMOSDB_ACCOUNT = os.environ.get("AZURE_COSMOSDB_ACCOUNT") + self.AZURE_COSMOSDB_CONVERSATIONS_CONTAINER = os.environ.get( + "AZURE_COSMOSDB_CONVERSATIONS_CONTAINER" + ) + self.AZURE_COSMOSDB_ACCOUNT_KEY = os.environ.get("AZURE_COSMOSDB_ACCOUNT_KEY") + self.AZURE_COSMOSDB_ENABLE_FEEDBACK = ( + os.environ.get("AZURE_COSMOSDB_ENABLE_FEEDBACK", "false").lower() == "true" + ) + self.USE_INTERNAL_STREAM = ( + os.environ.get("USE_INTERNAL_STREAM", "false").lower() == "true" + ) + # Frontend Settings via Environment Variables + self.AUTH_ENABLED = os.environ.get("AUTH_ENABLED", "true").lower() == "true" + self.CHAT_HISTORY_ENABLED = ( + self.AZURE_COSMOSDB_ACCOUNT + and self.AZURE_COSMOSDB_DATABASE + and self.AZURE_COSMOSDB_CONVERSATIONS_CONTAINER + ) + self.SANITIZE_ANSWER = ( + os.environ.get("SANITIZE_ANSWER", "false").lower() == "true" + ) + + # AI Project Client configuration + self.USE_AI_PROJECT_CLIENT = ( + os.getenv("USE_AI_PROJECT_CLIENT", "False").lower() == "true" + ) + self.AI_PROJECT_ENDPOINT = os.getenv("AZURE_AI_AGENT_ENDPOINT") + + # SQL Database configuration + self.SQL_DATABASE = os.getenv("SQLDB_DATABASE") + self.SQL_SERVER = os.getenv("SQLDB_SERVER") + self.SQL_USERNAME = os.getenv("SQLDB_USERNAME") + self.SQL_PASSWORD = 
os.getenv("SQLDB_PASSWORD") + self.ODBC_DRIVER = "{ODBC Driver 18 for SQL Server}" + self.MID_ID = os.getenv("SQLDB_USER_MID") + + # System Prompts + self.SQL_SYSTEM_PROMPT = os.environ.get("AZURE_SQL_SYSTEM_PROMPT") + self.CALL_TRANSCRIPT_SYSTEM_PROMPT = os.environ.get( + "AZURE_CALL_TRANSCRIPT_SYSTEM_PROMPT" + ) + self.STREAM_TEXT_SYSTEM_PROMPT = os.environ.get( + "AZURE_OPENAI_STREAM_TEXT_SYSTEM_PROMPT" + ) + + +config = Config() diff --git a/src/App/backend/event_utils.py b/src/App/backend/common/event_utils.py similarity index 89% rename from src/App/backend/event_utils.py rename to src/App/backend/common/event_utils.py index c04214b64..35824439f 100644 --- a/src/App/backend/event_utils.py +++ b/src/App/backend/common/event_utils.py @@ -1,7 +1,9 @@ import logging -import os + from azure.monitor.events.extension import track_event +from backend.common.config import config + def track_event_if_configured(event_name: str, event_data: dict): """Track an event if Application Insights is configured. 
@@ -14,7 +16,7 @@ def track_event_if_configured(event_name: str, event_data: dict): event_data: Dictionary of event data/dimensions """ try: - instrumentation_key = os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING") + instrumentation_key = config.APPLICATIONINSIGHTS_CONNECTION_STRING if instrumentation_key: track_event(event_name, event_data) else: diff --git a/src/App/backend/utils.py b/src/App/backend/common/utils.py similarity index 97% rename from src/App/backend/utils.py rename to src/App/backend/common/utils.py index 4c7511d4d..d60136934 100644 --- a/src/App/backend/utils.py +++ b/src/App/backend/common/utils.py @@ -1,17 +1,16 @@ import dataclasses import json import logging -import os import requests -DEBUG = os.environ.get("DEBUG", "false") +from backend.common.config import config + +DEBUG = config.DEBUG if DEBUG.lower() == "true": logging.basicConfig(level=logging.DEBUG) -AZURE_SEARCH_PERMITTED_GROUPS_COLUMN = os.environ.get( - "AZURE_SEARCH_PERMITTED_GROUPS_COLUMN" -) +AZURE_SEARCH_PERMITTED_GROUPS_COLUMN = config.AZURE_SEARCH_PERMITTED_GROUPS_COLUMN class JSONEncoder(json.JSONEncoder): diff --git a/src/App/backend/plugins/chat_with_data_plugin.py b/src/App/backend/plugins/chat_with_data_plugin.py new file mode 100644 index 000000000..13f3952ae --- /dev/null +++ b/src/App/backend/plugins/chat_with_data_plugin.py @@ -0,0 +1,258 @@ +from typing import Annotated + +import openai +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential, get_bearer_token_provider +from semantic_kernel.functions.kernel_function_decorator import kernel_function + +from backend.common.config import config +from backend.services.sqldb_service import get_connection + +# -------------------------- +# ChatWithDataPlugin Class +# -------------------------- + + +class ChatWithDataPlugin: + + @kernel_function( + name="GreetingsResponse", + description="Respond to any greeting or general questions", + ) + def greeting( + self, input: 
Annotated[str, "the question"] + ) -> Annotated[str, "The output is a string"]: + """ + Simple greeting handler using Azure OpenAI. + """ + try: + if config.USE_AI_PROJECT_CLIENT: + client = self.get_project_openai_client() + + else: + client = self.get_openai_client() + + completion = client.chat.completions.create( + model=config.AZURE_OPENAI_MODEL, + messages=[ + { + "role": "system", + "content": "You are a helpful assistant to respond to greetings or general questions.", + }, + {"role": "user", "content": input}, + ], + temperature=0, + top_p=1, + n=1, + ) + + answer = completion.choices[0].message.content + except Exception as e: + answer = f"Error retrieving greeting response: {str(e)}" + return answer + + @kernel_function( + name="ChatWithSQLDatabase", + description="Given a query about client assets, investments and scheduled meetings (including upcoming or next meeting dates/times), get details from the database based on the provided question and client id", + ) + def get_SQL_Response( + self, + input: Annotated[str, "the question"], + ClientId: Annotated[str, "the ClientId"], + ) -> Annotated[str, "The output is a string"]: + """ + Dynamically generates a T-SQL query using the Azure OpenAI chat endpoint + and then executes it against the SQL database. + """ + if not ClientId or not ClientId.strip(): + return "Error: ClientId is required" + + if not input or not input.strip(): + return "Error: Query input is required" + + clientid = ClientId + query = input + + # Retrieve the SQL prompt from environment variables (if available) + sql_prompt = config.SQL_SYSTEM_PROMPT + if sql_prompt: + sql_prompt = sql_prompt.replace("{query}", query).replace( + "{clientid}", clientid + ) + else: + # Fallback prompt if not set in environment + sql_prompt = f"""Generate a valid T-SQL query to find {query} for tables and columns provided below: + 1. Table: Clients + Columns: ClientId, Client, Email, Occupation, MaritalStatus, Dependents + 2. 
Table: InvestmentGoals + Columns: ClientId, InvestmentGoal + 3. Table: Assets + Columns: ClientId, AssetDate, Investment, ROI, Revenue, AssetType + 4. Table: ClientSummaries + Columns: ClientId, ClientSummary + 5. Table: InvestmentGoalsDetails + Columns: ClientId, InvestmentGoal, TargetAmount, Contribution + 6. Table: Retirement + Columns: ClientId, StatusDate, RetirementGoalProgress, EducationGoalProgress + 7. Table: ClientMeetings + Columns: ClientId, ConversationId, Title, StartTime, EndTime, Advisor, ClientEmail + Always use the Investment column from the Assets table as the value. + Assets table has snapshots of values by date. Do not add numbers across different dates for total values. + Do not use client name in filters. + Do not include assets values unless asked for. + ALWAYS use ClientId = {clientid} in the query filter. + ALWAYS select Client Name (Column: Client) in the query. + Query filters are IMPORTANT. Add filters like AssetType, AssetDate, etc. if needed. + Only return the generated SQL query. Do not return anything else.""" + + try: + if config.USE_AI_PROJECT_CLIENT: + client = self.get_project_openai_client() + + else: + # Initialize the Azure OpenAI client + client = self.get_openai_client() + + completion = client.chat.completions.create( + model=config.AZURE_OPENAI_MODEL, + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": sql_prompt}, + ], + temperature=0, + top_p=1, + n=1, + ) + + sql_query = completion.choices[0].message.content + + # Remove any triple backticks if present + sql_query = sql_query.replace("```sql", "").replace("```", "") + + # print("Generated SQL:", sql_query) + + conn = get_connection() + # conn = pyodbc.connect(connectionString) + cursor = conn.cursor() + cursor.execute(sql_query) + + rows = cursor.fetchall() + if not rows: + answer = "No data found for that client." 
+ else: + answer = "" + for row in rows: + answer += str(row) + "\n" + + conn.close() + answer = answer[:20000] if len(answer) > 20000 else answer + + except Exception as e: + answer = f"Error retrieving data from SQL: {str(e)}" + return answer + + @kernel_function( + name="ChatWithCallTranscripts", + description="given a query about meetings summary or actions or notes, get answer from search index for a given ClientId", + ) + def get_answers_from_calltranscripts( + self, + question: Annotated[str, "the question"], + ClientId: Annotated[str, "the ClientId"], + ) -> Annotated[str, "The output is a string"]: + """ + Uses Azure Cognitive Search (via the Azure OpenAI extension) to find relevant call transcripts. + """ + if not ClientId or not ClientId.strip(): + return "Error: ClientId is required" + if not question or not question.strip(): + return "Error: Question input is required" + + try: + client = self.get_openai_client() + + system_message = config.CALL_TRANSCRIPT_SYSTEM_PROMPT + if not system_message: + system_message = ( + "You are an assistant who supports wealth advisors in preparing for client meetings. " + "You have access to the client's past meeting call transcripts. " + "When answering questions, especially summary requests, provide a detailed and structured response that includes key topics, concerns, decisions, and trends. 
" + "If no data is available, state 'No relevant data found for previous meetings.'" + ) + + completion = client.chat.completions.create( + model=config.AZURE_OPENAI_MODEL, + messages=[ + {"role": "system", "content": system_message}, + {"role": "user", "content": question}, + ], + seed=42, + temperature=0, + top_p=1, + n=1, + max_tokens=800, + extra_body={ + "data_sources": [ + { + "type": "azure_search", + "parameters": { + "endpoint": config.AZURE_SEARCH_ENDPOINT, + "index_name": "transcripts_index", + "query_type": "vector_simple_hybrid", + "fields_mapping": { + "content_fields_separator": "\n", + "content_fields": ["content"], + "filepath_field": "chunk_id", + "title_field": "", + "url_field": "sourceurl", + "vector_fields": ["contentVector"], + }, + "semantic_configuration": "my-semantic-config", + "in_scope": "true", + # "role_information": system_message, + "filter": f"client_id eq '{ClientId}'", + "strictness": 3, + "top_n_documents": 5, + "authentication": { + "type": "system_assigned_managed_identity" + }, + "embedding_dependency": { + "type": "deployment_name", + "deployment_name": "text-embedding-ada-002", + }, + }, + } + ] + }, + ) + + if not completion.choices: + return "No data found for that client." + + response_text = completion.choices[0].message.content + if not response_text.strip(): + return "No data found for that client." 
+ return response_text + + except Exception as e: + return f"Error retrieving data from call transcripts: {str(e)}" + + def get_openai_client(self): + token_provider = get_bearer_token_provider( + DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default" + ) + openai_client = openai.AzureOpenAI( + azure_endpoint=config.AZURE_OPENAI_ENDPOINT, + azure_ad_token_provider=token_provider, + api_version=config.AZURE_OPENAI_PREVIEW_API_VERSION, + ) + return openai_client + + def get_project_openai_client(self): + project = AIProjectClient( + endpoint=config.AI_PROJECT_ENDPOINT, credential=DefaultAzureCredential() + ) + openai_client = project.inference.get_azure_openai_client( + api_version=config.AZURE_OPENAI_PREVIEW_API_VERSION + ) + return openai_client diff --git a/src/App/backend/services/chat_service.py b/src/App/backend/services/chat_service.py new file mode 100644 index 000000000..e2060e6ee --- /dev/null +++ b/src/App/backend/services/chat_service.py @@ -0,0 +1,64 @@ +from quart import current_app +from semantic_kernel.agents import AzureAIAgent, AzureAIAgentThread +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.utils.author_role import AuthorRole + +from backend.common.config import config +from backend.services.sqldb_service import get_client_name_from_db + + +async def stream_response_from_wealth_assistant(query: str, client_id: str): + """ + Streams real-time chat response from the Wealth Assistant. + Uses Semantic Kernel agent with SQL and Azure Cognitive Search based on the client ID. 
+ """ + try: + # Dynamically get the name from the database + selected_client_name = get_client_name_from_db( + client_id + ) # Optionally fetch from DB + + # Prepare fallback instructions with the single-line prompt + additional_instructions = config.STREAM_TEXT_SYSTEM_PROMPT + if not additional_instructions: + additional_instructions = ( + "The currently selected client's name is '{SelectedClientName}'. Treat any case-insensitive or partial mention as referring to this client." + "If the user mentions no name, assume they are asking about '{SelectedClientName}'." + "If the user references a name that clearly differs from '{SelectedClientName}', respond only with: 'Please only ask questions about the selected client or select another client.' Otherwise, provide thorough answers for every question using only data from SQL or call transcripts." + "If no data is found, respond with 'No data found for that client.' Remove any client identifiers from the final response." + "Always send clientId as '{client_id}'." 
+ ) + + # Replace client name and client id in the additional instructions + additional_instructions = additional_instructions.replace( + "{SelectedClientName}", selected_client_name + ) + additional_instructions = additional_instructions.replace( + "{client_id}", client_id + ) + + agent: AzureAIAgent = current_app.agent + + thread: AzureAIAgentThread = None + message = ChatMessageContent(role=AuthorRole.USER, content=query) + sk_response = agent.invoke_stream( + messages=[message], + thread=thread, + additional_instructions=additional_instructions, + ) + + async def generate(): + try: + # yields deltaText strings one-by-one + async for chunk in sk_response: + if not chunk or not chunk.content: + continue + yield chunk.content # just the deltaText + finally: + thread = chunk.thread + await thread.delete() if thread else None + + return generate + except Exception as e: + await thread.delete() if thread else None + raise e diff --git a/src/App/backend/history/cosmosdbservice.py b/src/App/backend/services/cosmosdb_service.py similarity index 100% rename from src/App/backend/history/cosmosdbservice.py rename to src/App/backend/services/cosmosdb_service.py diff --git a/src/App/backend/services/sqldb_service.py b/src/App/backend/services/sqldb_service.py new file mode 100644 index 000000000..be1c7b358 --- /dev/null +++ b/src/App/backend/services/sqldb_service.py @@ -0,0 +1,244 @@ +# db.py +import logging +import struct + +import pyodbc +from azure.identity import DefaultAzureCredential +from dotenv import load_dotenv + +from backend.common.config import config + +load_dotenv() + +driver = config.ODBC_DRIVER +server = config.SQL_SERVER +database = config.SQL_DATABASE +username = config.SQL_USERNAME +password = config.SQL_PASSWORD +mid_id = config.MID_ID + + +def dict_cursor(cursor): + """ + Converts rows fetched by the cursor into a list of dictionaries. + + Args: + cursor: A database cursor object. + + Returns: + A list of dictionaries representing rows. 
+ """ + columns = [column[0] for column in cursor.description] + return [dict(zip(columns, row)) for row in cursor.fetchall()] + + +def get_connection(): + try: + credential = DefaultAzureCredential(managed_identity_client_id=mid_id) + + token_bytes = credential.get_token( + "https://database.windows.net/.default" + ).token.encode("utf-16-LE") + token_struct = struct.pack( + f" str: + """ + Connects to your SQL database and returns the client name for the given client_id. + """ + + conn = get_connection() + cursor = conn.cursor() + sql = "SELECT Client FROM Clients WHERE ClientId = ?" + cursor.execute(sql, (client_id,)) + row = cursor.fetchone() + conn.close() + if row: + return row[0] # The 'Client' column + else: + return "" + + +def get_client_data(): + """ + Fetches client data with their meeting information and asset values. + Updates sample data if necessary. + + Returns: + list: A list of dictionaries containing client information + """ + conn = None + try: + conn = get_connection() + cursor = conn.cursor() + sql_stmt = """ + SELECT + ClientId, + Client, + Email, + FORMAT(AssetValue, 'N0') AS AssetValue, + ClientSummary, + CAST(LastMeeting AS DATE) AS LastMeetingDate, + FORMAT(CAST(LastMeeting AS DATE), 'dddd MMMM d, yyyy') AS LastMeetingDateFormatted, + FORMAT(LastMeeting, 'hh:mm tt') AS LastMeetingStartTime, + FORMAT(LastMeetingEnd, 'hh:mm tt') AS LastMeetingEndTime, + CAST(NextMeeting AS DATE) AS NextMeetingDate, + FORMAT(CAST(NextMeeting AS DATE), 'dddd MMMM d, yyyy') AS NextMeetingFormatted, + FORMAT(NextMeeting, 'hh:mm tt') AS NextMeetingStartTime, + FORMAT(NextMeetingEnd, 'hh:mm tt') AS NextMeetingEndTime + FROM ( + SELECT ca.ClientId, Client, Email, AssetValue, ClientSummary, LastMeeting, LastMeetingEnd, NextMeeting, NextMeetingEnd + FROM ( + SELECT c.ClientId, c.Client, c.Email, a.AssetValue, cs.ClientSummary + FROM Clients c + JOIN ( + SELECT a.ClientId, a.Investment AS AssetValue + FROM ( + SELECT ClientId, sum(Investment) as Investment, + 
ROW_NUMBER() OVER (PARTITION BY ClientId ORDER BY AssetDate DESC) AS RowNum + FROM Assets + group by ClientId,AssetDate + ) a + WHERE a.RowNum = 1 + ) a ON c.ClientId = a.ClientId + JOIN ClientSummaries cs ON c.ClientId = cs.ClientId + ) ca + JOIN ( + SELECT cm.ClientId, + MAX(CASE WHEN StartTime < GETDATE() THEN StartTime END) AS LastMeeting, + DATEADD(MINUTE, 30, MAX(CASE WHEN StartTime < GETDATE() THEN StartTime END)) AS LastMeetingEnd, + MIN(CASE WHEN StartTime > GETDATE() AND StartTime < GETDATE() + 7 THEN StartTime END) AS NextMeeting, + DATEADD(MINUTE, 30, MIN(CASE WHEN StartTime > GETDATE() AND StartTime < GETDATE() + 7 THEN StartTime END)) AS NextMeetingEnd + FROM ClientMeetings cm + GROUP BY cm.ClientId + ) cm ON ca.ClientId = cm.ClientId + ) x + WHERE NextMeeting IS NOT NULL + ORDER BY NextMeeting ASC; + """ + cursor.execute(sql_stmt) + rows = dict_cursor(cursor) + + if len(rows) <= 6: + update_sample_data(conn) + + formatted_users = [] + for row in rows: + user = { + "ClientId": row["ClientId"], + "ClientName": row["Client"], + "ClientEmail": row["Email"], + "AssetValue": row["AssetValue"], + "NextMeeting": row["NextMeetingFormatted"], + "NextMeetingTime": row["NextMeetingStartTime"], + "NextMeetingEndTime": row["NextMeetingEndTime"], + "LastMeeting": row["LastMeetingDateFormatted"], + "LastMeetingStartTime": row["LastMeetingStartTime"], + "LastMeetingEndTime": row["LastMeetingEndTime"], + "ClientSummary": row["ClientSummary"], + } + formatted_users.append(user) + + return formatted_users + + except Exception as e: + logging.exception("Exception occurred in get_client_data") + raise e + finally: + if conn: + conn.close() + + +def update_sample_data(conn): + """ + Updates sample data in ClientMeetings, Assets, and Retirement tables to use current dates. 
+ + Args: + conn: Database connection object + """ + try: + cursor = conn.cursor() + combined_stmt = """ + WITH MaxDates AS ( + SELECT + MAX(CAST(StartTime AS Date)) AS MaxClientMeetingDate, + MAX(AssetDate) AS MaxAssetDate, + MAX(StatusDate) AS MaxStatusDate + FROM + (SELECT StartTime, NULL AS AssetDate, NULL AS StatusDate FROM ClientMeetings + UNION ALL + SELECT NULL AS StartTime, AssetDate, NULL AS StatusDate FROM Assets + UNION ALL + SELECT NULL AS StartTime, NULL AS AssetDate, StatusDate FROM Retirement) AS Combined + ), + Today AS ( + SELECT GETDATE() AS TodayDate + ), + DaysDifference AS ( + SELECT + DATEDIFF(DAY, MaxClientMeetingDate, TodayDate) + 3 AS ClientMeetingDaysDifference, + DATEDIFF(DAY, MaxAssetDate, TodayDate) - 30 AS AssetDaysDifference, + DATEDIFF(DAY, MaxStatusDate, TodayDate) - 30 AS StatusDaysDifference + FROM MaxDates, Today + ) + SELECT + ClientMeetingDaysDifference, + AssetDaysDifference / 30 AS AssetMonthsDifference, + StatusDaysDifference / 30 AS StatusMonthsDifference + FROM DaysDifference + """ + cursor.execute(combined_stmt) + date_diff_rows = dict_cursor(cursor) + + client_days = ( + date_diff_rows[0]["ClientMeetingDaysDifference"] if date_diff_rows else 0 + ) + asset_months = ( + int(date_diff_rows[0]["AssetMonthsDifference"]) if date_diff_rows else 0 + ) + status_months = ( + int(date_diff_rows[0]["StatusMonthsDifference"]) if date_diff_rows else 0 + ) + + # Update ClientMeetings + if client_days > 0: + client_update_stmt = f"UPDATE ClientMeetings SET StartTime = DATEADD(day, {client_days}, StartTime), EndTime = DATEADD(day, {client_days}, EndTime)" + cursor.execute(client_update_stmt) + conn.commit() + + # Update Assets + if asset_months > 0: + asset_update_stmt = f"UPDATE Assets SET AssetDate = DATEADD(month, {asset_months}, AssetDate)" + cursor.execute(asset_update_stmt) + conn.commit() + + # Update Retirement + if status_months > 0: + retire_update_stmt = f"UPDATE Retirement SET StatusDate = DATEADD(month, {status_months}, 
StatusDate)" + cursor.execute(retire_update_stmt) + conn.commit() + + logging.info("Sample data updated successfully") + except Exception as e: + logging.exception("Error updating sample data") + raise e diff --git a/src/App/db.py b/src/App/db.py deleted file mode 100644 index d0a81bec4..000000000 --- a/src/App/db.py +++ /dev/null @@ -1,60 +0,0 @@ -# db.py -import os - -from dotenv import load_dotenv -from azure.identity import DefaultAzureCredential -import pyodbc -import struct -import logging - - -load_dotenv() - -driver = "{ODBC Driver 18 for SQL Server}" -server = os.environ.get("SQLDB_SERVER") -database = os.environ.get("SQLDB_DATABASE") -username = os.environ.get("SQLDB_USERNAME") -password = os.environ.get("SQLDB_PASSWORD") -mid_id = os.environ.get("SQLDB_USER_MID") - - -def dict_cursor(cursor): - """ - Converts rows fetched by the cursor into a list of dictionaries. - - Args: - cursor: A database cursor object. - - Returns: - A list of dictionaries representing rows. - """ - columns = [column[0] for column in cursor.description] - return [dict(zip(columns, row)) for row in cursor.fetchall()] - - -def get_connection(): - try: - credential = DefaultAzureCredential(managed_identity_client_id=mid_id) - - token_bytes = credential.get_token( - "https://database.windows.net/.default" - ).token.encode("utf-16-LE") - token_struct = struct.pack(f" 0 + assert "client_id eq 'client123'" in data_sources[0]["parameters"]["filter"] + + @patch.object(ChatWithDataPlugin, "get_openai_client") + def test_get_answers_from_calltranscripts_no_results(self, mock_get_openai_client): + """Test call transcripts search with no results.""" + mock_client = MagicMock() + mock_get_openai_client.return_value = mock_client + + # Mock empty response + mock_completion = MagicMock() + mock_completion.choices = [] + mock_client.chat.completions.create.return_value = mock_completion + + result = self.plugin.get_answers_from_calltranscripts( + "Nonexistent query", "client123" + ) + + assert "No 
data found for that client." in result + + @patch.object(ChatWithDataPlugin, "get_openai_client") + def test_get_answers_from_calltranscripts_openai_error( + self, mock_get_openai_client + ): + """Test call transcripts with OpenAI processing error.""" + mock_client = MagicMock() + mock_get_openai_client.return_value = mock_client + + # Simulate OpenAI error + mock_client.chat.completions.create.side_effect = Exception( + "OpenAI processing failed" + ) + + result = self.plugin.get_answers_from_calltranscripts("Test query", "client123") + + assert "Error retrieving data from call transcripts" in result + assert "OpenAI processing failed" in result + + def test_get_sql_response_missing_client_id(self): + """Test SQL response with missing ClientId.""" + result = self.plugin.get_SQL_Response("Test query", "") + assert "Error: ClientId is required" in result + + result = self.plugin.get_SQL_Response("Test query", None) + assert "Error: ClientId is required" in result + + def test_get_sql_response_missing_input(self): + """Test SQL response with missing input query.""" + result = self.plugin.get_SQL_Response("", "client123") + assert "Error: Query input is required" in result + + result = self.plugin.get_SQL_Response(None, "client123") + assert "Error: Query input is required" in result + + def test_get_answers_from_calltranscripts_missing_client_id(self): + """Test call transcripts search with missing ClientId.""" + result = self.plugin.get_answers_from_calltranscripts("Test query", "") + assert "Error: ClientId is required" in result + + result = self.plugin.get_answers_from_calltranscripts("Test query", None) + assert "Error: ClientId is required" in result + + def test_get_answers_from_calltranscripts_missing_question(self): + """Test call transcripts search with missing question.""" + result = self.plugin.get_answers_from_calltranscripts("", "client123") + assert "Error: Question input is required" in result + + result = 
self.plugin.get_answers_from_calltranscripts(None, "client123") + assert "Error: Question input is required" in result diff --git a/src/App/tests/backend/services/test_chat_service.py b/src/App/tests/backend/services/test_chat_service.py new file mode 100644 index 000000000..effa70c2b --- /dev/null +++ b/src/App/tests/backend/services/test_chat_service.py @@ -0,0 +1,196 @@ +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest + +from backend.services.chat_service import stream_response_from_wealth_assistant + + +class TestChatService: + """Test suite for chat service functions.""" + + @pytest.mark.asyncio + async def test_stream_response_happy_path(self): + """Test successful streaming response with default prompt.""" + # Arrange + query = "What is the portfolio value for my client?" + client_id = "123" + client_name = "John Doe" + + # Create mock agent + mock_agent = MagicMock() + mock_thread = MagicMock() + mock_thread.delete = AsyncMock() + mock_chunk = MagicMock() + mock_chunk.content = "Response chunk" + mock_chunk.thread = mock_thread + + # Create a simple async generator function + async def mock_stream(): + yield mock_chunk + + # Mock invoke_stream to return the async generator + mock_agent.invoke_stream = MagicMock(return_value=mock_stream()) + + # Mock current_app.agent + mock_current_app = MagicMock() + mock_current_app.agent = mock_agent + + # Mock config + mock_config = MagicMock() + mock_config.STREAM_TEXT_SYSTEM_PROMPT = "" # Use default prompt + + with patch( + "backend.services.chat_service.current_app", mock_current_app + ), patch( + "backend.services.chat_service.get_client_name_from_db", + return_value=client_name, + ), patch( + "backend.services.chat_service.config", mock_config + ): + + # Act + generator_func = await stream_response_from_wealth_assistant( + query, client_id + ) + response_chunks = [] + async for chunk in generator_func(): + response_chunks.append(chunk) + + # Assert + assert len(response_chunks) == 1 + assert 
response_chunks[0] == "Response chunk" + mock_agent.invoke_stream.assert_called_once() + + # Verify the additional_instructions were set correctly + call_args = mock_agent.invoke_stream.call_args + assert call_args[1]["additional_instructions"].find(client_name) != -1 + assert call_args[1]["additional_instructions"].find(client_id) != -1 + mock_thread.delete.assert_called_once() + + @pytest.mark.asyncio + async def test_stream_response_exception_handling(self): + """Test that exceptions are properly handled.""" + # Arrange + query = "Test query" + client_id = "999" + client_name = "Test Client" + + mock_agent = MagicMock() + mock_agent.invoke_stream.side_effect = Exception("Test exception") + + mock_current_app = MagicMock() + mock_current_app.agent = mock_agent + + mock_config = MagicMock() + mock_config.STREAM_TEXT_SYSTEM_PROMPT = "Test prompt" + + with patch( + "backend.services.chat_service.current_app", mock_current_app + ), patch( + "backend.services.chat_service.get_client_name_from_db", + return_value=client_name, + ), patch( + "backend.services.chat_service.config", mock_config + ): + + # Act & Assert + with pytest.raises(Exception, match="Test exception"): + await stream_response_from_wealth_assistant(query, client_id) + + @pytest.mark.asyncio + async def test_stream_response_empty_iterator(self): + """Test behavior with empty iterator (no chunks) - tests the UnboundLocalError bug.""" + # Arrange + query = "Test query" + client_id = "123" + client_name = "Test Client" + + mock_agent = MagicMock() + + # Empty iterator - no chunks yielded + async def mock_stream(): + # Empty generator - yields nothing + return + yield # This line never executes + + mock_agent.invoke_stream = MagicMock(return_value=mock_stream()) + + mock_current_app = MagicMock() + mock_current_app.agent = mock_agent + + mock_config = MagicMock() + mock_config.STREAM_TEXT_SYSTEM_PROMPT = "" + + with patch( + "backend.services.chat_service.current_app", mock_current_app + ), patch( + 
"backend.services.chat_service.get_client_name_from_db", + return_value=client_name, + ), patch( + "backend.services.chat_service.config", mock_config + ): + + # Act - This should catch the UnboundLocalError from the implementation + with pytest.raises( + UnboundLocalError, match="cannot access local variable 'chunk'" + ): + generator_func = await stream_response_from_wealth_assistant( + query, client_id + ) + response_chunks = [] + async for chunk in generator_func(): + response_chunks.append(chunk) + + @pytest.mark.asyncio + async def test_default_prompt_formatting(self): + """Test the default prompt template replacement logic.""" + # Arrange + query = "Investment question" + client_id = "client_123" + client_name = "Alice Cooper" + + mock_agent = MagicMock() + mock_thread = MagicMock() + mock_thread.delete = AsyncMock() + mock_chunk = MagicMock() + mock_chunk.content = "Default prompt response" + mock_chunk.thread = mock_thread + + async def mock_stream(): + yield mock_chunk + + mock_agent.invoke_stream = MagicMock(return_value=mock_stream()) + + mock_current_app = MagicMock() + mock_current_app.agent = mock_agent + + mock_config = MagicMock() + mock_config.STREAM_TEXT_SYSTEM_PROMPT = "" # Empty, should use default + + with patch( + "backend.services.chat_service.current_app", mock_current_app + ), patch( + "backend.services.chat_service.get_client_name_from_db", + return_value=client_name, + ), patch( + "backend.services.chat_service.config", mock_config + ): + + # Act + generator_func = await stream_response_from_wealth_assistant( + query, client_id + ) + response_chunks = [] + async for chunk in generator_func(): + response_chunks.append(chunk) + + # Assert + call_args = mock_agent.invoke_stream.call_args + additional_instructions = call_args[1]["additional_instructions"] + + # Verify the default prompt contains expected elements + assert client_name in additional_instructions + assert client_id in additional_instructions + assert "selected client" in 
additional_instructions.lower() + assert "sql" in additional_instructions.lower() + mock_thread.delete.assert_called_once() diff --git a/src/App/tests/backend/history/test_cosmosdb_service.py b/src/App/tests/backend/services/test_cosmosdb_service.py similarity index 98% rename from src/App/tests/backend/history/test_cosmosdb_service.py rename to src/App/tests/backend/services/test_cosmosdb_service.py index ff0a51e5b..0484d5b07 100644 --- a/src/App/tests/backend/history/test_cosmosdb_service.py +++ b/src/App/tests/backend/services/test_cosmosdb_service.py @@ -3,7 +3,7 @@ import pytest from azure.cosmos import exceptions -from backend.history.cosmosdbservice import CosmosConversationClient +from backend.services.cosmosdb_service import CosmosConversationClient # Helper function to create an async iterable diff --git a/src/App/tests/backend/services/test_sqldb_service.py b/src/App/tests/backend/services/test_sqldb_service.py new file mode 100644 index 000000000..3a3745c3f --- /dev/null +++ b/src/App/tests/backend/services/test_sqldb_service.py @@ -0,0 +1,443 @@ +import struct +from unittest.mock import MagicMock, patch + +import pyodbc + +import backend.services.sqldb_service as sql_db + +# Mock configuration +sql_db.server = "mock_server" +sql_db.username = "mock_user" +sql_db.password = "mock_password" +sql_db.database = "mock_database" +sql_db.driver = "mock_driver" +sql_db.mid_id = "mock_mid_id" # Managed identity client ID if needed + + +@patch("backend.services.sqldb_service.pyodbc.connect") # Mock pyodbc.connect +@patch( + "backend.services.sqldb_service.DefaultAzureCredential" +) # Mock DefaultAzureCredential +def test_get_connection(mock_credential_class, mock_connect): + # Mock the DefaultAzureCredential and get_token method + mock_credential = MagicMock() + mock_credential_class.return_value = mock_credential + mock_token = MagicMock() + mock_token.token = "mock_token" + mock_credential.get_token.return_value = mock_token + # Create a mock connection object 
+ mock_conn = MagicMock() + mock_connect.return_value = mock_conn + + # Call the function + conn = sql_db.get_connection() + + # Assert that DefaultAzureCredential and get_token were called correctly + mock_credential_class.assert_called_once_with( + managed_identity_client_id=sql_db.mid_id + ) + mock_credential.get_token.assert_called_once_with( + "https://database.windows.net/.default" + ) + + # Assert that pyodbc.connect was called with the correct parameters, including the token + expected_attrs_before = { + 1256: struct.pack( + f" 6) + mock_client_data = [ + { + "ClientId": "client1", + "Client": "John Doe", + "Email": "john@example.com", + "AssetValue": "100,000", + "ClientSummary": "High net worth client", + "NextMeetingFormatted": "Monday January 1, 2024", + "NextMeetingStartTime": "10:00 AM", + "NextMeetingEndTime": "11:00 AM", + "LastMeetingDateFormatted": "Friday December 15, 2023", + "LastMeetingStartTime": "02:00 PM", + "LastMeetingEndTime": "03:00 PM", + }, + # Add 6 more records to trigger no update + *[ + { + "ClientId": f"client{i}", + "Client": f"Client {i}", + "Email": f"client{i}@example.com", + "AssetValue": "50,000", + "ClientSummary": f"Client {i} summary", + "NextMeetingFormatted": "Monday January 1, 2024", + "NextMeetingStartTime": "10:00 AM", + "NextMeetingEndTime": "11:00 AM", + "LastMeetingDateFormatted": "Friday December 15, 2023", + "LastMeetingStartTime": "02:00 PM", + "LastMeetingEndTime": "03:00 PM", + } + for i in range(2, 8) + ], + ] + mock_dict_cursor.return_value = mock_client_data + + # Call the function + result = sql_db.get_client_data() + + # Verify the result + assert len(result) == 7 + assert result[0]["ClientId"] == "client1" + assert result[0]["ClientName"] == "John Doe" + assert result[0]["ClientEmail"] == "john@example.com" + assert result[0]["AssetValue"] == "100,000" + + # Verify function calls + mock_get_connection.assert_called_once() + mock_conn.cursor.assert_called_once() + 
mock_cursor.execute.assert_called_once() + mock_dict_cursor.assert_called_once_with(mock_cursor) + mock_update_sample_data.assert_not_called() # Should not be called when > 6 records + mock_conn.close.assert_called_once() + + +@patch.object(sql_db, "update_sample_data") +@patch.object(sql_db, "dict_cursor") +@patch.object(sql_db, "get_connection") +def test_get_client_data_success_with_update( + mock_get_connection, mock_dict_cursor, mock_update_sample_data +): + """Test successful retrieval of client data when update is needed.""" + # Setup mocks + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_conn.cursor.return_value = mock_cursor + mock_get_connection.return_value = mock_conn + + # Mock dict_cursor return with few records (<= 6) + mock_client_data = [ + { + "ClientId": "client1", + "Client": "John Doe", + "Email": "john@example.com", + "AssetValue": "100,000", + "ClientSummary": "High net worth client", + "NextMeetingFormatted": "Monday January 1, 2024", + "NextMeetingStartTime": "10:00 AM", + "NextMeetingEndTime": "11:00 AM", + "LastMeetingDateFormatted": "Friday December 15, 2023", + "LastMeetingStartTime": "02:00 PM", + "LastMeetingEndTime": "03:00 PM", + } + ] + mock_dict_cursor.return_value = mock_client_data + + # Call the function + result = sql_db.get_client_data() + + # Verify the result + assert len(result) == 1 + assert result[0]["ClientName"] == "John Doe" + + # Verify function calls + mock_get_connection.assert_called_once() + mock_update_sample_data.assert_called_once_with( + mock_conn + ) # Should be called when <= 6 records + mock_conn.close.assert_called_once() + + +@patch.object(sql_db, "get_connection") +def test_get_client_data_exception_with_finally(mock_get_connection): + """Test exception handling with proper cleanup in finally block.""" + # Setup mocks + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_conn.cursor.return_value = mock_cursor + mock_cursor.execute.side_effect = Exception("Database query failed") + 
mock_get_connection.return_value = mock_conn + + # Call the function and expect exception to be raised + try: + sql_db.get_client_data() + assert False, "Expected exception was not raised" + except Exception as e: + assert str(e) == "Database query failed" + + # Verify connection is closed even when exception occurs + mock_conn.close.assert_called_once() + + +@patch.object(sql_db, "get_connection") +def test_get_client_data_exception_no_connection(mock_get_connection): + """Test exception handling when connection fails.""" + # Setup mocks + mock_get_connection.side_effect = Exception("Connection failed") + + # Call the function and expect exception to be raised + try: + sql_db.get_client_data() + assert False, "Expected exception was not raised" + except Exception as e: + assert str(e) == "Connection failed" + + +@patch.object(sql_db, "dict_cursor") +def test_update_sample_data_all_updates_needed(mock_dict_cursor): + """Test update_sample_data when all tables need updates.""" + # Setup mocks + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_conn.cursor.return_value = mock_cursor + + # Mock dict_cursor return indicating updates needed + mock_dict_cursor.return_value = [ + { + "ClientMeetingDaysDifference": 10, + "AssetMonthsDifference": 3, + "StatusMonthsDifference": 2, + } + ] + + # Call the function + sql_db.update_sample_data(mock_conn) + + # Verify function calls + mock_conn.cursor.assert_called_once() + mock_cursor.execute.assert_any_call( + "UPDATE ClientMeetings SET StartTime = DATEADD(day, 10, StartTime), EndTime = DATEADD(day, 10, EndTime)" + ) + mock_cursor.execute.assert_any_call( + "UPDATE Assets SET AssetDate = DATEADD(month, 3, AssetDate)" + ) + mock_cursor.execute.assert_any_call( + "UPDATE Retirement SET StatusDate = DATEADD(month, 2, StatusDate)" + ) + + # Verify commits were called + assert mock_conn.commit.call_count == 3 + + +@patch.object(sql_db, "dict_cursor") +def test_update_sample_data_no_updates_needed(mock_dict_cursor): + 
"""Test update_sample_data when no updates are needed.""" + # Setup mocks + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_conn.cursor.return_value = mock_cursor + + # Mock dict_cursor return indicating no updates needed + mock_dict_cursor.return_value = [ + { + "ClientMeetingDaysDifference": 0, + "AssetMonthsDifference": 0, + "StatusMonthsDifference": 0, + } + ] + + # Call the function + sql_db.update_sample_data(mock_conn) + + # Verify function calls - only the initial query should be executed + assert mock_cursor.execute.call_count == 1 # Only the combined_stmt query + mock_conn.commit.assert_not_called() # No commits should happen + + +@patch.object(sql_db, "dict_cursor") +def test_update_sample_data_empty_result(mock_dict_cursor): + """Test update_sample_data when dict_cursor returns empty result.""" + # Setup mocks + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_conn.cursor.return_value = mock_cursor + + # Mock dict_cursor return empty + mock_dict_cursor.return_value = [] + + # Call the function + sql_db.update_sample_data(mock_conn) + + # Verify function calls - only the initial query should be executed + assert mock_cursor.execute.call_count == 1 # Only the combined_stmt query + mock_conn.commit.assert_not_called() # No commits should happen + + +@patch.object(sql_db, "dict_cursor") +def test_update_sample_data_exception_handling(mock_dict_cursor): + """Test exception handling in update_sample_data.""" + # Setup mocks + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_conn.cursor.return_value = mock_cursor + mock_cursor.execute.side_effect = Exception("Update query failed") + + # Call the function and expect exception to be raised + try: + sql_db.update_sample_data(mock_conn) + assert False, "Expected exception was not raised" + except Exception as e: + assert str(e) == "Update query failed" + """Test suite for get_client_name_from_db function.""" + + @patch.object(sql_db, "get_connection") + def 
test_get_client_name_from_db_success(self, mock_get_connection): + """Test successful retrieval of client name.""" + # Setup mocks + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_conn.cursor.return_value = mock_cursor + mock_cursor.fetchone.return_value = ("John Doe",) + mock_get_connection.return_value = mock_conn + + # Call the function + result = sql_db.get_client_name_from_db("client123") + + # Verify the result + assert result == "John Doe" + + # Verify the function calls + mock_get_connection.assert_called_once() + mock_conn.cursor.assert_called_once() + mock_cursor.execute.assert_called_once_with( + "SELECT Client FROM Clients WHERE ClientId = ?", ("client123",) + ) + mock_cursor.fetchone.assert_called_once() + mock_conn.close.assert_called_once() diff --git a/src/App/tests/test_app.py b/src/App/tests/test_app.py index ff0ef42c2..ffa747097 100644 --- a/src/App/tests/test_app.py +++ b/src/App/tests/test_app.py @@ -3,10 +3,16 @@ from unittest.mock import AsyncMock, MagicMock, patch import pytest - from quart import Response -from app import (create_app, delete_all_conversations, generate_title, - init_cosmosdb_client, init_openai_client, stream_chat_request) + +from app import ( + create_app, + delete_all_conversations, + generate_title, + init_cosmosdb_client, + init_openai_client, + stream_chat_request, +) # Constants for testing INVALID_API_VERSION = "2022-01-01" @@ -21,22 +27,35 @@ @pytest.fixture(autouse=True) def set_env_vars(): - with patch("app.AZURE_OPENAI_PREVIEW_API_VERSION", "2024-02-15-preview"), patch( - "app.AZURE_OPENAI_ENDPOINT", "https://example.com/" - ), patch("app.AZURE_OPENAI_MODEL", "openai_model"), patch( - "app.CHAT_HISTORY_ENABLED", True + with patch( + "backend.common.config.config.AZURE_OPENAI_PREVIEW_API_VERSION", + "2024-02-15-preview", + ), patch( + "backend.common.config.config.AZURE_OPENAI_ENDPOINT", "https://example.com/" + ), patch( + "backend.common.config.config.AZURE_OPENAI_MODEL", "openai_model" + ), patch( + 
"backend.common.config.config.CHAT_HISTORY_ENABLED", True + ), patch( + "backend.common.config.config.AZURE_COSMOSDB_ACCOUNT", "test_account" ), patch( - "app.AZURE_COSMOSDB_ACCOUNT", "test_account" + "backend.common.config.config.AZURE_COSMOSDB_ACCOUNT_KEY", "test_key" ), patch( - "app.AZURE_COSMOSDB_ACCOUNT_KEY", "test_key" + "backend.common.config.config.AZURE_COSMOSDB_DATABASE", "test_database" ), patch( - "app.AZURE_COSMOSDB_DATABASE", "test_database" + "backend.common.config.config.AZURE_COSMOSDB_CONVERSATIONS_CONTAINER", + "test_container", ), patch( - "app.AZURE_COSMOSDB_CONVERSATIONS_CONTAINER", "test_container" + "backend.common.config.config.AZURE_COSMOSDB_ENABLE_FEEDBACK", True ), patch( - "app.AZURE_COSMOSDB_ENABLE_FEEDBACK", True + "backend.common.config.config.AZURE_OPENAI_KEY", "valid_key" ), patch( - "app.AZURE_OPENAI_KEY", "valid_key" + "backend.common.config.config.UI_TITLE", "Woodgrove Bank" + ), patch( + "backend.common.config.config.UI_FAVICON", "/favicon.ico" + ), patch( + "backend.common.config.config.MINIMUM_SUPPORTED_AZURE_OPENAI_PREVIEW_API_VERSION", + "2023-01-01", ): yield @@ -111,7 +130,7 @@ async def test_favicon(mock_send_static_file, client): @pytest.mark.asyncio async def test_ensure_cosmos_not_configured(client): - with patch("app.AZURE_COSMOSDB_ACCOUNT", ""): + with patch("backend.common.config.config.AZURE_COSMOSDB_ACCOUNT", ""): response = await client.get("/history/ensure") res_text = await response.get_data(as_text=True) assert response.status_code == 404 @@ -159,9 +178,9 @@ async def test_ensure_cosmos_exception(mock_init_cosmosdb_client, client): @pytest.mark.asyncio @patch("app.init_cosmosdb_client") async def test_ensure_cosmos_invalid_db_name(mock_init_cosmosdb_client, client): - with patch("app.AZURE_COSMOSDB_DATABASE", "your_db_name"), patch( - "app.AZURE_COSMOSDB_ACCOUNT", "your_account" - ): + with patch( + "backend.common.config.config.AZURE_COSMOSDB_DATABASE", "your_db_name" + ), 
patch("backend.common.config.config.AZURE_COSMOSDB_ACCOUNT", "your_account"): mock_init_cosmosdb_client.side_effect = Exception( "Invalid CosmosDB database name" ) @@ -177,7 +196,10 @@ async def test_ensure_cosmos_invalid_db_name(mock_init_cosmosdb_client, client): @pytest.mark.asyncio @patch("app.init_cosmosdb_client") async def test_ensure_cosmos_invalid_container_name(mock_init_cosmosdb_client, client): - with patch("app.AZURE_COSMOSDB_CONVERSATIONS_CONTAINER", "your_container_name"): + with patch( + "backend.common.config.config.AZURE_COSMOSDB_CONVERSATIONS_CONTAINER", + "your_container_name", + ): mock_init_cosmosdb_client.side_effect = Exception( "Invalid CosmosDB container name" ) @@ -202,39 +224,23 @@ async def test_ensure_cosmos_generic_exception(mock_init_cosmosdb_client, client @pytest.mark.asyncio -@patch("app.get_connection") -@patch("app.dict_cursor") -async def test_get_users_success(mock_dict_cursor, mock_get_connection, client): - # Mock database connection and cursor - mock_conn = MagicMock() - mock_cursor = MagicMock() - mock_get_connection.return_value = mock_conn - mock_conn.cursor.return_value = mock_cursor - - # Mock query results - mock_dict_cursor.side_effect = [ - [ # First call (client data) - { - "ClientId": 1, - "Client": "Client A", - "Email": "clienta@example.com", - "AssetValue": "1,000,000", - "ClientSummary": "Summary A", - "LastMeetingDateFormatted": "Monday January 1, 2023", - "LastMeetingStartTime": "10:00 AM", - "LastMeetingEndTime": "10:30 AM", - "NextMeetingFormatted": "Monday January 8, 2023", - "NextMeetingStartTime": "11:00 AM", - "NextMeetingEndTime": "11:30 AM", - } - ], - [ # Second call (date difference query) - { - "ClientMeetingDaysDifference": 5, - "AssetMonthsDifference": 1, - "StatusMonthsDifference": 1 - } - ] +@patch("backend.services.sqldb_service.get_client_data") +async def test_get_users_success(mock_get_client_data, client): + # Mock the service function return + mock_get_client_data.return_value = [ + { + 
"ClientId": 1, + "ClientName": "Client A", + "ClientEmail": "clienta@example.com", + "AssetValue": "1,000,000", + "ClientSummary": "Summary A", + "LastMeeting": "Monday January 1, 2023", + "LastMeetingStartTime": "10:00 AM", + "LastMeetingEndTime": "10:30 AM", + "NextMeeting": "Monday January 8, 2023", + "NextMeetingTime": "11:00 AM", + "NextMeetingEndTime": "11:30 AM", + } ] # Call the function @@ -259,31 +265,25 @@ async def test_get_users_success(mock_dict_cursor, mock_get_connection, client): @pytest.mark.asyncio -async def test_get_users_no_users(client): - mock_conn = MagicMock() - mock_cursor = MagicMock() - mock_conn.cursor.return_value = mock_cursor - mock_cursor.fetchall.return_value = [] - - with patch("app.get_connection", return_value=mock_conn): - response = await client.get("/api/users") - assert response.status_code == 200 - res_text = await response.get_data(as_text=True) - assert json.loads(res_text) == [] +@patch("backend.services.sqldb_service.get_client_data") +async def test_get_users_no_users(mock_get_client_data, client): + mock_get_client_data.return_value = [] + + response = await client.get("/api/users") + assert response.status_code == 200 + res_text = await response.get_data(as_text=True) + assert json.loads(res_text) == [] @pytest.mark.asyncio -async def test_get_users_sql_execution_failure(client): - mock_conn = MagicMock() - mock_cursor = MagicMock() - mock_conn.cursor.return_value = mock_cursor - mock_cursor.execute.side_effect = Exception("SQL execution failed") - - with patch("app.get_connection", return_value=mock_conn): - response = await client.get("/api/users") - assert response.status_code == 500 - res_text = await response.get_data(as_text=True) - assert "SQL execution failed" in res_text +@patch("backend.services.sqldb_service.get_client_data") +async def test_get_users_sql_execution_failure(mock_get_client_data, client): + mock_get_client_data.side_effect = Exception("SQL execution failed") + + response = await 
client.get("/api/users") + assert response.status_code == 500 + res_text = await response.get_data(as_text=True) + assert "SQL execution failed" in res_text @pytest.fixture @@ -1308,13 +1308,19 @@ def __init__(self, id, model, created, object, choices): self.choices = choices +# Mock chunk object with content attribute +class MockStreamChunk: + def __init__(self, content): + self.content = content + + # Simulated async generator for testing purposes async def fake_internal_stream_response(): # Simulating streaming data chunk by chunk chunks = ["chunk1", "chunk2"] for chunk in chunks: await asyncio.sleep(0.1) - yield chunk + yield MockStreamChunk(chunk) @pytest.mark.asyncio @@ -1328,8 +1334,10 @@ async def test_stream_chat_request_with_internal_stream(): request_headers = {"apim-request-id": "test_id"} # Patch stream_response_from_wealth_assistant and USE_INTERNAL_STREAM - with patch("app.stream_response_from_wealth_assistant", return_value=fake_internal_stream_response), \ - patch("app.USE_INTERNAL_STREAM", True): + with patch( + "app.stream_response_from_wealth_assistant", + return_value=fake_internal_stream_response, + ), patch("backend.common.config.config.USE_INTERNAL_STREAM", True): # Create the Quart app context for the test async with create_app().app_context(): @@ -1343,7 +1351,7 @@ async def test_stream_chat_request_with_internal_stream(): # Create an async generator for iterating over the streamed content async def async_response_data(): - for chunk in response_data.split('\n'): + for chunk in response_data.split("\n"): if chunk.strip(): # Ignore empty chunks yield chunk @@ -1365,7 +1373,7 @@ async def test_stream_chat_request_no_client_id(): request_headers = {"apim-request-id": "test_id"} async with create_app().app_context(): - with patch("app.USE_INTERNAL_STREAM", True): + with patch("backend.common.config.config.USE_INTERNAL_STREAM", True): response, status_code = await stream_chat_request( request_body, request_headers ) @@ -1383,7 +1391,7 @@ async 
def test_stream_chat_request_without_azurefunction(): } request_headers = {"apim-request-id": "test_id"} - with patch("app.USE_INTERNAL_STREAM", False): + with patch("backend.common.config.config.USE_INTERNAL_STREAM", False): with patch("app.send_chat_request", new_callable=AsyncMock) as mock_send: mock_send.return_value = ( async_generator( diff --git a/src/App/tests/test_db.py b/src/App/tests/test_db.py deleted file mode 100644 index 19e0dc2e8..000000000 --- a/src/App/tests/test_db.py +++ /dev/null @@ -1,92 +0,0 @@ -import struct -from unittest.mock import MagicMock, patch - -import db -import pyodbc - -# Mock configuration -db.server = "mock_server" -db.username = "mock_user" -db.password = "mock_password" -db.database = "mock_database" -db.driver = "mock_driver" -db.mid_id = "mock_mid_id" # Managed identity client ID if needed - - -@patch("db.pyodbc.connect") # Mock pyodbc.connect -@patch("db.DefaultAzureCredential") # Mock DefaultAzureCredential -def test_get_connection(mock_credential_class, mock_connect): - # Mock the DefaultAzureCredential and get_token method - mock_credential = MagicMock() - mock_credential_class.return_value = mock_credential - mock_token = MagicMock() - mock_token.token = "mock_token" - mock_credential.get_token.return_value = mock_token - # Create a mock connection object - mock_conn = MagicMock() - mock_connect.return_value = mock_conn - - # Call the function - conn = db.get_connection() - - # Assert that DefaultAzureCredential and get_token were called correctly - mock_credential_class.assert_called_once_with(managed_identity_client_id=db.mid_id) - mock_credential.get_token.assert_called_once_with("https://database.windows.net/.default") - - # Assert that pyodbc.connect was called with the correct parameters, including the token - expected_attrs_before = { - 1256: struct.pack(f" Date: Thu, 19 Jun 2025 11:33:48 +0530 Subject: [PATCH 6/6] refactor: couple of typo fix (#570) * initial bicep changes for fdp * update role assignments in 
bicep * feat: initial fdp changes for client advisor * updated post deployment scripts to use keyless authentication * rebuilt main.json * fix configuration handling and error checking in backend services * updated unit tests * Refactor code for improved readability and maintainability by organizing imports and formatting code blocks consistently across multiple files. * fix: correct variable names for managed identity and AI foundry in scripts and templates --- infra/main.bicep | 8 ++++---- infra/main.json | 10 +++++----- infra/scripts/process_sample_data.sh | 6 +++--- src/App/backend/services/chat_service.py | 2 +- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/infra/main.bicep b/infra/main.bicep index 4e7f4de11..9edebe267 100644 --- a/infra/main.bicep +++ b/infra/main.bicep @@ -100,7 +100,7 @@ var functionAppCallTranscriptSystemPrompt = '''You are an assistant who supports If no data is available, state 'No relevant data found for previous meetings.''' var functionAppStreamTextSystemPrompt = '''The currently selected client's name is '{SelectedClientName}'. Treat any case-insensitive or partial mention as referring to this client. - If the user mentions no name, assume they are asking about '{SelectedClientName}'.. + If the user mentions no name, assume they are asking about '{SelectedClientName}'. If the user references a name that clearly differs from '{SelectedClientName}', respond only with: 'Please only ask questions about the selected client or select another client.' Otherwise, provide thorough answers for every question using only data from SQL or call transcripts.' If no data is found, respond with 'No data found for that client.' Remove any client identifiers from the final response. 
Always send clientId as '{client_id}'.''' @@ -253,7 +253,7 @@ output COSMOSDB_ACCOUNT_NAME string = cosmosDBModule.outputs.cosmosAccountName output RESOURCE_GROUP_NAME string = resourceGroup().name output SQLDB_SERVER string = sqlDBModule.outputs.sqlServerName output SQLDB_DATABASE string = sqlDBModule.outputs.sqlDbName -output MANAGEDINDENTITY_WEBAPP_NAME string = managedIdentityModule.outputs.managedIdentityWebAppOutput.name -output MANAGEDINDENTITY_WEBAPP_CLIENTID string = managedIdentityModule.outputs.managedIdentityWebAppOutput.clientId -output AI_FOUNDARY_NAME string = aifoundry.outputs.aiFoundryName +output MANAGEDIDENTITY_WEBAPP_NAME string = managedIdentityModule.outputs.managedIdentityWebAppOutput.name +output MANAGEDIDENTITY_WEBAPP_CLIENTID string = managedIdentityModule.outputs.managedIdentityWebAppOutput.clientId +output AI_FOUNDRY_NAME string = aifoundry.outputs.aiFoundryName output AI_SEARCH_SERVICE_NAME string = aifoundry.outputs.aiSearchService diff --git a/infra/main.json b/infra/main.json index fe41cf42c..b1483eb4e 100644 --- a/infra/main.json +++ b/infra/main.json @@ -5,7 +5,7 @@ "_generator": { "name": "bicep", "version": "0.36.1.42791", - "templateHash": "8950753165543697743" + "templateHash": "461277054460209703" } }, "parameters": { @@ -350,7 +350,7 @@ "abbrs": "[variables('$fxv#0')]", "functionAppSqlPrompt": "Generate a valid T-SQL query to find {query} for tables and columns provided below:\n 1. Table: Clients\n Columns: ClientId, Client, Email, Occupation, MaritalStatus, Dependents\n 2. Table: InvestmentGoals\n Columns: ClientId, InvestmentGoal\n 3. Table: Assets\n Columns: ClientId, AssetDate, Investment, ROI, Revenue, AssetType\n 4. Table: ClientSummaries\n Columns: ClientId, ClientSummary\n 5. Table: InvestmentGoalsDetails\n Columns: ClientId, InvestmentGoal, TargetAmount, Contribution\n 6. Table: Retirement\n Columns: ClientId, StatusDate, RetirementGoalProgress, EducationGoalProgress\n 7. 
Table: ClientMeetings\n Columns: ClientId, ConversationId, Title, StartTime, EndTime, Advisor, ClientEmail\n Always use the Investment column from the Assets table as the value.\n Assets table has snapshots of values by date. Do not add numbers across different dates for total values.\n Do not use client name in filters.\n Do not include assets values unless asked for.\n ALWAYS use ClientId = {clientid} in the query filter.\n ALWAYS select Client Name (Column: Client) in the query.\n Query filters are IMPORTANT. Add filters like AssetType, AssetDate, etc. if needed.\n Only return the generated SQL query. Do not return anything else.", "functionAppCallTranscriptSystemPrompt": "You are an assistant who supports wealth advisors in preparing for client meetings. \n You have access to the client’s past meeting call transcripts. \n When answering questions, especially summary requests, provide a detailed and structured response that includes key topics, concerns, decisions, and trends. \n If no data is available, state 'No relevant data found for previous meetings.", - "functionAppStreamTextSystemPrompt": "The currently selected client's name is '{SelectedClientName}'. Treat any case-insensitive or partial mention as referring to this client.\n If the user mentions no name, assume they are asking about '{SelectedClientName}'..\n If the user references a name that clearly differs from '{SelectedClientName}', respond only with: 'Please only ask questions about the selected client or select another client.' Otherwise, provide thorough answers for every question using only data from SQL or call transcripts.'\n If no data is found, respond with 'No data found for that client.' Remove any client identifiers from the final response.\n Always send clientId as '{client_id}'." + "functionAppStreamTextSystemPrompt": "The currently selected client's name is '{SelectedClientName}'. 
Treat any case-insensitive or partial mention as referring to this client.\n If the user mentions no name, assume they are asking about '{SelectedClientName}'.\n If the user references a name that clearly differs from '{SelectedClientName}', respond only with: 'Please only ask questions about the selected client or select another client.' Otherwise, provide thorough answers for every question using only data from SQL or call transcripts.'\n If no data is found, respond with 'No data found for that client.' Remove any client identifiers from the final response.\n Always send clientId as '{client_id}'." }, "resources": [ { @@ -2681,15 +2681,15 @@ "type": "string", "value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_sql_db'), '2022-09-01').outputs.sqlDbName.value]" }, - "MANAGEDINDENTITY_WEBAPP_NAME": { + "MANAGEDIDENTITY_WEBAPP_NAME": { "type": "string", "value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_managed_identity'), '2022-09-01').outputs.managedIdentityWebAppOutput.value.name]" }, - "MANAGEDINDENTITY_WEBAPP_CLIENTID": { + "MANAGEDIDENTITY_WEBAPP_CLIENTID": { "type": "string", "value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_managed_identity'), '2022-09-01').outputs.managedIdentityWebAppOutput.value.clientId]" }, - "AI_FOUNDARY_NAME": { + "AI_FOUNDRY_NAME": { "type": "string", "value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.aiFoundryName.value]" }, diff --git 
a/infra/scripts/process_sample_data.sh b/infra/scripts/process_sample_data.sh index 4523f60ee..62f260f0c 100644 --- a/infra/scripts/process_sample_data.sh +++ b/infra/scripts/process_sample_data.sh @@ -43,15 +43,15 @@ if [ -z "$SqlDatabaseName" ]; then fi if [ -z "$webAppManagedIdentityClientId" ]; then - webAppManagedIdentityClientId=$(azd env get-value MANAGEDINDENTITY_WEBAPP_CLIENTID) + webAppManagedIdentityClientId=$(azd env get-value MANAGEDIDENTITY_WEBAPP_CLIENTID) fi if [ -z "$webAppManagedIdentityDisplayName" ]; then - webAppManagedIdentityDisplayName=$(azd env get-value MANAGEDINDENTITY_WEBAPP_NAME) + webAppManagedIdentityDisplayName=$(azd env get-value MANAGEDIDENTITY_WEBAPP_NAME) fi if [ -z "$aiFoundryName" ]; then - aiFoundryName=$(azd env get-value AI_FOUNDARY_NAME) + aiFoundryName=$(azd env get-value AI_FOUNDRY_NAME) fi if [ -z "$aiSearchName" ]; then diff --git a/src/App/backend/services/chat_service.py b/src/App/backend/services/chat_service.py index e2060e6ee..8dc8375a4 100644 --- a/src/App/backend/services/chat_service.py +++ b/src/App/backend/services/chat_service.py @@ -55,7 +55,7 @@ async def generate(): continue yield chunk.content # just the deltaText finally: - thread = chunk.thread + thread = chunk.thread if chunk else None await thread.delete() if thread else None return generate