diff --git a/infra/deploy_ai_foundry.bicep b/infra/deploy_ai_foundry.bicep index d6a8c611b..b8954277d 100644 --- a/infra/deploy_ai_foundry.bicep +++ b/infra/deploy_ai_foundry.bicep @@ -492,3 +492,5 @@ output aiProjectName string = aiHubProject.name output applicationInsightsId string = applicationInsights.id output logAnalyticsWorkspaceResourceName string = logAnalytics.name output storageAccountName string = storageNameCleaned +output applicationInsightsConnectionString string = applicationInsights.properties.ConnectionString + diff --git a/infra/deploy_app_service.bicep b/infra/deploy_app_service.bicep index 3d30f5291..a5531ce4f 100644 --- a/infra/deploy_app_service.bicep +++ b/infra/deploy_app_service.bicep @@ -178,6 +178,7 @@ param streamTextSystemPrompt string param aiProjectConnectionString string param useAIProjectClientFlag string = 'false' param aiProjectName string +param applicationInsightsConnectionString string // var WebAppImageName = 'DOCKER|byoaiacontainer.azurecr.io/byoaia-app:latest' @@ -215,6 +216,10 @@ resource Website 'Microsoft.Web/sites@2020-06-01' = { name: 'APPINSIGHTS_INSTRUMENTATIONKEY' value: reference(applicationInsightsId, '2015-05-01').InstrumentationKey } + { + name: 'APPLICATIONINSIGHTS_CONNECTION_STRING' + value: applicationInsightsConnectionString + } { name: 'AZURE_SEARCH_SERVICE' value: AzureSearchService diff --git a/infra/main.bicep b/infra/main.bicep index 3e286e79f..e437e94d1 100644 --- a/infra/main.bicep +++ b/infra/main.bicep @@ -246,6 +246,7 @@ module appserviceModule 'deploy_app_service.bicep' = { streamTextSystemPrompt: functionAppStreamTextSystemPrompt aiProjectConnectionString:keyVault.getSecret('AZURE-AI-PROJECT-CONN-STRING') aiProjectName:aifoundry.outputs.aiProjectName + applicationInsightsConnectionString:aifoundry.outputs.applicationInsightsConnectionString } scope: resourceGroup(resourceGroup().name) } diff --git a/infra/main.json b/infra/main.json index 0e4dc7597..ce81ca845 100644 --- a/infra/main.json +++ 
b/infra/main.json @@ -5,7 +5,7 @@ "_generator": { "name": "bicep", "version": "0.34.44.8038", - "templateHash": "1797657337218629559" + "templateHash": "9713836480105967098" } }, "parameters": { @@ -708,7 +708,7 @@ "_generator": { "name": "bicep", "version": "0.34.44.8038", - "templateHash": "3569608512312433081" + "templateHash": "18186919711353368589" } }, "parameters": { @@ -1016,11 +1016,11 @@ "name": "[format('{0}/{1}', variables('aiHubName'), format('{0}-connection-AzureOpenAI', variables('aiHubName')))]", "properties": { "category": "AIServices", - "target": "[reference(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2021-10-01').endpoint]", + "target": "[reference(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2024-04-01-preview').endpoint]", "authType": "ApiKey", "isSharedToAll": true, "credentials": { - "key": "[listKeys(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2021-10-01').key1]" + "key": "[listKeys(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2024-04-01-preview').key1]" }, "metadata": { "ApiType": "Azure", @@ -1122,7 +1122,7 @@ }, { "type": "Microsoft.CognitiveServices/accounts", - "apiVersion": "2021-10-01", + "apiVersion": "2024-04-01-preview", "name": "[variables('aiServicesName')]", "location": "[variables('location')]", "sku": { @@ -1131,9 +1131,6 @@ "kind": "AIServices", "properties": { "customSubDomainName": "[variables('aiServicesName')]", - "apiProperties": { - "statisticsEnabled": false - }, "publicNetworkAccess": "Enabled" } }, @@ -1303,7 +1300,7 @@ "apiVersion": "2021-11-01-preview", "name": "[format('{0}/{1}', parameters('keyVaultName'), 'AZURE-OPENAI-KEY')]", "properties": { - "value": "[listKeys(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2021-10-01').key1]" + "value": "[listKeys(resourceId('Microsoft.CognitiveServices/accounts', 
variables('aiServicesName')), '2024-04-01-preview').key1]" }, "dependsOn": [ "[resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName'))]" @@ -1330,7 +1327,7 @@ "apiVersion": "2021-11-01-preview", "name": "[format('{0}/{1}', parameters('keyVaultName'), 'AZURE-OPENAI-ENDPOINT')]", "properties": { - "value": "[reference(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2021-10-01').endpoint]" + "value": "[reference(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2024-04-01-preview').endpoint]" }, "dependsOn": [ "[resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName'))]" @@ -1393,7 +1390,7 @@ "apiVersion": "2021-11-01-preview", "name": "[format('{0}/{1}', parameters('keyVaultName'), 'COG-SERVICES-ENDPOINT')]", "properties": { - "value": "[reference(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2021-10-01').endpoint]" + "value": "[reference(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2024-04-01-preview').endpoint]" }, "dependsOn": [ "[resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName'))]" @@ -1404,7 +1401,7 @@ "apiVersion": "2021-11-01-preview", "name": "[format('{0}/{1}', parameters('keyVaultName'), 'COG-SERVICES-KEY')]", "properties": { - "value": "[listKeys(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2021-10-01').key1]" + "value": "[listKeys(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2024-04-01-preview').key1]" }, "dependsOn": [ "[resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName'))]" @@ -1454,7 +1451,7 @@ }, "aiServicesTarget": { "type": "string", - "value": "[reference(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServicesName')), '2021-10-01').endpoint]" + "value": "[reference(resourceId('Microsoft.CognitiveServices/accounts', 
variables('aiServicesName')), '2024-04-01-preview').endpoint]" }, "aiServicesName": { "type": "string", @@ -1495,6 +1492,10 @@ "storageAccountName": { "type": "string", "value": "[variables('storageNameCleaned')]" + }, + "applicationInsightsConnectionString": { + "type": "string", + "value": "[reference(resourceId('Microsoft.Insights/components', variables('applicationInsightsName')), '2020-02-02').ConnectionString]" } } } @@ -2296,6 +2297,9 @@ }, "aiProjectName": { "value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.aiProjectName.value]" + }, + "applicationInsightsConnectionString": { + "value": "[reference(extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', subscription().subscriptionId, resourceGroup().name), 'Microsoft.Resources/deployments', 'deploy_ai_foundry'), '2022-09-01').outputs.applicationInsightsConnectionString.value]" } }, "template": { @@ -2305,7 +2309,7 @@ "_generator": { "name": "bicep", "version": "0.34.44.8038", - "templateHash": "8701343999231764795" + "templateHash": "15866728948176241669" } }, "parameters": { @@ -2693,6 +2697,9 @@ }, "aiProjectName": { "type": "string" + }, + "applicationInsightsConnectionString": { + "type": "string" } }, "variables": { @@ -2732,6 +2739,10 @@ "name": "APPINSIGHTS_INSTRUMENTATIONKEY", "value": "[reference(parameters('applicationInsightsId'), '2015-05-01').InstrumentationKey]" }, + { + "name": "APPLICATIONINSIGHTS_CONNECTION_STRING", + "value": "[parameters('applicationInsightsConnectionString')]" + }, { "name": "AZURE_SEARCH_SERVICE", "value": "[parameters('AzureSearchService')]" diff --git a/src/App/app.py b/src/App/app.py index 411829551..4c9357573 100644 --- a/src/App/app.py +++ b/src/App/app.py @@ -37,6 +37,10 @@ from db import dict_cursor from backend.chat_logic_handler import stream_response_from_wealth_assistant +from 
backend.event_utils import track_event_if_configured +from azure.monitor.opentelemetry import configure_azure_monitor +from opentelemetry import trace +from opentelemetry.trace import Status, StatusCode bp = Blueprint("routes", __name__, static_folder="static", template_folder="static") @@ -61,6 +65,30 @@ UI_FAVICON = os.environ.get("UI_FAVICON") or "/favicon.ico" UI_SHOW_SHARE_BUTTON = os.environ.get("UI_SHOW_SHARE_BUTTON", "true").lower() == "true" +# Check if the Application Insights Instrumentation Key is set in the environment variables +instrumentation_key = os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING") +if instrumentation_key: + # Configure Application Insights if the Instrumentation Key is found + configure_azure_monitor(connection_string=instrumentation_key) + logging.info("Application Insights configured with the provided Instrumentation Key") +else: + # Log a warning if the Instrumentation Key is not found + logging.warning("No Application Insights Instrumentation Key found. 
Skipping configuration") + +# Configure logging +logging.basicConfig(level=logging.INFO) + +# Suppress INFO logs from 'azure.core.pipeline.policies.http_logging_policy' +logging.getLogger("azure.core.pipeline.policies.http_logging_policy").setLevel( + logging.WARNING +) +logging.getLogger("azure.identity.aio._internal").setLevel(logging.WARNING) + +# Suppress info logs from OpenTelemetry exporter +logging.getLogger("azure.monitor.opentelemetry.exporter.export._base").setLevel( + logging.WARNING +) + def create_app(): app = Quart(__name__) @@ -384,9 +412,19 @@ def init_openai_client(use_data=SHOULD_USE_DATA): azure_endpoint=endpoint, ) + track_event_if_configured("AzureOpenAIClientInitialized", { + "status": "success", + "endpoint": endpoint, + "use_api_key": bool(aoai_api_key), + }) + return azure_openai_client except Exception as e: logging.exception("Exception in Azure OpenAI initialization", e) + span = trace.get_current_span() + if span is not None: + span.record_exception(e) + span.set_status(Status(StatusCode.ERROR, str(e))) azure_openai_client = None raise e @@ -411,8 +449,20 @@ def init_cosmosdb_client(): container_name=AZURE_COSMOSDB_CONVERSATIONS_CONTAINER, enable_message_feedback=AZURE_COSMOSDB_ENABLE_FEEDBACK, ) + + track_event_if_configured("CosmosDBClientInitialized", { + "status": "success", + "endpoint": cosmos_endpoint, + "database": AZURE_COSMOSDB_DATABASE, + "container": AZURE_COSMOSDB_CONVERSATIONS_CONTAINER, + "feedback_enabled": AZURE_COSMOSDB_ENABLE_FEEDBACK, + }) except Exception as e: logging.exception("Exception in CosmosDB initialization", e) + span = trace.get_current_span() + if span is not None: + span.record_exception(e) + span.set_status(Status(StatusCode.ERROR, str(e))) cosmos_conversation_client = None raise e else: @@ -425,6 +475,7 @@ def get_configured_data_source(): data_source = {} query_type = "simple" if DATASOURCE_TYPE == "AzureCognitiveSearch": + track_event_if_configured("datasource_selected", {"type": 
"AzureCognitiveSearch"}) # Set query type if AZURE_SEARCH_QUERY_TYPE: query_type = AZURE_SEARCH_QUERY_TYPE @@ -433,6 +484,7 @@ def get_configured_data_source(): and AZURE_SEARCH_SEMANTIC_SEARCH_CONFIG ): query_type = "semantic" + track_event_if_configured("query_type_determined", {"query_type": query_type}) # Set filter filter = None @@ -441,11 +493,13 @@ def get_configured_data_source(): userToken = request.headers.get("X-MS-TOKEN-AAD-ACCESS-TOKEN", "") logging.debug(f"USER TOKEN is {'present' if userToken else 'not present'}") if not userToken: + track_event_if_configured("user_token_missing", {}) raise Exception( "Document-level access control is enabled, but user access token could not be fetched." ) filter = generateFilterString(userToken) + track_event_if_configured("filter_generated", {"filter": filter}) logging.debug(f"FILTER: {filter}") # Set authentication @@ -455,6 +509,7 @@ def get_configured_data_source(): else: # If key is not provided, assume AOAI resource identity has been granted access to the search service authentication = {"type": "system_assigned_managed_identity"} + track_event_if_configured("authentication_set", {"auth_type": authentication["type"]}) data_source = { "type": "azure_search", @@ -508,6 +563,7 @@ def get_configured_data_source(): } elif DATASOURCE_TYPE == "AzureCosmosDB": query_type = "vector" + track_event_if_configured("datasource_selected", {"type": "AzureCosmosDB"}) data_source = { "type": "azure_cosmos_db", @@ -566,8 +622,10 @@ def get_configured_data_source(): }, } elif DATASOURCE_TYPE == "Elasticsearch": + track_event_if_configured("datasource_selected", {"type": "Elasticsearch"}) if ELASTICSEARCH_QUERY_TYPE: query_type = ELASTICSEARCH_QUERY_TYPE + track_event_if_configured("query_type_determined", {"query_type": query_type}) data_source = { "type": "elasticsearch", @@ -621,8 +679,10 @@ def get_configured_data_source(): }, } elif DATASOURCE_TYPE == "AzureMLIndex": + track_event_if_configured("datasource_selected", {"type": 
"AzureMLIndex"}) if AZURE_MLINDEX_QUERY_TYPE: query_type = AZURE_MLINDEX_QUERY_TYPE + track_event_if_configured("query_type_determined", {"query_type": query_type}) data_source = { "type": "azure_ml_index", @@ -674,6 +734,7 @@ def get_configured_data_source(): } elif DATASOURCE_TYPE == "Pinecone": query_type = "vector" + track_event_if_configured("datasource_selected", {"type": "Pinecone"}) data_source = { "type": "pinecone", @@ -716,6 +777,7 @@ def get_configured_data_source(): }, } else: + track_event_if_configured("unknown_datasource_type", {"type": DATASOURCE_TYPE}) raise Exception( f"DATASOURCE_TYPE is not configured or unknown: {DATASOURCE_TYPE}" ) @@ -742,15 +804,26 @@ def get_configured_data_source(): "model_id": ELASTICSEARCH_EMBEDDING_MODEL_ID, } else: + track_event_if_configured("embedding_dependency_missing", { + "datasource_type": DATASOURCE_TYPE, + "query_type": query_type + }) raise Exception( f"Vector query type ({query_type}) is selected for data source type {DATASOURCE_TYPE} but no embedding dependency is configured" ) + track_event_if_configured("embedding_dependency_set", { + "embedding_type": embeddingDependency.get("type") + }) data_source["parameters"]["embedding_dependency"] = embeddingDependency - + track_event_if_configured("get_configured_data_source_complete", { + "datasource_type": DATASOURCE_TYPE, + "query_type": query_type + }) return data_source def prepare_model_args(request_body, request_headers): + track_event_if_configured("prepare_model_args_start", {}) request_messages = request_body.get("messages", []) messages = [] if not SHOULD_USE_DATA: @@ -775,6 +848,7 @@ def prepare_model_args(request_body, request_headers): ), } user_json = json.dumps(user_args) + track_event_if_configured("ms_defender_user_info_added", {"user_id": user_args["EndUserId"]}) model_args = { "messages": messages, @@ -792,6 +866,7 @@ def prepare_model_args(request_body, request_headers): } if SHOULD_USE_DATA: + 
track_event_if_configured("data_source_configured", {}) model_args["extra_body"] = {"data_sources": [get_configured_data_source()]} model_args_clean = copy.deepcopy(model_args) @@ -829,11 +904,13 @@ ]["authentication"][field] = "*****" logging.debug(f"REQUEST BODY: {json.dumps(model_args_clean, indent=4)}") + track_event_if_configured("prepare_model_args_complete", {"model": AZURE_OPENAI_MODEL}) return model_args async def promptflow_request(request): + track_event_if_configured("promptflow_request_start", {}) try: headers = { "Content-Type": "application/json", @@ -861,12 +938,18 @@ ) resp = response.json() resp["id"] = request["messages"][-1]["id"] + track_event_if_configured("promptflow_request_success", {}) return resp except Exception as e: + span = trace.get_current_span() + if span is not None: + span.record_exception(e) + span.set_status(Status(StatusCode.ERROR, str(e))) logging.error(f"An error occurred while making promptflow_request: {e}") async def send_chat_request(request_body, request_headers): + track_event_if_configured("send_chat_request_start", {}) filtered_messages = [] messages = request_body.get("messages", []) for message in messages: @@ -885,13 +968,20 @@ ) response = raw_response.parse() apim_request_id = raw_response.headers.get("apim-request-id") + + track_event_if_configured("send_chat_request_success", {"model": model_args.get("model")}) except Exception as e: + span = trace.get_current_span() + if span is not None: + span.record_exception(e) + span.set_status(Status(StatusCode.ERROR, str(e))) logging.exception("Exception in send_chat_request") raise e return response, apim_request_id async def complete_chat_request(request_body, request_headers): + track_event_if_configured("complete_chat_request_start", {}) if USE_PROMPTFLOW and 
PROMPTFLOW_ENDPOINT and PROMPTFLOW_API_KEY: response = await promptflow_request(request_body) history_metadata = request_body.get("history_metadata", {}) @@ -902,6 +992,7 @@ async def complete_chat_request(request_body, request_headers): PROMPTFLOW_CITATIONS_FIELD_NAME, ) elif USE_INTERNAL_STREAM: + track_event_if_configured("internal_stream_selected", {}) request_body = await request.get_json() client_id = request_body.get("client_id") print(request_body) @@ -963,10 +1054,13 @@ async def complete_chat_request(request_body, request_headers): {"role": "assistant", "content": query_response} ) + track_event_if_configured("complete_chat_request_success", {"client_id": client_id}) + return response async def stream_chat_request(request_body, request_headers): + track_event_if_configured("stream_chat_request_start", {}) if USE_INTERNAL_STREAM: history_metadata = request_body.get("history_metadata", {}) # function_url = STREAMING_AZUREFUNCTION_ENDPOINT @@ -974,8 +1068,10 @@ async def stream_chat_request(request_body, request_headers): client_id = request_body.get("client_id") if client_id is None: + track_event_if_configured("client_id_missing", {}) return jsonify({"error": "No client ID provided"}), 400 query = request_body.get("messages")[-1].get("content") + track_event_if_configured("stream_internal_selected", {"client_id": client_id}) sk_response = await stream_response_from_wealth_assistant(query, client_id) @@ -1028,11 +1124,16 @@ async def generate(): yield format_stream_response( completionChunk, history_metadata, apim_request_id ) - + track_event_if_configured("stream_openai_selected", {}) return generate() async def conversation_internal(request_body, request_headers): + track_event_if_configured("conversation_internal_start", { + "streaming": SHOULD_STREAM, + "promptflow": USE_PROMPTFLOW, + "internal_stream": USE_INTERNAL_STREAM + }) try: if SHOULD_STREAM: return await stream_chat_request(request_body, request_headers) @@ -1042,9 +1143,14 @@ async def 
conversation_internal(request_body, request_headers): # return response else: result = await complete_chat_request(request_body, request_headers) + track_event_if_configured("conversation_internal_success", {}) return jsonify(result) except Exception as ex: + span = trace.get_current_span() + if span is not None: + span.record_exception(ex) + span.set_status(Status(StatusCode.ERROR, str(ex))) logging.exception(ex) if hasattr(ex, "status_code"): return jsonify({"error": str(ex)}), ex.status_code @@ -1055,9 +1161,10 @@ async def conversation_internal(request_body, request_headers): @bp.route("/conversation", methods=["POST"]) async def conversation(): if not request.is_json: + track_event_if_configured("invalid_request_format", {}) return jsonify({"error": "request must be json"}), 415 request_json = await request.get_json() - + track_event_if_configured("conversation_api_invoked", {}) return await conversation_internal(request_json, request.headers) @@ -1067,6 +1174,10 @@ def get_frontend_settings(): return jsonify(frontend_settings), 200 except Exception as e: logging.exception("Exception in /frontend_settings") + span = trace.get_current_span() + if span is not None: + span.record_exception(e) + span.set_status(Status(StatusCode.ERROR, str(e))) return jsonify({"error": str(e)}), 500 @@ -1075,6 +1186,10 @@ def get_frontend_settings(): async def add_conversation(): authenticated_user = get_authenticated_user_details(request_headers=request.headers) user_id = authenticated_user["user_principal_id"] + track_event_if_configured( + "HistoryGenerate_Start", + {"user_id": user_id} + ) # check request for conversation_id request_json = await request.get_json() @@ -1097,6 +1212,15 @@ async def add_conversation(): history_metadata["title"] = title history_metadata["date"] = conversation_dict["createdAt"] + track_event_if_configured( + "ConversationCreated", + { + "user_id": user_id, + "conversation_id": conversation_id, + "title": title + } + ) + # Format the incoming 
message object in the "chat/completions" messages format # then write it to the conversation history in cosmos messages = request_json["messages"] @@ -1113,6 +1237,14 @@ async def add_conversation(): + conversation_id + "." ) + track_event_if_configured( + "UserMessageAdded", + { + "user_id": user_id, + "conversation_id": conversation_id, + "message": messages[-1], + } + ) else: raise Exception("No user message found") @@ -1122,9 +1254,28 @@ async def add_conversation(): request_body = await request.get_json() history_metadata["conversation_id"] = conversation_id request_body["history_metadata"] = history_metadata + track_event_if_configured( + "SendingToChatCompletions", + { + "user_id": user_id, + "conversation_id": conversation_id + } + ) + + track_event_if_configured( + "HistoryGenerate_Completed", + { + "user_id": user_id, + "conversation_id": conversation_id + } + ) return await conversation_internal(request_body, request.headers) except Exception as e: + span = trace.get_current_span() + if span is not None: + span.record_exception(e) + span.set_status(Status(StatusCode.ERROR, str(e))) logging.exception("Exception in /history/generate") return jsonify({"error": str(e)}), 500 @@ -1138,6 +1289,11 @@ async def update_conversation(): request_json = await request.get_json() conversation_id = request_json.get("conversation_id", None) + track_event_if_configured("UpdateConversation_Start", { + "user_id": user_id, + "conversation_id": conversation_id + }) + try: # make sure cosmos is configured cosmos_conversation_client = init_cosmosdb_client() @@ -1160,6 +1316,10 @@ async def update_conversation(): user_id=user_id, input_message=messages[-2], ) + track_event_if_configured("ToolMessageStored", { + "user_id": user_id, + "conversation_id": conversation_id + }) # write the assistant message await cosmos_conversation_client.create_message( uuid=messages[-1]["id"], @@ -1167,16 +1327,28 @@ async def update_conversation(): user_id=user_id, input_message=messages[-1], ) + 
track_event_if_configured("AssistantMessageStored", { + "user_id": user_id, + "conversation_id": conversation_id, + "message": messages[-1] + }) else: raise Exception("No bot messages found") - # Submit request to Chat Completions for response await cosmos_conversation_client.cosmosdb_client.close() + track_event_if_configured("UpdateConversation_Success", { + "user_id": user_id, + "conversation_id": conversation_id + }) response = {"success": True} return jsonify(response), 200 except Exception as e: logging.exception("Exception in /history/update") + span = trace.get_current_span() + if span is not None: + span.record_exception(e) + span.set_status(Status(StatusCode.ERROR, str(e))) return jsonify({"error": str(e)}), 500 @@ -1190,6 +1362,11 @@ async def update_message(): request_json = await request.get_json() message_id = request_json.get("message_id", None) message_feedback = request_json.get("message_feedback", None) + + track_event_if_configured("MessageFeedback_Start", { + "user_id": user_id, + "message_id": message_id + }) try: if not message_id: return jsonify({"error": "message_id is required"}), 400 @@ -1202,6 +1379,11 @@ async def update_message(): user_id, message_id, message_feedback ) if updated_message: + track_event_if_configured("MessageFeedback_Updated", { + "user_id": user_id, + "message_id": message_id, + "feedback": message_feedback + }) return ( jsonify( { @@ -1212,6 +1394,10 @@ async def update_message(): 200, ) else: + track_event_if_configured("MessageFeedback_NotFound", { + "user_id": user_id, + "message_id": message_id + }) return ( jsonify( { @@ -1223,6 +1409,10 @@ async def update_message(): except Exception as e: logging.exception("Exception in /history/message_feedback") + span = trace.get_current_span() + if span is not None: + span.record_exception(e) + span.set_status(Status(StatusCode.ERROR, str(e))) return jsonify({"error": str(e)}), 500 @@ -1236,6 +1426,11 @@ async def delete_conversation(): request_json = await 
request.get_json() conversation_id = request_json.get("conversation_id", None) + track_event_if_configured("DeleteConversation_Start", { + "user_id": user_id, + "conversation_id": conversation_id + }) + try: if not conversation_id: return jsonify({"error": "conversation_id is required"}), 400 @@ -1253,6 +1448,11 @@ async def delete_conversation(): await cosmos_conversation_client.cosmosdb_client.close() + track_event_if_configured("DeleteConversation_Success", { + "user_id": user_id, + "conversation_id": conversation_id + }) + return ( jsonify( { @@ -1264,6 +1464,10 @@ async def delete_conversation(): ) except Exception as e: logging.exception("Exception in /history/delete") + span = trace.get_current_span() + if span is not None: + span.record_exception(e) + span.set_status(Status(StatusCode.ERROR, str(e))) return jsonify({"error": str(e)}), 500 @@ -1273,6 +1477,11 @@ async def list_conversations(): authenticated_user = get_authenticated_user_details(request_headers=request.headers) user_id = authenticated_user["user_principal_id"] + track_event_if_configured("ListConversations_Start", { + "user_id": user_id, + "offset": offset + }) + # make sure cosmos is configured cosmos_conversation_client = init_cosmosdb_client() if not cosmos_conversation_client: @@ -1284,10 +1493,19 @@ async def list_conversations(): ) await cosmos_conversation_client.cosmosdb_client.close() if not isinstance(conversations, list): + track_event_if_configured("ListConversations_Empty", { + "user_id": user_id, + "offset": offset + }) return jsonify({"error": f"No conversations for {user_id} were found"}), 404 # return the conversation ids + track_event_if_configured("ListConversations_Success", { + "user_id": user_id, + "conversation_count": len(conversations) + }) + return jsonify(conversations), 200 @@ -1300,7 +1518,17 @@ async def get_conversation(): request_json = await request.get_json() conversation_id = request_json.get("conversation_id", None) + 
track_event_if_configured("GetConversation_Start", { + "user_id": user_id, + "conversation_id": conversation_id, + }) + if not conversation_id: + track_event_if_configured("GetConversation_Failed", { + "user_id": user_id, + "conversation_id": conversation_id, + "error": f"Conversation {conversation_id} not found", + }) return jsonify({"error": "conversation_id is required"}), 400 # make sure cosmos is configured @@ -1341,6 +1569,11 @@ async def get_conversation(): ] await cosmos_conversation_client.cosmosdb_client.close() + track_event_if_configured("GetConversation_Success", { + "user_id": user_id, + "conversation_id": conversation_id, + "message_count": len(messages) + }) return jsonify({"conversation_id": conversation_id, "messages": messages}), 200 @@ -1353,7 +1586,17 @@ async def rename_conversation(): request_json = await request.get_json() conversation_id = request_json.get("conversation_id", None) + track_event_if_configured("RenameConversation_Start", { + "user_id": user_id, + "conversation_id": conversation_id + }) + if not conversation_id: + track_event_if_configured("RenameConversation_Failed", { + "user_id": user_id, + "conversation_id": conversation_id, + "error": f"Conversation {conversation_id} not found", + }) return jsonify({"error": "conversation_id is required"}), 400 # make sure cosmos is configured @@ -1385,6 +1628,12 @@ async def rename_conversation(): ) await cosmos_conversation_client.cosmosdb_client.close() + + track_event_if_configured("RenameConversation_Success", { + "user_id": user_id, + "conversation_id": conversation_id, + "new_title": title + }) return jsonify(updated_conversation), 200 @@ -1394,6 +1643,10 @@ async def delete_all_conversations(): authenticated_user = get_authenticated_user_details(request_headers=request.headers) user_id = authenticated_user["user_principal_id"] + track_event_if_configured("DeleteAllConversations_Start", { + "user_id": user_id + }) + # get conversations for user try: # make sure cosmos is configured 
@@ -1405,6 +1658,9 @@ async def delete_all_conversations(): user_id, offset=0, limit=None ) if not conversations: + track_event_if_configured("DeleteAllConversations_Empty", { + "user_id": user_id, + }) return jsonify({"error": f"No conversations for {user_id} were found"}), 404 # delete each conversation @@ -1419,6 +1675,12 @@ async def delete_all_conversations(): user_id, conversation["id"] ) await cosmos_conversation_client.cosmosdb_client.close() + + track_event_if_configured("DeleteAllConversations_Success", { + "user_id": user_id, + "conversation_count": len(conversations) + }) + return ( jsonify( { @@ -1430,6 +1692,10 @@ async def delete_all_conversations(): except Exception as e: logging.exception("Exception in /history/delete_all") + span = trace.get_current_span() + if span is not None: + span.record_exception(e) + span.set_status(Status(StatusCode.ERROR, str(e))) return jsonify({"error": str(e)}), 500 @@ -1443,8 +1709,18 @@ async def clear_messages(): request_json = await request.get_json() conversation_id = request_json.get("conversation_id", None) + track_event_if_configured("ClearConversationMessages_Start", { + "user_id": user_id, + "conversation_id": conversation_id, + }) + try: if not conversation_id: + track_event_if_configured("ClearConversationMessages_Failed", { + "user_id": user_id, + "conversation_id": conversation_id, + "error": "conversation_id is required" + }) return jsonify({"error": "conversation_id is required"}), 400 # make sure cosmos is configured @@ -1455,6 +1731,11 @@ async def clear_messages(): # delete the conversation messages from cosmos await cosmos_conversation_client.delete_messages(conversation_id, user_id) + track_event_if_configured("ClearConversationMessages_Success", { + "user_id": user_id, + "conversation_id": conversation_id + }) + return ( jsonify( { @@ -1466,12 +1747,19 @@ async def clear_messages(): ) except Exception as e: logging.exception("Exception in /history/clear_messages") + span = trace.get_current_span() 
+ if span is not None: + span.record_exception(e) + span.set_status(Status(StatusCode.ERROR, str(e))) return jsonify({"error": str(e)}), 500 @bp.route("/history/ensure", methods=["GET"]) async def ensure_cosmos(): if not AZURE_COSMOSDB_ACCOUNT: + track_event_if_configured("EnsureCosmosDB_Failed", { + "error": "CosmosDB is not configured", + }) return jsonify({"error": "CosmosDB is not configured"}), 404 try: @@ -1479,13 +1767,23 @@ async def ensure_cosmos(): success, err = await cosmos_conversation_client.ensure() if not cosmos_conversation_client or not success: if err: + track_event_if_configured("EnsureCosmosDB_Failed", { + "error": err, + }) return jsonify({"error": err}), 422 return jsonify({"error": "CosmosDB is not configured or not working"}), 500 await cosmos_conversation_client.cosmosdb_client.close() + track_event_if_configured("EnsureCosmosDB_Success", { + "status": "CosmosDB is configured and working", + }) return jsonify({"message": "CosmosDB is configured and working"}), 200 except Exception as e: logging.exception("Exception in /history/ensure") + span = trace.get_current_span() + if span is not None: + span.record_exception(e) + span.set_status(Status(StatusCode.ERROR, str(e))) cosmos_exception = str(e) if "Invalid credentials" in cosmos_exception: return jsonify({"error": cosmos_exception}), 401 @@ -1512,6 +1810,7 @@ async def ensure_cosmos(): async def generate_title(conversation_messages): + # make sure the messages are sorted by _ts descending title_prompt = 'Summarize the conversation so far into a 4-word or less title. Do not use any quotation marks or punctuation. Respond with a json object in the format {{"title": string}}. Do not include any other commentary or description.' 
@@ -1540,6 +1839,8 @@ async def generate_title(conversation_messages): @bp.route("/api/users", methods=["GET"]) def get_users(): + + track_event_if_configured("UserFetch_Start", {}) conn = None try: conn = get_connection() @@ -1594,6 +1895,9 @@ def get_users(): rows = dict_cursor(cursor) if len(rows) <= 6: + track_event_if_configured("UserFetch_SampleUpdate", { + "rows_count": len(rows), + }) # update ClientMeetings,Assets,Retirement tables sample data to current date cursor = conn.cursor() combined_stmt = """ @@ -1678,9 +1982,17 @@ def get_users(): } users.append(user) + track_event_if_configured("UserFetch_Success", { + "user_count": len(users), + }) + return jsonify(users) except Exception as e: + span = trace.get_current_span() + if span is not None: + span.record_exception(e) + span.set_status(Status(StatusCode.ERROR, str(e))) print("Exception occurred:", e) return str(e), 500 finally: diff --git a/src/App/backend/event_utils.py b/src/App/backend/event_utils.py new file mode 100644 index 000000000..c04214b64 --- /dev/null +++ b/src/App/backend/event_utils.py @@ -0,0 +1,29 @@ +import logging +import os +from azure.monitor.events.extension import track_event + + +def track_event_if_configured(event_name: str, event_data: dict): + """Track an event if Application Insights is configured. + + This function safely wraps the Azure Monitor track_event function + to handle potential errors with the ProxyLogger. 
+ + Args: + event_name: The name of the event to track + event_data: Dictionary of event data/dimensions + """ + try: + instrumentation_key = os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING") + if instrumentation_key: + track_event(event_name, event_data) + else: + logging.warning( + f"Skipping track_event for {event_name} as Application Insights is not configured" + ) + except AttributeError as e: + # Handle the 'ProxyLogger' object has no attribute 'resource' error + logging.warning(f"ProxyLogger error in track_event: {e}") + except Exception as e: + # Catch any other exceptions to prevent them from bubbling up + logging.warning(f"Error in track_event: {e}") diff --git a/src/App/requirements.txt b/src/App/requirements.txt index 1a87b8001..4d52ee10d 100644 --- a/src/App/requirements.txt +++ b/src/App/requirements.txt @@ -31,4 +31,13 @@ pyodbc==5.2.0 semantic_kernel==1.21.3 azure-search-documents==11.6.0b9 azure-ai-projects==1.0.0b9 -azure-ai-inference==1.0.0b9 \ No newline at end of file +azure-ai-inference==1.0.0b9 + +opentelemetry-exporter-otlp-proto-grpc +opentelemetry-exporter-otlp-proto-http +azure-monitor-events-extension +opentelemetry-sdk==1.31.1 +opentelemetry-api==1.31.1 +opentelemetry-semantic-conventions==0.52b1 +opentelemetry-instrumentation==0.52b1 +azure-monitor-opentelemetry==1.6.8 \ No newline at end of file