2 changes: 2 additions & 0 deletions .github/workflows/python-test.yaml
@@ -49,6 +49,8 @@ jobs:
cd ./app/frontend
npm install
npm run build
- name: Check i18n translations
run: npx -y @lingual/i18n-check --locales app/frontend/src/locales -s en -f i18next -r summary
- name: Install dependencies
run: |
uv pip install -r requirements-dev.txt
3 changes: 0 additions & 3 deletions app/backend/app.py
@@ -471,7 +471,6 @@ async def setup_clients():
USE_CHAT_HISTORY_BROWSER = os.getenv("USE_CHAT_HISTORY_BROWSER", "").lower() == "true"
USE_CHAT_HISTORY_COSMOS = os.getenv("USE_CHAT_HISTORY_COSMOS", "").lower() == "true"
USE_AGENTIC_RETRIEVAL = os.getenv("USE_AGENTIC_RETRIEVAL", "").lower() == "true"
ENABLE_AGENTIC_RETRIEVAL_SOURCE_DATA = os.getenv("ENABLE_AGENTIC_RETRIEVAL_SOURCE_DATA", "").lower() == "true"

# WEBSITE_HOSTNAME is always set by App Service, RUNNING_IN_PRODUCTION is set in main.bicep
RUNNING_ON_AZURE = os.getenv("WEBSITE_HOSTNAME") is not None or os.getenv("RUNNING_IN_PRODUCTION") is not None
@@ -690,7 +689,6 @@ async def setup_clients():
query_speller=AZURE_SEARCH_QUERY_SPELLER,
prompt_manager=prompt_manager,
reasoning_effort=OPENAI_REASONING_EFFORT,
hydrate_references=ENABLE_AGENTIC_RETRIEVAL_SOURCE_DATA,
multimodal_enabled=USE_MULTIMODAL,
image_embeddings_client=image_embeddings_client,
global_blob_manager=global_blob_manager,
@@ -718,7 +716,6 @@ async def setup_clients():
query_speller=AZURE_SEARCH_QUERY_SPELLER,
prompt_manager=prompt_manager,
reasoning_effort=OPENAI_REASONING_EFFORT,
hydrate_references=ENABLE_AGENTIC_RETRIEVAL_SOURCE_DATA,
multimodal_enabled=USE_MULTIMODAL,
image_embeddings_client=image_embeddings_client,
global_blob_manager=global_blob_manager,
124 changes: 34 additions & 90 deletions app/backend/approaches/approach.py
@@ -6,13 +6,13 @@

from azure.search.documents.agent.aio import KnowledgeAgentRetrievalClient
from azure.search.documents.agent.models import (
KnowledgeAgentAzureSearchDocReference,
KnowledgeAgentIndexParams,
KnowledgeAgentMessage,
KnowledgeAgentMessageTextContent,
KnowledgeAgentRetrievalRequest,
KnowledgeAgentRetrievalResponse,
KnowledgeAgentSearchActivityRecord,
KnowledgeAgentSearchIndexActivityRecord,
KnowledgeAgentSearchIndexReference,
SearchIndexKnowledgeSourceParams,
)
from azure.search.documents.aio import SearchClient
from azure.search.documents.models import (
@@ -162,7 +162,6 @@ def __init__(
openai_host: str,
prompt_manager: PromptManager,
reasoning_effort: Optional[str] = None,
hydrate_references: bool = False,
multimodal_enabled: bool = False,
image_embeddings_client: Optional[ImageEmbeddings] = None,
global_blob_manager: Optional[BlobManager] = None,
@@ -180,7 +179,6 @@ def __init__(
self.openai_host = openai_host
self.prompt_manager = prompt_manager
self.reasoning_effort = reasoning_effort
self.hydrate_references = hydrate_references
self.include_token_usage = True
self.multimodal_enabled = multimodal_enabled
self.image_embeddings_client = image_embeddings_client
@@ -276,7 +274,6 @@ async def run_agentic_retrieval(
top: Optional[int] = None,
filter_add_on: Optional[str] = None,
minimum_reranker_score: Optional[float] = None,
max_docs_for_reranker: Optional[int] = None,
results_merge_strategy: Optional[str] = None,
) -> tuple[KnowledgeAgentRetrievalResponse, list[Document]]:
# STEP 1: Invoke agentic retrieval
@@ -289,13 +286,10 @@
for msg in messages
if msg["role"] != "system"
],
target_index_params=[
KnowledgeAgentIndexParams(
index_name=search_index_name,
reranker_threshold=minimum_reranker_score,
max_docs_for_reranker=max_docs_for_reranker,
knowledge_source_params=[
SearchIndexKnowledgeSourceParams(
knowledge_source_name=search_index_name,
filter_add_on=filter_add_on,
include_reference_source_data=True,
)
],
)
@@ -305,12 +299,12 @@
activities = response.activity
activity_mapping: dict[int, str] = (
{
activity.id: activity.query.search
activity.id: activity.search_index_arguments.search
for activity in activities
if (
isinstance(activity, KnowledgeAgentSearchActivityRecord)
and activity.query
and activity.query.search is not None
isinstance(activity, KnowledgeAgentSearchIndexActivityRecord)
and activity.search_index_arguments
and activity.search_index_arguments.search is not None
)
}
if activities
@@ -322,92 +316,42 @@
return response, []

# Extract references
refs = [r for r in response.references if isinstance(r, KnowledgeAgentAzureSearchDocReference)]

refs = [r for r in response.references if isinstance(r, KnowledgeAgentSearchIndexReference)]
documents: list[Document] = []

if self.hydrate_references:
# Hydrate references to get full documents
documents = await self.hydrate_agent_references(
references=refs,
top=top,
)
else:
# Create documents from reference source data
for ref in refs:
if ref.source_data:
documents.append(
Document(
id=ref.doc_key,
content=ref.source_data.get("content"),
sourcepage=ref.source_data.get("sourcepage"),
)
)
if top and len(documents) >= top:
break

# Build mappings for agent queries and sorting
ref_to_activity: dict[str, int] = {}
doc_to_ref_id: dict[str, str] = {}

# Create documents from reference source data
for ref in refs:
if ref.doc_key:
ref_to_activity[ref.doc_key] = ref.activity_source
if ref.source_data and ref.doc_key:
# Note that ref.doc_key is the same as source_data["id"]
documents.append(
Document(
id=ref.source_data.get("id"),
content=ref.source_data.get("content"),
category=ref.source_data.get("category"),
sourcepage=ref.source_data.get("sourcepage"),
sourcefile=ref.source_data.get("sourcefile"),
oids=ref.source_data.get("oids"),
groups=ref.source_data.get("groups"),
reranker_score=ref.reranker_score,
images=ref.source_data.get("images"),
search_agent_query=activity_mapping[ref.activity_source],
)
)
doc_to_ref_id[ref.doc_key] = ref.id
if top and len(documents) >= top:
break

# Inject agent search queries into all documents
for doc in documents:
if doc.id and doc.id in ref_to_activity:
activity_id = ref_to_activity[doc.id]
doc.search_agent_query = activity_mapping.get(activity_id, "")
if minimum_reranker_score is not None:
documents = [doc for doc in documents if (doc.reranker_score or 0) >= minimum_reranker_score]

# Apply sorting strategy to the documents
if results_merge_strategy == "interleaved": # Use interleaved reference order
if results_merge_strategy == "interleaved":
documents = sorted(
documents,
key=lambda d: int(doc_to_ref_id.get(d.id, 0)) if d.id and doc_to_ref_id.get(d.id) else 0,
)
# else: Default - preserve original order

return response, documents

async def hydrate_agent_references(
self,
references: list[KnowledgeAgentAzureSearchDocReference],
top: Optional[int],
) -> list[Document]:
doc_keys: set[str] = set()

for ref in references:
if not ref.doc_key:
continue
doc_keys.add(ref.doc_key)
if top and len(doc_keys) >= top:
break

if not doc_keys:
return []

# Build search filter only on unique doc IDs
id_csv = ",".join(doc_keys)
id_filter = f"search.in(id, '{id_csv}', ',')"

# Fetch full documents
hydrated_docs: list[Document] = await self.search(
top=len(doc_keys),
query_text=None,
filter=id_filter,
vectors=[],
use_text_search=False,
use_vector_search=False,
use_semantic_ranker=False,
use_semantic_captions=False,
minimum_search_score=None,
minimum_reranker_score=None,
use_query_rewriting=False,
)

return hydrated_docs

async def get_sources_content(
self,
results: list[Document],
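The `run_agentic_retrieval` changes above migrate from the retired `target_index_params`/`KnowledgeAgentIndexParams` request shape to `knowledge_source_params`/`SearchIndexKnowledgeSourceParams`, and rely on every reference carrying `source_data`, which is what allows the `hydrate_references` flag and the `hydrate_agent_references` helper to be deleted outright. A minimal sketch of the new call shape, assuming `azure-search-documents==11.7.0b1`, with placeholder endpoint, agent, and knowledge-source names (not the repo's actual config):

```python
from azure.identity.aio import DefaultAzureCredential
from azure.search.documents.agent.aio import KnowledgeAgentRetrievalClient
from azure.search.documents.agent.models import (
    KnowledgeAgentMessage,
    KnowledgeAgentMessageTextContent,
    KnowledgeAgentRetrievalRequest,
    SearchIndexKnowledgeSourceParams,
)


async def retrieve_sources(question: str) -> list[dict]:
    client = KnowledgeAgentRetrievalClient(
        endpoint="https://myservice.search.windows.net",  # placeholder
        agent_name="gptkb-agent",  # placeholder
        credential=DefaultAzureCredential(),
    )
    response = await client.retrieve(
        retrieval_request=KnowledgeAgentRetrievalRequest(
            messages=[
                KnowledgeAgentMessage(
                    role="user",
                    content=[KnowledgeAgentMessageTextContent(text=question)],
                )
            ],
            knowledge_source_params=[
                SearchIndexKnowledgeSourceParams(
                    knowledge_source_name="gptkbindex",  # placeholder
                    filter_add_on=None,  # OData filter, e.g. for ACL trimming
                )
            ],
        )
    )
    # References now arrive with source_data attached, so no second search
    # is needed to hydrate full documents.
    return [ref.source_data for ref in (response.references or []) if ref.source_data]
```

Note that `reranker_threshold` is no longer sent on the request; the new code instead filters client-side on `doc.reranker_score` after the documents are built.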
9 changes: 1 addition & 8 deletions app/backend/approaches/chatreadretrieveread.py
@@ -56,7 +56,6 @@ def __init__(
query_speller: str,
prompt_manager: PromptManager,
reasoning_effort: Optional[str] = None,
hydrate_references: bool = False,
multimodal_enabled: bool = False,
image_embeddings_client: Optional[ImageEmbeddings] = None,
global_blob_manager: Optional[BlobManager] = None,
@@ -84,7 +83,6 @@ def __init__(
self.query_rewrite_tools = self.prompt_manager.load_tools("chat_query_rewrite_tools.json")
self.answer_prompt = self.prompt_manager.load_prompt("chat_answer_question.prompty")
self.reasoning_effort = reasoning_effort
self.hydrate_references = hydrate_references
self.include_token_usage = True
self.multimodal_enabled = multimodal_enabled
self.image_embeddings_client = image_embeddings_client
@@ -390,13 +388,10 @@ async def run_agentic_retrieval_approach(
overrides: dict[str, Any],
auth_claims: dict[str, Any],
):
minimum_reranker_score = overrides.get("minimum_reranker_score", 0)
search_index_filter = self.build_filter(overrides, auth_claims)
minimum_reranker_score = overrides.get("minimum_reranker_score", 0)
top = overrides.get("top", 3)
max_subqueries = overrides.get("max_subqueries", 10)
results_merge_strategy = overrides.get("results_merge_strategy", "interleaved")
# 50 is the amount of documents that the reranker can process per query
max_docs_for_reranker = max_subqueries * 50
send_text_sources = overrides.get("send_text_sources", True)
send_image_sources = overrides.get("send_image_sources", self.multimodal_enabled) and self.multimodal_enabled

@@ -407,7 +402,6 @@
top=top,
filter_add_on=search_index_filter,
minimum_reranker_score=minimum_reranker_score,
max_docs_for_reranker=max_docs_for_reranker,
results_merge_strategy=results_merge_strategy,
)

@@ -426,7 +420,6 @@
messages,
{
"reranker_threshold": minimum_reranker_score,
"max_docs_for_reranker": max_docs_for_reranker,
"results_merge_strategy": results_merge_strategy,
"filter": search_index_filter,
},
7 changes: 0 additions & 7 deletions app/backend/approaches/retrievethenread.py
@@ -46,7 +46,6 @@ def __init__(
query_speller: str,
prompt_manager: PromptManager,
reasoning_effort: Optional[str] = None,
hydrate_references: bool = False,
multimodal_enabled: bool = False,
image_embeddings_client: Optional[ImageEmbeddings] = None,
global_blob_manager: Optional[BlobManager] = None,
@@ -74,7 +73,6 @@ def __init__(
self.answer_prompt = self.prompt_manager.load_prompt("ask_answer_question.prompty")
self.reasoning_effort = reasoning_effort
self.include_token_usage = True
self.hydrate_references = hydrate_references
self.multimodal_enabled = multimodal_enabled
self.image_embeddings_client = image_embeddings_client
self.global_blob_manager = global_blob_manager
@@ -229,10 +227,7 @@ async def run_agentic_retrieval_approach(
minimum_reranker_score = overrides.get("minimum_reranker_score", 0)
search_index_filter = self.build_filter(overrides, auth_claims)
top = overrides.get("top", 3)
max_subqueries = overrides.get("max_subqueries", 10)
results_merge_strategy = overrides.get("results_merge_strategy", "interleaved")
# 50 is the amount of documents that the reranker can process per query
max_docs_for_reranker = max_subqueries * 50
send_text_sources = overrides.get("send_text_sources", True)
send_image_sources = overrides.get("send_image_sources", self.multimodal_enabled) and self.multimodal_enabled

@@ -243,7 +238,6 @@
top=top,
filter_add_on=search_index_filter,
minimum_reranker_score=minimum_reranker_score,
max_docs_for_reranker=max_docs_for_reranker,
results_merge_strategy=results_merge_strategy,
)

@@ -263,7 +257,6 @@
messages,
{
"reranker_threshold": minimum_reranker_score,
"max_docs_for_reranker": max_docs_for_reranker,
"results_merge_strategy": results_merge_strategy,
"filter": search_index_filter,
},
27 changes: 22 additions & 5 deletions app/backend/prepdocslib/searchmanager.py
@@ -14,12 +14,14 @@
KnowledgeAgent,
KnowledgeAgentAzureOpenAIModel,
KnowledgeAgentRequestLimits,
KnowledgeAgentTargetIndex,
KnowledgeSourceReference,
RescoringOptions,
SearchableField,
SearchField,
SearchFieldDataType,
SearchIndex,
SearchIndexKnowledgeSource,
SearchIndexKnowledgeSourceParameters,
SemanticConfiguration,
SemanticField,
SemanticPrioritizedFields,
@@ -83,7 +85,6 @@ async def create_index(self):
logger.info("Checking whether search index %s exists...", self.search_info.index_name)

async with self.search_info.create_search_index_client() as search_index_client:

embedding_field = None
images_field = None
text_vector_search_profile = None
@@ -439,13 +440,29 @@ async def create_agent(self):
if self.search_info.agent_name:
logger.info(f"Creating search agent named {self.search_info.agent_name}")

field_names = ["id", "sourcepage", "sourcefile", "content", "category"]
if self.use_acls:
field_names.extend(["oids", "groups"])
if self.search_images:
field_names.append("images/url")
async with self.search_info.create_search_index_client() as search_index_client:
knowledge_source = SearchIndexKnowledgeSource(
name=self.search_info.index_name, # Use the same name for convenience
description="Default knowledge source using the main search index",
search_index_parameters=SearchIndexKnowledgeSourceParameters(
search_index_name=self.search_info.index_name,
source_data_select=",".join(field_names),
),
)
await search_index_client.create_or_update_knowledge_source(
knowledge_source=knowledge_source, api_version="2025-08-01-preview"
)
await search_index_client.create_or_update_agent(
agent=KnowledgeAgent(
name=self.search_info.agent_name,
target_indexes=[
KnowledgeAgentTargetIndex(
index_name=self.search_info.index_name, default_include_reference_source_data=True
knowledge_sources=[
KnowledgeSourceReference(
name=knowledge_source.name, include_references=True, include_reference_source_data=True
)
],
models=[
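With this change, agent provisioning becomes a two-step operation: first create or update a `SearchIndexKnowledgeSource` that wraps the index (with `source_data_select` controlling which fields are echoed back as reference source data), then create or update the `KnowledgeAgent` that references it. A condensed sketch under the same assumptions (`azure-search-documents==11.7.0b1`, sync client for brevity; endpoint, index, and Azure OpenAI names are placeholders):

```python
from azure.identity import DefaultAzureCredential
from azure.search.documents.indexes import SearchIndexClient
from azure.search.documents.indexes.models import (
    AzureOpenAIVectorizerParameters,
    KnowledgeAgent,
    KnowledgeAgentAzureOpenAIModel,
    KnowledgeSourceReference,
    SearchIndexKnowledgeSource,
    SearchIndexKnowledgeSourceParameters,
)

client = SearchIndexClient(
    endpoint="https://myservice.search.windows.net",  # placeholder
    credential=DefaultAzureCredential(),
)

# Step 1: knowledge source wrapping the index; source_data_select picks the
# fields returned with each reference.
knowledge_source = SearchIndexKnowledgeSource(
    name="gptkbindex",  # the PR reuses the index name for convenience
    search_index_parameters=SearchIndexKnowledgeSourceParameters(
        search_index_name="gptkbindex",
        source_data_select="id,sourcepage,sourcefile,content,category",
    ),
)
client.create_or_update_knowledge_source(
    knowledge_source=knowledge_source, api_version="2025-08-01-preview"
)

# Step 2: agent referencing the knowledge source (replaces the old
# KnowledgeAgentTargetIndex with default_include_reference_source_data).
client.create_or_update_agent(
    agent=KnowledgeAgent(
        name="gptkb-agent",  # placeholder
        knowledge_sources=[
            KnowledgeSourceReference(
                name=knowledge_source.name,
                include_references=True,
                include_reference_source_data=True,
            )
        ],
        models=[
            KnowledgeAgentAzureOpenAIModel(
                azure_open_ai_parameters=AzureOpenAIVectorizerParameters(
                    resource_url="https://myaccount.openai.azure.com",  # placeholder
                    deployment_name="gpt-4.1-mini",  # placeholder
                    model_name="gpt-4.1-mini",
                )
            )
        ],
    )
)
```

If ACLs or multimodal support are enabled, the diff extends `source_data_select` with `oids`/`groups` and `images/url` respectively.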
2 changes: 1 addition & 1 deletion app/backend/requirements.in
@@ -7,7 +7,7 @@ tenacity
azure-ai-documentintelligence==1.0.0b4
azure-cognitiveservices-speech
azure-cosmos
azure-search-documents==11.6.0b12
azure-search-documents==11.7.0b1
azure-storage-blob
azure-storage-file-datalake
uvicorn