diff --git a/app/backend/approaches/chatapproach.py b/app/backend/approaches/chatapproach.py
index ea1857da3b..65e86abd07 100644
--- a/app/backend/approaches/chatapproach.py
+++ b/app/backend/approaches/chatapproach.py
@@ -45,17 +45,24 @@ def system_message_chat_conversation(self) -> str:
     async def run_until_final_call(self, messages, overrides, auth_claims, should_stream) -> tuple:
         pass
 
-    def get_system_prompt(self, override_prompt: Optional[str], follow_up_questions_prompt: str) -> str:
+    def get_system_prompt(self, override_prompt: Optional[str], follow_up_questions_prompt: str, sources_reference_content: str = "") -> str:
         if override_prompt is None:
             return self.system_message_chat_conversation.format(
-                injected_prompt="", follow_up_questions_prompt=follow_up_questions_prompt
+                injected_prompt="",
+                follow_up_questions_prompt=follow_up_questions_prompt,
+                sources_reference_content=sources_reference_content
             )
         elif override_prompt.startswith(">>>"):
             return self.system_message_chat_conversation.format(
-                injected_prompt=override_prompt[3:] + "\n", follow_up_questions_prompt=follow_up_questions_prompt
+                injected_prompt=override_prompt[3:] + "\n",
+                follow_up_questions_prompt=follow_up_questions_prompt,
+                sources_reference_content=sources_reference_content
             )
         else:
-            return override_prompt.format(follow_up_questions_prompt=follow_up_questions_prompt)
+            return override_prompt.format(
+                follow_up_questions_prompt=follow_up_questions_prompt,
+                sources_reference_content=sources_reference_content
+            )
 
     def get_search_query(self, chat_completion: ChatCompletion, user_query: str):
         response_message = chat_completion.choices[0].message
diff --git a/app/backend/approaches/chatreadretrieveread.py b/app/backend/approaches/chatreadretrieveread.py
index b752547e71..897bad8b08 100644
--- a/app/backend/approaches/chatreadretrieveread.py
+++ b/app/backend/approaches/chatreadretrieveread.py
@@ -58,7 +58,7 @@ def system_message_chat_conversation(self):
         return """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers.
         Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question. If the question is not in English, answer in the language used in the question.
-        Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, for example [info1.txt]. Don't combine sources, list each source separately, for example [info1.txt][info2.pdf].
+        {sources_reference_content}
         {follow_up_questions_prompt}
         {injected_prompt}
         """
 
@@ -96,6 +96,7 @@ async def run_until_final_call(
         top = overrides.get("top", 3)
         minimum_search_score = overrides.get("minimum_search_score", 0.0)
         minimum_reranker_score = overrides.get("minimum_reranker_score", 0.0)
+        include_category = overrides.get("include_category")
         filter = self.build_filter(overrides, auth_claims)
 
         original_user_query = messages[-1]["content"]
@@ -103,99 +104,81 @@
             raise ValueError("The most recent message content must be a string.")
         user_query_request = "Generate search query for: " + original_user_query
 
-        tools: List[ChatCompletionToolParam] = [
-            {
-                "type": "function",
-                "function": {
-                    "name": "search_sources",
-                    "description": "Retrieve sources from the Azure AI Search index",
-                    "parameters": {
-                        "type": "object",
-                        "properties": {
-                            "search_query": {
-                                "type": "string",
-                                "description": "Query string to retrieve documents from azure search eg: 'Health care plan'",
-                            }
+        sources_content = []
+        extra_info = {"thoughts": [], 'data_points': []}
+
+        if include_category != "__NONE__":
+            tools: List[ChatCompletionToolParam] = [
+                {
+                    "type": "function",
+                    "function": {
+                        "name": "search_sources",
+                        "description": "Retrieve sources from the Azure AI Search index",
+                        "parameters": {
+                            "type": "object",
+                            "properties": {
+                                "search_query": {
+                                    "type": "string",
+                                    "description": "Query string to retrieve documents from azure search eg: 'Health care plan'",
+                                }
+                            },
+                            "required": ["search_query"],
                         },
-                        "required": ["search_query"],
                     },
-                },
-            }
-        ]
+                }
+            ]
 
-        # STEP 1: Generate an optimized keyword search query based on the chat history and the last question
-        query_response_token_limit = 100
-        query_messages = build_messages(
-            model=self.chatgpt_model,
-            system_prompt=self.query_prompt_template,
-            tools=tools,
-            few_shots=self.query_prompt_few_shots,
-            past_messages=messages[:-1],
-            new_user_content=user_query_request,
-            max_tokens=self.chatgpt_token_limit - query_response_token_limit,
-            fallback_to_default=self.ALLOW_NON_GPT_MODELS,
-        )
+            # STEP 1: Generate an optimized keyword search query based on the chat history and the last question
+            query_response_token_limit = 100
+            query_messages = build_messages(
+                model=self.chatgpt_model,
+                system_prompt=self.query_prompt_template,
+                tools=tools,
+                few_shots=self.query_prompt_few_shots,
+                past_messages=messages[:-1],
+                new_user_content=user_query_request,
+                max_tokens=self.chatgpt_token_limit - query_response_token_limit,
+                fallback_to_default=self.ALLOW_NON_GPT_MODELS,
+            )
 
-        chat_completion: ChatCompletion = await self.openai_client.chat.completions.create(
-            messages=query_messages,  # type: ignore
-            # Azure OpenAI takes the deployment name as the model name
-            model=self.chatgpt_deployment if self.chatgpt_deployment else self.chatgpt_model,
-            temperature=0.0,  # Minimize creativity for search query generation
-            max_tokens=query_response_token_limit,  # Setting too low risks malformed JSON, setting too high may affect performance
-            n=1,
-            tools=tools,
-            seed=seed,
-        )
+            chat_completion: ChatCompletion = await self.openai_client.chat.completions.create(
+                messages=query_messages,  # type: ignore
+                # Azure OpenAI takes the deployment name as the model name
+                model=self.chatgpt_deployment if self.chatgpt_deployment else self.chatgpt_model,
+                temperature=0.0,  # Minimize creativity for search query generation
+                max_tokens=query_response_token_limit,  # Setting too low risks malformed JSON, setting too high may affect performance
+                n=1,
+                tools=tools,
+                seed=seed,
+            )
 
-        query_text = self.get_search_query(chat_completion, original_user_query)
-
-        # STEP 2: Retrieve relevant documents from the search index with the GPT optimized query
-
-        # If retrieval mode includes vectors, compute an embedding for the query
-        vectors: list[VectorQuery] = []
-        if use_vector_search:
-            vectors.append(await self.compute_text_embedding(query_text))
-
-        results = await self.search(
-            top,
-            query_text,
-            filter,
-            vectors,
-            use_text_search,
-            use_vector_search,
-            use_semantic_ranker,
-            use_semantic_captions,
-            minimum_search_score,
-            minimum_reranker_score,
-        )
-
-        sources_content = self.get_sources_content(results, use_semantic_captions, use_image_citation=False)
-        content = "\n".join(sources_content)
+            query_text = self.get_search_query(chat_completion, original_user_query)
 
-        # STEP 3: Generate a contextual and content specific answer using the search results and chat history
+            # STEP 2: Retrieve relevant documents from the search index with the GPT optimized query
 
-        # Allow client to replace the entire prompt, or to inject into the exiting prompt using >>>
-        system_message = self.get_system_prompt(
-            overrides.get("prompt_template"),
-            self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else "",
-        )
+            # If retrieval mode includes vectors, compute an embedding for the query
+            vectors: list[VectorQuery] = []
+            if use_vector_search:
+                vectors.append(await self.compute_text_embedding(query_text))
 
-        response_token_limit = 1024
-        messages = build_messages(
-            model=self.chatgpt_model,
-            system_prompt=system_message,
-            past_messages=messages[:-1],
-            # Model does not handle lengthy system messages well. Moving sources to latest user conversation to solve follow up questions prompt.
-            new_user_content=original_user_query + "\n\nSources:\n" + content,
-            max_tokens=self.chatgpt_token_limit - response_token_limit,
-            fallback_to_default=self.ALLOW_NON_GPT_MODELS,
-        )
+            results = await self.search(
+                top,
+                query_text,
+                filter,
+                vectors,
+                use_text_search,
+                use_vector_search,
+                use_semantic_ranker,
+                use_semantic_captions,
+                minimum_search_score,
+                minimum_reranker_score,
+            )
 
-        data_points = {"text": sources_content}
+            sources_content = self.get_sources_content(results, use_semantic_captions, use_image_citation=False)
+            if sources_content:
+                extra_info["data_points"] = {"text": sources_content}
 
-        extra_info = {
-            "data_points": data_points,
-            "thoughts": [
+            extra_info["thoughts"].extend([
                 ThoughtStep(
                     "Prompt to generate search query",
                     query_messages,
@@ -221,20 +204,47 @@
                     "Search results",
                     [result.serialize_for_results() for result in results],
                 ),
-                ThoughtStep(
-                    "Prompt to generate answer",
-                    messages,
-                    (
-                        {"model": self.chatgpt_model, "deployment": self.chatgpt_deployment}
-                        if self.chatgpt_deployment
-                        else {"model": self.chatgpt_model}
-                    ),
+            ])
+
+        # STEP 3: Generate a contextual and content specific answer
+
+        # Additional prompt injected into the masterprompt if RAG is enabled
+        sources_reference_content = """
+        Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, for example [info1.txt]. Don't combine sources, list each source separately, for example [info1.txt][info2.pdf].
+        """ if include_category != "__NONE__" else ""
+
+        # Allow client to replace the entire prompt, or to inject into the existing prompt using >>>
+        system_message = self.get_system_prompt(
+            overrides.get("prompt_template"),
+            self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else "",
+            sources_reference_content=sources_reference_content
+        )
+
+        response_token_limit = 1024
+        messages = build_messages(
+            model=self.chatgpt_model,
+            system_prompt=system_message,
+            past_messages=messages[:-1],
+            new_user_content=original_user_query + ("\n\nSources:\n" + "\n".join(sources_content) if sources_content else ""),
+            max_tokens=self.chatgpt_token_limit - response_token_limit,
+            fallback_to_default=self.ALLOW_NON_GPT_MODELS,
+        )
+
+        data_points = {"text": sources_content}
+
+        extra_info["thoughts"].append(
+            ThoughtStep(
+                "Prompt to generate answer",
+                messages,
+                (
+                    {"model": self.chatgpt_model, "deployment": self.chatgpt_deployment}
+                    if self.chatgpt_deployment
+                    else {"model": self.chatgpt_model}
                 ),
-            ],
-        }
+            )
+        )
 
         chat_coroutine = self.openai_client.chat.completions.create(
-            # Azure OpenAI takes the deployment name as the model name
             model=self.chatgpt_deployment if self.chatgpt_deployment else self.chatgpt_model,
             messages=messages,
             temperature=overrides.get("temperature", 0.3),
diff --git a/app/backend/approaches/chatreadretrievereadvision.py b/app/backend/approaches/chatreadretrievereadvision.py
index 6b48643077..c4ae3d4ff9 100644
--- a/app/backend/approaches/chatreadretrievereadvision.py
+++ b/app/backend/approaches/chatreadretrievereadvision.py
@@ -69,9 +69,7 @@ def __init__(
     def system_message_chat_conversation(self):
         return """
         You are an intelligent assistant helping analyze the Annual Financial Report of Contoso Ltd., The documents contain text, graphs, tables and images.
-        Each image source has the file name in the top left corner of the image with coordinates (10,10) pixels and is in the format SourceFileName:
-        Each text source starts in a new line and has the file name followed by colon and the actual information
-        Always include the source name from the image or text for each fact you use in the response in the format: [filename]
+        {sources_reference_content}
         Answer the following question using only the data provided in the sources below.
         If asking a clarifying question to the user would help, ask the question.
         Be brief in your answers.
@@ -96,6 +94,7 @@ async def run_until_final_call(
         top = overrides.get("top", 3)
         minimum_search_score = overrides.get("minimum_search_score", 0.0)
         minimum_reranker_score = overrides.get("minimum_reranker_score", 0.0)
+        include_category = overrides.get("include_category")
         filter = self.build_filter(overrides, auth_claims)
 
         vector_fields = overrides.get("vector_fields", ["embedding"])
@@ -107,66 +106,79 @@
             raise ValueError("The most recent message content must be a string.")
         past_messages: list[ChatCompletionMessageParam] = messages[:-1]
 
-        # STEP 1: Generate an optimized keyword search query based on the chat history and the last question
-        user_query_request = "Generate search query for: " + original_user_query
-
-        query_response_token_limit = 100
-        query_model = self.chatgpt_model
-        query_deployment = self.chatgpt_deployment
-        query_messages = build_messages(
-            model=query_model,
-            system_prompt=self.query_prompt_template,
-            few_shots=self.query_prompt_few_shots,
-            past_messages=past_messages,
-            new_user_content=user_query_request,
-            max_tokens=self.chatgpt_token_limit - query_response_token_limit,
-        )
+        sources_content = []
+        content = ""
+        image_list: list[ChatCompletionContentPartImageParam] = []
 
-        chat_completion: ChatCompletion = await self.openai_client.chat.completions.create(
-            model=query_deployment if query_deployment else query_model,
-            messages=query_messages,
-            temperature=0.0,  # Minimize creativity for search query generation
-            max_tokens=query_response_token_limit,
-            n=1,
-            seed=seed,
-        )
+        if include_category != "__NONE__":
+            # STEP 1: Generate an optimized keyword search query based on the chat history and the last question
+            user_query_request = "Generate search query for: " + original_user_query
 
-        query_text = self.get_search_query(chat_completion, original_user_query)
-
-        # STEP 2: Retrieve relevant documents from the search index with the GPT optimized query
-
-        # If retrieval mode includes vectors, compute an embedding for the query
-        vectors = []
-        if use_vector_search:
-            for field in vector_fields:
-                vector = (
-                    await self.compute_text_embedding(query_text)
-                    if field == "embedding"
-                    else await self.compute_image_embedding(query_text)
-                )
-                vectors.append(vector)
-
-        results = await self.search(
-            top,
-            query_text,
-            filter,
-            vectors,
-            use_text_search,
-            use_vector_search,
-            use_semantic_ranker,
-            use_semantic_captions,
-            minimum_search_score,
-            minimum_reranker_score,
-        )
-        sources_content = self.get_sources_content(results, use_semantic_captions, use_image_citation=True)
-        content = "\n".join(sources_content)
+            query_response_token_limit = 100
+            query_model = self.chatgpt_model
+            query_deployment = self.chatgpt_deployment
+            query_messages = build_messages(
+                model=query_model,
+                system_prompt=self.query_prompt_template,
+                few_shots=self.query_prompt_few_shots,
+                past_messages=past_messages,
+                new_user_content=user_query_request,
+                max_tokens=self.chatgpt_token_limit - query_response_token_limit,
+            )
+
+            chat_completion: ChatCompletion = await self.openai_client.chat.completions.create(
+                model=query_deployment if query_deployment else query_model,
+                messages=query_messages,
+                temperature=0.0,  # Minimize creativity for search query generation
+                max_tokens=query_response_token_limit,
+                n=1,
+                seed=seed,
+            )
+
+            query_text = self.get_search_query(chat_completion, original_user_query)
+
+            # STEP 2: Retrieve relevant documents from the search index with the GPT optimized query
+
+            # If retrieval mode includes vectors, compute an embedding for the query
+            vectors = []
+            if use_vector_search:
+                for field in vector_fields:
+                    vector = (
+                        await self.compute_text_embedding(query_text)
+                        if field == "embedding"
+                        else await self.compute_image_embedding(query_text)
+                    )
+                    vectors.append(vector)
+
+            results = await self.search(
+                top,
+                query_text,
+                filter,
+                vectors,
+                use_text_search,
+                use_vector_search,
+                use_semantic_ranker,
+                use_semantic_captions,
+                minimum_search_score,
+                minimum_reranker_score,
+            )
+            sources_content = self.get_sources_content(results, use_semantic_captions, use_image_citation=True)
+            content = "\n".join(sources_content)
 
         # STEP 3: Generate a contextual and content specific answer using the search results and chat history
 
+        # Additional prompt injected into the masterprompt if RAG is enabled
+        sources_reference_content = """
+        Each image source has the file name in the top left corner of the image with coordinates (10,10) pixels and is in the format SourceFileName:
+        Each text source starts in a new line and has the file name followed by colon and the actual information
+        Always include the source name from the image or text for each fact you use in the response in the format: [filename]
+        """ if include_category != "__NONE__" else ""
+
         # Allow client to replace the entire prompt, or to inject into the existing prompt using >>>
         system_message = self.get_system_prompt(
             overrides.get("prompt_template"),
             self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else "",
+            sources_reference_content=sources_reference_content
         )
 
         user_content: list[ChatCompletionContentPartParam] = [{"text": original_user_query, "type": "text"}]
@@ -198,43 +210,50 @@
         extra_info = {
             "data_points": data_points,
-            "thoughts": [
-                ThoughtStep(
-                    "Prompt to generate search query",
-                    query_messages,
-                    (
-                        {"model": query_model, "deployment": query_deployment}
-                        if query_deployment
-                        else {"model": query_model}
+            "thoughts": []
+        }
+
+        # Overwrite extra_info with extended version if RAG is enabled
+        if include_category != "__NONE__":
+            extra_info = {
+                "data_points": data_points,
+                "thoughts": [
+                    ThoughtStep(
+                        "Prompt to generate search query",
+                        query_messages,
+                        (
+                            {"model": query_model, "deployment": query_deployment}
+                            if query_deployment
+                            else {"model": query_model}
+                        ),
                     ),
-                ),
-                ThoughtStep(
-                    "Search using generated search query",
-                    query_text,
-                    {
-                        "use_semantic_captions": use_semantic_captions,
-                        "use_semantic_ranker": use_semantic_ranker,
-                        "top": top,
-                        "filter": filter,
-                        "vector_fields": vector_fields,
-                        "use_text_search": use_text_search,
-                    },
-                ),
-                ThoughtStep(
-                    "Search results",
-                    [result.serialize_for_results() for result in results],
-                ),
-                ThoughtStep(
-                    "Prompt to generate answer",
-                    messages,
-                    (
-                        {"model": self.gpt4v_model, "deployment": self.gpt4v_deployment}
-                        if self.gpt4v_deployment
-                        else {"model": self.gpt4v_model}
+                    ThoughtStep(
+                        "Search using generated search query",
+                        query_text,
+                        {
+                            "use_semantic_captions": use_semantic_captions,
+                            "use_semantic_ranker": use_semantic_ranker,
+                            "top": top,
+                            "filter": filter,
+                            "vector_fields": vector_fields,
+                            "use_text_search": use_text_search,
+                        },
                     ),
-                ),
-            ],
-        }
+                    ThoughtStep(
+                        "Search results",
+                        [result.serialize_for_results() for result in results],
+                    ),
+                    ThoughtStep(
+                        "Prompt to generate answer",
+                        messages,
+                        (
+                            {"model": self.gpt4v_model, "deployment": self.gpt4v_deployment}
+                            if self.gpt4v_deployment
+                            else {"model": self.gpt4v_model}
+                        ),
+                    ),
+                ],
+            }
 
         chat_coroutine = self.openai_client.chat.completions.create(
             model=self.gpt4v_deployment if self.gpt4v_deployment else self.gpt4v_model,
diff --git a/app/frontend/src/components/AnalysisPanel/AnalysisPanel.tsx b/app/frontend/src/components/AnalysisPanel/AnalysisPanel.tsx
index 2cee00c761..2ece0a48a0 100644
--- a/app/frontend/src/components/AnalysisPanel/AnalysisPanel.tsx
+++ b/app/frontend/src/components/AnalysisPanel/AnalysisPanel.tsx
@@ -25,7 +25,7 @@ const pivotItemDisabledStyle = { disabled: true, style: { color: "grey" } };
 
 export const AnalysisPanel = ({ answer, activeTab, activeCitation, citationHeight, className, onActiveTabChanged }: Props) => {
     const isDisabledThoughtProcessTab: boolean = !answer.context.thoughts;
-    const isDisabledSupportingContentTab: boolean = !answer.context.data_points;
+    const isDisabledSupportingContentTab: boolean = answer.context?.data_points?.length == 0;
     const isDisabledCitationTab: boolean = !activeCitation;
     const [citation, setCitation] = useState("");
 
diff --git a/app/frontend/src/components/Answer/Answer.tsx b/app/frontend/src/components/Answer/Answer.tsx
index e5619e0c33..264ee6fa11 100644
--- a/app/frontend/src/components/Answer/Answer.tsx
+++ b/app/frontend/src/components/Answer/Answer.tsx
@@ -61,14 +61,15 @@ export const Answer = ({
                         onClick={() => onThoughtProcessClicked()}
                         disabled={!answer.context.thoughts?.length}
                     />
-                        onClick={() => onSupportingContentClicked()}
-                        disabled={!answer.context.data_points}
-                    />
+                    {answer.context?.data_points?.length != 0 && ( // Only show SupportingContent button when there actually is supporting content
+                            onClick={() => onSupportingContentClicked()}
+                        />
+                    )}
                     {showSpeechOutputAzure && (
                     )}
diff --git a/app/frontend/src/components/Settings/Settings.tsx b/app/frontend/src/components/Settings/Settings.tsx
index de404297ab..81081f8029 100644
--- a/app/frontend/src/components/Settings/Settings.tsx
+++ b/app/frontend/src/components/Settings/Settings.tsx
@@ -198,7 +198,8 @@ export const Settings = ({
                         onChange={(_ev?: React.FormEvent, option?: IDropdownOption) => onChange("includeCategory", option?.key || "")}
                         aria-labelledby={includeCategoryId}
                         options={[
-                            { key: "", text: t("labels.includeCategoryOptions.all") }
+                            { key: "", text: t("labels.includeCategoryOptions.all") },
+                            { key: "__NONE__", text: t("labels.includeCategoryOptions.none") }
                             // { key: "example", text: "Example Category" } // Add more categories as needed
                         ]}
                         onRenderLabel={props => renderLabel(props, includeCategoryId, includeCategoryFieldId, t("helpTexts.includeCategory"))}
diff --git a/app/frontend/src/locales/da/translation.json b/app/frontend/src/locales/da/translation.json
index da1f083fb6..12d2e95ae9 100644
--- a/app/frontend/src/locales/da/translation.json
+++ b/app/frontend/src/locales/da/translation.json
@@ -79,7 +79,8 @@
         "retrieveCount": "Hent dette antal søgeresultater:",
         "includeCategory": "Inkludér kategori",
         "includeCategoryOptions": {
-            "all": "Alle"
+            "all": "Alle",
+            "none": "Ingen"
         },
         "excludeCategory": "Ekskludér kategori",
         "useSemanticRanker": "Brug semantisk ranking til søgning",
diff --git a/app/frontend/src/locales/en/translation.json b/app/frontend/src/locales/en/translation.json
index 4369556006..87b5723977 100644
--- a/app/frontend/src/locales/en/translation.json
+++ b/app/frontend/src/locales/en/translation.json
@@ -83,7 +83,8 @@
         "retrieveCount": "Retrieve this many search results:",
         "includeCategory": "Include category",
         "includeCategoryOptions": {
-            "all": "All"
+            "all": "All",
+            "none": "None"
         },
         "excludeCategory": "Exclude category",
         "useSemanticRanker": "Use semantic ranker for retrieval",
diff --git a/app/frontend/src/locales/es/translation.json b/app/frontend/src/locales/es/translation.json
index 303b9d32a5..bcc54f6bd2 100644
--- a/app/frontend/src/locales/es/translation.json
+++ b/app/frontend/src/locales/es/translation.json
@@ -83,7 +83,8 @@
         "retrieveCount": "Obtén éste número de resultados de la búsqueda:",
         "includeCategory": "Incluir categoría",
         "includeCategoryOptions": {
-            "all": "Todas"
+            "all": "Todas",
+            "none": "Ninguna"
         },
         "excludeCategory": "Excluir categoría",
         "useSemanticRanker": "Usar clasificador semántico para la recuperación",
diff --git a/app/frontend/src/locales/fr/translation.json b/app/frontend/src/locales/fr/translation.json
index 28363da398..2508e893b0 100644
--- a/app/frontend/src/locales/fr/translation.json
+++ b/app/frontend/src/locales/fr/translation.json
@@ -83,7 +83,8 @@
         "retrieveCount": "Récupérer ce nombre de résultats de recherche :",
         "includeCategory": "Inclure la catégorie",
         "includeCategoryOptions": {
-            "all": "Toutes"
+            "all": "Toutes",
+            "none": "Aucune"
         },
         "excludeCategory": "Exclure la catégorie",
         "useSemanticRanker": "Utiliser le reclasseur sémantique",
diff --git a/app/frontend/src/locales/ja/translation.json b/app/frontend/src/locales/ja/translation.json
index b6b82fa8bd..3bcf31cdad 100644
--- a/app/frontend/src/locales/ja/translation.json
+++ b/app/frontend/src/locales/ja/translation.json
@@ -83,7 +83,8 @@
         "retrieveCount": "ここで指定する検索結果数を取得:",
         "includeCategory": "カテゴリを指定",
         "includeCategoryOptions": {
-            "all": "全て"
+            "all": "全て",
+            "none": "なし"
         },
         "excludeCategory": "カテゴリを除外",
         "useSemanticRanker": "取得にセマンティック・ランカーを使用",
diff --git a/app/frontend/src/locales/nl/translation.json b/app/frontend/src/locales/nl/translation.json
index 427eb9985f..0747604108 100644
--- a/app/frontend/src/locales/nl/translation.json
+++ b/app/frontend/src/locales/nl/translation.json
@@ -11,7 +11,7 @@
         "openChatHistory": "Open chatgeschiedenis",
         "noHistory": "Geen chatgeschiedenis",
         "deleteModalTitle": "Chatgeschiedenis verwijderen",
-        "deleteModalDescription": "Deze actie kan niet ongedaan worden gemaakt. Deze chatgeschiedenis verwijderen?",
+        "deleteModalDescription": "Deze actie kan niet ongedaan worden gemaakt. Wil je deze chatgeschiedenis verwijderen?",
         "deleteLabel": "Verwijderen",
         "cancelLabel": "Annuleren",
         "today": "Vandaag",
@@ -34,11 +34,11 @@
     },
     "developerSettings": "Ontwikkelaarinstellingen",
-    "chatEmptyStateTitle": "Chat met je gegevens",
+    "chatEmptyStateTitle": "Chat met je data",
     "chatEmptyStateSubtitle": "Stel een vraag of probeer een voorbeeld",
     "defaultExamples": {
         "1": "Wat zit er in mijn Northwind Health Plus-pakket dat niet in het standaardpakket zit?",
-        "2": "Wat gebeurt er tijdens een functioneringsgesprek?",
+        "2": "Hoe gaat een functioneringsgesprek?",
         "3": "Wat doet een Product Manager?",
         "placeholder": "Typ een nieuwe vraag (bijv. dekt mijn pakket jaarlijkse oogonderzoeken?)"
     },
@@ -83,7 +83,8 @@
         "retrieveCount": "Dit aantal zoekresultaten ophalen:",
         "includeCategory": "Categorie opnemen",
         "includeCategoryOptions": {
-            "all": "Alle"
+            "all": "Alle",
+            "none": "Geen"
         },
         "excludeCategory": "Categorie uitsluiten",
         "useSemanticRanker": "Semantische rangschikking gebruiken",
@@ -107,7 +108,7 @@
             }
         },
         "vector": {
-            "label": "Vectorvelden (Multi-query vectorzoektocht)",
+            "label": "Vectorvelden (Multi-query vectorsearch)",
             "options": {
                 "embedding": "Tekst Embeddings",
                 "imageEmbedding": "Afbeelding Embeddings",