
Commit 916278e

Change frontend for LLM inputs
1 parent 75c3a0f commit 916278e

10 files changed: +83 -60 lines changed


app/frontend/src/api/models.ts

Lines changed: 2 additions & 7 deletions
@@ -4,12 +4,6 @@ export const enum RetrievalMode {
     Text = "text"
 }
 
-export const enum LLMInputs {
-    TextAndImages = "textAndImages",
-    Images = "images",
-    Texts = "texts"
-}
-
 export type ChatAppRequestOverrides = {
     retrieval_mode?: RetrievalMode;
     semantic_ranker?: boolean;
@@ -31,7 +25,8 @@ export type ChatAppRequestOverrides = {
     suggest_followup_questions?: boolean;
     use_oid_security_filter?: boolean;
     use_groups_security_filter?: boolean;
-    llm_inputs: LLMInputs;
+    send_text_sources: boolean;
+    send_image_sources: boolean;
     search_text_embeddings: boolean;
     search_image_embeddings: boolean;
     language: string;
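
For reference, a minimal sketch (not part of the commit) of how the removed LLMInputs enum maps onto the two new boolean fields; Partial is used because the other required fields of ChatAppRequestOverrides are not shown here, and the import path is the one the pages use.

import { ChatAppRequestOverrides } from "../../api"; // assumed re-export, as in Chat.tsx/Ask.tsx

// Sketch only: llm_inputs: LLMInputs.TextAndImages becomes two independent booleans,
// so text and image context can be toggled separately.
const overrides: Partial<ChatAppRequestOverrides> = {
    send_text_sources: true, // include extracted text chunks as LLM context
    send_image_sources: false, // omit images, giving a text-only prompt
    search_text_embeddings: true,
    search_image_embeddings: true
};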

app/frontend/src/components/Settings/Settings.module.css

Lines changed: 11 additions & 0 deletions
@@ -53,3 +53,14 @@
     font-weight: bold;
     color: #fff;
 }
+
+.fieldset {
+    border: none;
+    padding: 0;
+}
+
+.legend {
+    font-size: 14px;
+    margin-bottom: 5px;
+    padding: 0;
+}

app/frontend/src/components/Settings/Settings.tsx

Lines changed: 26 additions & 17 deletions
@@ -1,9 +1,9 @@
 import { useId } from "@fluentui/react-hooks";
 import { useTranslation } from "react-i18next";
-import { TextField, ITextFieldProps, Checkbox, ICheckboxProps, Dropdown, IDropdownProps, IDropdownOption } from "@fluentui/react";
+import { TextField, ITextFieldProps, Checkbox, ICheckboxProps, Dropdown, IDropdownProps, IDropdownOption, Stack } from "@fluentui/react";
 import { HelpCallout } from "../HelpCallout";
 import { VectorSettings } from "../VectorSettings";
-import { RetrievalMode, LLMInputs } from "../../api";
+import { RetrievalMode } from "../../api";
 import styles from "./Settings.module.css";
 
 // Add type for onRenderLabel
@@ -25,7 +25,8 @@ export interface SettingsProps {
     excludeCategory: string;
     includeCategory: string;
     retrievalMode: RetrievalMode;
-    llmInputs: LLMInputs;
+    sendTextSources: boolean;
+    sendImageSources: boolean;
     searchTextEmbeddings: boolean;
     searchImageEmbeddings: boolean;
     showSemanticRankerOption: boolean;
@@ -68,7 +69,8 @@ export const Settings = ({
     retrievalMode,
     searchTextEmbeddings,
     searchImageEmbeddings,
-    llmInputs,
+    sendTextSources,
+    sendImageSources,
     showSemanticRankerOption,
     showQueryRewritingOption,
     showReasoningEffortOption,
@@ -402,20 +404,27 @@ export const Settings = ({
                 aria-labelledby={seedId}
                 onRenderLabel={props => renderLabel(props, seedId, seedFieldId, t("helpTexts.seed"))}
             />
+
             {showMultimodalOptions && !useAgenticRetrieval && (
-                <Dropdown
-                    id="llmInputsDropdown"
-                    className={styles.settingsSeparator}
-                    label={t("labels.llmInputs")}
-                    selectedKey={llmInputs}
-                    onChange={(_ev?: React.FormEvent<HTMLElement | HTMLInputElement>, option?: IDropdownOption) => onChange("llmInputs", option?.key)}
-                    options={[
-                        { key: LLMInputs.Texts, text: t("labels.llmInputsOptions.texts") },
-                        { key: LLMInputs.Images, text: t("labels.llmInputsOptions.images") },
-                        { key: LLMInputs.TextAndImages, text: t("labels.llmInputsOptions.textAndImages") }
-                    ]}
-                    onRenderLabel={props => renderLabel(props, "llmInputsDropdownLabel", "llmInputsDropdown", t("helpTexts.llmInputs"))}
-                />
+                <fieldset className={styles.fieldset + " " + styles.settingsSeparator}>
+                    <legend className={styles.legend}>{t("labels.llmInputs")}</legend>
+                    <Stack tokens={{ childrenGap: 8 }}>
+                        <Checkbox
+                            id="sendTextSources"
+                            label={t("labels.llmInputsOptions.texts")}
+                            checked={sendTextSources}
+                            onChange={(_ev, checked) => onChange("send_text_sources", !!checked)}
+                            onRenderLabel={props => renderLabel(props, "sendTextSourcesLabel", "sendTextSources", t("helpTexts.llmTextInputs"))}
+                        />
+                        <Checkbox
+                            id="sendImageSources"
+                            label={t("labels.llmInputsOptions.images")}
+                            checked={sendImageSources}
+                            onChange={(_ev, checked) => onChange("send_image_sources", !!checked)}
+                            onRenderLabel={props => renderLabel(props, "sendImageSourcesLabel", "sendImageSources", t("helpTexts.llmImageInputs"))}
+                        />
+                    </Stack>
+                </fieldset>
             )}
         </div>
     );
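
The dropdown-to-checkbox swap follows the same fieldset/legend/Stack pattern that VectorSettings already uses. A minimal standalone sketch of that pattern (component name, labels, and inline styles are illustrative, not from the commit):

import { useState } from "react";
import { Checkbox, Stack } from "@fluentui/react";

// Hypothetical demo component: two independent booleans drive two checkboxes,
// mirroring how sendTextSources/sendImageSources replace the old single enum.
export const LlmInputSourcesSketch = () => {
    const [sendTextSources, setSendTextSources] = useState<boolean>(true);
    const [sendImageSources, setSendImageSources] = useState<boolean>(true);

    return (
        <fieldset style={{ border: "none", padding: 0 }}>
            <legend>LLM input sources</legend>
            <Stack tokens={{ childrenGap: 8 }}>
                <Checkbox label="Text sources" checked={sendTextSources} onChange={(_ev, checked) => setSendTextSources(!!checked)} />
                <Checkbox label="Image sources" checked={sendImageSources} onChange={(_ev, checked) => setSendImageSources(!!checked)} />
            </Stack>
        </fieldset>
    );
};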

app/frontend/src/components/TokenClaimsDisplay/TokenClaimsDisplay.tsx

Lines changed: 1 addition & 1 deletion
@@ -83,7 +83,7 @@ export const TokenClaimsDisplay = () => {
     ];
 
     return (
-        <div>
+        <div style={{ marginTop: "20px" }}>
             <Label>ID Token Claims</Label>
             <DataGrid items={items} columns={columns} sortable getRowId={item => item.name}>
                 <DataGridHeader>

app/frontend/src/components/VectorSettings/VectorSettings.module.css

Lines changed: 0 additions & 1 deletion
@@ -5,7 +5,6 @@
 .fieldset {
     border: none;
     padding: 0;
-    margin: 0;
 }
 
 .legend {

app/frontend/src/components/VectorSettings/VectorSettings.tsx

Lines changed: 0 additions & 2 deletions
@@ -99,7 +99,6 @@ export const VectorSettings = ({
             <Stack tokens={{ childrenGap: 8 }}>
                 <Checkbox
                     id={vectorFieldsFieldId + "-text"}
-                    className={styles.settingsSeparator}
                     label={t("labels.vector.options.embedding")}
                     checked={searchTextEmbeddings}
                     onChange={onSearchTextEmbeddingsChange}
@@ -115,7 +114,6 @@ export const VectorSettings = ({
                 />
                 <Checkbox
                     id={vectorFieldsFieldId + "-image"}
-                    className={styles.settingsSeparator}
                     label={t("labels.vector.options.imageEmbedding")}
                     checked={searchImageEmbeddings}
                     onChange={onSearchImageEmbeddingsChange}

app/frontend/src/locales/en/translation.json

Lines changed: 6 additions & 6 deletions
@@ -105,11 +105,11 @@
     },
     "useSuggestFollowupQuestions": "Suggest follow-up questions",
     "useAgenticRetrieval": "Use agentic retrieval",
-    "llmInputs": "Inputs for LLM",
+    "llmInputs": "LLM input sources",
     "llmInputsOptions": {
-        "texts": "Texts",
-        "images": "Images",
-        "textAndImages": "Texts and Images"
+        "texts": "Text sources",
+        "images": "Image sources",
+        "textAndImages": "Text and image sources"
     },
     "retrievalMode": {
         "label": "Retrieval mode",
@@ -162,8 +162,8 @@
     "suggestFollowupQuestions": "Asks the LLM to suggest follow-up questions based on the user's query.",
     "textEmbeddings": "When selected, search will use embeddings from the text-only embeddings model of extracted text chunks.",
     "imageEmbeddings": "When selected, search will use embeddings from the multimodal embeddings model of extracted images.",
-    "llmInputs":
-        "Sets what will be sent to the vision model. 'Images and text' sends both images and text to the model, 'Images' sends only images, and 'Text' sends only text.",
+    "llmTextInputs": "When selected, text content from the search results will be sent to the LLM as context.",
+    "llmImageInputs": "When selected, images from the search results will be sent to the LLM as context.",
     "retrievalMode":
         "Sets the retrieval mode for the Azure AI Search query. `Vectors + Text (Hybrid)` uses a combination of vector search and full text search, `Vectors` uses only vector search, and `Text` uses only full text search. Hybrid is generally optimal.",
     "streamChat": "Continuously streams the response to the chat UI as it is generated.",

app/frontend/src/pages/ask/Ask.tsx

Lines changed: 15 additions & 7 deletions
@@ -5,7 +5,7 @@ import { Panel, DefaultButton, Spinner } from "@fluentui/react";
 
 import styles from "./Ask.module.css";
 
-import { askApi, configApi, ChatAppResponse, ChatAppRequest, RetrievalMode, LLMInputs, SpeechConfig } from "../../api";
+import { askApi, configApi, ChatAppResponse, ChatAppRequest, RetrievalMode, SpeechConfig } from "../../api";
 import { Answer, AnswerError } from "../../components/Answer";
 import { QuestionInput } from "../../components/QuestionInput";
 import { ExampleList } from "../../components/Example";
@@ -36,7 +36,8 @@ export function Component(): JSX.Element {
     const [useSemanticCaptions, setUseSemanticCaptions] = useState<boolean>(false);
     const [useQueryRewriting, setUseQueryRewriting] = useState<boolean>(false);
     const [reasoningEffort, setReasoningEffort] = useState<string>("");
-    const [llmInputs, setLLMInputs] = useState<LLMInputs>(LLMInputs.Texts);
+    const [sendTextSources, setSendTextSources] = useState<boolean>(true);
+    const [sendImageSources, setSendImageSources] = useState<boolean>(true);
     const [includeCategory, setIncludeCategory] = useState<string>("");
 
     const [excludeCategory, setExcludeCategory] = useState<string>("");
@@ -87,8 +88,8 @@ export function Component(): JSX.Element {
         setShowMultimodalOptions(config.showMultimodalOptions);
         if (config.showMultimodalOptions) {
             // Set default LLM inputs based on config override or fallback to Texts
-            const defaultLlmInputs = config.ragLlmInputsOverride ? (config.ragLlmInputsOverride as LLMInputs) : LLMInputs.Texts;
-            setLLMInputs(defaultLlmInputs);
+            setSendTextSources(true);
+            setSendImageSources(true);
             // Set default vector field settings
             setSearchTextEmbeddings(true);
             setSearchImageEmbeddings(true);
@@ -162,7 +163,8 @@ export function Component(): JSX.Element {
                     use_groups_security_filter: useGroupsSecurityFilter,
                     search_text_embeddings: searchTextEmbeddings,
                     search_image_embeddings: searchImageEmbeddings,
-                    llm_inputs: llmInputs,
+                    send_text_sources: sendTextSources,
+                    send_image_sources: sendImageSources,
                     language: i18n.language,
                     use_agentic_retrieval: useAgenticRetrieval,
                     ...(seed !== null ? { seed: seed } : {})
@@ -238,7 +240,12 @@ export function Component(): JSX.Element {
                 setUseGroupsSecurityFilter(value);
                 break;
             case "llmInputs":
-                setLLMInputs(value);
+                break;
+            case "sendTextSources":
+                setSendTextSources(value);
+                break;
+            case "sendImageSources":
+                setSendImageSources(value);
                 break;
             case "searchTextEmbeddings":
                 setSearchTextEmbeddings(value);
@@ -375,7 +382,8 @@ export function Component(): JSX.Element {
                         excludeCategory={excludeCategory}
                         includeCategory={includeCategory}
                         retrievalMode={retrievalMode}
-                        llmInputs={llmInputs}
+                        sendTextSources={sendTextSources}
+                        sendImageSources={sendImageSources}
                         searchTextEmbeddings={searchTextEmbeddings}
                         searchImageEmbeddings={searchImageEmbeddings}
                         showSemanticRankerOption={showSemanticRankerOption}
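
In both Ask.tsx and Chat.tsx the settings-change switch keeps an empty "llmInputs" case and adds the two new cases. A compact, self-contained sketch of that dispatch (the Setters map is a stand-in for the real useState setters, not code from the commit):

// Hypothetical helper illustrating the updated dispatch; in the app the cases
// call the useState setters directly inside the pages' settings-change handler.
type Setters = {
    setSendTextSources: (value: boolean) => void;
    setSendImageSources: (value: boolean) => void;
};

function applyLlmInputSetting(field: string, value: boolean, setters: Setters): void {
    switch (field) {
        case "llmInputs":
            // legacy field: nothing to update now that the enum is gone
            break;
        case "sendTextSources":
            setters.setSendTextSources(value);
            break;
        case "sendImageSources":
            setters.setSendImageSources(value);
            break;
    }
}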

app/frontend/src/pages/chat/Chat.tsx

Lines changed: 15 additions & 17 deletions
@@ -7,17 +7,7 @@ import readNDJSONStream from "ndjson-readablestream";
 import appLogo from "../../assets/applogo.svg";
 import styles from "./Chat.module.css";
 
-import {
-    chatApi,
-    configApi,
-    RetrievalMode,
-    ChatAppResponse,
-    ChatAppResponseOrError,
-    ChatAppRequest,
-    ResponseMessage,
-    LLMInputs,
-    SpeechConfig
-} from "../../api";
+import { chatApi, configApi, RetrievalMode, ChatAppResponse, ChatAppResponseOrError, ChatAppRequest, ResponseMessage, SpeechConfig } from "../../api";
 import { Answer, AnswerError, AnswerLoading } from "../../components/Answer";
 import { QuestionInput } from "../../components/QuestionInput";
 import { ExampleList } from "../../components/Example";
@@ -61,7 +51,8 @@ const Chat = () => {
     const [searchImageEmbeddings, setSearchImageEmbeddings] = useState<boolean>(true);
     const [useOidSecurityFilter, setUseOidSecurityFilter] = useState<boolean>(false);
     const [useGroupsSecurityFilter, setUseGroupsSecurityFilter] = useState<boolean>(false);
-    const [llmInputs, setLLMInputs] = useState<LLMInputs>(LLMInputs.TextAndImages);
+    const [sendTextSources, setSendTextSources] = useState<boolean>(true);
+    const [sendImageSources, setSendImageSources] = useState<boolean>(true);
 
     const lastQuestionRef = useRef<string>("");
     const chatMessageStreamEnd = useRef<HTMLDivElement | null>(null);
@@ -109,8 +100,8 @@ const Chat = () => {
         setShowMultimodalOptions(config.showMultimodalOptions);
         if (config.showMultimodalOptions) {
             // Set default LLM inputs based on config override or fallback to TextAndImages
-            const defaultLlmInputs = config.ragLlmInputsOverride ? (config.ragLlmInputsOverride as LLMInputs) : LLMInputs.TextAndImages;
-            setLLMInputs(defaultLlmInputs);
+            setSendTextSources(true);
+            setSendImageSources(true);
             // Set default vector fields based on config override or fallback to TextAndImageEmbeddings
             // Set default vector field settings
             setSearchTextEmbeddings(true);
@@ -239,7 +230,8 @@ const Chat = () => {
                     use_groups_security_filter: useGroupsSecurityFilter,
                     search_text_embeddings: searchTextEmbeddings,
                     search_image_embeddings: searchImageEmbeddings,
-                    llm_inputs: llmInputs,
+                    send_text_sources: sendTextSources,
+                    send_image_sources: sendImageSources,
                     language: i18n.language,
                     use_agentic_retrieval: useAgenticRetrieval,
                     ...(seed !== null ? { seed: seed } : {})
@@ -357,7 +349,12 @@ const Chat = () => {
                 setUseSuggestFollowupQuestions(value);
                 break;
             case "llmInputs":
-                setLLMInputs(value);
+                break;
+            case "sendTextSources":
+                setSendTextSources(value);
+                break;
+            case "sendImageSources":
+                setSendImageSources(value);
                 break;
             case "searchTextEmbeddings":
                 setSearchTextEmbeddings(value);
@@ -561,7 +558,8 @@ const Chat = () => {
                         includeCategory={includeCategory}
                         retrievalMode={retrievalMode}
                         showMultimodalOptions={showMultimodalOptions}
-                        llmInputs={llmInputs}
+                        sendTextSources={sendTextSources}
+                        sendImageSources={sendImageSources}
                        searchTextEmbeddings={searchTextEmbeddings}
                         searchImageEmbeddings={searchImageEmbeddings}
                         showSemanticRankerOption={showSemanticRankerOption}

todo.txt

Lines changed: 7 additions & 2 deletions
@@ -10,8 +10,13 @@ TODO:
 # Inputs for LLM:
 # [X] text sources , use_text_sources = True
 # [X] image sources , use_image_sources = True
-
-
+Update:
+// Set default LLM inputs based on config override or fallback to Texts
+setSendTextSources(true);
+setSendImageSources(true);
+// Set default vector field settings
+setSearchTextEmbeddings(true);
+setSearchImageEmbeddings(true);
 To decide:
 * For user data lake client, how often should we double check the ACL matches the oid, versus assuming the URLs convey that? (Like when fetching the image?)
 * add a note that we only check owner, not full access control

0 commit comments
