diff --git a/.azdo/pipelines/azure-dev.yml b/.azdo/pipelines/azure-dev.yml index 3016dd301c..bb5da8069f 100644 --- a/.azdo/pipelines/azure-dev.yml +++ b/.azdo/pipelines/azure-dev.yml @@ -86,6 +86,7 @@ steps: AZURE_COMPUTER_VISION_RESOURCE_GROUP: $(AZURE_COMPUTER_VISION_RESOURCE_GROUP) AZURE_COMPUTER_VISION_LOCATION: $(AZURE_COMPUTER_VISION_LOCATION) AZURE_COMPUTER_VISION_SKU: $(AZURE_COMPUTER_VISION_SKU) + ENABLE_LANGUAGE_PICKER: $(ENABLE_LANGUAGE_PICKER) USE_SPEECH_INPUT_BROWSER: $(USE_SPEECH_INPUT_BROWSER) USE_SPEECH_OUTPUT_BROWSER: $(USE_SPEECH_OUTPUT_BROWSER) USE_SPEECH_OUTPUT_AZURE: $(USE_SPEECH_OUTPUT_AZURE) diff --git a/.github/workflows/azure-dev.yml b/.github/workflows/azure-dev.yml index b00a1bb0ee..819d6cff1d 100644 --- a/.github/workflows/azure-dev.yml +++ b/.github/workflows/azure-dev.yml @@ -73,6 +73,7 @@ jobs: USE_GPT4V: ${{ vars.USE_GPT4V }} AZURE_VISION_ENDPOINT: ${{ vars.AZURE_VISION_ENDPOINT }} VISION_SECRET_NAME: ${{ vars.VISION_SECRET_NAME }} + ENABLE_LANGUAGE_PICKER: ${{ vars.ENABLE_LANGUAGE_PICKER }} USE_SPEECH_INPUT_BROWSER: ${{ vars.USE_SPEECH_INPUT_BROWSER }} USE_SPEECH_OUTPUT_BROWSER: ${{ vars.USE_SPEECH_OUTPUT_BROWSER }} USE_SPEECH_OUTPUT_AZURE: ${{ vars.USE_SPEECH_OUTPUT_AZURE }} diff --git a/app/backend/app.py b/app/backend/app.py index 5ae60e289a..60c125d866 100644 --- a/app/backend/app.py +++ b/app/backend/app.py @@ -59,6 +59,7 @@ CONFIG_CREDENTIAL, CONFIG_GPT4V_DEPLOYED, CONFIG_INGESTER, + CONFIG_LANGUAGE_PICKER_ENABLED, CONFIG_OPENAI_CLIENT, CONFIG_SEARCH_CLIENT, CONFIG_SEMANTIC_RANKER_DEPLOYED, @@ -267,6 +268,7 @@ def config(): "showSemanticRankerOption": current_app.config[CONFIG_SEMANTIC_RANKER_DEPLOYED], "showVectorOption": current_app.config[CONFIG_VECTOR_SEARCH_ENABLED], "showUserUpload": current_app.config[CONFIG_USER_UPLOAD_ENABLED], + "showLanguagePicker": current_app.config[CONFIG_LANGUAGE_PICKER_ENABLED], "showSpeechInput": current_app.config[CONFIG_SPEECH_INPUT_ENABLED], "showSpeechOutputBrowser": current_app.config[CONFIG_SPEECH_OUTPUT_BROWSER_ENABLED], "showSpeechOutputAzure": current_app.config[CONFIG_SPEECH_OUTPUT_AZURE_ENABLED], @@ -429,6 +431,7 @@ async def setup_clients(): USE_GPT4V = os.getenv("USE_GPT4V", "").lower() == "true" USE_USER_UPLOAD = os.getenv("USE_USER_UPLOAD", "").lower() == "true" + ENABLE_LANGUAGE_PICKER = os.getenv("ENABLE_LANGUAGE_PICKER", "").lower() == "true" USE_SPEECH_INPUT_BROWSER = os.getenv("USE_SPEECH_INPUT_BROWSER", "").lower() == "true" USE_SPEECH_OUTPUT_BROWSER = os.getenv("USE_SPEECH_OUTPUT_BROWSER", "").lower() == "true" USE_SPEECH_OUTPUT_AZURE = os.getenv("USE_SPEECH_OUTPUT_AZURE", "").lower() == "true" @@ -576,6 +579,7 @@ async def setup_clients(): current_app.config[CONFIG_SEMANTIC_RANKER_DEPLOYED] = AZURE_SEARCH_SEMANTIC_RANKER != "disabled" current_app.config[CONFIG_VECTOR_SEARCH_ENABLED] = os.getenv("USE_VECTORS", "").lower() != "false" current_app.config[CONFIG_USER_UPLOAD_ENABLED] = bool(USE_USER_UPLOAD) + current_app.config[CONFIG_LANGUAGE_PICKER_ENABLED] = ENABLE_LANGUAGE_PICKER current_app.config[CONFIG_SPEECH_INPUT_ENABLED] = USE_SPEECH_INPUT_BROWSER current_app.config[CONFIG_SPEECH_OUTPUT_BROWSER_ENABLED] = USE_SPEECH_OUTPUT_BROWSER current_app.config[CONFIG_SPEECH_OUTPUT_AZURE_ENABLED] = USE_SPEECH_OUTPUT_AZURE diff --git a/app/backend/config.py b/app/backend/config.py index da076bad1d..bedc3e27be 100644 --- a/app/backend/config.py +++ b/app/backend/config.py @@ -14,6 +14,7 @@ CONFIG_SEARCH_CLIENT = "search_client" CONFIG_OPENAI_CLIENT = "openai_client" CONFIG_INGESTER = "ingester" 
+CONFIG_LANGUAGE_PICKER_ENABLED = "language_picker_enabled" CONFIG_SPEECH_INPUT_ENABLED = "speech_input_enabled" CONFIG_SPEECH_OUTPUT_BROWSER_ENABLED = "speech_output_browser_enabled" CONFIG_SPEECH_OUTPUT_AZURE_ENABLED = "speech_output_azure_enabled" diff --git a/app/frontend/package-lock.json b/app/frontend/package-lock.json index 88b2efb43f..4dbd119a1d 100644 --- a/app/frontend/package-lock.json +++ b/app/frontend/package-lock.json @@ -15,9 +15,14 @@ "@fluentui/react-icons": "^2.0.249", "@react-spring/web": "^9.7.3", "dompurify": "^3.0.6", + "i18next": "^23.12.2", + "i18next-browser-languagedetector": "^8.0.0", + "i18next-http-backend": "^2.5.2", "ndjson-readablestream": "^1.2.0", "react": "^18.3.1", "react-dom": "^18.3.1", + "react-helmet-async": "^2.0.5", + "react-i18next": "^15.0.0", "react-markdown": "^9.0.1", "react-router-dom": "^6.23.1", "react-syntax-highlighter": "^15.5.0", @@ -369,7 +374,9 @@ } }, "node_modules/@babel/runtime": { - "version": "7.22.15", + "version": "7.25.6", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.25.6.tgz", + "integrity": "sha512-VBj9MYyDb9tuLq7yzqjgzt6Q+IBQLrGZfdjOekyEirZPHxXWoTSGUTMrpsfi58Up73d13NfYLv8HT9vmznjzhQ==", "license": "MIT", "dependencies": { "regenerator-runtime": "^0.14.0" @@ -2576,6 +2583,15 @@ "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", "dev": true }, + "node_modules/cross-fetch": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/cross-fetch/-/cross-fetch-4.0.0.tgz", + "integrity": "sha512-e4a5N8lVvuLgAWgnCrLr2PP0YyDOTHa9H/Rj54dirp61qXnNq46m82bRhNqIA5VccJtWBvPTFRV3TtvHUKPB1g==", + "license": "MIT", + "dependencies": { + "node-fetch": "^2.6.12" + } + }, "node_modules/csstype": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", @@ -3147,6 +3163,15 @@ "node": "*" } }, + "node_modules/html-parse-stringify": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/html-parse-stringify/-/html-parse-stringify-3.0.1.tgz", + "integrity": "sha512-KknJ50kTInJ7qIScF3jeaFRpMpE8/lfiTdzf/twXyPBLAGrLRTmkz3AdTnKeh40X8k9L2fdYwEp/42WGXIRGcg==", + "license": "MIT", + "dependencies": { + "void-elements": "3.1.0" + } + }, "node_modules/html-url-attributes": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/html-url-attributes/-/html-url-attributes-3.0.0.tgz", @@ -3167,12 +3192,62 @@ "url": "https://github.com/sponsors/wooorm" } }, + "node_modules/i18next": { + "version": "23.14.0", + "resolved": "https://registry.npmjs.org/i18next/-/i18next-23.14.0.tgz", + "integrity": "sha512-Y5GL4OdA8IU2geRrt2+Uc1iIhsjICdHZzT9tNwQ3TVqdNzgxHToGCKf/TPRP80vTCAP6svg2WbbJL+Gx5MFQVA==", + "funding": [ + { + "type": "individual", + "url": "https://locize.com" + }, + { + "type": "individual", + "url": "https://locize.com/i18next.html" + }, + { + "type": "individual", + "url": "https://www.i18next.com/how-to/faq#i18next-is-awesome.-how-can-i-support-the-project" + } + ], + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.23.2" + } + }, + "node_modules/i18next-browser-languagedetector": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/i18next-browser-languagedetector/-/i18next-browser-languagedetector-8.0.0.tgz", + "integrity": "sha512-zhXdJXTTCoG39QsrOCiOabnWj2jecouOqbchu3EfhtSHxIB5Uugnm9JaizenOy39h7ne3+fLikIjeW88+rgszw==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.23.2" + } + }, + "node_modules/i18next-http-backend": { + "version": "2.6.1", + "resolved": 
"https://registry.npmjs.org/i18next-http-backend/-/i18next-http-backend-2.6.1.tgz", + "integrity": "sha512-rCilMAnlEQNeKOZY1+x8wLM5IpYOj10guGvEpeC59tNjj6MMreLIjIW8D1RclhD3ifLwn6d/Y9HEM1RUE6DSog==", + "license": "MIT", + "dependencies": { + "cross-fetch": "4.0.0" + } + }, "node_modules/inline-style-parser": { "version": "0.2.3", "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.3.tgz", "integrity": "sha512-qlD8YNDqyTKTyuITrDOffsl6Tdhv+UC4hcdAVuQsK4IMQ99nSgd1MIA/Q+jQYoh9r3hVUXhYh7urSRmXPkW04g==", "license": "MIT" }, + "node_modules/invariant": { + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", + "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.0.0" + } + }, "node_modules/is-alphabetical": { "version": "1.0.4", "license": "MIT", @@ -4371,6 +4446,26 @@ "resolved": "https://registry.npmjs.org/ndjson-readablestream/-/ndjson-readablestream-1.2.0.tgz", "integrity": "sha512-QbWX2IIfKMVL+ZFHm9vFEzPh1NzZfzJql59T+9XoXzUp8n0wu2t9qgDV9nT0A77YYa6KbAjsHNWzJfpZTfp4xQ==" }, + "node_modules/node-fetch": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", + "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", + "license": "MIT", + "dependencies": { + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, "node_modules/node-releases": { "version": "2.0.14", "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.14.tgz", @@ -4556,6 +4651,48 @@ "loose-envify": "^1.1.0" } }, + "node_modules/react-fast-compare": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/react-fast-compare/-/react-fast-compare-3.2.2.tgz", + "integrity": "sha512-nsO+KSNgo1SbJqJEYRE9ERzo7YtYbou/OqjSQKxV7jcKox7+usiUVZOAC+XnDOABXggQTno0Y1CpVnuWEc1boQ==", + "license": "MIT" + }, + "node_modules/react-helmet-async": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/react-helmet-async/-/react-helmet-async-2.0.5.tgz", + "integrity": "sha512-rYUYHeus+i27MvFE+Jaa4WsyBKGkL6qVgbJvSBoX8mbsWoABJXdEO0bZyi0F6i+4f0NuIb8AvqPMj3iXFHkMwg==", + "license": "Apache-2.0", + "dependencies": { + "invariant": "^2.2.4", + "react-fast-compare": "^3.2.2", + "shallowequal": "^1.1.0" + }, + "peerDependencies": { + "react": "^16.6.0 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/react-i18next": { + "version": "15.0.1", + "resolved": "https://registry.npmjs.org/react-i18next/-/react-i18next-15.0.1.tgz", + "integrity": "sha512-NwxLqNM6CLbeGA9xPsjits0EnXdKgCRSS6cgkgOdNcPXqL+1fYNl8fBg1wmnnHvFy812Bt4IWTPE9zjoPmFj3w==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.24.8", + "html-parse-stringify": "^3.0.1" + }, + "peerDependencies": { + "i18next": ">= 23.2.3", + "react": ">= 16.8.0" + }, + "peerDependenciesMeta": { + "react-dom": { + "optional": true + }, + "react-native": { + "optional": true + } + } + }, "node_modules/react-is": { "version": "17.0.2", "resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz", @@ -4867,6 +5004,12 @@ "semver": "bin/semver.js" } }, + "node_modules/shallowequal": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/shallowequal/-/shallowequal-1.1.0.tgz", + "integrity": 
"sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ==", + "license": "MIT" + }, "node_modules/source-map": { "version": "0.7.4", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.4.tgz", @@ -4991,6 +5134,12 @@ "node": ">=4" } }, + "node_modules/tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==", + "license": "MIT" + }, "node_modules/trim-lines": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz", @@ -5338,6 +5487,15 @@ } } }, + "node_modules/void-elements": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/void-elements/-/void-elements-3.1.0.tgz", + "integrity": "sha512-Dhxzh5HZuiHQhbvTW9AMetFfBHDMYpo23Uo9btPXgdYP+3T5S+p+jgNy7spra+veYhBP2dCSgxR/i2Y02h5/6w==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/web-namespaces": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/web-namespaces/-/web-namespaces-2.0.1.tgz", @@ -5348,6 +5506,22 @@ "url": "https://github.com/sponsors/wooorm" } }, + "node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==", + "license": "BSD-2-Clause" + }, + "node_modules/whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "license": "MIT", + "dependencies": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, "node_modules/wrap-ansi": { "version": "7.0.0", "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", diff --git a/app/frontend/package.json b/app/frontend/package.json index 9af592675a..d3cc7f04d6 100644 --- a/app/frontend/package.json +++ b/app/frontend/package.json @@ -19,11 +19,16 @@ "@fluentui/react-icons": "^2.0.249", "@react-spring/web": "^9.7.3", "dompurify": "^3.0.6", + "i18next": "^23.12.2", + "i18next-browser-languagedetector": "^8.0.0", + "i18next-http-backend": "^2.5.2", "ndjson-readablestream": "^1.2.0", "react": "^18.3.1", "react-dom": "^18.3.1", "react-markdown": "^9.0.1", "react-router-dom": "^6.23.1", + "react-helmet-async": "^2.0.5", + "react-i18next": "^15.0.0", "react-syntax-highlighter": "^15.5.0", "rehype-raw": "^7.0.0", "remark-gfm": "^4.0.0", diff --git a/app/frontend/src/api/models.ts b/app/frontend/src/api/models.ts index c8d264e362..c2899e3948 100644 --- a/app/frontend/src/api/models.ts +++ b/app/frontend/src/api/models.ts @@ -35,6 +35,7 @@ export type ChatAppRequestOverrides = { use_gpt4v?: boolean; gpt4v_input?: GPT4VInput; vector_fields: VectorFieldOptions[]; + language: string; }; export type ResponseMessage = { @@ -84,6 +85,7 @@ export type Config = { showSemanticRankerOption: boolean; showVectorOption: boolean; showUserUpload: boolean; + showLanguagePicker: boolean; showSpeechInput: boolean; showSpeechOutputBrowser: boolean; showSpeechOutputAzure: boolean; diff --git a/app/frontend/src/components/AnalysisPanel/AnalysisPanel.tsx b/app/frontend/src/components/AnalysisPanel/AnalysisPanel.tsx index 02c8be119f..2cee00c761 100644 --- a/app/frontend/src/components/AnalysisPanel/AnalysisPanel.tsx +++ 
b/app/frontend/src/components/AnalysisPanel/AnalysisPanel.tsx @@ -1,5 +1,5 @@ import { Stack, Pivot, PivotItem } from "@fluentui/react"; - +import { useTranslation } from "react-i18next"; import styles from "./AnalysisPanel.module.css"; import { SupportingContent } from "../SupportingContent"; @@ -30,6 +30,7 @@ export const AnalysisPanel = ({ answer, activeTab, activeCitation, citationHeigh const [citation, setCitation] = useState(""); const client = useLogin ? useMsal().instance : undefined; + const { t } = useTranslation(); const fetchCitation = async () => { const token = client ? await getToken(client) : undefined; @@ -78,21 +79,21 @@ export const AnalysisPanel = ({ answer, activeTab, activeCitation, citationHeigh > {renderFileViewer()} diff --git a/app/frontend/src/components/Answer/Answer.tsx b/app/frontend/src/components/Answer/Answer.tsx index 22f182f64d..e024563acb 100644 --- a/app/frontend/src/components/Answer/Answer.tsx +++ b/app/frontend/src/components/Answer/Answer.tsx @@ -1,5 +1,6 @@ import { useMemo } from "react"; import { Stack, IconButton } from "@fluentui/react"; +import { useTranslation } from "react-i18next"; import DOMPurify from "dompurify"; import ReactMarkdown from "react-markdown"; import remarkGfm from "remark-gfm"; @@ -44,6 +45,7 @@ export const Answer = ({ const followupQuestions = answer.context?.followup_questions; const messageContent = answer.message.content; const parsedAnswer = useMemo(() => parseAnswerToHtml(messageContent, isStreaming, onCitationClicked), [answer]); + const { t } = useTranslation(); const sanitizedAnswerHtml = DOMPurify.sanitize(parsedAnswer.answerHtml); return ( @@ -55,16 +57,16 @@ export const Answer = ({ onThoughtProcessClicked()} disabled={!answer.context.thoughts?.length} /> onSupportingContentClicked()} disabled={!answer.context.data_points} /> @@ -85,7 +87,7 @@ export const Answer = ({ {!!parsedAnswer.citations.length && ( - Citations: + {t("citationWithColon")} {parsedAnswer.citations.map((x, i) => { const path = getCitationFilePath(x); return ( @@ -101,7 +103,7 @@ export const Answer = ({ {!!followupQuestions?.length && showFollowupQuestions && onFollowupQuestionClicked && ( - Follow-up questions: + {t("followupQuestions")} {followupQuestions.map((x, i) => { return ( onFollowupQuestionClicked(x)}> diff --git a/app/frontend/src/components/Answer/AnswerLoading.tsx b/app/frontend/src/components/Answer/AnswerLoading.tsx index 6df192a636..24f8f63453 100644 --- a/app/frontend/src/components/Answer/AnswerLoading.tsx +++ b/app/frontend/src/components/Answer/AnswerLoading.tsx @@ -1,10 +1,12 @@ import { Stack } from "@fluentui/react"; import { animated, useSpring } from "@react-spring/web"; +import { useTranslation } from "react-i18next"; import styles from "./Answer.module.css"; import { AnswerIcon } from "./AnswerIcon"; export const AnswerLoading = () => { + const { t, i18n } = useTranslation(); const animatedStyles = useSpring({ from: { opacity: 0 }, to: { opacity: 1 } @@ -16,7 +18,7 @@ export const AnswerLoading = () => {

-                    Generating answer
+                    {t("generatingAnswer")}

diff --git a/app/frontend/src/components/Answer/SpeechOutputAzure.tsx b/app/frontend/src/components/Answer/SpeechOutputAzure.tsx index 61087c3659..2dabcb028a 100644 --- a/app/frontend/src/components/Answer/SpeechOutputAzure.tsx +++ b/app/frontend/src/components/Answer/SpeechOutputAzure.tsx @@ -1,5 +1,5 @@ import { useState } from "react"; - +import { useTranslation } from "react-i18next"; import { IconButton } from "@fluentui/react"; import { getSpeechApi, SpeechConfig } from "../../api"; @@ -13,6 +13,7 @@ interface Props { export const SpeechOutputAzure = ({ answer, speechConfig, index, isStreaming }: Props) => { const [isLoading, setIsLoading] = useState(false); const [localPlayingState, setLocalPlayingState] = useState(false); + const { t } = useTranslation(); const playAudio = async (url: string) => { speechConfig.audio.src = url; @@ -70,8 +71,8 @@ export const SpeechOutputAzure = ({ answer, speechConfig, index, isStreaming }: startOrStopSpeech(answer)} disabled={isStreaming} /> diff --git a/app/frontend/src/components/Answer/SpeechOutputBrowser.tsx b/app/frontend/src/components/Answer/SpeechOutputBrowser.tsx index bc91d5adfe..f202e829aa 100644 --- a/app/frontend/src/components/Answer/SpeechOutputBrowser.tsx +++ b/app/frontend/src/components/Answer/SpeechOutputBrowser.tsx @@ -1,5 +1,7 @@ import { useState } from "react"; import { IconButton } from "@fluentui/react"; +import { useTranslation } from "react-i18next"; +import { supportedLngs } from "../../i18n/config"; interface Props { answer: string; @@ -15,19 +17,31 @@ try { console.error("SpeechSynthesis is not supported"); } -const getUtterance = function (text: string) { +const getUtterance = function (text: string, lngCode: string = "en-US") { if (synth) { const utterance = new SpeechSynthesisUtterance(text); - utterance.lang = "en-US"; + utterance.lang = lngCode; utterance.volume = 1; utterance.rate = 1; utterance.pitch = 1; - utterance.voice = synth.getVoices().filter((voice: SpeechSynthesisVoice) => voice.lang === "en-US")[0]; + + let voice = synth.getVoices().filter((voice: SpeechSynthesisVoice) => voice.lang === lngCode)[0]; + if (!voice) { + voice = synth.getVoices().filter((voice: SpeechSynthesisVoice) => voice.lang === "en-US")[0]; + } + + utterance.voice = voice; return utterance; } }; export const SpeechOutputBrowser = ({ answer }: Props) => { + const { t, i18n } = useTranslation(); + const currentLng = i18n.language; + let lngCode = supportedLngs[currentLng]?.locale; + if (!lngCode) { + lngCode = "en-US"; + } const [isPlaying, setIsPlaying] = useState(false); const startOrStopSpeech = (answer: string) => { @@ -37,7 +51,7 @@ export const SpeechOutputBrowser = ({ answer }: Props) => { setIsPlaying(false); return; } - const utterance: SpeechSynthesisUtterance | undefined = getUtterance(answer); + const utterance: SpeechSynthesisUtterance | undefined = getUtterance(answer, lngCode); if (!utterance) { return; @@ -62,8 +76,8 @@ export const SpeechOutputBrowser = ({ answer }: Props) => { startOrStopSpeech(answer)} disabled={!synth} /> diff --git a/app/frontend/src/components/ClearChatButton/ClearChatButton.tsx b/app/frontend/src/components/ClearChatButton/ClearChatButton.tsx index 19a24a624a..5b6a0ae50c 100644 --- a/app/frontend/src/components/ClearChatButton/ClearChatButton.tsx +++ b/app/frontend/src/components/ClearChatButton/ClearChatButton.tsx @@ -1,5 +1,6 @@ import { Delete24Regular } from "@fluentui/react-icons"; import { Button } from "@fluentui/react-components"; +import { useTranslation } from "react-i18next"; import 
styles from "./ClearChatButton.module.css"; @@ -10,10 +11,11 @@ interface Props { } export const ClearChatButton = ({ className, disabled, onClick }: Props) => { + const { t, i18n } = useTranslation(); return (
); diff --git a/app/frontend/src/components/Example/ExampleList.tsx b/app/frontend/src/components/Example/ExampleList.tsx index 49c35cbd2d..dab4ec97ec 100644 --- a/app/frontend/src/components/Example/ExampleList.tsx +++ b/app/frontend/src/components/Example/ExampleList.tsx @@ -1,25 +1,19 @@ import { Example } from "./Example"; +import { useTranslation } from "react-i18next"; import styles from "./Example.module.css"; -const DEFAULT_EXAMPLES: string[] = [ - "What is included in my Northwind Health Plus plan that is not in standard?", - "What happens in a performance review?", - "What does a Product Manager do?" -]; - -const GPT4V_EXAMPLES: string[] = [ - "Compare the impact of interest rates and GDP in financial markets.", - "What is the expected trend for the S&P 500 index over the next five years? Compare it to the past S&P 500 performance", - "Can you identify any correlation between oil prices and stock market trends?" -]; - interface Props { onExampleClicked: (value: string) => void; useGPT4V?: boolean; } export const ExampleList = ({ onExampleClicked, useGPT4V }: Props) => { + const { t } = useTranslation(); + + const DEFAULT_EXAMPLES: string[] = [t("defaultExamples.1"), t("defaultExamples.2"), t("defaultExamples.3")]; + const GPT4V_EXAMPLES: string[] = [t("gpt4vExamples.1"), t("gpt4vExamples.2"), t("gpt4vExamples.3")]; + return (
    {(useGPT4V ? GPT4V_EXAMPLES : DEFAULT_EXAMPLES).map((question, i) => ( diff --git a/app/frontend/src/components/GPT4VSettings/GPT4VSettings.tsx b/app/frontend/src/components/GPT4VSettings/GPT4VSettings.tsx index 99dba193f5..3453c7abea 100644 --- a/app/frontend/src/components/GPT4VSettings/GPT4VSettings.tsx +++ b/app/frontend/src/components/GPT4VSettings/GPT4VSettings.tsx @@ -1,11 +1,11 @@ import { useEffect, useState } from "react"; import { Stack, Checkbox, ICheckboxProps, IDropdownOption, IDropdownProps, Dropdown } from "@fluentui/react"; import { useId } from "@fluentui/react-hooks"; +import { useTranslation } from "react-i18next"; import styles from "./GPT4VSettings.module.css"; import { GPT4VInput } from "../../api"; import { HelpCallout } from "../../components/HelpCallout"; -import { toolTipText } from "../../i18n/tooltips.js"; interface Props { gpt4vInputs: GPT4VInput; @@ -39,37 +39,38 @@ export const GPT4VSettings = ({ updateGPT4VInputs, updateUseGPT4V, isUseGPT4V, g const useGPT4VFieldId = useId("useGPT4VField"); const gpt4VInputId = useId("gpt4VInput"); const gpt4VInputFieldId = useId("gpt4VInputField"); + const { t } = useTranslation(); return ( ( - + )} /> {useGPT4V && ( ( - + )} /> )} diff --git a/app/frontend/src/components/HelpCallout/HelpCallout.tsx b/app/frontend/src/components/HelpCallout/HelpCallout.tsx index 455c150286..595a6f0fca 100644 --- a/app/frontend/src/components/HelpCallout/HelpCallout.tsx +++ b/app/frontend/src/components/HelpCallout/HelpCallout.tsx @@ -1,5 +1,6 @@ import { ITextFieldProps, DefaultButton, IconButton, IButtonStyles, Callout, IStackTokens, Stack, IStackStyles, initializeIcons } from "@fluentui/react"; import { useBoolean, useId } from "@fluentui/react-hooks"; +import { useTranslation } from "react-i18next"; const stackTokens: IStackTokens = { childrenGap: 4, @@ -21,6 +22,7 @@ export const HelpCallout = (props: IHelpCalloutProps): JSX.Element => { const [isCalloutVisible, { toggle: toggleIsCalloutVisible }] = useBoolean(false); const descriptionId: string = useId("description"); const iconButtonId: string = useId("iconButton"); + const { t } = useTranslation(); return ( <> @@ -28,13 +30,20 @@ export const HelpCallout = (props: IHelpCalloutProps): JSX.Element => { - + {isCalloutVisible && ( {props.helpText} - Close + {t("labels.closeButton")} )} diff --git a/app/frontend/src/components/LoginButton/LoginButton.tsx b/app/frontend/src/components/LoginButton/LoginButton.tsx index 9674fbfce7..3d8c64323a 100644 --- a/app/frontend/src/components/LoginButton/LoginButton.tsx +++ b/app/frontend/src/components/LoginButton/LoginButton.tsx @@ -1,5 +1,6 @@ import { DefaultButton } from "@fluentui/react"; import { useMsal } from "@azure/msal-react"; +import { useTranslation } from "react-i18next"; import styles from "./LoginButton.module.css"; import { getRedirectUri, loginRequest, appServicesLogout, getUsername, checkLoggedIn } from "../../authConfig"; @@ -11,6 +12,7 @@ export const LoginButton = () => { const { loggedIn, setLoggedIn } = useContext(LoginContext); const activeAccount = instance.getActiveAccount(); const [username, setUsername] = useState(""); + const { t } = useTranslation(); useEffect(() => { const fetchUsername = async () => { @@ -55,7 +57,7 @@ export const LoginButton = () => { }; return ( diff --git a/app/frontend/src/components/MarkdownViewer/MarkdownViewer.tsx b/app/frontend/src/components/MarkdownViewer/MarkdownViewer.tsx index 6d7061724e..147516b349 100644 --- a/app/frontend/src/components/MarkdownViewer/MarkdownViewer.tsx +++ 
b/app/frontend/src/components/MarkdownViewer/MarkdownViewer.tsx @@ -1,4 +1,5 @@ import { Spinner, SpinnerSize, MessageBar, MessageBarType, Link, IconButton } from "@fluentui/react"; +import { useTranslation } from "react-i18next"; import React, { useState, useEffect } from "react"; import ReactMarkdown from "react-markdown"; import remarkGfm from "remark-gfm"; @@ -13,6 +14,7 @@ export const MarkdownViewer: React.FC = ({ src }) => { const [content, setContent] = useState(""); const [isLoading, setIsLoading] = useState(true); const [error, setError] = useState(null); + const { t } = useTranslation(); /** * Anchor links result in HTTP 404 errors as the URL they point to does not exist. @@ -66,8 +68,8 @@ export const MarkdownViewer: React.FC = ({ src }) => { className={styles.downloadButton} style={{ color: "black" }} iconProps={{ iconName: "Save" }} - title="Save" - ariaLabel="Save" + title={t("tooltips.save")} + ariaLabel={t("tooltips.save")} href={src} download /> diff --git a/app/frontend/src/components/QuestionInput/QuestionInput.tsx b/app/frontend/src/components/QuestionInput/QuestionInput.tsx index c253717fba..5612a3475b 100644 --- a/app/frontend/src/components/QuestionInput/QuestionInput.tsx +++ b/app/frontend/src/components/QuestionInput/QuestionInput.tsx @@ -2,6 +2,7 @@ import { useState, useEffect, useContext } from "react"; import { Stack, TextField } from "@fluentui/react"; import { Button, Tooltip } from "@fluentui/react-components"; import { Send28Filled } from "@fluentui/react-icons"; +import { useTranslation } from "react-i18next"; import styles from "./QuestionInput.module.css"; import { SpeechInput } from "./SpeechInput"; @@ -20,6 +21,7 @@ interface Props { export const QuestionInput = ({ onSend, disabled, placeholder, clearOnSend, initQuestion, showSpeechInput }: Props) => { const [question, setQuestion] = useState(""); const { loggedIn } = useContext(LoginContext); + const { t } = useTranslation(); const [isComposing, setIsComposing] = useState(false); useEffect(() => { @@ -85,7 +87,7 @@ export const QuestionInput = ({ onSend, disabled, placeholder, clearOnSend, init onCompositionEnd={handleCompositionEnd} />
    - +
    diff --git a/app/frontend/src/components/QuestionInput/SpeechInput.tsx b/app/frontend/src/components/QuestionInput/SpeechInput.tsx index bf1bfaaece..882fd180b6 100644 --- a/app/frontend/src/components/QuestionInput/SpeechInput.tsx +++ b/app/frontend/src/components/QuestionInput/SpeechInput.tsx @@ -1,38 +1,53 @@ import { SetStateAction, useState } from "react"; import { Button, Tooltip } from "@fluentui/react-components"; import { Mic28Filled } from "@fluentui/react-icons"; +import { useTranslation } from "react-i18next"; import styles from "./QuestionInput.module.css"; +import { supportedLngs } from "../../i18n/config"; interface Props { updateQuestion: (question: string) => void; } -const SpeechRecognition = (window as any).speechRecognition || (window as any).webkitSpeechRecognition; -let speechRecognition: { - continuous: boolean; - lang: string; - interimResults: boolean; - maxAlternatives: number; - start: () => void; - onresult: (event: { results: { transcript: SetStateAction }[][] }) => void; - onend: () => void; - onerror: (event: { error: string }) => void; - stop: () => void; -} | null = null; -try { - speechRecognition = new SpeechRecognition(); - if (speechRecognition != null) { - speechRecognition.lang = "en-US"; - speechRecognition.interimResults = true; +const useCustomSpeechRecognition = () => { + const { i18n } = useTranslation(); + const currentLng = i18n.language; + let lngCode = supportedLngs[currentLng]?.locale; + if (!lngCode) { + lngCode = "en-US"; } -} catch (err) { - console.error("SpeechRecognition not supported"); - speechRecognition = null; -} + + const SpeechRecognition = (window as any).SpeechRecognition || (window as any).webkitSpeechRecognition; + let speechRecognition: { + continuous: boolean; + lang: string; + interimResults: boolean; + maxAlternatives: number; + start: () => void; + onresult: (event: { results: { transcript: SetStateAction }[][] }) => void; + onend: () => void; + onerror: (event: { error: string }) => void; + stop: () => void; + } | null = null; + + try { + speechRecognition = new SpeechRecognition(); + if (speechRecognition != null) { + speechRecognition.lang = lngCode; + speechRecognition.interimResults = true; + } + } catch (err) { + console.error("SpeechRecognition not supported"); + speechRecognition = null; + } + + return speechRecognition; +}; export const SpeechInput = ({ updateQuestion }: Props) => { + let speechRecognition = useCustomSpeechRecognition(); + const { t } = useTranslation(); const [isRecording, setIsRecording] = useState(false); - const startRecording = () => { if (speechRecognition == null) { console.error("SpeechRecognition not supported"); @@ -85,14 +100,14 @@ export const SpeechInput = ({ updateQuestion }: Props) => { <> {!isRecording && (
    - +
    )} {isRecording && (
    - +
    diff --git a/app/frontend/src/components/SettingsButton/SettingsButton.tsx b/app/frontend/src/components/SettingsButton/SettingsButton.tsx index 9dcdd5f126..8445320f45 100644 --- a/app/frontend/src/components/SettingsButton/SettingsButton.tsx +++ b/app/frontend/src/components/SettingsButton/SettingsButton.tsx @@ -1,5 +1,6 @@ import { Settings24Regular } from "@fluentui/react-icons"; import { Button } from "@fluentui/react-components"; +import { useTranslation } from "react-i18next"; import styles from "./SettingsButton.module.css"; @@ -9,10 +10,11 @@ interface Props { } export const SettingsButton = ({ className, onClick }: Props) => { + const { t } = useTranslation(); return (
    ); diff --git a/app/frontend/src/components/UploadFile/UploadFile.tsx b/app/frontend/src/components/UploadFile/UploadFile.tsx index 2b931fad7f..fe5a5c0535 100644 --- a/app/frontend/src/components/UploadFile/UploadFile.tsx +++ b/app/frontend/src/components/UploadFile/UploadFile.tsx @@ -3,6 +3,7 @@ import { Callout, Label, Text } from "@fluentui/react"; import { Button } from "@fluentui/react-components"; import { Add24Regular, Delete24Regular } from "@fluentui/react-icons"; import { useMsal } from "@azure/msal-react"; +import { useTranslation } from "react-i18next"; import { SimpleAPIResponse, uploadFileApi, deleteUploadedFileApi, listUploadedFilesApi } from "../../api"; import { useLogin, getToken } from "../../authConfig"; @@ -22,6 +23,7 @@ export const UploadFile: React.FC = ({ className, disabled }: Props) => { const [uploadedFile, setUploadedFile] = useState(); const [uploadedFileError, setUploadedFileError] = useState(); const [uploadedFiles, setUploadedFiles] = useState([]); + const { t } = useTranslation(); if (!useLogin) { throw new Error("The UploadFile component requires useLogin to be true"); @@ -96,7 +98,7 @@ export const UploadFile: React.FC = ({ className, disabled }: Props) => { } catch (error) { console.error(error); setIsUploading(false); - setUploadedFileError(`Error uploading file - please try again or contact admin.`); + setUploadedFileError(t("upload.uploadedFileError")); } }; @@ -104,7 +106,7 @@ export const UploadFile: React.FC = ({ className, disabled }: Props) => {
    {isCalloutVisible && ( @@ -118,7 +120,7 @@ export const UploadFile: React.FC = ({ className, disabled }: Props) => { >
    - + = ({ className, disabled }: Props) => { {/* Show a loading message while files are being uploaded */} - {isUploading && {"Uploading files..."}} + {isUploading && {t("upload.uploadingFiles")}} {!isUploading && uploadedFileError && {uploadedFileError}} {!isUploading && uploadedFile && {uploadedFile.message}} {/* Display the list of already uploaded */} -

    Previously uploaded files:

    +

    {t("upload.uploadedFilesLabel")}

-                {isLoading && Loading...}
-                {!isLoading && uploadedFiles.length === 0 && No files uploaded yet}
+                {isLoading && {t("upload.loading")}}
+                {!isLoading && uploadedFiles.length === 0 && {t("upload.noFilesUploaded")}}
                 {uploadedFiles.map((filename, index) => {
                     return (
    @@ -148,10 +150,10 @@ export const UploadFile: React.FC = ({ className, disabled }: Props) => { onClick={() => handleRemoveFile(filename)} disabled={deletionStatus[filename] === "pending" || deletionStatus[filename] === "success"} > - {!deletionStatus[filename] && "Delete file"} - {deletionStatus[filename] == "pending" && "Deleting file..."} - {deletionStatus[filename] == "error" && "Error deleting."} - {deletionStatus[filename] == "success" && "File deleted"} + {!deletionStatus[filename] && t("upload.deleteFile")} + {deletionStatus[filename] == "pending" && t("upload.deletingFile")} + {deletionStatus[filename] == "error" && t("upload.errorDeleting")} + {deletionStatus[filename] == "success" && t("upload.fileDeleted")}
    ); diff --git a/app/frontend/src/components/VectorSettings/VectorSettings.tsx b/app/frontend/src/components/VectorSettings/VectorSettings.tsx index 58b0c04bc9..7463d0cd46 100644 --- a/app/frontend/src/components/VectorSettings/VectorSettings.tsx +++ b/app/frontend/src/components/VectorSettings/VectorSettings.tsx @@ -1,11 +1,11 @@ import { useEffect, useState } from "react"; import { Stack, IDropdownOption, Dropdown, IDropdownProps } from "@fluentui/react"; import { useId } from "@fluentui/react-hooks"; +import { useTranslation } from "react-i18next"; import styles from "./VectorSettings.module.css"; import { HelpCallout } from "../../components/HelpCallout"; import { RetrievalMode, VectorFieldOptions } from "../../api"; -import { toolTipText } from "../../i18n/tooltips.js"; interface Props { showImageOptions?: boolean; @@ -38,39 +38,58 @@ export const VectorSettings = ({ updateRetrievalMode, updateVectorFields, showIm const retrievalModeFieldId = useId("retrievalModeField"); const vectorFieldsId = useId("vectorFields"); const vectorFieldsFieldId = useId("vectorFieldsField"); + const { t } = useTranslation(); return ( ( - + )} /> {showImageOptions && [RetrievalMode.Vectors, RetrievalMode.Hybrid].includes(retrievalMode) && ( ( - + )} /> )} diff --git a/app/frontend/src/i18n/LanguagePicker.module.css b/app/frontend/src/i18n/LanguagePicker.module.css new file mode 100644 index 0000000000..23a6cc1c48 --- /dev/null +++ b/app/frontend/src/i18n/LanguagePicker.module.css @@ -0,0 +1,25 @@ +.languagePicker { + word-break: break-word; + background: #dbdbdb; + border-radius: 0.5rem; + display: flex; + justify-content: center; + align-items: center; + padding-left: 0.3125rem; + padding-right: 0.3125rem; + margin-bottom: 0.3125rem; + cursor: pointer; + width: fit-content; +} + +.languagePicker:hover { + box-shadow: + 0rem 0.5rem 1rem rgba(0, 0, 0, 0.14), + 0rem 0rem 0.125rem rgba(0, 0, 0, 0.12); + outline: 0.125rem solid rgba(115, 118, 225, 1); +} + +.languagePickerIcon { + margin-bottom: 0.15625rem; + margin-left: 0.125rem; +} diff --git a/app/frontend/src/i18n/LanguagePicker.tsx b/app/frontend/src/i18n/LanguagePicker.tsx new file mode 100644 index 0000000000..531ad7d2ce --- /dev/null +++ b/app/frontend/src/i18n/LanguagePicker.tsx @@ -0,0 +1,39 @@ +import { useTranslation } from "react-i18next"; +import { LocalLanguage24Regular } from "@fluentui/react-icons"; +import { IDropdownOption, Dropdown } from "@fluentui/react"; +import { useId } from "@fluentui/react-hooks"; + +import { supportedLngs } from "./config"; +import styles from "./LanguagePicker.module.css"; + +interface Props { + onLanguageChange: (language: string) => void; +} + +export const LanguagePicker = ({ onLanguageChange }: Props) => { + const { i18n } = useTranslation(); + + const handleLanguageChange = (_ev: React.FormEvent, option?: IDropdownOption | undefined) => { + onLanguageChange(option?.data || i18n.language); + }; + const languagePickerId = useId("languagePicker"); + const { t } = useTranslation(); + + return ( +
    + + ({ + key: code, + text: details.name, + selected: code === i18n.language, + data: code + }))} + onChange={handleLanguageChange} + ariaLabel={t("labels.languagePicker")} + /> +
    + ); +}; diff --git a/app/frontend/src/i18n/config.ts b/app/frontend/src/i18n/config.ts new file mode 100644 index 0000000000..dd757aff98 --- /dev/null +++ b/app/frontend/src/i18n/config.ts @@ -0,0 +1,51 @@ +import i18next from "i18next"; +import LanguageDetector from "i18next-browser-languagedetector"; +import HttpApi from "i18next-http-backend"; +import { initReactI18next } from "react-i18next"; + +import enTranslation from "../locales/en/translation.json"; +import esTranslation from "../locales/es/translation.json"; +import jaTranslation from "../locales/ja/translation.json"; +import frTranslation from "../locales/fr/translation.json"; + +export const supportedLngs: { [key: string]: { name: string; locale: string } } = { + en: { + name: "English", + locale: "en-US" + }, + es: { + name: "Español", + locale: "es-ES" + }, + fr: { + name: "Français", + locale: "fr-FR" + }, + ja: { + name: "日本語", + locale: "ja-JP" + } +}; + +i18next + .use(HttpApi) + .use(LanguageDetector) + .use(initReactI18next) + // init i18next + // for all options read: https://www.i18next.com/overview/configuration-options + .init({ + resources: { + en: { translation: enTranslation }, + es: { translation: esTranslation }, + fr: { translation: frTranslation }, + ja: { translation: jaTranslation } + }, + fallbackLng: "en", + supportedLngs: Object.keys(supportedLngs), + debug: import.meta.env.DEV, + interpolation: { + escapeValue: false // not needed for react as it escapes by default + } + }); + +export default i18next; diff --git a/app/frontend/src/i18n/index.tsx b/app/frontend/src/i18n/index.tsx new file mode 100644 index 0000000000..0a84ff2b10 --- /dev/null +++ b/app/frontend/src/i18n/index.tsx @@ -0,0 +1 @@ +export * from "./LanguagePicker"; diff --git a/app/frontend/src/i18n/tooltips.ts b/app/frontend/src/i18n/tooltips.ts deleted file mode 100644 index 6042fc8c03..0000000000 --- a/app/frontend/src/i18n/tooltips.ts +++ /dev/null @@ -1,30 +0,0 @@ -// Keep values less than 20 words. -// Don't add links to the tooltips. -export const toolTipText = { - promptTemplate: - "Overrides the prompt used to generate the answer based on the question and search results. To append to existing prompt instead of replace whole prompt, start your prompt with '>>>'.", - temperature: - "Sets the temperature of the request to the LLM that generates the answer. Higher temperatures result in more creative responses, but they may be less grounded.", - seed: "Sets a seed to improve the reproducibility of the model's responses. The seed can be any integer.", - searchScore: - "Sets a minimum score for search results coming back from Azure AI search. The score range depends on whether you're using hybrid (default), vectors only, or text only.", - rerankerScore: - "Sets a minimum score for search results coming back from the semantic reranker. The score always ranges between 0-4. The higher the score, the more semantically relevant the result is to the question.", - retrieveNumber: - "Sets the number of search results to retrieve from Azure AI search. More results may increase the likelihood of finding the correct answer, but may lead to the model getting 'lost in the middle'.", - excludeCategory: "Specifies a category to exclude from the search results. 
There are no categories used in the default data set.", - useSemanticReranker: "Enables the Azure AI Search semantic ranker, a model that re-ranks search results based on semantic similarity to the user's query.", - useSemanticCaptions: - "Sends semantic captions to the LLM instead of the full search result. A semantic caption is extracted from a search result during the process of semantic ranking.", - suggestFollowupQuestions: "Asks the LLM to suggest follow-up questions based on the user's query.", - useGPT4Vision: "Uses GPT-4-Turbo with Vision to generate responses based on images and text from the index.", - vectorFields: - "Specifies which embedding fields in the Azure AI Search Index will be searched, both the 'Images and text' embeddings, 'Images' only, or 'Text' only.", - gpt4VisionInputs: - "Sets what will be sent to the vision model. 'Images and text' sends both images and text to the model, 'Images' sends only images, and 'Text' sends only text.", - retrievalMode: - "Sets the retrieval mode for the Azure AI Search query. `Vectors + Text (Hybrid)` uses a combination of vector search and full text search, `Vectors` uses only vector search, and `Text` uses only full text search. Hybrid is generally optimal.", - streamChat: "Continuously streams the response to the chat UI as it is generated.", - useOidSecurityFilter: "Filter search results based on the authenticated user's OID.", - useGroupsSecurityFilter: "Filter search results based on the authenticated user's groups." -}; diff --git a/app/frontend/src/index.tsx b/app/frontend/src/index.tsx index 07fd0f7040..a8821c8c45 100644 --- a/app/frontend/src/index.tsx +++ b/app/frontend/src/index.tsx @@ -1,12 +1,15 @@ import React from "react"; import ReactDOM from "react-dom/client"; import { createHashRouter, RouterProvider } from "react-router-dom"; +import { I18nextProvider } from "react-i18next"; +import { HelmetProvider } from "react-helmet-async"; import { initializeIcons } from "@fluentui/react"; import "./index.css"; import Chat from "./pages/chat/Chat"; import LayoutWrapper from "./layoutWrapper"; +import i18next from "./i18n/config"; initializeIcons(); @@ -33,6 +36,10 @@ const router = createHashRouter([ ReactDOM.createRoot(document.getElementById("root") as HTMLElement).render( - + + + + + ); diff --git a/app/frontend/src/locales/en/translation.json b/app/frontend/src/locales/en/translation.json new file mode 100644 index 0000000000..e9539cd2ce --- /dev/null +++ b/app/frontend/src/locales/en/translation.json @@ -0,0 +1,135 @@ +{ + "pageTitle": "Azure OpenAI + AI Search", + "headerTitle": "Azure OpenAI + AI Search", + "chat": "Chat", + "qa": "Ask a question", + "login": "Login", + "logout": "Logout", + "clearChat": "Clear chat", + "upload": { + "fileLabel": "Upload file:", + "uploadedFilesLabel": "Previously uploaded files:", + "noFilesUploaded": "No files uploaded yet", + "loading": "Loading...", + "manageFileUploads": "Manage file uploads", + "uploadingFiles": "Uploading files...", + "uploadedFileError": "Error uploading file - please try again or contact admin.", + "deleteFile": "Delete file", + "deletingFile": "Deleting file...", + "errorDeleting": "Error deleting.", + "fileDeleted": "File deleted" + }, + "developerSettings": "Developer settings", + + "chatEmptyStateTitle": "Chat with your data", + "chatEmptyStateSubtitle": "Ask anything or try an example", + "defaultExamples": { + "1": "What is included in my Northwind Health Plus plan that is not in standard?", + "2": "What happens in a performance review?", + "3": "What 
does a Product Manager do?", + "placeholder": "Type a new question (e.g. does my plan cover annual eye exams?)" + }, + "askTitle": "Ask your data", + "gpt4vExamples": { + "1": "Compare the impact of interest rates and GDP in financial markets.", + "2": "What is the expected trend for the S&P 500 index over the next five years? Compare it to the past S&P 500 performance", + "3": "Can you identify any correlation between oil prices and stock market trends?", + "placeholder": "Example: Does my plan cover annual eye exams?" + }, + "generatingAnswer": "Generating answer", + "citationWithColon": "Citation:", + "followupQuestions": "Follow-up questions:", + + "tooltips": { + "submitQuestion": "Submit question", + "askWithVoice": "Ask question with voice", + "stopRecording": "Stop recording question", + "showThoughtProcess": "Show thought process", + "showSupportingContent": "Show supporting content", + "speakAnswer": "Speak answer", + "info": "Info", + "save": "Save" + }, + + "headerTexts":{ + "thoughtProcess": "Thought Process", + "supportingContent": "Supporting Content", + "citation": "Citation" + }, + + "labels": { + "toggleMenu": "Toggle menu", + "languagePicker": "Select your language", + "headerText": "Configure answer generation", + "closeButton": "Close", + "promptTemplate": "Override prompt template", + "temperature": "Temperature", + "seed": "Seed", + "minimumSearchScore": "Minimum search score", + "minimumRerankerScore": "Minimum reranker score", + "retrieveCount": "Retrieve this many search results:", + "excludeCategory": "Exclude category", + "useSemanticRanker": "Use semantic ranker for retrieval", + "useSemanticCaptions": "Use semantic captions", + "useSuggestFollowupQuestions": "Suggest follow-up questions", + "useGPT4V": "Use GPT vision model", + "gpt4VInput": { + "label": "GPT vision model inputs", + "options": { + "textAndImages": "Images and text", + "images": "Images", + "texts": "Text" + } + }, + "retrievalMode": { + "label": "Retrieval mode", + "options": { + "hybrid": "Vectors + Text (Hybrid)", + "vectors": "Vectors", + "texts": "Text" + } + }, + "vector": { + "label": "Vector fields (Multi-query vector search)", + "options": { + "embedding": "Text Embeddings", + "imageEmbedding": "Image Embeddings", + "both": "Text and Image embeddings" + } + }, + "useOidSecurityFilter": "Use oid security filter", + "useGroupsSecurityFilter": "Use groups security filter", + "shouldStream": "Stream chat completion responses" + }, + + "helpTexts": { + "promptTemplate": + "Overrides the prompt used to generate the answer based on the question and search results. To append to existing prompt instead of replace whole prompt, start your prompt with '>>>'.", + "temperature": + "Sets the temperature of the request to the LLM that generates the answer. Higher temperatures result in more creative responses, but they may be less grounded.", + "seed": "Sets a seed to improve the reproducibility of the model's responses. The seed can be any integer.", + "searchScore": + "Sets a minimum score for search results coming back from Azure AI search. The score range depends on whether you're using hybrid (default), vectors only, or text only.", + "rerankerScore": + "Sets a minimum score for search results coming back from the semantic reranker. The score always ranges between 0-4. The higher the score, the more semantically relevant the result is to the question.", + "retrieveNumber": + "Sets the number of search results to retrieve from Azure AI search. 
More results may increase the likelihood of finding the correct answer, but may lead to the model getting 'lost in the middle'.", + "excludeCategory": + "Specifies a category to exclude from the search results. There are no categories used in the default data set.", + "useSemanticReranker": + "Enables the Azure AI Search semantic ranker, a model that re-ranks search results based on semantic similarity to the user's query.", + "useSemanticCaptions": + "Sends semantic captions to the LLM instead of the full search result. A semantic caption is extracted from a search result during the process of semantic ranking.", + "suggestFollowupQuestions": "Asks the LLM to suggest follow-up questions based on the user's query.", + "useGPT4Vision": "Uses GPT-4-Turbo with Vision to generate responses based on images and text from the index.", + "vectorFields": + "Specifies which embedding fields in the Azure AI Search Index will be searched, both the 'Images and text' embeddings, 'Images' only, or 'Text' only.", + "gpt4VisionInputs": + "Sets what will be sent to the vision model. 'Images and text' sends both images and text to the model, 'Images' sends only images, and 'Text' sends only text.", + "retrievalMode": + "Sets the retrieval mode for the Azure AI Search query. `Vectors + Text (Hybrid)` uses a combination of vector search and full text search, `Vectors` uses only vector search, and `Text` uses only full text search. Hybrid is generally optimal.", + "streamChat": "Continuously streams the response to the chat UI as it is generated.", + "useOidSecurityFilter": "Filter search results based on the authenticated user's OID.", + "useGroupsSecurityFilter": "Filter search results based on the authenticated user's groups." + } +} diff --git a/app/frontend/src/locales/es/translation.json b/app/frontend/src/locales/es/translation.json new file mode 100644 index 0000000000..20a0294f20 --- /dev/null +++ b/app/frontend/src/locales/es/translation.json @@ -0,0 +1,136 @@ +{ + "pageTitle": "Azure OpenAI + AI Search", + "headerTitle": "Azure OpenAI + AI Search", + "chat": "Chat", + "qa": "Haz una pregunta", + "login": "Iniciar sesión", + "logout": "Cerrar sesión", + "clearChat": "Borrar chat", + "upload": { + "fileLabel": "Subir archivo:", + "uploadedFilesLabel": "Archivos subidos previamente:", + "noFilesUploaded": "No se han subido archivos todavía", + "loading": "Cargando...", + "manageFileUploads": "Administrar subidas de archivos", + "uploadingFiles": "Subiendo archivos...", + "uploadedFileError": "Error al subir el archivo - por favor, inténtalo de nuevo o contacta con el administrador.", + "deleteFile": "Eliminar archivo", + "deletingFile": "Eliminando archivo...", + "errorDeleting": "Error eliminando.", + "fileDeleted": "Archivo eliminado" + }, + "developerSettings": "Configuración del desarrollador", + + "chatEmptyStateTitle": "Chatea con tus datos", + "chatEmptyStateSubtitle": "Pregunta cualquier cosa o prueba un ejemplo", + "defaultExamples": { + "1": "¿Qué está incluido en mi plan Northwind Health Plus que no está en el estándar?", + "2": "¿Qué sucede en una revisión de rendimiento?", + "3": "¿Qué hace un Gerente de producto?", + "placeholder": "Escribe una nueva pregunta (por ejemplo, ¿mi plan cubre exámenes anuales de la vista?)" + }, + "askTitle": "Pregunta a tus datos", + "gpt4vExamples": { + "1": "Compara el impacto de las tasas de interés y el PIB en los mercados financieros.", + "2": "¿Cuál es la tendencia esperada para el índice S&P 500 en los próximos cinco años? 
Compáralo con el rendimiento pasado del S&P 500", + "3": "¿Puedes identificar alguna correlación entre los precios del petróleo y las tendencias del mercado de valores?", + "placeholder": "Ejemplo: ¿Mi plan cubre exámenes anuales de la vista?" + }, + "generatingAnswer": "Generando respuesta", + "citationWithColon": "Cita:", + "followupQuestions": "Preguntas de seguimiento:", + + "tooltips": { + "submitQuestion": "Enviar pregunta", + "askWithVoice": "Realizar pregunta con voz", + "stopRecording": "Detener la grabación de la pregunta", + "showThoughtProcess": "Mostrar proceso de pensamiento", + "showSupportingContent": "Mostrar contenido de soporte", + "speakAnswer": "Hablar respuesta", + "info": "Información", + "save": "Guardar" + }, + + "headerTexts":{ + "thoughtProcess": "Proceso de Pensamiento", + "supportingContent": "Contenido de apoyo", + "citation": "Cita" + }, + + "labels": { + "toggleMenu": "Alternar menú", + "languagePicker": "Seleccione su idioma", + "headerText": "Configurar la generación de respuestas", + "closeButton": "Cerrar", + "promptTemplate": "Sobrescribir plantilla de aviso", + "temperature": "Temperatura", + "seed": "Seed", + "minimumSearchScore": "Puntaje mínimo de búsqueda", + "minimumRerankerScore": "Puntaje mínimo de re-clasificación", + "retrieveCount": "Obtén este número de resultados de búsqueda:", + "excludeCategory": "Excluir categoría", + "useSemanticRanker": "Usar clasificador semántico para la recuperación", + "useSemanticCaptions": "Usar subtítulos semánticos", + "useSuggestFollowupQuestions": "Sugerir preguntas de seguimiento", + "useGPT4V": "Usar modelo de visión GPT", + "gpt4VInput": { + "label": "Entradas del modelo de visión GPT", + "options": { + "textAndImages": "Imágenes y texto", + "images": "Imágenes", + "texts": "Texto" + } + }, + "retrievalMode": { + "label": "Modo de recuperación", + "options": { + "hybrid": "Vectores + Texto (Híbrido)", + "vectors": "Vectores", + "texts": "Texto" + } + }, + "vector": { + "label": "Campos de vector (Búsqueda de vector de múltiples consultas)", + "options": { + "embedding": "Incrustaciones de texto", + "imageEmbedding": "Incrustaciones de imagen", + "both": "Incrustaciones de texto e imagen" + } + }, + "useOidSecurityFilter": "Usar filtro de seguridad oid", + "useGroupsSecurityFilter": "Usar filtro de seguridad de grupos", + "shouldStream": "Transmitir respuestas de finalización de chat en tiempo real" + }, + + "helpTexts": { + "promptTemplate": + "Sobrescribe el aviso utilizado para generar la respuesta basada en la pregunta y los resultados de la búsqueda. Para agregar al aviso existente en lugar de reemplazar todo el aviso, comienza tu aviso con '>>>'.", + "temperature": + "Establece la temperatura de la solicitud al LLM que genera la respuesta. Temperaturas más altas dan como resultado respuestas más creativas, pero pueden ser menos realistas.", + "seed": + "Define un número entero para mejorar la reproducibilidad de las respuestas del modelo. El número de semilla se utiliza para generar respuestas más consistentes. El número de semilla puede ser cualquier número.", + "searchScore": + "Establece una puntuación mínima para los resultados de búsqueda que vuelven de Azure AI search. El rango de puntuaciones depende de si estás utilizando híbrido (predeterminado), solo vectores o solo texto.", + "rerankerScore": + "Establece una puntuación mínima para los resultados de búsqueda que vuelven del re-clasificador semántico. La puntuación siempre varía entre 0-4.
Cuanto mayor es la puntuación, más relevante es semánticamente el resultado a la pregunta.", + "retrieveNumber": + "Establece el número de resultados de búsqueda para recuperar de Azure AI search. Más resultados pueden aumentar la probabilidad de encontrar la respuesta correcta, pero pueden provocar que el modelo se 'pierda en el medio'.", + "excludeCategory": + "Especifica una categoría para excluir de los resultados de búsqueda. No se utilizan categorías en el conjunto de datos predeterminado.", + "useSemanticReranker": + "Habilita el re-clasificador semántico de Azure AI Search, un modelo que re-clasifica los resultados de búsqueda basándose en la similitud semántica con la consulta del usuario.", + "useSemanticCaptions": + "Envía subtítulos semánticos al LLM en lugar del resultado de búsqueda completo. Un subtítulo semántico se extrae de un resultado de búsqueda durante el proceso de clasificación semántica.", + "suggestFollowupQuestions": "Pide al LLM que sugiera preguntas de seguimiento basándose en la consulta del usuario.", + "useGPT4Vision": "Utiliza GPT-4-Turbo con Visión para generar respuestas basándose en imágenes y texto del índice.", + "vectorFields": + "Especifica qué campos de incrustación en el índice de búsqueda de Azure AI se buscarán, tanto las incrustaciones de 'Imágenes y texto', solo 'Imagenes', o solo 'Texto'.", + "gpt4VisionInputs": + "Establece lo que se enviará al modelo de visión. 'Imágenes y texto' envía tanto imágenes como texto al modelo, 'Imágenes' solo envía imágenes y 'Texto' solo envía texto.", + "retrievalMode": + "Establece el modo de recuperación para la consulta de búsqueda de Azure AI. Vectores + Texto (Híbrido) utiliza una combinación de búsqueda vectorial y búsqueda de texto completo, Vectores utiliza solo la búsqueda vectorial y Texto utiliza solo la búsqueda de texto completo. Generalmente, el modo híbrido es óptimo.", + "streamChat": "Transmite continuamente la respuesta a la interfaz de usuario del chat mientras se genera.", + "useOidSecurityFilter": "Filtra los resultados de búsqueda en función del OID del usuario autenticado.", + "useGroupsSecurityFilter": "Filtra los resultados de búsqueda en función de los grupos del usuario autenticado." 
+ } +} diff --git a/app/frontend/src/locales/fr/translation.json b/app/frontend/src/locales/fr/translation.json new file mode 100644 index 0000000000..d58812f99e --- /dev/null +++ b/app/frontend/src/locales/fr/translation.json @@ -0,0 +1,136 @@ +{ + "pageTitle": "Azure OpenAI + AI Search", + "headerTitle": "Azure OpenAI + AI Search", + "chat": "Chat", + "qa": "Posez une question", + "login": "Connexion", + "logout": "Déconnexion", + "clearChat": "Effacer le chat", + "upload": { + "fileLabel": "Télécharger le fichier:", + "uploadedFilesLabel": "Fichiers précédemment téléchargés:", + "noFilesUploaded": "Aucun fichier téléchargé pour le moment", + "loading": "Chargement...", + "manageFileUploads": "Gérer les téléchargements de fichiers", + "uploadingFiles": "Téléchargement de fichiers...", + "uploadedFileError": "Erreur lors du téléchargement du fichier - veuillez réessayer ou contacter l'administrateur.", + "deleteFile": "Supprimer le fichier", + "deletingFile": "Suppression du fichier...", + "errorDeleting": "Erreur lors de la suppression.", + "fileDeleted": "Fichier supprimé" + }, + "developerSettings": "Paramètres développeur", + + "chatEmptyStateTitle": "Discutez avec vos données", + "chatEmptyStateSubtitle": "Demandez n'importe quoi ou essayez un exemple", + "defaultExamples": { + "1": "Qu'est-ce qui est inclus dans mon plan Northwind Health Plus qui n'est pas dans le standard?", + "2": "Qu'arrive-t-il lors d'un examen de performance?", + "3": "Qu'est-ce qu'un Product Manager fait?", + "placeholder": "Tapez une nouvelle question (par exemple, mon plan couvre-t-il les examens oculaires annuels?)" + }, + "askTitle": "Demandez à vos données", + "gpt4vExamples": { + "1": "Comparez l'impact des taux d'intérêt et du PIB sur les marchés financiers.", + "2": "Quelle est la tendance prévue pour l'indice S&P 500 au cours des cinq prochaines années? Comparez-le aux performances passées de l'S&P 500", + "3": "Pouvez-vous identifier une corrélation entre les prix du pétrole et les tendances du marché boursier?", + "placeholder": "Exemple: Mon plan couvre-t-il les examens oculaires annuels?" 
+ }, + "generatingAnswer": "Génération de la réponse", + "citationWithColon": "Citation:", + "followupQuestions": "Questions de suivi:", + + "tooltips": { + "submitQuestion": "Soumettre une question", + "askWithVoice": "Poser une question à l'aide de la voix", + "stopRecording": "Arrêter l'enregistrement de la question", + "showThoughtProcess": "Montrer le processus de réflexion", + "showSupportingContent": "Montrer le contenu de soutien", + "speakAnswer": "Parler réponse", + "info": "Info", + "save": "Sauvegarder" + }, + + "headerTexts":{ + "thoughtProcess": "Processus de Réflexion", + "supportingContent": "Contenu d'Appui", + "citation": "Citation" + }, + + "labels": { + "toggleMenu": "Basculer le menu", + "languagePicker": "Sélectionnez votre langue", + "headerText": "Configurer la génération de réponses", + "closeButton": "Fermer", + "promptTemplate": "Modèle de l'invitation", + "temperature": "Température", + "seed": "Seed", + "minimumSearchScore": "Score de recherche minimum", + "minimumRerankerScore": "Score minimum du reclasseur sémantique", + "retrieveCount": "Récupérer ce nombre de résultats de recherche :", + "excludeCategory": "Exclure la catégorie", + "useSemanticRanker": "Utiliser le reclasseur sémantique", + "useSemanticCaptions": "Utiliser les titres sémantiques", + "useSuggestFollowupQuestions": "Suggérer des questions de suivi", + "useGPT4V": "Utiliser le modèle GPT Vision", + "gpt4VInput": { + "label": "Entrées du modèle GPT Vision", + "options": { + "textAndImages": "Images et texte", + "images": "Images", + "texts": "Texte" + } + }, + "retrievalMode": { + "label": "Mode de récupération", + "options": { + "hybrid": "Vecteurs + Texte (Hybride)", + "vectors": "Vecteurs", + "texts": "Texte" + } + }, + "vector": { + "label": "Champs de vecteur (recherche de vecteur multi-requête)", + "options": { + "embedding": "Incorporations de texte", + "imageEmbedding": "Incorporations d'images", + "both": "Incorporations de texte et d'images" + } + }, + "useOidSecurityFilter": "Utiliser le filtre de sécurité oid", + "useGroupsSecurityFilter": "Utiliser le filtre de sécurité de groupe", + "shouldStream": "Diffuser en continu les réponses" + }, + + "helpTexts": { + "promptTemplate": + "Remplace le prompt utilisé pour générer la réponse en fonction de la question et des résultats de la recherche. Pour ajouter à l'invite existante au lieu de remplacer l'invite entière, commencez votre invite par '>>>'.", + "temperature": + "Définit la température de la requête à le Grand modèle de langage (LLM) qui génère la réponse. Des températures plus élevées donnent lieu à des réponses plus créatives, mais elles peuvent être moins ancrées.", + "seed": + "Détermine un numéro premier qui servira à améliorer la répétabilité des réponses du modèle. Le numéro premier doit être un entier quelconque.", + "searchScore": + "Définit un score minimum pour les résultats de recherche provenant d'Azure AI Search. La plage de scores dépend si vous utilisez l'hybride (par défaut), uniquement les vecteurs, ou uniquement le texte.", + "rerankerScore": + "Définit un score minimum pour les résultats de recherche provenant du reranker sémantique. Le score varie toujours entre 0 et 4. Plus le score est élevé, plus le résultat est sémantiquement pertinent par rapport à la question.", + "retrieveNumber": + "Définit le nombre de résultats de recherche à récupérer d'Azure AI Search. 
Plus de résultats peuvent augmenter la probabilité de trouver la bonne réponse, mais peuvent amener le modèle à se 'perdre au milieu'.", + "excludeCategory": + "Spécifie une catégorie à exclure des résultats de recherche. Il n'y a aucune catégorie utilisée dans l'ensemble de données par défaut.", + "useSemanticReranker": + "Active le classement sémantique d'Azure AI Search, un modèle qui réorganise les résultats de recherche en fonction de la similarité sémantique avec la requête de l'utilisateur.", + "useSemanticCaptions": + "Envoie des légendes sémantiques à l'LLM au lieu du résultat de recherche complet. Une légende sémantique est extraite d'un résultat de recherche lors du processus de classement sémantique.", + "suggestFollowupQuestions": "Demande à l'LLM de suggérer des questions de suivi en fonction de la requête de l'utilisateur.", + "useGPT4Vision": "Utilise GPT-4-Turbo avec Vision pour générer des réponses basées sur des images et du texte de l'index.", + "vectorFields": + "Spécifie quels champs d'incorporation dans l'index de recherche Azure AI seront recherchés, à la fois les incorporations 'Images et texte', 'Images' seulement, ou 'Texte' seulement.", + "gpt4VisionInputs": + "Définit ce qui sera envoyé au modèle de vision. 'Images et texte' envoie à la fois des images et du texte au modèle, 'Images' envoie seulement des images, et 'Texte' envoie seulement du texte.", + "retrievalMode": + "Définit le mode de récupération pour la requête Azure AI Search. Vecteurs + Texte (Hybride) utilise une combinaison de recherche vectorielle et de recherche en texte intégral, Vecteurs utilise uniquement la recherche vectorielle, et Texte utilise uniquement la recherche en texte intégral. Hybride est généralement optimal.", + "streamChat": "Diffuse en continu la réponse à l'interface utilisateur du chat au fur et à mesure de sa génération.", + "useOidSecurityFilter":"Filtrez les résultats de recherche en fonction de l'OID de l'utilisateur authentifié.", + "useGroupsSecurityFilter": "Filtrez les résultats de recherche en fonction des groupes de l'utilisateur authentifié." 
+ } +} diff --git a/app/frontend/src/locales/ja/translation.json b/app/frontend/src/locales/ja/translation.json new file mode 100644 index 0000000000..7d0fc61cc5 --- /dev/null +++ b/app/frontend/src/locales/ja/translation.json @@ -0,0 +1,134 @@ +{ + "pageTitle": "Azure OpenAI + AI Search", + "headerTitle": "Azure OpenAI + AI Search", + "chat": "チャット", + "qa": "一問一答", + "login": "ログイン", + "logout": "ログアウト", + "clearChat": "チャットをクリア", + "upload": { + "fileLabel": "ファイルをアップロード:", + "uploadedFilesLabel": "アップロード済みのファイル:", + "noFilesUploaded": "まだファイルがアップロードされていません", + "loading": "読み込み中...", + "manageFileUploads": "ファイルのアップロードを管理", + "uploadingFiles": "ファイルをアップロード中...", + "uploadedFileError": "ファイルのアップロードエラー - 再試行、もしくは管理者にお問い合わせください。", + "deleteFile": "ファイルを削除", + "deletingFile": "ファイルを削除中...", + "errorDeleting": "削除エラー。", + "fileDeleted": "ファイル削除済み" + }, + "developerSettings": "高度な設定", + + "chatEmptyStateTitle": "データを用いてチャットを行います", + "chatEmptyStateSubtitle": "ご自由にお問い合わせいただくか、もしくは下記のサンプルをお試しください", + "defaultExamples": { + "1": "私が契約中の Northwind Health Plus の標準プランには何が含まれ、何が含まれないのですか?", + "2": "パフォーマンス・レビューで何を行いますか?", + "3": "プロダクトマネージャーの職務内容について教えてください。", + "placeholder": "新しい質問を入力してください (例:私の契約プランは年1回の眼科検診もカバーしていますか?)" + }, + "askTitle": "データを利用した問い合わせ", + "gpt4vExamples": { + "1": "金融市場における金利とGDPの影響を比較してください。", + "2": "S&P 500指数の今後5年間のトレンドを予想してください、そして過去のS&P 500のパフォーマンスと比較してください。", + "3": "原油価格と株式市場の動向の間には相関関係があると思いますか?", + "placeholder": "例:私の契約プランは年1回の眼科検診もカバーしていますか?" + }, + "generatingAnswer": "回答を生成中", + "citationWithColon": "引用:", + "followupQuestions": "フォローアップの質問:", + + "tooltips":{ + "submitQuestion": "質問を送信", + "askWithVoice": "音声で質問", + "stopRecording": "質問の記録を停止", + "showThoughtProcess": "思考プロセスの表示", + "showSupportingContent": "サポート内容の表示", + "speakAnswer": "音声による回答", + "info": "情報", + "save": "保存" + }, + + "headerTexts":{ + "thoughtProcess": "思考プロセス", + "supportingContent": "サポート内容", + "citation": "引用" + }, + + "labels": { + "toggleMenu": "メニューを切り替える", + "languagePicker": "言語の選択", + "headerText": "回答生成の設定", + "closeButton": "閉じる", + "promptTemplate": "プロンプト・テンプレートを上書き", + "temperature": "温度", + "seed": "シード", + "minimumSearchScore": "最小検索スコア", + "minimumRerankerScore": "最小リランキング・スコア", + "retrieveCount": "ここで指定する検索結果数を取得:", + "excludeCategory": "カテゴリを除外", + "useSemanticRanker": "取得にセマンティック・ランカーを使用", + "useSemanticCaptions": "セマンティック・キャプションを使用", + "useSuggestFollowupQuestions": "フォローアップの質問を提案", + "useGPT4V": "GPT Visionモデルを使用", + "gpt4VInput": { + "label": "GPT Visionモデルの入力", + "options": { + "textAndImages": "画像とテキスト", + "images": "画像", + "texts": "テキスト" + } + }, + "retrievalMode": { + "label": "検索モード", + "options": { + "hybrid": "ベクトル + テキスト (ハイブリッド)", + "vectors": "ベクトル", + "texts": "テキスト" + } + }, + "vector": { + "label": "ベクトルフィールド (マルチクエリベクトル検索)", + "options": { + "embedding": "テキスト埋め込み", + "imageEmbedding": "画像埋め込み", + "both": "テキストと画像の埋め込み" + } + }, + "useOidSecurityFilter": "OIDセキュリティフィルターの使用", + "useGroupsSecurityFilter": "グループセキュリティフィルターの使用", + "shouldStream": "ストリームでチャット応答を取得" + }, + + "helpTexts": { + "promptTemplate": + "質問や検索結果に基づいて回答を生成するためプロンプトを上書きします。既存のプロンプトに追加する場合、全てを置き換えるのではなく、プロンプトを'>>>'で始めてください。", + "temperature": + "LLMで回答を生成する際、リクエストに対して温度を設定します。温度が高いほど、より創造的な回答が得られますが、不安定になる可能性があります。", + "seed": "モデルによる回答の再現性を向上させるためシードを設定します。シードは任意の整数に設定できます。", + "searchScore": + "Azure AI Searchから返される検索結果の最小スコアを設定します。スコアの範囲は、ハイブリッド(デフォルト)、ベクトルのみ、またはテキストのみを使用しているかどうかによって異なります。", + "rerankerScore": + 
"セマンティック・リランカーから返される検索結果の最小スコアを設定します。スコアの値は0から4の範囲で変更できます。スコアの値が大きいほど、質問に対する結果の意味的な関連性が高まります。", + "retrieveNumber": + "Azure AI Searchの検索結果から取得する数を設定します。結果が多ければ多いほど、正しい答えを見つける可能性は高まるかもしれませんが、モデルが「途中で迷子になる」可能性もあります。", + "excludeCategory": "検索結果から除外するカテゴリを指定します。デフォルトのデータセットはカテゴリを使用していません。", + "useSemanticReranker": + "Azure AI Searchのセマンティック・ランカーを有効にします(ユーザーのクエリに対するセマンティック類似性に基づいて検索結果をリランク付けするモデル)。", + "useSemanticCaptions": + "完全な検索結果ではなく、LLMにセマンティック・キャプションを送信します。セマンティック・キャプションは、セマンティック・ランキングの処理中に検索結果から抽出されます。", + "suggestFollowupQuestions": "ユーザーのクエリに基づいて、LLMにフォローアップの質問を提案するように問い合わせます。", + "useGPT4Vision": "インデックスから画像とテキストを利用して回答を生成するためGPT-4-Turbo with Visionを使用します。", + "vectorFields": + "Azure AI Search Index中でどの埋め込みフィールドを検索に利用するか指定します。「画像とテキスト」の両方、もしくは「画像」のみ、または「テキスト」のみのいずれかの埋め込みを指定します。", + "gpt4VisionInputs": + "ビジョンモデルに送信する内容を設定します。「画像とテキスト」は画像とテキストの両方をモデルに送信し、「画像」は画像のみを送信し、「テキスト」はテキストのみを送信します。", + "retrievalMode": + "Azure AI Searchクエリの取得モードを設定します。「ベクトル + テキスト (ハイブリッド)」はベクトル検索と全文検索の組み合わせを使用し、「ベクトル」はベクトル検索のみを使用し、「テキスト」は全文検索のみを使用します。一般的にはハイブリッド検索がお勧めです。", + "streamChat": "生成された回答をチャットUIに対して継続的にストリームで送信します。", + "useOidSecurityFilter": "認証ユーザーのOIDに基づいて検索結果をフィルタリングします。", + "useGroupsSecurityFilter": "認証ユーザーのグループに基づいて検索結果をフィルタリングします。" + } +} diff --git a/app/frontend/src/pages/ask/Ask.tsx b/app/frontend/src/pages/ask/Ask.tsx index 10f6d7e2d5..d91293267d 100644 --- a/app/frontend/src/pages/ask/Ask.tsx +++ b/app/frontend/src/pages/ask/Ask.tsx @@ -1,4 +1,6 @@ import { useContext, useEffect, useRef, useState } from "react"; +import { useTranslation } from "react-i18next"; +import { Helmet } from "react-helmet-async"; import { Checkbox, Panel, DefaultButton, Spinner, TextField, ICheckboxProps, ITextFieldProps } from "@fluentui/react"; import { useId } from "@fluentui/react-hooks"; @@ -14,12 +16,12 @@ import { SettingsButton } from "../../components/SettingsButton/SettingsButton"; import { useLogin, getToken, requireAccessControl, checkLoggedIn } from "../../authConfig"; import { VectorSettings } from "../../components/VectorSettings"; import { GPT4VSettings } from "../../components/GPT4VSettings"; -import { toolTipText } from "../../i18n/tooltips.js"; import { UploadFile } from "../../components/UploadFile"; import { useMsal } from "@azure/msal-react"; import { TokenClaimsDisplay } from "../../components/TokenClaimsDisplay"; import { LoginContext } from "../../loginContext"; +import { LanguagePicker } from "../../i18n/LanguagePicker"; export function Component(): JSX.Element { const [isConfigPanelOpen, setIsConfigPanelOpen] = useState(false); @@ -45,6 +47,7 @@ export function Component(): JSX.Element { const [showSemanticRankerOption, setShowSemanticRankerOption] = useState(false); const [showVectorOption, setShowVectorOption] = useState(false); const [showUserUpload, setShowUserUpload] = useState(false); + const [showLanguagePicker, setshowLanguagePicker] = useState(false); const [showSpeechInput, setShowSpeechInput] = useState(false); const [showSpeechOutputBrowser, setShowSpeechOutputBrowser] = useState(false); const [showSpeechOutputAzure, setShowSpeechOutputAzure] = useState(false); @@ -83,6 +86,7 @@ export function Component(): JSX.Element { setRetrievalMode(RetrievalMode.Text); } setShowUserUpload(config.showUserUpload); + setshowLanguagePicker(config.showLanguagePicker); setShowSpeechInput(config.showSpeechInput); setShowSpeechOutputBrowser(config.showSpeechOutputBrowser); setShowSpeechOutputAzure(config.showSpeechOutputAzure); @@ -129,6 +133,7 @@ export function Component(): JSX.Element 
{ vector_fields: vectorFieldList, use_gpt4v: useGPT4V, gpt4v_input: gpt4vInput, + language: i18n.language, ...(seed !== null ? { seed: seed } : {}) } }, @@ -233,18 +238,23 @@ export function Component(): JSX.Element { const useOidSecurityFilterFieldId = useId("useOidSecurityFilterField"); const useGroupsSecurityFilterId = useId("useGroupsSecurityFilter"); const useGroupsSecurityFilterFieldId = useId("useGroupsSecurityFilterField"); + const { t, i18n } = useTranslation(); return (
+            {/* Setting the page title using react-helmet-async */}
+            <Helmet>
+                <title>{t("pageTitle")}</title>
+            </Helmet>
    {showUserUpload && } setIsConfigPanelOpen(!isConfigPanelOpen)} />
-                    Ask your data
+                    {t("askTitle")}
    makeApiRequest(question)} @@ -253,8 +263,13 @@ export function Component(): JSX.Element {
    - {isLoading && } - {!lastQuestionRef.current && } + {isLoading && } + {!lastQuestionRef.current && ( +
    + {showLanguagePicker && i18n.changeLanguage(newLang)} />} + +
    + )} {!isLoading && answer && !error && (
    setIsConfigPanelOpen(false)} - closeButtonAriaLabel="Close" - onRenderFooterContent={() => setIsConfigPanelOpen(false)}>Close} + closeButtonAriaLabel={t("labels.closeButton")} + onRenderFooterContent={() => setIsConfigPanelOpen(false)}>{t("labels.closeButton")}} isFooterAtBottom={true} > ( - + )} /> ( - + )} /> ( - + )} /> ( - + )} /> @@ -358,7 +373,7 @@ export function Component(): JSX.Element { ( - + )} /> )} @@ -375,7 +395,7 @@ export function Component(): JSX.Element { ( - + )} /> ( - + )} /> @@ -405,14 +430,14 @@ export function Component(): JSX.Element { id={semanticRankerFieldId} className={styles.chatSettingsSeparator} checked={useSemanticRanker} - label="Use semantic ranker for retrieval" + label={t("labels.useSemanticRanker")} onChange={onUseSemanticRankerChange} aria-labelledby={semanticRankerId} onRenderLabel={(props: ICheckboxProps | undefined) => ( )} @@ -422,7 +447,7 @@ export function Component(): JSX.Element { id={semanticCaptionsFieldId} className={styles.chatSettingsSeparator} checked={useSemanticCaptions} - label="Use semantic captions" + label={t("labels.useSemanticCaptions")} onChange={onUseSemanticCaptionsChange} disabled={!useSemanticRanker} aria-labelledby={semanticCaptionsId} @@ -430,7 +455,7 @@ export function Component(): JSX.Element { )} @@ -464,7 +489,7 @@ export function Component(): JSX.Element { id={useOidSecurityFilterFieldId} className={styles.chatSettingsSeparator} checked={useOidSecurityFilter || requireAccessControl} - label="Use oid security filter" + label={t("labels.useOidSecurityFilter")} disabled={!loggedIn || requireAccessControl} onChange={onUseOidSecurityFilterChange} aria-labelledby={useOidSecurityFilterId} @@ -472,7 +497,7 @@ export function Component(): JSX.Element { )} @@ -481,7 +506,7 @@ export function Component(): JSX.Element { id={useGroupsSecurityFilterFieldId} className={styles.chatSettingsSeparator} checked={useGroupsSecurityFilter || requireAccessControl} - label="Use groups security filter" + label={t("labels.useGroupsSecurityFilter")} disabled={!loggedIn || requireAccessControl} onChange={onUseGroupsSecurityFilterChange} aria-labelledby={useGroupsSecurityFilterId} @@ -489,7 +514,7 @@ export function Component(): JSX.Element { )} diff --git a/app/frontend/src/pages/chat/Chat.tsx b/app/frontend/src/pages/chat/Chat.tsx index 248788c5b6..001a9b8712 100644 --- a/app/frontend/src/pages/chat/Chat.tsx +++ b/app/frontend/src/pages/chat/Chat.tsx @@ -1,4 +1,6 @@ import { useRef, useState, useEffect, useContext } from "react"; +import { useTranslation } from "react-i18next"; +import { Helmet } from "react-helmet-async"; import { Checkbox, Panel, DefaultButton, TextField, ITextFieldProps, ICheckboxProps } from "@fluentui/react"; import { SparkleFilled } from "@fluentui/react-icons"; import { useId } from "@fluentui/react-hooks"; @@ -32,8 +34,8 @@ import { VectorSettings } from "../../components/VectorSettings"; import { useMsal } from "@azure/msal-react"; import { TokenClaimsDisplay } from "../../components/TokenClaimsDisplay"; import { GPT4VSettings } from "../../components/GPT4VSettings"; -import { toolTipText } from "../../i18n/tooltips.js"; import { LoginContext } from "../../loginContext"; +import { LanguagePicker } from "../../i18n/LanguagePicker"; const Chat = () => { const [isConfigPanelOpen, setIsConfigPanelOpen] = useState(false); @@ -74,6 +76,7 @@ const Chat = () => { const [showSemanticRankerOption, setShowSemanticRankerOption] = useState(false); const [showVectorOption, setShowVectorOption] = useState(false); const 
[showUserUpload, setShowUserUpload] = useState(false); + const [showLanguagePicker, setshowLanguagePicker] = useState(false); const [showSpeechInput, setShowSpeechInput] = useState(false); const [showSpeechOutputBrowser, setShowSpeechOutputBrowser] = useState(false); const [showSpeechOutputAzure, setShowSpeechOutputAzure] = useState(false); @@ -98,6 +101,7 @@ const Chat = () => { setRetrievalMode(RetrievalMode.Text); } setShowUserUpload(config.showUserUpload); + setshowLanguagePicker(config.showLanguagePicker); setShowSpeechInput(config.showSpeechInput); setShowSpeechOutputBrowser(config.showSpeechOutputBrowser); setShowSpeechOutputAzure(config.showSpeechOutputAzure); @@ -185,6 +189,7 @@ const Chat = () => { vector_fields: vectorFieldList, use_gpt4v: useGPT4V, gpt4v_input: gpt4vInput, + language: i18n.language, ...(seed !== null ? { seed: seed } : {}) } }, @@ -339,9 +344,14 @@ const Chat = () => { const useGroupsSecurityFilterFieldId = useId("useGroupsSecurityFilterField"); const shouldStreamId = useId("shouldStream"); const shouldStreamFieldId = useId("shouldStreamField"); + const { t, i18n } = useTranslation(); return (
+            {/* Setting the page title using react-helmet-async */}
+            <Helmet>
+                <title>{t("pageTitle")}</title>
+            </Helmet>
    {showUserUpload && } @@ -352,8 +362,10 @@ const Chat = () => { {!lastQuestionRef.current ? (
    ) : ( @@ -427,7 +439,7 @@ const Chat = () => {
    makeApiRequest(question)} showSpeechInput={showSpeechInput} @@ -447,19 +459,19 @@ const Chat = () => { )} setIsConfigPanelOpen(false)} - closeButtonAriaLabel="Close" - onRenderFooterContent={() => setIsConfigPanelOpen(false)}>Close} + closeButtonAriaLabel={t("labels.closeButton")} + onRenderFooterContent={() => setIsConfigPanelOpen(false)}>{t("labels.closeButton")}} isFooterAtBottom={true} > { )} @@ -477,7 +489,7 @@ const Chat = () => { { onChange={onTemperatureChange} aria-labelledby={temperatureId} onRenderLabel={(props: ITextFieldProps | undefined) => ( - + )} /> ( - + )} /> { onChange={onMinimumSearchScoreChange} aria-labelledby={searchScoreId} onRenderLabel={(props: ITextFieldProps | undefined) => ( - + )} /> @@ -522,7 +534,7 @@ const Chat = () => { { )} @@ -544,7 +556,7 @@ const Chat = () => { { onChange={onRetrieveCountChange} aria-labelledby={retrieveCountId} onRenderLabel={(props: ITextFieldProps | undefined) => ( - + )} /> { )} @@ -579,14 +596,14 @@ const Chat = () => { id={semanticRankerFieldId} className={styles.chatSettingsSeparator} checked={useSemanticRanker} - label="Use semantic ranker for retrieval" + label={t("labels.useSemanticRanker")} onChange={onUseSemanticRankerChange} aria-labelledby={semanticRankerId} onRenderLabel={(props: ICheckboxProps | undefined) => ( )} @@ -596,7 +613,7 @@ const Chat = () => { id={semanticCaptionsFieldId} className={styles.chatSettingsSeparator} checked={useSemanticCaptions} - label="Use semantic captions" + label={t("labels.useSemanticCaptions")} onChange={onUseSemanticCaptionsChange} disabled={!useSemanticRanker} aria-labelledby={semanticCaptionsId} @@ -604,7 +621,7 @@ const Chat = () => { )} @@ -616,14 +633,14 @@ const Chat = () => { id={suggestFollowupQuestionsFieldId} className={styles.chatSettingsSeparator} checked={useSuggestFollowupQuestions} - label="Suggest follow-up questions" + label={t("labels.useSuggestFollowupQuestions")} onChange={onUseSuggestFollowupQuestionsChange} aria-labelledby={suggestFollowupQuestionsId} onRenderLabel={(props: ICheckboxProps | undefined) => ( )} @@ -655,7 +672,7 @@ const Chat = () => { id={useOidSecurityFilterFieldId} className={styles.chatSettingsSeparator} checked={useOidSecurityFilter || requireAccessControl} - label="Use oid security filter" + label={t("labels.useOidSecurityFilter")} disabled={!loggedIn || requireAccessControl} onChange={onUseOidSecurityFilterChange} aria-labelledby={useOidSecurityFilterId} @@ -663,7 +680,7 @@ const Chat = () => { )} @@ -672,7 +689,7 @@ const Chat = () => { id={useGroupsSecurityFilterFieldId} className={styles.chatSettingsSeparator} checked={useGroupsSecurityFilter || requireAccessControl} - label="Use groups security filter" + label={t("labels.useGroupsSecurityFilter")} disabled={!loggedIn || requireAccessControl} onChange={onUseGroupsSecurityFilterChange} aria-labelledby={useGroupsSecurityFilterId} @@ -680,7 +697,7 @@ const Chat = () => { )} @@ -692,11 +709,11 @@ const Chat = () => { id={shouldStreamFieldId} className={styles.chatSettingsSeparator} checked={shouldStream} - label="Stream chat completion responses" + label={t("labels.shouldStream")} onChange={onShouldStreamChange} aria-labelledby={shouldStreamId} onRenderLabel={(props: ICheckboxProps | undefined) => ( - + )} /> diff --git a/app/frontend/src/pages/layout/Layout.tsx b/app/frontend/src/pages/layout/Layout.tsx index 94d96a127b..2086129292 100644 --- a/app/frontend/src/pages/layout/Layout.tsx +++ b/app/frontend/src/pages/layout/Layout.tsx @@ -1,6 +1,6 @@ import React, { useState, useEffect, useRef, 
RefObject } from "react"; import { Outlet, NavLink, Link } from "react-router-dom"; - +import { useTranslation } from "react-i18next"; import styles from "./Layout.module.css"; import { useLogin } from "../../authConfig"; @@ -9,6 +9,7 @@ import { LoginButton } from "../../components/LoginButton"; import { IconButton } from "@fluentui/react"; const Layout = () => { + const { t } = useTranslation(); const [menuOpen, setMenuOpen] = useState(false); const menuRef: RefObject = useRef(null); @@ -38,7 +39,7 @@ const Layout = () => {
-                        Azure OpenAI + AI Search
+                        {t("headerTitle")}
    {useLogin && } - +
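Both Ask.tsx and Chat.tsx above import a `LanguagePicker` from `app/frontend/src/i18n/LanguagePicker` and render it when `showLanguagePicker` is true, but the component itself is not part of this excerpt. For orientation only, a minimal sketch of a component with that role follows; the `onLanguageChange` prop name, the use of a Fluent UI `Dropdown`, and the hard-coded locale list are assumptions rather than the PR's actual implementation.

```tsx
// Illustrative sketch only -- the real LanguagePicker shipped with this change may differ.
import { Dropdown, IDropdownOption } from "@fluentui/react";
import { useTranslation } from "react-i18next";

interface Props {
    // Assumed prop name; the parent passes newLang => i18n.changeLanguage(newLang)
    onLanguageChange: (language: string) => void;
}

// Assumed to mirror the locales shipped in app/frontend/src/locales
const languageOptions: IDropdownOption[] = [
    { key: "en", text: "English" },
    { key: "es", text: "Español" },
    { key: "fr", text: "Français" },
    { key: "ja", text: "日本語" }
];

export const LanguagePicker = ({ onLanguageChange }: Props) => {
    const { t, i18n } = useTranslation();
    return (
        <Dropdown
            label={t("labels.languagePicker")}
            selectedKey={i18n.language}
            options={languageOptions}
            onChange={(_event, option) => option && onLanguageChange(option.key as string)}
        />
    );
};
```

The `labels.languagePicker` key used for the label here is the one added to each locale's `translation.json` above.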
diff --git a/azure.yaml b/azure.yaml index 2d5ef5abb0..4deeba5c78 100644 --- a/azure.yaml +++ b/azure.yaml @@ -67,6 +67,7 @@ pipeline: - AZURE_COMPUTER_VISION_RESOURCE_GROUP - AZURE_COMPUTER_VISION_LOCATION - AZURE_COMPUTER_VISION_SKU + - ENABLE_LANGUAGE_PICKER - USE_SPEECH_INPUT_BROWSER - USE_SPEECH_OUTPUT_BROWSER - USE_SPEECH_OUTPUT_AZURE diff --git a/docs/customization.md b/docs/customization.md index 06628b5aeb..a43506a29d 100644 --- a/docs/customization.md +++ b/docs/customization.md @@ -17,12 +17,7 @@ The Chat App is designed to work with any PDF documents. The sample data is prov ## Customizing the UI -The frontend is built using [React](https://reactjs.org/) and [Fluent UI components](https://react.fluentui.dev/). The frontend components are stored in the `app/frontend/src` folder. The typical components you'll want to customize are: - -- `app/frontend/index.html`: To change the page title -- `app/frontend/src/pages/layout/Layout.tsx`: To change the header text and logo -- `app/frontend/src/pages/chat/Chat.tsx`: To change the large heading -- `app/frontend/src/components/Example/ExampleList.tsx`: To change the example questions +The frontend is built using [React](https://reactjs.org/) and [Fluent UI components](https://react.fluentui.dev/). The frontend components are stored in the `app/frontend/src` folder. To modify the page title, header text, example questions, and other UI elements, you can customize the `app/frontend/src/locales/{en/es/fr/ja}/translation.json` files for different languages (English is the default). The primary strings and labels used throughout the application are defined within these files. ## Customizing the backend diff --git a/docs/deploy_features.md b/docs/deploy_features.md index 30a232fe05..e4628b1b80 100644 --- a/docs/deploy_features.md +++ b/docs/deploy_features.md @@ -7,6 +7,7 @@ You should typically enable these features before running `azd up`. Once you've * [Using GPT-4](#using-gpt-4) * [Using text-embedding-3 models](#using-text-embedding-3-models) * [Enabling GPT-4 Turbo with Vision](#enabling-gpt-4-turbo-with-vision) +* [Enabling language picker](#enabling-language-picker) * [Enabling speech input/output](#enabling-speech-inputoutput) * [Enabling Integrated Vectorization](#enabling-integrated-vectorization) * [Enabling authentication](#enabling-authentication) @@ -141,6 +142,16 @@ If you have already deployed: This section covers the integration of GPT-4 Vision with Azure AI Search. Learn how to enhance your search capabilities with the power of image and text indexing, enabling advanced search functionalities over diverse document types. For a detailed guide on setup and usage, visit our [Enabling GPT-4 Turbo with Vision](gpt4v.md) page. +## Enabling language picker + +You can optionally enable the language picker to allow users to switch between different languages. Currently, it supports English, Spanish, French, and Japanese. + +To add support for additional languages, create new locale files and update `app/frontend/src/i18n/config.ts` accordingly.
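The sentence above references `app/frontend/src/i18n/config.ts` without showing it. As a rough sketch of what "create new locale files and update `config.ts`" might involve, the following assumes an i18next setup that loads each language's `translation.json` over HTTP via `i18next-http-backend` and detects the initial language with `i18next-browser-languagedetector`; the real file may be organized differently.

```typescript
// Hypothetical sketch of app/frontend/src/i18n/config.ts -- names and options are assumptions.
import i18next from "i18next";
import HttpBackend from "i18next-http-backend";
import LanguageDetector from "i18next-browser-languagedetector";
import { initReactI18next } from "react-i18next";

// To add a language: create app/frontend/src/locales/<code>/translation.json and list <code> here.
export const supportedLngs = ["en", "es", "fr", "ja"];

i18next
    .use(HttpBackend) // loads /locales/{{lng}}/translation.json at runtime
    .use(LanguageDetector) // picks the browser language on first visit
    .use(initReactI18next) // makes useTranslation()/t() available to React components
    .init({
        fallbackLng: "en",
        supportedLngs,
        backend: { loadPath: "/locales/{{lng}}/{{ns}}.json" },
        interpolation: { escapeValue: false } // React already escapes rendered output
    });

export default i18next;
```

Under those assumptions, adding a language then amounts to creating the new `translation.json` and appending its code to `supportedLngs`.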
To enable language picker, run: + +```shell +azd env set ENABLE_LANGUAGE_PICKER true +``` + ## Enabling speech input/output [📺 Watch a short video of speech input/output](https://www.youtube.com/watch?v=BwiHUjlLY_U) diff --git a/infra/main.bicep b/infra/main.bicep index c3c4adbbab..f64c80321e 100644 --- a/infra/main.bicep +++ b/infra/main.bicep @@ -160,6 +160,8 @@ param principalId string = '' @description('Use Application Insights for monitoring and performance tracing') param useApplicationInsights bool = false +@description('Enable language picker') +param enableLanguagePicker bool = false @description('Use speech recognition feature in browser') param useSpeechInputBrowser bool = false @description('Use speech synthesis in browser') @@ -296,6 +298,7 @@ module backend 'core/host/appservice.bicep' = { APPLICATIONINSIGHTS_CONNECTION_STRING: useApplicationInsights ? monitoring.outputs.applicationInsightsConnectionString : '' AZURE_SPEECH_SERVICE_ID: useSpeechOutputAzure ? speech.outputs.resourceId : '' AZURE_SPEECH_SERVICE_LOCATION: useSpeechOutputAzure ? speech.outputs.location : '' + ENABLE_LANGUAGE_PICKER: enableLanguagePicker USE_SPEECH_INPUT_BROWSER: useSpeechInputBrowser USE_SPEECH_OUTPUT_BROWSER: useSpeechOutputBrowser USE_SPEECH_OUTPUT_AZURE: useSpeechOutputAzure diff --git a/infra/main.parameters.json b/infra/main.parameters.json index 4b6d9c97ed..023cea7604 100644 --- a/infra/main.parameters.json +++ b/infra/main.parameters.json @@ -152,6 +152,9 @@ "useGPT4V": { "value": "${USE_GPT4V=false}" }, + "enableLanguagePicker": { + "value": "${ENABLE_LANGUAGE_PICKER=false}" + }, "useSpeechInputBrowser": { "value": "${USE_SPEECH_INPUT_BROWSER=false}" }, diff --git a/infra/main.test.bicep b/infra/main.test.bicep index f66317e44d..5195aaa907 100644 --- a/infra/main.test.bicep +++ b/infra/main.test.bicep @@ -29,6 +29,7 @@ module main 'main.bicep' = { useApplicationInsights: false useVectors: true useGPT4V: false + enableLanguagePicker: false useSpeechInputBrowser: false useSpeechOutputBrowser: false diff --git a/requirements-dev.txt b/requirements-dev.txt index d5933e00da..d115c1000a 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -12,4 +12,4 @@ pytest-snapshot pre-commit locust pip-tools -mypy +mypy==1.10.1 diff --git a/tests/conftest.py b/tests/conftest.py index a607c7764f..19a403fdff 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -300,6 +300,7 @@ def mock_env(monkeypatch, request): monkeypatch.setenv("AZURE_STORAGE_CONTAINER", "test-storage-container") monkeypatch.setenv("AZURE_STORAGE_RESOURCE_GROUP", "test-storage-rg") monkeypatch.setenv("AZURE_SUBSCRIPTION_ID", "test-storage-subid") + monkeypatch.setenv("ENABLE_LANGUAGE_PICKER", "true") monkeypatch.setenv("USE_SPEECH_INPUT_BROWSER", "true") monkeypatch.setenv("USE_SPEECH_OUTPUT_AZURE", "true") monkeypatch.setenv("AZURE_SEARCH_INDEX", "test-search-index") diff --git a/tests/e2e.py b/tests/e2e.py index ca4aa0612d..b176d6f6d0 100644 --- a/tests/e2e.py +++ b/tests/e2e.py @@ -48,6 +48,7 @@ def run_server(port: int): "AZURE_STORAGE_CONTAINER": "test-storage-container", "AZURE_STORAGE_RESOURCE_GROUP": "test-storage-rg", "AZURE_SUBSCRIPTION_ID": "test-storage-subid", + "ENABLE_LANGUAGE_PICKER": "false", "USE_SPEECH_INPUT_BROWSER": "false", "USE_SPEECH_OUTPUT_AZURE": "false", "AZURE_SEARCH_INDEX": "test-search-index",
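Finally, a wiring note for readers of the Ask.tsx and Chat.tsx hunks: `react-helmet-async` only applies the `<Helmet>` page titles when a `HelmetProvider` wraps the component tree, and the i18next config must be initialized before the first `useTranslation()` call. Both are presumably handled in the frontend entry point, which this diff excerpt does not show; the sketch below illustrates the idea, with the `index.tsx` file name and `App` component standing in as placeholders for the app's actual root.

```tsx
// Hypothetical sketch of the frontend entry point -- file name and root component are placeholders.
import React from "react";
import ReactDOM from "react-dom/client";
import { HelmetProvider } from "react-helmet-async";

import "./i18n/config"; // side-effect import: initialize i18next before components render
import App from "./App"; // placeholder for the app's actual root (e.g. a RouterProvider)

ReactDOM.createRoot(document.getElementById("root")!).render(
    <React.StrictMode>
        <HelmetProvider>
            <App />
        </HelmetProvider>
    </React.StrictMode>
);
```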