diff --git a/app/backend/app.py b/app/backend/app.py
index 5ae60e289a..a1f1e3a820 100644
--- a/app/backend/app.py
+++ b/app/backend/app.py
@@ -1,4 +1,5 @@
 import dataclasses
+import datetime
 import io
 import json
 import logging
@@ -22,6 +23,8 @@
 from azure.search.documents.indexes.aio import SearchIndexClient
 from azure.storage.blob.aio import ContainerClient
 from azure.storage.blob.aio import StorageStreamDownloader as BlobDownloader
+from azure.storage.blob.aio import BlobServiceClient
+from azure.storage.blob import generate_blob_sas, BlobSasPermissions
 from azure.storage.filedatalake.aio import FileSystemClient
 from azure.storage.filedatalake.aio import StorageStreamDownloader as DatalakeDownloader
 from openai import AsyncAzureOpenAI, AsyncOpenAI
@@ -72,6 +75,7 @@
     CONFIG_USER_BLOB_CONTAINER_CLIENT,
     CONFIG_USER_UPLOAD_ENABLED,
     CONFIG_VECTOR_SEARCH_ENABLED,
+    CONFIG_BLOB_SERVICE_CLIENT,
 )
 from core.authentication import AuthenticationHelper
 from decorators import authenticated, authenticated_path
@@ -232,6 +236,11 @@ async def chat_stream(auth_claims: Dict[str, Any]):
     context = request_json.get("context", {})
     context["auth_claims"] = auth_claims
     try:
+        # Pass the optional image URL from the request through to the approach
+        # via the overrides dict
+        image_url = request_json.get("image")
+        if image_url:
+            context.setdefault("overrides", {})["image_url"] = image_url
         use_gpt4v = context.get("overrides", {}).get("use_gpt4v", False)
         approach: Approach
         if use_gpt4v and CONFIG_CHAT_VISION_APPROACH in current_app.config:
@@ -346,6 +355,50 @@ async def upload(auth_claims: dict[str, Any]):
         await ingester.add_file(File(content=file_io, acls={"oids": [user_oid]}, url=file_client.url))
     return jsonify({"message": "File uploaded successfully"}), 200
 
+
+@bp.post("/upload_new")
+async def upload_new():
+    # NOTE: unlike /upload, this endpoint is unauthenticated and writes to the
+    # shared content container rather than a per-user directory.
+    request_files = await request.files
+    if "file" not in request_files:
+        # If no file was included in the request, return an error response
+        return jsonify({"message": "No file part in the request", "status": "failed"}), 400
+
+    file = request_files.getlist("file")[0]
+    blob_container_client: ContainerClient = current_app.config[CONFIG_BLOB_CONTAINER_CLIENT]
+
+    # Use a generic blob client instead of a user-specific directory
+    blob_client = blob_container_client.get_blob_client(file.filename)
+
+    file_io = file
+    file_io.name = file.filename
+    file_io = io.BufferedReader(file_io)
+
+    # Upload the file without user-specific metadata
+    await blob_client.upload_blob(file_io, overwrite=True)
+
+    start_time = datetime.datetime.now(datetime.timezone.utc)
+    expiry_time = start_time + datetime.timedelta(days=1)
+
+    blob_service_client: BlobServiceClient = current_app.config[CONFIG_BLOB_SERVICE_CLIENT]
+    # Request a user delegation key so the SAS is signed with the app's
+    # Entra ID credential rather than an account key
+    user_delegation_key = await blob_service_client.get_user_delegation_key(
+        key_start_time=start_time,
+        key_expiry_time=expiry_time,
+    )
+
+    # Generate a read-only SAS token that expires in one day
+    sas_token = generate_blob_sas(
+        account_name=blob_service_client.account_name,
+        container_name=blob_client.container_name,
+        blob_name=blob_client.blob_name,
+        user_delegation_key=user_delegation_key,
+        permission=BlobSasPermissions(read=True),
+        expiry=expiry_time,
+    )
+    sas_url = f"{blob_client.url}?{sas_token}"
+    # Log the blob name only; the SAS token itself is a bearer credential
+    current_app.logger.info("Generated SAS URL for %s", blob_client.blob_name)
+
+    return jsonify({"message": "File uploaded successfully", "file_url": sas_url}), 200
+
 
 @bp.post("/delete_uploaded")
 @authenticated
@@ -450,6 +503,8 @@ async def setup_clients():
         f"https://{AZURE_STORAGE_ACCOUNT}.blob.core.windows.net", AZURE_STORAGE_CONTAINER, credential=azure_credential
     )
+    blob_service_client = BlobServiceClient(
+        f"https://{AZURE_STORAGE_ACCOUNT}.blob.core.windows.net", credential=azure_credential
+    )
 
     # Set up authentication helper
     search_index = None
     if AZURE_USE_AUTHENTICATION:
@@ -570,6 +625,7 @@ async def setup_clients():
     current_app.config[CONFIG_OPENAI_CLIENT] = openai_client
     current_app.config[CONFIG_SEARCH_CLIENT] = search_client
     current_app.config[CONFIG_BLOB_CONTAINER_CLIENT] = blob_container_client
+    current_app.config[CONFIG_BLOB_SERVICE_CLIENT] = blob_service_client
     current_app.config[CONFIG_AUTH_CLIENT] = auth_helper
 
     current_app.config[CONFIG_GPT4V_DEPLOYED] = bool(USE_GPT4V)
diff --git a/app/backend/approaches/chatapproach.py b/app/backend/approaches/chatapproach.py
index ea1857da3b..2a01a312ee 100644
--- a/app/backend/approaches/chatapproach.py
+++ b/app/backend/approaches/chatapproach.py
@@ -9,11 +9,15 @@
 
 class ChatApproach(Approach, ABC):
 
     query_prompt_few_shots: list[ChatCompletionMessageParam] = [
-        {"role": "user", "content": "How did crypto do last year?"},
-        {"role": "assistant", "content": "Summarize Cryptocurrency Market Dynamics from last year"},
-        {"role": "user", "content": "What are my health plans?"},
-        {"role": "assistant", "content": "Show available health plans"},
+        {"role": "user", "content": "What's the best way to control weeds in my cornfield?"},
+        {"role": "assistant", "content": "Recommendations for weed control in cornfields"},
+        {"role": "user", "content": "How can I improve soil fertility for better crop yields?"},
+        {"role": "assistant", "content": "Strategies to enhance soil fertility and increase crop yields"},
+        {"role": "user", "content": "What's the most effective method for pest management in soybean crops?"},
+        {"role": "assistant", "content": "Integrated pest management techniques for soybean crops"},
+        {"role": "user", "content": "Which fertilizer should I use for my wheat fields?"},
+        {"role": "assistant", "content": "Recommendations for fertilizers suitable for wheat cultivation"},
     ]
 
     NO_RESPONSE = "0"
diff --git a/app/backend/approaches/chatreadretrieveread.py b/app/backend/approaches/chatreadretrieveread.py
index e2da9eb7f6..4fd4882afe 100644
--- a/app/backend/approaches/chatreadretrieveread.py
+++ b/app/backend/approaches/chatreadretrieveread.py
@@ -55,10 +55,13 @@ def __init__(
 
     @property
     def system_message_chat_conversation(self):
-        return """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers.
-        Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question.
+        return """Assistant helps agronomists answer questions from growers about their fields, agrochemicals, pest control, nutrients, fertilizers, and related topics.
+        The agronomist's job is to answer from this knowledge base of documents. Be brief in your answers.
+        Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question.
         For tabular information return it as an html table. Do not return markdown format. If the question is not in English, answer in the language used in the question.
-        Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, for example [info1.txt]. Don't combine sources, list each source separately, for example [info1.txt][info2.pdf].
+        Each source has a name followed by a colon and the actual information; always include the source name for each fact you use in the response.
+        Use square brackets to reference the source, for example [info1.txt].
+        Don't combine sources, list each source separately, for example [info1.txt][info2.pdf].
         {follow_up_questions_prompt}
         {injected_prompt}
         """
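For context, the `{injected_prompt}` and `{follow_up_questions_prompt}` placeholders at the end of this template are filled in by the base `ChatApproach` before the system message is sent. A minimal, self-contained sketch of that substitution, assuming the base class's `>>>` override convention:

```python
from typing import Optional

# Stand-in for the system_message_chat_conversation property above
TEMPLATE = "Assistant helps agronomists...\n{follow_up_questions_prompt}\n{injected_prompt}"


def get_system_prompt(override_prompt: Optional[str], follow_up_questions_prompt: str) -> str:
    if override_prompt is None:
        # No override: default template with nothing injected
        return TEMPLATE.format(injected_prompt="", follow_up_questions_prompt=follow_up_questions_prompt)
    if override_prompt.startswith(">>>"):
        # ">>>" prefix: inject extra instructions into the default template
        return TEMPLATE.format(
            injected_prompt=override_prompt[3:] + "\n",
            follow_up_questions_prompt=follow_up_questions_prompt,
        )
    # Anything else replaces the template wholesale
    return override_prompt.format(follow_up_questions_prompt=follow_up_questions_prompt)


print(get_system_prompt(None, "Generate three follow-up questions."))
```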
diff --git a/app/backend/approaches/chatreadretrievereadvision.py b/app/backend/approaches/chatreadretrievereadvision.py
index 4e587e5803..a3b90dc62e 100644
--- a/app/backend/approaches/chatreadretrievereadvision.py
+++ b/app/backend/approaches/chatreadretrievereadvision.py
@@ -1,8 +1,11 @@
+import base64
 from typing import Any, Awaitable, Callable, Coroutine, Optional, Union
 
+import requests
+
 from azure.search.documents.aio import SearchClient
 from azure.storage.blob.aio import ContainerClient
 from openai import AsyncOpenAI, AsyncStream
 from openai.types.chat import (
     ChatCompletion,
     ChatCompletionChunk,
@@ -68,7 +70,8 @@ def __init__(
     @property
     def system_message_chat_conversation(self):
         return """
-        You are an intelligent assistant helping analyze the Annual Financial Report of Contoso Ltd., The documents contain text, graphs, tables and images.
+        Assistant helps agronomists answer questions from growers about their fields, agrochemicals, pest control, nutrients, fertilizers, and related topics.
+        The agronomist's job is to answer from this knowledge base of documents. The documents contain text, graphs, tables and images.
         Each image source has the file name in the top left corner of the image with coordinates (10,10) pixels and is in the format SourceFileName:<file_name>
         Each text source starts in a new line and has the file name followed by colon and the actual information
         Always include the source name from the image or text for each fact you use in the response in the format: [filename]
@@ -81,6 +84,54 @@ def system_message_chat_conversation(self):
         {follow_up_questions_prompt}
         {injected_prompt}
         """
+
+    async def analyze_image(self, image_url: str) -> str:
+        """Fetch an image and ask the GPT-4V deployment to assess crop health."""
+        # NOTE: requests is synchronous and briefly blocks the event loop;
+        # acceptable for a prototype, but an async HTTP client would be better.
+        response = requests.get(image_url)
+        response.raise_for_status()
+        encoded_image = base64.b64encode(response.content).decode("utf-8")
+
+        chat_completion = await self.openai_client.chat.completions.create(
+            model=self.gpt4v_deployment,
+            messages=[
+                {
+                    "role": "system",
+                    "content": "You are an expert agronomist specializing in plant pathology and crop health. Your task is to analyze images of agricultural crops, focusing on corn, soybean, wheat, and other common global crops. Provide precise identifications of plant health issues, diseases, pest damage, nutrient deficiencies, or environmental stress.",
+                },
+                {
+                    "role": "user",
+                    "content": [
+                        {
+                            "type": "text",
+                            "text": "Analyze the provided image and respond with the following information:\n1. Crop Identification: Specify the crop (e.g., corn, soybean, wheat).\n2. Plant Part: Identify the part of the plant shown (e.g., leaf, stem, root, fruit).\n3. Health Status: State whether the plant appears healthy or shows signs of issues.\n4. If issues are present, provide:\n   a) Primary Condition: The most prominent disease, pest, or deficiency.\n   b) Secondary Conditions: Any other noticeable issues.\n   c) Severity: Estimate the severity as mild, moderate, or severe.\n5. Key Visual Indicators: List 2-3 key visual cues that led to your diagnosis.\n\nRespond in a structured format suitable for database querying. If the image is unclear or not plant-related, state 'Image unclear or not plant-related'. If you cannot confidently identify an issue, state 'Unable to determine specific condition'.",
+                        },
+                        {
+                            "type": "image_url",
+                            "image_url": {"url": "data:image/jpeg;base64," + encoded_image},
+                        },
+                    ],
+                },
+            ],
+            max_tokens=2000,
+        )
+        return chat_completion.choices[0].message.content
 
     async def run_until_final_call(
         self,
@@ -108,8 +159,16 @@
             raise ValueError("The most recent message content must be a string.")
 
         past_messages: list[ChatCompletionMessageParam] = messages[:-1]
 
         # STEP 1: Generate an optimized keyword search query based on the chat history and the last question
-        user_query_request = "Generate search query for: " + original_user_query
+        # If the request included an image, describe it with GPT-4V first and
+        # base the search query on that analysis; otherwise use the user's text.
+        image_url = overrides.get("image_url", "")
+        image_analysis = await self.analyze_image(image_url) if image_url else ""
+        query_source = image_analysis if image_analysis else original_user_query
+        user_query_request = f"Generate search query for: {query_source}"
 
         query_response_token_limit = 100
         query_model = self.chatgpt_model
@@ -132,7 +191,7 @@
             seed=seed,
         )
 
-        query_text = self.get_search_query(chat_completion, original_user_query)
+        query_text = self.get_search_query(chat_completion, query_source)
 
         # STEP 2: Retrieve relevant documents from the search index with the GPT optimized query
@@ -170,7 +229,7 @@
             self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else "",
         )
 
-        user_content: list[ChatCompletionContentPartParam] = [{"text": original_user_query, "type": "text"}]
+        user_content: list[ChatCompletionContentPartParam] = [{"text": query_source, "type": "text"}]
         image_list: list[ChatCompletionContentPartImageParam] = []
 
         if send_text_to_gptvision:
@@ -181,6 +240,8 @@
                 if url:
                     image_list.append({"image_url": url, "type": "image_url"})
+            if image_url:
+                # Also pass the grower's uploaded image to the final model call
+                image_list.append({"image_url": image_url, "type": "image_url"})
             user_content.extend(image_list)
 
         response_token_limit = 1024
         messages = build_messages(
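Because `analyze_image` mixes an HTTP fetch with a model call, a unit test that stubs both helps keep regressions out. A sketch assuming pytest with pytest-asyncio is available; the `__new__` construction is just a shortcut to avoid running `__init__`:

```python
from unittest.mock import AsyncMock, MagicMock, patch

import pytest

from approaches.chatreadretrievereadvision import ChatReadRetrieveReadVisionApproach


@pytest.mark.asyncio
async def test_analyze_image_returns_model_text():
    # Build an instance without running __init__, then stub its dependencies
    approach = ChatReadRetrieveReadVisionApproach.__new__(ChatReadRetrieveReadVisionApproach)
    approach.gpt4v_deployment = "gpt-4v"

    completion = MagicMock()
    completion.choices[0].message.content = "Corn leaf, bacterial leaf streak, moderate"
    approach.openai_client = MagicMock()
    approach.openai_client.chat.completions.create = AsyncMock(return_value=completion)

    # Stub the image download so no network access is needed
    fake_http = MagicMock(status_code=200, content=b"\x89PNG\r\n")
    with patch("approaches.chatreadretrievereadvision.requests.get", return_value=fake_http):
        result = await approach.analyze_image("https://example.com/leaf.png")

    assert result == "Corn leaf, bacterial leaf streak, moderate"
```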
" + "You are an intelligent assistant helping agronomists who will ask questions on behalf of farmers with their " + + "agriculture products like herbicies, fungicides, pesticides etc questions from a product book. " + "Use 'you' to refer to the individual asking the questions even if they ask with 'I'. " + "Answer the following question using only the data provided in the sources below. " + "For tabular information return it as an html table. Do not return markdown format. " diff --git a/app/backend/approaches/retrievethenreadvision.py b/app/backend/approaches/retrievethenreadvision.py index 3c09a0bd83..20949b6719 100644 --- a/app/backend/approaches/retrievethenreadvision.py +++ b/app/backend/approaches/retrievethenreadvision.py @@ -23,7 +23,8 @@ class RetrieveThenReadVisionApproach(Approach): """ system_chat_template_gpt4v = ( - "You are an intelligent assistant helping analyze the Annual Financial Report of Contoso Ltd., The documents contain text, graphs, tables and images. " + "You are an intelligent assistant helping agronomists who will ask questions on behalf of farmers with their " + + "agriculture products like herbicies, fungicides, pesticides etc questions from a product book., The documents contain text, graphs, tables and images. " + "Each image source has the file name in the top left corner of the image with coordinates (10,10) pixels and is in the format SourceFileName: " + "Each text source starts in a new line and has the file name followed by colon and the actual information " + "Always include the source name from the image or text for each fact you use in the response in the format: [filename] " diff --git a/app/backend/config.py b/app/backend/config.py index da076bad1d..85caed9d6b 100644 --- a/app/backend/config.py +++ b/app/backend/config.py @@ -7,6 +7,7 @@ CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client" CONFIG_USER_UPLOAD_ENABLED = "user_upload_enabled" CONFIG_USER_BLOB_CONTAINER_CLIENT = "user_blob_container_client" +CONFIG_BLOB_SERVICE_CLIENT = "config_blob_service_client" CONFIG_AUTH_CLIENT = "auth_client" CONFIG_GPT4V_DEPLOYED = "gpt4v_deployed" CONFIG_SEMANTIC_RANKER_DEPLOYED = "semantic_ranker_deployed" diff --git a/app/backend/requirements.in b/app/backend/requirements.in index 9a7e1b4c6d..198150d268 100644 --- a/app/backend/requirements.in +++ b/app/backend/requirements.in @@ -29,3 +29,4 @@ beautifulsoup4 types-beautifulsoup4 msgraph-sdk==1.1.0 openai-messages-token-helper +requests_toolbelt \ No newline at end of file diff --git a/app/frontend/index.html b/app/frontend/index.html index 30205db90f..a606c6fda7 100644 --- a/app/frontend/index.html +++ b/app/frontend/index.html @@ -4,7 +4,7 @@ - Azure OpenAI + AI Search + AgriVisor - Agronomy Advisor
diff --git a/app/frontend/src/api/models.ts b/app/frontend/src/api/models.ts
index 3ac5590bf5..84e62f817e 100644
--- a/app/frontend/src/api/models.ts
+++ b/app/frontend/src/api/models.ts
@@ -77,6 +77,7 @@ export type ChatAppRequest = {
     messages: ResponseMessage[];
     context?: ChatAppRequestContext;
     session_state: any;
+    image?: string; // SAS URL of an uploaded image, if one accompanies the question
 };
 
 export type Config = {
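On the wire, the `image` field just added to `ChatAppRequest` serializes into the JSON body that `chat_stream` reads; a sketch of a full payload (all values illustrative):

```python
payload = {
    "messages": [{"role": "user", "content": "What is causing these lesions on my corn?"}],
    "context": {"overrides": {"use_gpt4v": True}},
    # AI Chat Protocol: echo back whatever session state the server returned last
    "session_state": None,
    # SAS URL returned by /upload_new; omit the key when no image is attached
    "image": "https://<account>.blob.core.windows.net/content/corn_leaf.jpg?<sas-token>",
}
# POST this as JSON to /chat/stream to receive an NDJSON answer stream
```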

diff --git a/app/frontend/src/components/Answer/AnswerLoading.tsx b/app/frontend/src/components/Answer/AnswerLoading.tsx
index 6df192a636..1c75222bc4 100644
--- a/app/frontend/src/components/Answer/AnswerLoading.tsx
+++ b/app/frontend/src/components/Answer/AnswerLoading.tsx
@@ -16,7 +16,7 @@ export const AnswerLoading = () => {
             <Stack className={styles.answerContainer} verticalAlign="space-between">
                 <AnswerIcon />
                 <Stack.Item grow>
                     <p className={styles.answerText}>
-                        Generating answer
+                        Pondering deeply about your question
                         <span className={styles.loadingdots} />
                     </p>
                 </Stack.Item>
diff --git a/app/frontend/src/components/Example/ExampleList.tsx b/app/frontend/src/components/Example/ExampleList.tsx
index 49c35cbd2d..ca21cbc8c3 100644
--- a/app/frontend/src/components/Example/ExampleList.tsx
+++ b/app/frontend/src/components/Example/ExampleList.tsx
@@ -3,15 +3,15 @@
 import { Example } from "./Example";
 import styles from "./Example.module.css";
 
 const DEFAULT_EXAMPLES: string[] = [
-    "What is included in my Northwind Health Plus plan that is not in standard?",
-    "What happens in a performance review?",
-    "What does a Product Manager do?"
+    "How to improve the quality of farm soil specifically for cultivating corn?",
+    "What are the benefits of using zinc seed treatments for corn cultivation?",
+    "What factors should be considered for optimal soybean cultivation?"
 ];
 
 const GPT4V_EXAMPLES: string[] = [
-    "Compare the impact of interest rates and GDP in financial markets.",
-    "What is the expected trend for the S&P 500 index over the next five years? Compare it to the past S&P 500 performance",
-    "Can you identify any correlation between oil prices and stock market trends?"
+    "How to improve the quality of farm soil specifically for cultivating corn?",
+    "What are the benefits of using zinc seed treatments for corn cultivation?",
+    "What factors should be considered for optimal soybean cultivation?"
 ];
 
 interface Props {
diff --git a/app/frontend/src/components/QuestionInput/QuestionInput.tsx b/app/frontend/src/components/QuestionInput/QuestionInput.tsx
index c253717fba..1a5fb8927b 100644
--- a/app/frontend/src/components/QuestionInput/QuestionInput.tsx
+++ b/app/frontend/src/components/QuestionInput/QuestionInput.tsx
@@ -1,7 +1,7 @@
-import { useState, useEffect, useContext } from "react";
+import { useState, useEffect, useContext, useRef } from "react";
 import { Stack, TextField } from "@fluentui/react";
 import { Button, Tooltip } from "@fluentui/react-components";
-import { Send28Filled } from "@fluentui/react-icons";
+import { Send28Filled, Image24Regular } from "@fluentui/react-icons";
 
 import styles from "./QuestionInput.module.css";
 import { SpeechInput } from "./SpeechInput";
@@ -9,7 +9,7 @@
 import { LoginContext } from "../../loginContext";
 import { requireLogin } from "../../authConfig";
 
 interface Props {
-    onSend: (question: string) => void;
+    onSend: (question: string, image?: string) => void; // optional URL of an uploaded image that accompanies the question
     disabled: boolean;
     initQuestion?: string;
     placeholder?: string;
@@ -21,6 +21,9 @@ export const QuestionInput = ({ onSend, disabled, placeholder, clearOnSend, initQuestion, showSpeechInput }: Props) => {
     const [question, setQuestion] = useState<string>("");
     const { loggedIn } = useContext(LoginContext);
     const [isComposing, setIsComposing] = useState(false);
+    const [selectedFile, setSelectedFile] = useState<File | null>(null); // New state for the attached file
+    const fileInputRef = useRef<HTMLInputElement>(null);
+    const [uploadedFileUrl, setUploadedFileUrl] = useState<string | undefined>(undefined);
 
     useEffect(() => {
         initQuestion && setQuestion(initQuestion);
@@ -31,10 +34,13 @@
             return;
         }
 
-        onSend(question);
+        onSend(question, uploadedFileUrl); // Pass the uploaded image URL, or undefined if none
 
         if (clearOnSend) {
             setQuestion("");
+            setSelectedFile(null); // Clear the attached file on send
+            setUploadedFileUrl(undefined);
         }
     };
@@ -69,6 +75,41 @@
         placeholder = "Please login to continue...";
     }
 
+    const handleImageUpload = () => {
+        fileInputRef.current?.click();
+    };
+
+    const onFileChange = async (event: React.ChangeEvent<HTMLInputElement>) => {
+        const file = event.target.files?.[0];
+        if (file) {
+            setSelectedFile(file);
+            const formData = new FormData();
+            formData.append("file", file);
+
+            try {
+                const response = await fetch("/upload_new", {
+                    method: "POST",
+                    body: formData
+                });
+
+                if (response.ok) {
+                    const result = await response.json();
+                    // Note: state updates are asynchronous, so uploadedFileUrl is
+                    // not readable until the next render; log the response instead
+                    console.log("File uploaded successfully:", result.file_url);
+                    setUploadedFileUrl(result.file_url);
+                } else {
+                    console.error("File upload failed");
+                    setUploadedFileUrl(undefined);
+                }
+            } catch (error) {
+                console.error("Error uploading file:", error);
+                setUploadedFileUrl(undefined);
+            }
+        }
+    };
+
     return (
         <Stack horizontal className={styles.questionInputContainer}>
+            <input
+                type="file"
+                ref={fileInputRef}
+                style={{ display: "none" }}
+                accept="image/*"
+                onChange={onFileChange}
+            /> {/* New file input */}
+            <Button size="large" icon={<Image24Regular />} disabled={disabled} onClick={handleImageUpload} aria-label="Attach an image" />
diff --git a/app/frontend/src/pages/chat/Chat.tsx b/app/frontend/src/pages/chat/Chat.tsx
index 99725263ab..ab95618f64 100644
--- a/app/frontend/src/pages/chat/Chat.tsx
+++ b/app/frontend/src/pages/chat/Chat.tsx
@@ -140,7 +140,7 @@ const Chat = () => {
     const client = useLogin ? useMsal().instance : undefined;
     const { loggedIn } = useContext(LoginContext);
 
-    const makeApiRequest = async (question: string) => {
+    const makeApiRequest = async (question: string, image?: string) => {
         lastQuestionRef.current = question;
         error && setError(undefined);
@@ -179,9 +179,10 @@
                 }
             },
             // AI Chat Protocol: Client must pass on any session state received from the server
-            session_state: answers.length ? answers[answers.length - 1][1].session_state : null
+            session_state: answers.length ? answers[answers.length - 1][1].session_state : null,
+            image: image
         };
 
         const response = await chatApi(request, shouldStream, token);
         if (!response.body) {
             throw Error("No response body");
         }
@@ -426,9 +427,9 @@ const Chat = () => {
                         <QuestionInput
                             clearOnSend
                             disabled={isLoading}
-                            onSend={question => makeApiRequest(question)}
+                            onSend={(question, image) => makeApiRequest(question, image)}
                             showSpeechInput={showSpeechInput}
                         />
diff --git a/app/frontend/src/pages/layout/Layout.module.css b/app/frontend/src/pages/layout/Layout.module.css
index baa8951d56..0ac7f8f6dc 100644
--- a/app/frontend/src/pages/layout/Layout.module.css
+++ b/app/frontend/src/pages/layout/Layout.module.css
@@ -5,7 +5,7 @@
 }
 
 .header {
-    background-color: #222222;
+    background-color: #16a34a;
     color: #f2f2f2;
 }
 
@@ -39,7 +39,7 @@
     z-index: 100;
     display: none;
     flex-direction: column;
-    background-color: #222222;
+    background-color: #16a34a;
     position: absolute;
     top: 2.7rem;
     right: 0;
diff --git a/app/frontend/src/pages/layout/Layout.tsx b/app/frontend/src/pages/layout/Layout.tsx
index 94d96a127b..7e065a17ae 100644
--- a/app/frontend/src/pages/layout/Layout.tsx
+++ b/app/frontend/src/pages/layout/Layout.tsx
@@ -38,7 +38,7 @@ const Layout = () => {
             <header className={styles.header} role={"banner"}>
                 <div className={styles.headerContainer}>
                     <Link to="/" className={styles.headerTitleContainer}>
-                        <h3 className={styles.headerTitle}>Azure OpenAI + AI Search</h3>
+                        <h3 className={styles.headerTitle}>AgriVisor - Agronomy Advisor</h3>
                     </Link>