Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
58 changes: 57 additions & 1 deletion app/backend/app.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import dataclasses
import datetime
import io
import json
import logging
Expand All @@ -22,6 +23,8 @@
from azure.search.documents.indexes.aio import SearchIndexClient
from azure.storage.blob.aio import ContainerClient
from azure.storage.blob.aio import StorageStreamDownloader as BlobDownloader
from azure.storage.blob.aio import BlobServiceClient
from azure.storage.blob import generate_blob_sas, BlobSasPermissions
from azure.storage.filedatalake.aio import FileSystemClient
from azure.storage.filedatalake.aio import StorageStreamDownloader as DatalakeDownloader
from openai import AsyncAzureOpenAI, AsyncOpenAI
Expand Down Expand Up @@ -72,6 +75,7 @@
CONFIG_USER_BLOB_CONTAINER_CLIENT,
CONFIG_USER_UPLOAD_ENABLED,
CONFIG_VECTOR_SEARCH_ENABLED,
CONFIG_BLOB_SERVICE_CLIENT
)
from core.authentication import AuthenticationHelper
from decorators import authenticated, authenticated_path
Expand Down Expand Up @@ -232,6 +236,11 @@ async def chat_stream(auth_claims: Dict[str, Any]):
context = request_json.get("context", {})
context["auth_claims"] = auth_claims
try:
# Extract the image URL from the request_json
image_url = request_json.get("image")
if image_url:
#context["image_url"] = image_url
context.setdefault("overrides", {})["image_url"] = image_url
use_gpt4v = context.get("overrides", {}).get("use_gpt4v", False)
approach: Approach
if use_gpt4v and CONFIG_CHAT_VISION_APPROACH in current_app.config:
Expand All @@ -242,7 +251,7 @@ async def chat_stream(auth_claims: Dict[str, Any]):
result = await approach.run_stream(
request_json["messages"],
context=context,
session_state=request_json.get("session_state"),
session_state=request_json.get("session_state")
)
response = await make_response(format_as_ndjson(result))
response.timeout = None # type: ignore
Expand Down Expand Up @@ -346,6 +355,50 @@ async def upload(auth_claims: dict[str, Any]):
await ingester.add_file(File(content=file_io, acls={"oids": [user_oid]}, url=file_client.url))
return jsonify({"message": "File uploaded successfully"}), 200

@bp.post("/upload_new")
async def upload_new():
request_files = await request.files
if "file" not in request_files:
# If no files were included in the request, return an error response
return jsonify({"message": "No file part in the request", "status": "failed"}), 400

file = request_files.getlist("file")[0]
blob_container_client: ContainerClient = current_app.config[CONFIG_BLOB_CONTAINER_CLIENT]

# Use a generic blob client instead of user-specific directory
blob_client = blob_container_client.get_blob_client(file.filename)

file_io = file
file_io.name = file.filename
file_io = io.BufferedReader(file_io)

# Upload the file without user-specific metadata
await blob_client.upload_blob(file_io, overwrite=True)
file_io.seek(0)

start_time = datetime.datetime.now(datetime.timezone.utc)
expiry_time = start_time + datetime.timedelta(days=1)

blob_service_client: BlobServiceClient = current_app.config[CONFIG_BLOB_SERVICE_CLIENT]
# Generate User Delegation Key
user_delegation_key = await blob_service_client.get_user_delegation_key(
key_start_time=start_time,
key_expiry_time=expiry_time
)

# Generate SAS URL
sas_token = generate_blob_sas(
account_name=blob_service_client.account_name,
container_name=blob_client.container_name,
blob_name=blob_client.blob_name,
user_delegation_key=user_delegation_key,
permission=BlobSasPermissions(read=True),
expiry=expiry_time
)
sas_url = f"{blob_client.url}?{sas_token}"
print("sas url is ", sas_url)

return jsonify({"message": "File uploaded successfully", "file_url": sas_url}), 200

@bp.post("/delete_uploaded")
@authenticated
Expand Down Expand Up @@ -450,6 +503,8 @@ async def setup_clients():
f"https://{AZURE_STORAGE_ACCOUNT}.blob.core.windows.net", AZURE_STORAGE_CONTAINER, credential=azure_credential
)

blob_service_client = BlobServiceClient(f"https://{AZURE_STORAGE_ACCOUNT}.blob.core.windows.net", credential=azure_credential)

# Set up authentication helper
search_index = None
if AZURE_USE_AUTHENTICATION:
Expand Down Expand Up @@ -570,6 +625,7 @@ async def setup_clients():
current_app.config[CONFIG_OPENAI_CLIENT] = openai_client
current_app.config[CONFIG_SEARCH_CLIENT] = search_client
current_app.config[CONFIG_BLOB_CONTAINER_CLIENT] = blob_container_client
current_app.config[CONFIG_BLOB_SERVICE_CLIENT] = blob_service_client
current_app.config[CONFIG_AUTH_CLIENT] = auth_helper

current_app.config[CONFIG_GPT4V_DEPLOYED] = bool(USE_GPT4V)
Expand Down
14 changes: 9 additions & 5 deletions app/backend/approaches/chatapproach.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,11 +9,15 @@


class ChatApproach(Approach, ABC):
query_prompt_few_shots: list[ChatCompletionMessageParam] = [
{"role": "user", "content": "How did crypto do last year?"},
{"role": "assistant", "content": "Summarize Cryptocurrency Market Dynamics from last year"},
{"role": "user", "content": "What are my health plans?"},
{"role": "assistant", "content": "Show available health plans"},
query_prompt_few_shots = [
{"role": "user", "content": "What's the best way to control weeds in my cornfield?"},
{"role": "assistant", "content": "Recommendations for weed control in cornfields"},
{"role": "user", "content": "How can I improve soil fertility for better crop yields?"},
{"role": "assistant", "content": "Strategies to enhance soil fertility and increase crop yields"},
{"role": "user", "content": "What's the most effective method for pest management in soybean crops?"},
{"role": "assistant", "content": "Integrated pest management techniques for soybean crops"},
{"role": "user", "content": "Which fertilizer should I use for my wheat fields?"},
{"role": "assistant", "content": "Recommendations for fertilizers suitable for wheat cultivation"},
]
NO_RESPONSE = "0"

Expand Down
9 changes: 6 additions & 3 deletions app/backend/approaches/chatreadretrieveread.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,10 +55,13 @@ def __init__(

@property
def system_message_chat_conversation(self):
return """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers.
Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question.
return """Assistant helps the Agronomist who will get queries from growers about various questions that growers would have about their fields,
agro-chemicals, pest control, nutrients, fertilizers etc. The job of agronomist is to use this knowledge base of documents and answer based on it.
Be brief in your answers. Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question.
For tabular information return it as an html table. Do not return markdown format. If the question is not in English, answer in the language used in the question.
Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, for example [info1.txt]. Don't combine sources, list each source separately, for example [info1.txt][info2.pdf].
Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response.
Use square brackets to reference the source, for example [info1.txt].
Don't combine sources, list each source separately, for example [info1.txt][info2.pdf].
{follow_up_questions_prompt}
{injected_prompt}
"""
Expand Down
73 changes: 67 additions & 6 deletions app/backend/approaches/chatreadretrievereadvision.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,11 @@
import json
from typing import Any, Awaitable, Callable, Coroutine, Optional, Union

import requests

from azure.search.documents.aio import SearchClient
from azure.storage.blob.aio import ContainerClient
from openai import AsyncOpenAI, AsyncStream
from openai import AsyncOpenAI, AsyncStream, AsyncAzureOpenAI
from openai.types.chat import (
ChatCompletion,
ChatCompletionChunk,
Expand All @@ -17,7 +20,6 @@
from core.authentication import AuthenticationHelper
from core.imageshelper import fetch_image


class ChatReadRetrieveReadVisionApproach(ChatApproach):
"""
A multi-step approach that first uses OpenAI to turn the user's question into a search query,
Expand Down Expand Up @@ -68,7 +70,8 @@ def __init__(
@property
def system_message_chat_conversation(self):
return """
You are an intelligent assistant helping analyze the Annual Financial Report of Contoso Ltd., The documents contain text, graphs, tables and images.
Assistant helps the Agronomist who will get queries from growers about various questions that growers would have about their fields,
agro-chemicals, pest control, nutrients, fertilizers etc. The job of the agronomist is to use this knowledge base of documents and answer based on it. The documents contain text, graphs, tables and images.
Each image source has the file name in the top left corner of the image with coordinates (10,10) pixels and is in the format SourceFileName:<file_name>
Each text source starts in a new line and has the file name followed by colon and the actual information
Always include the source name from the image or text for each fact you use in the response in the format: [filename]
Expand All @@ -81,6 +84,54 @@ def system_message_chat_conversation(self):
{follow_up_questions_prompt}
{injected_prompt}
"""
async def analyze_image(self, image_url: str) -> str:
import requests
from io import BytesIO
from PIL import Image
import base64
# Step 1: Fetch the image from the URL
response = requests.get(image_url)
encoded_image = ""
# Step 2: Ensure the request was successful
if response.status_code == 200:
# Step 3: Load the image into memory
image_data = BytesIO(response.content)

# Step 4: Convert image to base64
# If you need the image in Base64 format
encoded_image = base64.b64encode(response.content).decode('utf-8')

#print("Base64 Encoded Image:")
#print(encoded_image)
else:
print(f"Failed to retrieve image. Status code: {response.status_code}")

"""Analyze the image using Azure Computer Vision API for plant diseases."""
deployment_name = self.gpt4v_deployment
response = await self.openai_client.chat.completions.create(
model=deployment_name,
messages=[
{ "role": "system", "content": "You are an expert agronomist specializing in plant pathology and crop health. Your task is to analyze images of agricultural crops, focusing on corn, soybean, wheat, and other common global crops. Provide precise identifications of plant health issues, diseases, pest damage, nutrient deficiencies, or environmental stress." },
{ "role": "user", "content": [
{
"type": "text",
"text": "Analyze the provided image and respond with the following information:\n1. Crop Identification: Specify the crop (e.g., corn, soybean, wheat).\n2. Plant Part: Identify the part of the plant shown (e.g., leaf, stem, root, fruit).\n3. Health Status: State whether the plant appears healthy or shows signs of issues.\n4. If issues are present, provide:\n a) Primary Condition: The most prominent disease, pest, or deficiency.\n b) Secondary Conditions: Any other noticeable issues.\n c) Severity: Estimate the severity as mild, moderate, or severe.\n5. Key Visual Indicators: List 2-3 key visual cues that led to your diagnosis.\n\nRespond in a structured format suitable for database querying. If the image is unclear or not plant-related, state 'Image unclear or not plant-related'. If you cannot confidently identify an issue, state 'Unable to determine specific condition'."
},
{
"type": "image_url",
"image_url": {
"url": "data:image/jpeg;base64," + encoded_image
}
}
] }
],
max_tokens=2000
)
# Access the response
assistant_response = response.choices[0].message.content
print(assistant_response)
return assistant_response


async def run_until_final_call(
self,
Expand Down Expand Up @@ -108,8 +159,16 @@ async def run_until_final_call(
raise ValueError("The most recent message content must be a string.")
past_messages: list[ChatCompletionMessageParam] = messages[:-1]

print("Override is ", overrides)
# STEP 1: Generate an optimized keyword search query based on the chat history and the last question
user_query_request = "Generate search query for: " + original_user_query
#user_query_request = "Generate search query for: " + original_user_query
#image_analysis = await self.analyze_image("https://stv3od7n6qiv4m2.blob.core.windows.net/content/corn-BLS-irregular-lesions.jpg?sp=r&st=2024-09-10T14:45:19Z&se=2024-09-10T22:45:19Z&skoid=44d37ec7-fae7-4a3b-9f41-b8f3539805da&sktid=c6c1e9da-5d0c-4f8f-9a02-3c67206efbd6&skt=2024-09-10T14:45:19Z&ske=2024-09-10T22:45:19Z&sks=b&skv=2022-11-02&spr=https&sv=2022-11-02&sr=b&sig=wn6yZRtzLkEPun65ed7BPxzU3bhkBOY9KJ5j%2F0B8aEE%3D")
image_received = overrides.get("image_url", "")
print("Image received in vision method is ", image_received)
image_analysis = await self.analyze_image(image_received)
#image_analysis = await self.analyze_image("https://stv3od7n6qiv4m2.blob.core.windows.net/content/beacterial-leaf-streak.jfif?sp=r&st=2024-09-10T17:27:30Z&se=2024-09-11T01:27:30Z&skoid=44d37ec7-fae7-4a3b-9f41-b8f3539805da&sktid=c6c1e9da-5d0c-4f8f-9a02-3c67206efbd6&skt=2024-09-10T17:27:30Z&ske=2024-09-11T01:27:30Z&sks=b&skv=2022-11-02&spr=https&sv=2022-11-02&sr=b&sig=7LG3gwgQ9vmu3pVt8Vb1Exh1EGVMkAR3QC%2Fc2BLH8Co%3D")
user_query_request = f"Generate search query for: {image_analysis}"
print(user_query_request)

query_response_token_limit = 100
query_model = self.chatgpt_model
Expand All @@ -132,7 +191,7 @@ async def run_until_final_call(
seed=seed,
)

query_text = self.get_search_query(chat_completion, original_user_query)
query_text = self.get_search_query(chat_completion, image_analysis)

# STEP 2: Retrieve relevant documents from the search index with the GPT optimized query

Expand Down Expand Up @@ -170,7 +229,7 @@ async def run_until_final_call(
self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else "",
)

user_content: list[ChatCompletionContentPartParam] = [{"text": original_user_query, "type": "text"}]
user_content: list[ChatCompletionContentPartParam] = [{"text": image_analysis, "type": "text"}]
image_list: list[ChatCompletionContentPartImageParam] = []

if send_text_to_gptvision:
Expand All @@ -181,6 +240,8 @@ async def run_until_final_call(
if url:
image_list.append({"image_url": url, "type": "image_url"})
user_content.extend(image_list)
if image_analysis:
image_list.append({"image_url": image_analysis, "type": "image_url"})

response_token_limit = 1024
messages = build_messages(
Expand Down
3 changes: 2 additions & 1 deletion app/backend/approaches/retrievethenread.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,8 @@ class RetrieveThenReadApproach(Approach):
"""

system_chat_template = (
"You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. "
"You are an intelligent assistant helping agronomists who will ask questions on behalf of farmers with their "
+ "agriculture products like herbicies, fungicides, pesticides etc questions from a product book. "
+ "Use 'you' to refer to the individual asking the questions even if they ask with 'I'. "
+ "Answer the following question using only the data provided in the sources below. "
+ "For tabular information return it as an html table. Do not return markdown format. "
Expand Down
3 changes: 2 additions & 1 deletion app/backend/approaches/retrievethenreadvision.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,8 @@ class RetrieveThenReadVisionApproach(Approach):
"""

system_chat_template_gpt4v = (
"You are an intelligent assistant helping analyze the Annual Financial Report of Contoso Ltd., The documents contain text, graphs, tables and images. "
"You are an intelligent assistant helping agronomists who will ask questions on behalf of farmers with their "
+ "agriculture products like herbicies, fungicides, pesticides etc questions from a product book., The documents contain text, graphs, tables and images. "
+ "Each image source has the file name in the top left corner of the image with coordinates (10,10) pixels and is in the format SourceFileName:<file_name> "
+ "Each text source starts in a new line and has the file name followed by colon and the actual information "
+ "Always include the source name from the image or text for each fact you use in the response in the format: [filename] "
Expand Down
1 change: 1 addition & 0 deletions app/backend/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
CONFIG_BLOB_CONTAINER_CLIENT = "blob_container_client"
CONFIG_USER_UPLOAD_ENABLED = "user_upload_enabled"
CONFIG_USER_BLOB_CONTAINER_CLIENT = "user_blob_container_client"
CONFIG_BLOB_SERVICE_CLIENT = "config_blob_service_client"
CONFIG_AUTH_CLIENT = "auth_client"
CONFIG_GPT4V_DEPLOYED = "gpt4v_deployed"
CONFIG_SEMANTIC_RANKER_DEPLOYED = "semantic_ranker_deployed"
Expand Down
1 change: 1 addition & 0 deletions app/backend/requirements.in
Original file line number Diff line number Diff line change
Expand Up @@ -29,3 +29,4 @@ beautifulsoup4
types-beautifulsoup4
msgraph-sdk==1.1.0
openai-messages-token-helper
requests_toolbelt
2 changes: 1 addition & 1 deletion app/frontend/index.html
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
<meta charset="UTF-8" />
<link rel="icon" type="image/x-icon" href="/favicon.ico" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Azure OpenAI + AI Search</title>
<title>AgriVisor - Agronomy Advisor</title>
</head>
<body>
<div id="root"></div>
Expand Down
1 change: 1 addition & 0 deletions app/frontend/src/api/models.ts
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,7 @@ export type ChatAppRequest = {
messages: ResponseMessage[];
context?: ChatAppRequestContext;
session_state: any;
image?: string;
};

export type Config = {
Expand Down
2 changes: 1 addition & 1 deletion app/frontend/src/components/Answer/AnswerLoading.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ export const AnswerLoading = () => {
<AnswerIcon />
<Stack.Item grow>
<p className={styles.answerText}>
Generating answer
Pondering deeply about your question
<span className={styles.loadingdots} />
</p>
</Stack.Item>
Expand Down
Loading
Loading