|
1 |
| -import streamlit as st |
2 |
| -import pyttsx3 |
3 |
| -import speech_recognition as sr |
4 |
| -from PyPDF2 import PdfReader |
5 |
| -from langchain.text_splitter import RecursiveCharacterTextSplitter |
6 |
| -import os |
7 |
| -from langchain_google_genai import GoogleGenerativeAIEmbeddings |
8 |
| -import google.generativeai as genai |
9 |
| -from langchain_community.vectorstores import FAISS |
10 |
| -from langchain_google_genai import ChatGoogleGenerativeAI |
11 |
| -from langchain.chains.question_answering import load_qa_chain |
12 |
| -from langchain.prompts import PromptTemplate |
13 |
| -from dotenv import load_dotenv |
14 |
| - |
15 |
| -load_dotenv() |
16 |
| -os.getenv("GOOGLE_API_KEY") |
17 |
| -genai.configure(api_key=os.getenv("GOOGLE_API_KEY")) |
18 |
| - |
19 |
| -# Initialize pyttsx3 for voice output |
20 |
| -engine = pyttsx3.init() |
21 |
| - |
22 |
| -# Function to speak the text |
23 |
| -def speak(text): |
24 |
| - engine.say(text) |
25 |
| - engine.runAndWait() |
26 |
| - |
27 |
| -# Function to listen to voice input |
28 |
| -def listen(): |
29 |
| - r = sr.Recognizer() |
30 |
| - with sr.Microphone() as source: |
31 |
| - st.write("Listening...") |
32 |
| - r.adjust_for_ambient_noise(source) |
33 |
| - audio = r.listen(source) |
34 |
| - |
35 |
| - try: |
36 |
| - user_input = r.recognize_google(audio) |
37 |
| - st.write(f"You said: {user_input}") |
38 |
| - return user_input |
39 |
| - except sr.UnknownValueError: |
40 |
| - st.write("Sorry, I could not understand what you said.") |
41 |
| - return None |
42 |
| - except sr.RequestError as e: |
43 |
| - st.write(f"Could not request results from Google Speech Recognition service; {e}") |
44 |
| - return None |
45 |
| - |
46 |
| - |
47 |
| -def get_pdf_text(pdf_docs): |
48 |
| - text="" |
49 |
| - for pdf in pdf_docs: |
50 |
| - pdf_reader= PdfReader(pdf) |
51 |
| - for page in pdf_reader.pages: |
52 |
| - text+= page.extract_text() |
53 |
| - return text |
54 |
| - |
55 |
| - |
56 |
| -def get_text_chunks(text): |
57 |
| - text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=1000) |
58 |
| - chunks = text_splitter.split_text(text) |
59 |
| - return chunks |
60 |
| - |
61 |
| - |
62 |
| -def get_vector_store(text_chunks): |
63 |
| - embeddings = GoogleGenerativeAIEmbeddings(model = "models/embedding-001") |
64 |
| - vector_store = FAISS.from_texts(text_chunks, embedding=embeddings) |
65 |
| - vector_store.save_local("faiss_index") |
66 |
| - |
67 |
| - |
68 |
| -def get_conversational_chain(): |
69 |
| - |
70 |
| - prompt_template = """ |
71 |
| - Answer the question as detailed as possible from the provided context, make sure to provide all the details, if the answer is not in |
72 |
| - provided context just say, "answer is not available in the context", don't provide the wrong answer\n\n |
73 |
| - Context:\n {context}?\n |
74 |
| - Question: \n{question}\n |
75 |
| -
|
76 |
| - Answer: |
77 |
| - """ |
78 |
| - |
79 |
| - model = ChatGoogleGenerativeAI(model="gemini-pro", |
80 |
| - temperature=0.3) |
81 |
| - |
82 |
| - prompt = PromptTemplate(template = prompt_template, input_variables = ["context", "question"]) |
83 |
| - chain = load_qa_chain(model, chain_type="stuff", prompt=prompt) |
84 |
| - |
85 |
| - return chain |
86 |
| - |
87 |
| - |
88 |
| -def user_input(user_question): |
89 |
| - embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001") |
90 |
| - |
91 |
| - # Load the local FAISS index with dangerous deserialization allowed |
92 |
| - new_db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True) |
93 |
| - docs = new_db.similarity_search(user_question) |
94 |
| - |
95 |
| - chain = get_conversational_chain() |
96 |
| - |
97 |
| - response = chain( |
98 |
| - {"input_documents": docs, "question": user_question}, |
99 |
| - return_only_outputs=True |
100 |
| - ) |
101 |
| - |
102 |
| - speak(response["output_text"]) # Speak the response |
103 |
| - st.write("Reply: ", response["output_text"]) |
104 |
| - |
105 |
| - |
106 |
| -def main(): |
107 |
| - st.set_page_config("Beyond GPS Navigation") |
108 |
| - st.header("Beyond GPS Navigator for Blind") |
109 |
| - |
110 |
| - user_question = st.text_input("Ask your query") |
111 |
| - voice_input_button = st.button("Voice Input") |
112 |
| - |
113 |
| - if voice_input_button: |
114 |
| - user_question = listen() # Listen to voice input |
115 |
| - if user_question: |
116 |
| - user_input(user_question) |
117 |
| - |
118 |
| - if user_question: |
119 |
| - user_input(user_question) |
120 |
| - |
121 |
| - with st.sidebar: |
122 |
| - st.title("Menu:") |
123 |
| - pdf_docs = st.file_uploader("Upload your route data and Click on the Submit & Process Button", accept_multiple_files=True) |
124 |
| - if st.button("Submit & Process"): |
125 |
| - with st.spinner("Processing..."): |
126 |
| - raw_text = get_pdf_text(pdf_docs) |
127 |
| - text_chunks = get_text_chunks(raw_text) |
128 |
| - get_vector_store(text_chunks) |
129 |
| - st.success("Done") |
130 |
| - |
131 |
| - |
132 |
| -if __name__ == "__main__": |
133 |
| - main() |
import streamlit as st
import pyttsx3
import speech_recognition as sr
from PyPDF2 import PdfReader
from langchain.text_splitter import RecursiveCharacterTextSplitter
import os
from langchain_google_genai import GoogleGenerativeAIEmbeddings, ChatGoogleGenerativeAI
import google.generativeai as genai
from langchain_community.vectorstores import FAISS
from langchain.chains.question_answering import load_qa_chain
from langchain.prompts import PromptTemplate
from dotenv import load_dotenv

# Load variables from a local .env file, then configure the Gemini client.
# The original code discarded the result of os.getenv() and would silently
# configure genai with api_key=None; fail fast with a clear message instead.
load_dotenv()
api_key = os.getenv("GOOGLE_API_KEY")
if not api_key:
    raise RuntimeError("GOOGLE_API_KEY is not set; add it to your environment or .env file.")
genai.configure(api_key=api_key)

# Initialize pyttsx3 for voice output (module-level so it is created once).
engine = pyttsx3.init()
| 21 | + |
# Function to speak the text
def speak(text):
    """Read ``text`` aloud through the module-level pyttsx3 engine.

    Blocks until the engine has finished speaking (runAndWait drains the
    queued utterance before returning).
    """
    engine.say(text)
    engine.runAndWait()
| 26 | + |
# Function to listen to voice input
def listen():
    """Capture one utterance from the default microphone and transcribe it.

    Returns the recognized text on success, or ``None`` when the audio could
    not be understood or the Google Speech Recognition service was
    unreachable. Status and errors are written to the Streamlit page.
    """
    recognizer = sr.Recognizer()
    with sr.Microphone() as source:
        st.write("Listening...")
        recognizer.adjust_for_ambient_noise(source)
        audio = recognizer.listen(source)

    try:
        transcript = recognizer.recognize_google(audio)
    except sr.UnknownValueError:
        st.write("Sorry, I could not understand what you said.")
        return None
    except sr.RequestError as e:
        st.write(f"Could not request results from Google Speech Recognition service; {e}")
        return None
    else:
        st.write(f"You said: {transcript}")
        return transcript
| 45 | + |
| 46 | + |
def get_pdf_text(pdf_docs):
    """Concatenate the extractable text of every page of every uploaded PDF.

    Parameters:
        pdf_docs: iterable of file-like objects (as produced by
            ``st.file_uploader`` with ``accept_multiple_files=True``).

    Returns:
        str: all page text joined together; "" when nothing is extractable.
    """
    parts = []
    for pdf in pdf_docs:
        pdf_reader = PdfReader(pdf)
        for page in pdf_reader.pages:
            # extract_text() returns None for pages without extractable text
            # (e.g. scanned images); the original `text += ...` raised
            # TypeError in that case. Join at the end instead of quadratic +=.
            parts.append(page.extract_text() or "")
    return "".join(parts)
| 54 | + |
| 55 | + |
def get_text_chunks(text):
    """Split *text* into overlapping chunks sized for embedding.

    Uses a recursive character splitter (10k chars per chunk, 1k overlap)
    and returns the list of chunk strings.
    """
    splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=1000)
    return splitter.split_text(text)
| 60 | + |
| 61 | + |
def get_vector_store(text_chunks):
    """Embed *text_chunks* with Gemini embeddings and persist a FAISS index.

    The index is written to the local ``faiss_index`` directory, where
    ``user_input`` later reloads it. Returns nothing.
    """
    gemini_embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
    index = FAISS.from_texts(text_chunks, embedding=gemini_embeddings)
    index.save_local("faiss_index")
| 66 | + |
| 67 | + |
def get_conversational_chain():
    """Build a "stuff"-type question-answering chain backed by Gemini.

    The prompt instructs the model to answer only from the supplied context
    and to reply "answer is not available in the context" otherwise.

    Returns:
        A langchain QA chain expecting ``{"input_documents", "question"}``.
    """

    prompt_template = """
    Answer the question as detailed as possible from the provided context, make sure to provide all the details, if the answer is not in
    provided context just say, "answer is not available in the context", don't provide the wrong answer\n\n
    Context:\n {context}?\n
    Question: \n{question}\n

    Answer:
    """

    # Low temperature keeps answers close to the retrieved context.
    model = ChatGoogleGenerativeAI(model="gemini-pro",
                             temperature=0.3)

    prompt = PromptTemplate(template = prompt_template, input_variables = ["context", "question"])
    chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)

    return chain
| 86 | + |
| 87 | + |
def user_input(user_question):
    """Answer *user_question* from the saved FAISS index; speak and show the reply.

    Loads the locally persisted index, retrieves the most similar chunks,
    runs the Gemini QA chain over them, then reads the answer aloud and
    writes it to the Streamlit page.
    """
    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")

    # Load the local FAISS index with dangerous deserialization allowed —
    # the index is produced locally by get_vector_store, not untrusted data.
    vector_db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
    relevant_docs = vector_db.similarity_search(user_question)

    qa_chain = get_conversational_chain()
    response = qa_chain(
        {"input_documents": relevant_docs, "question": user_question},
        return_only_outputs=True,
    )

    answer = response["output_text"]
    speak(answer)  # Speak the response
    st.write("Reply: ", answer)
| 104 | + |
| 105 | + |
def main():
    """Streamlit entry point: wire up text/voice Q&A and the PDF-ingestion sidebar."""
    st.set_page_config("Beyond GPS Navigation")
    st.header("Beyond GPS Navigator for Blind")

    user_question = st.text_input("Ask your query")
    voice_input_button = st.button("Voice Input")

    if voice_input_button:
        # Replace the typed question with the transcribed one; the single
        # dispatch below then serves both input paths. (The original code
        # also called user_input() inside this branch, so a recognized voice
        # question was answered — and spoken — twice.)
        user_question = listen()

    if user_question:
        user_input(user_question)

    with st.sidebar:
        st.title("Menu:")
        pdf_docs = st.file_uploader("Upload your route data and Click on the Submit & Process Button", accept_multiple_files=True)
        if st.button("Submit & Process"):
            if not pdf_docs:
                # Guard: clicking the button with no upload previously ran the
                # whole pipeline on an empty list and indexed an empty corpus.
                st.warning("Please upload at least one PDF first.")
            else:
                with st.spinner("Processing..."):
                    raw_text = get_pdf_text(pdf_docs)
                    text_chunks = get_text_chunks(raw_text)
                    get_vector_store(text_chunks)
                    st.success("Done")
| 130 | + |
| 131 | + |
# Script entry point (run with `streamlit run <file>` or `python <file>`).
if __name__ == "__main__":
    main()
0 commit comments