KrishP-12 committed on
Commit
0624a49
·
verified ·
1 Parent(s): d6eaf8d

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +159 -0
app.py ADDED
@@ -0,0 +1,159 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import os
3
+ from PIL import Image
4
+ import pytesseract
5
+ from pdf2image import convert_from_path
6
+ from langchain_community.embeddings import HuggingFaceEmbeddings
7
+ from langchain.prompts import PromptTemplate
8
+ from langchain.chains import RetrievalQA
9
+ from langchain.memory import ConversationBufferMemory
10
+ from langchain_groq import ChatGroq
11
+ from langchain_community.vectorstores import FAISS
12
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
13
+
14
def setup_environment():
    """Ensure the Groq API key is available in the process environment.

    SECURITY FIX: the original version hard-coded a live Groq API key in
    source control. Any secret committed to a repository must be treated
    as compromised and rotated; the key is now read from the environment.

    Raises:
        RuntimeError: if GROQ_API_KEY is not set in the environment.
    """
    if not os.environ.get("GROQ_API_KEY"):
        raise RuntimeError(
            "GROQ_API_KEY is not set. Export it in the environment before launching the app."
        )
16
+
17
+ # Define OCR functions for image and PDF files
18
def ocr_image(image_path, language='eng+guj'):
    """Extract text from a single image file with Tesseract OCR.

    FIX: the original opened the image without ever closing it, leaking
    the underlying file handle; a context manager releases it promptly.

    Args:
        image_path: path to an image file readable by Pillow.
        language: Tesseract language spec (default English + Gujarati).

    Returns:
        The recognized text as a string.
    """
    with Image.open(image_path) as img:
        return pytesseract.image_to_string(img, lang=language)
22
+
23
def ocr_pdf(pdf_path, language='eng+guj'):
    """OCR every page of a PDF and return the concatenated text.

    Each page is rasterized via pdf2image, then run through Tesseract;
    page texts are joined with a trailing newline after each page.

    Args:
        pdf_path: path to the PDF file.
        language: Tesseract language spec (default English + Gujarati).

    Returns:
        All recognized text, one newline-terminated chunk per page.
    """
    pages = convert_from_path(pdf_path)
    page_texts = [pytesseract.image_to_string(page, lang=language) for page in pages]
    return "".join(text + "\n" for text in page_texts)
30
+
31
def ocr_file(file_path):
    """Dispatch a file to the appropriate OCR routine by extension.

    Args:
        file_path: path to a PDF or image file.

    Returns:
        The OCR-extracted text.

    Raises:
        ValueError: for any extension other than .pdf/.jpg/.jpeg/.png/.bmp.
    """
    extension = os.path.splitext(file_path)[1].lower()
    if extension == ".pdf":
        return ocr_pdf(file_path, language='guj+eng')
    if extension in {".jpg", ".jpeg", ".png", ".bmp"}:
        return ocr_image(file_path, language='guj+eng')
    raise ValueError("Unsupported file format. Supported formats are PDF, JPG, JPEG, PNG, BMP.")
40
+
41
def get_text_chunks(text):
    """Split raw text into overlapping chunks for embedding.

    Uses a recursive character splitter with 500-character chunks and
    100 characters of overlap so context is preserved across boundaries.

    Args:
        text: the full OCR-extracted document text.

    Returns:
        A list of chunk strings.
    """
    splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)
    return splitter.split_text(text)
45
+
46
def get_vector_store(text_chunks):
    """Embed text chunks and persist a FAISS index under ./faiss_index.

    Embeddings are computed on CPU with all-MiniLM-L6-v2 and L2-normalized
    so FAISS similarity behaves like cosine similarity.

    Args:
        text_chunks: list of strings to index.

    Returns:
        The in-memory FAISS vector store (also saved to disk).
    """
    embedder = HuggingFaceEmbeddings(
        model_name="sentence-transformers/all-MiniLM-L6-v2",
        model_kwargs={'device': 'cpu'},
        encode_kwargs={'normalize_embeddings': True},
    )
    store = FAISS.from_texts(text_chunks, embedding=embedder)
    os.makedirs("faiss_index", exist_ok=True)
    store.save_local("faiss_index")
    return store
52
+
53
def process_ocr_and_pdf_files(file_paths):
    """OCR a batch of files, chunk the combined text, and rebuild the index.

    Args:
        file_paths: iterable of paths to PDF/image files.

    Returns:
        The FAISS vector store built from all extracted text.
    """
    combined_text = "".join(ocr_file(path) + "\n" for path in file_paths)
    chunks = get_text_chunks(combined_text)
    return get_vector_store(chunks)
59
+
60
def get_conversational_chain():
    """Build a RetrievalQA chain over the persisted FAISS index.

    Fixes vs. the original:
    - ``llm`` was referenced on the ``RetrievalQA.from_chain_type`` call but
      never defined anywhere in the file, which raised NameError at runtime;
      a ChatGroq client is now constructed here.
    - The index is loaded with the SAME embedding model used to build it in
      ``get_vector_store`` (all-MiniLM-L6-v2). The original loaded it with
      paraphrase-MiniLM-L6-v2, and mismatched embedding spaces silently
      degrade retrieval quality.

    Returns:
        A RetrievalQA chain with conversation memory and a custom prompt.
    """
    template = """You are an intelligent educational assistant specialized in handling queries about documents. You have been provided with OCR-processed text from the uploaded files that contains important educational information.

Core Responsibilities:
1. Language Processing:
- Identify the language of the user's query (English or Gujarati)
- Respond in the same language as the query
- If the query is in Gujarati, ensure the response maintains proper Gujarati grammar and terminology
- For technical terms, provide both English and Gujarati versions when relevant

2. Document Understanding:
- Analyze the OCR-processed text from the uploaded files
- Account for potential OCR errors or misinterpretations
- Focus on extracting accurate information despite possible OCR imperfections

3. Response Guidelines:
- Provide direct, clear answers based solely on the document content
- If information is unclear due to OCR quality, mention this limitation
- For numerical data (dates, percentages, marks), double-check accuracy before responding
- If information is not found in the documents, clearly state: \"This information is not present in the uploaded documents\"

4. Educational Context:
- Maintain focus on educational queries related to the document content
- For admission-related queries, emphasize important deadlines and requirements
- For scholarship information, highlight eligibility criteria and application processes
- For course-related queries, provide detailed, accurate information from the documents

5. Response Format:
- Structure responses clearly with relevant subpoints when necessary
- For complex information, break down the answer into digestible parts
- Include relevant reference points from the documents when applicable
- Format numerical data and dates clearly

6. Quality Control:
- Verify that responses align with the document content
- Don't make assumptions beyond the provided information
- If multiple interpretations are possible due to OCR quality, mention all possibilities
- Maintain consistency in terminology throughout the conversation

Important Rules:
- Never make up information not present in the documents
- Don't combine information from previous conversations or external knowledge
- Always indicate if certain parts of the documents are unclear due to OCR quality
- Maintain professional tone while being accessible to students and parents
- If the query is out of scope of the uploaded documents, politely redirect to relevant official sources

Context from uploaded documents:
{context}

Chat History:
{history}

Current Question: {question}
Assistant: Let me provide a clear and accurate response based on the uploaded documents..."""
    # Must match the model used in get_vector_store(): vectors from different
    # embedding models are not comparable, so retrieval would be meaningless.
    embeddings = HuggingFaceEmbeddings(
        model_name="sentence-transformers/all-MiniLM-L6-v2",
        model_kwargs={'device': 'cpu'},
        encode_kwargs={'normalize_embeddings': True},
    )
    new_vector_store = FAISS.load_local(
        "faiss_index", embeddings, allow_dangerous_deserialization=True
    )
    # NOTE(review): `llm` was undefined in the original (NameError at call
    # time). A ChatGroq client is created here; confirm the model name
    # matches what your Groq account/deployment expects.
    llm = ChatGroq(model_name="llama-3.1-8b-instant", temperature=0)
    qa_prompt = PromptTemplate(
        input_variables=["history", "context", "question"], template=template
    )
    qa_chain = RetrievalQA.from_chain_type(
        llm,
        retriever=new_vector_store.as_retriever(),
        chain_type='stuff',
        verbose=True,
        chain_type_kwargs={
            "verbose": True,
            "prompt": qa_prompt,
            "memory": ConversationBufferMemory(memory_key="history", input_key="question"),
        },
    )
    return qa_chain
121
+
122
def user_input(user_question):
    """Answer a user question against the indexed documents.

    Fixes vs. the original:
    - RetrievalQA performs retrieval itself through the chain's retriever,
      so the manual FAISS reload + ``similarity_search`` here was redundant
      work whose results were never actually used by the chain.
    - The chain's expected input key is ``"query"``; passing the extra
      ``"input_documents"`` key (a load_qa_chain convention) can trip the
      chain's input validation.

    Args:
        user_question: the user's question (English or Gujarati).

    Returns:
        The chain's answer string, or "No result found" if absent.
    """
    chain = get_conversational_chain()
    response = chain({"query": user_question}, return_only_outputs=True)
    return response.get("result", "No result found")
129
+
130
def gradio_interface():
    """Build and launch the Gradio web UI for upload + Q&A.

    Fixes vs. the original:
    - ``gr.inputs`` / ``gr.outputs`` were removed in modern Gradio; the
      current component API (``gr.File``, ``gr.Textbox``) is used instead.
    - ``gr.Interface(fn=[...])`` with a list of callables is not a valid
      Interface signature; the two actions (process files, ask question)
      are wired as separate Blocks events instead.
    - With ``type="filepath"`` Gradio hands back temp-file paths directly,
      so the fragile ``file.read()`` copy into ./temp is unnecessary.
    """

    def process_files(files):
        # `files` is a list of temp-file paths provided by gr.File.
        if not files:
            return "No files uploaded."
        process_ocr_and_pdf_files(list(files))
        return "Files processed and vector store updated!"

    def ask_question(user_question):
        return user_input(user_question)

    with gr.Blocks() as demo:
        gr.Markdown("## Document OCR & Question Answering")
        file_upload = gr.File(
            label="Upload Files", file_count="multiple", type="filepath"
        )
        process_btn = gr.Button("Process Files")
        status_box = gr.Textbox(label="Output")
        process_btn.click(process_files, inputs=file_upload, outputs=status_box)

        question_box = gr.Textbox(
            label="Ask a question related to the uploaded documents:"
        )
        ask_btn = gr.Button("Ask")
        answer_box = gr.Textbox(label="Answer")
        ask_btn.click(ask_question, inputs=question_box, outputs=answer_box)

    demo.launch()
156
+
157
if __name__ == "__main__":
    # Script entry point: verify credentials are configured, then launch
    # the Gradio web UI (blocks until the server is stopped).
    setup_environment()
    gradio_interface()