DrishtiSharma committed
Commit 51d1d4f · verified · 1 Parent(s): fc4ce9d

Update app.py

Files changed (1):
  app.py +229 -4
app.py CHANGED
@@ -33,10 +33,8 @@ from langchain.llms import OpenAI
 from langchain.document_loaders import UnstructuredPDFLoader
 from langchain.vectorstores import Chroma
 from langchain.embeddings import HuggingFaceEmbeddings
-from langchain.text_splitter import NLTKTextSplitter
-from patent_downloader import PatentDownloader
-from langchain.document_loaders import PyMuPDFLoader
 from langchain.text_splitter import RecursiveCharacterTextSplitter
+from patent_downloader import PatentDownloader
 
 PERSISTED_DIRECTORY = tempfile.mkdtemp()
 
@@ -52,4 +50,231 @@ def check_poppler_installed():
             "Poppler is not installed or not in PATH. Install 'poppler-utils' for PDF processing."
         )
 
-check_poppler_installed()
+check_poppler_installed()
+
+def clean_extracted_text(text):
+    """
+    Cleans extracted text to remove metadata, headers, and irrelevant content.
+    """
+    lines = text.split("\n")
+    cleaned_lines = []
+
+    for line in lines:
+        line = line.strip()
+
+        # Filter out lines with metadata patterns
+        if (
+            re.match(r"^(U\.S\.|United States|Sheet|Figure|References|Patent No|Date of Patent)", line)
+            or re.match(r"^\(?\d+\)?$", line)  # Matches single numbers (page numbers)
+            or "Examiner" in line
+            or "Attorney" in line
+            or len(line) < 30  # Skip very short lines
+        ):
+            continue
+
+        cleaned_lines.append(line)
+
+    return "\n".join(cleaned_lines)
+
+def load_docs(document_path):
+    """
+    Load and clean the PDF content, then split into chunks.
+    """
+    try:
+        import fitz  # PyMuPDF for text extraction
+
+        # Step 1: Extract plain text from PDF
+        doc = fitz.open(document_path)
+        extracted_text = []
+
+        for page_num, page in enumerate(doc):
+            page_text = page.get_text("text")  # Extract text
+            clean_page_text = clean_extracted_text(page_text)
+            if clean_page_text:  # Keep only non-empty cleaned text
+                extracted_text.append(clean_page_text)
+
+        doc.close()
+
+        # Combine all pages into one text
+        full_text = "\n".join(extracted_text)
+        st.write(f"🖍 Total Cleaned Text Length: {len(full_text)} characters")
+
+        # Step 2: Chunk the cleaned text
+        text_splitter = RecursiveCharacterTextSplitter(
+            chunk_size=1000,
+            chunk_overlap=100,
+            separators=["\n\n", "\n", " ", ""]
+        )
+        split_docs = text_splitter.create_documents([full_text])
+
+        st.write(f"🔍 Total Chunks After Splitting: {len(split_docs)}")
+        for i, doc in enumerate(split_docs[:3]):  # Show first 3 chunks only
+            st.write(f"Chunk {i + 1}: {doc.page_content[:300]}...")
+
+        return split_docs
+    except Exception as e:
+        st.error(f"Failed to load and process PDF: {e}")
+        st.stop()
+
+def initialize_vector_store(documents, persist_dir):
+    """
+    Initialize the vector store with the provided documents.
+    """
+    embeddings = HuggingFaceEmbeddings()
+    vectordb = Chroma.from_documents(
+        documents=documents,
+        embedding=embeddings,  # Chroma.from_documents expects 'embedding', not 'embedding_function'
+        persist_directory=persist_dir
+    )
+    vectordb.persist()  # Persist the vector store to disk
+    return vectordb
+
+def create_retriever(vectordb):
+    """
+    Create a retriever from the vector store.
+    """
+    return vectordb.as_retriever(search_kwargs={"k": 3})
+
+def create_retrieval_chain(vectordb, api_key):
+    """
+    Create a conversational retrieval chain with memory.
+    """
+    retriever = create_retriever(vectordb)
+    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
+
+    return ConversationalRetrievalChain.from_llm(
+        llm=OpenAI(temperature=0, openai_api_key=api_key),
+        retriever=retriever,
+        memory=memory
+    )
+
+def setup_retrieval_pipeline(file_path, persist_dir, api_key):
+    """
+    Load documents, create a vector store, and initialize a retrieval chain.
+    """
+    st.write(f"Processing file: {file_path}")
+
+    # Step 1: Process and chunk documents
+    documents = load_docs(file_path)
+    if not documents:
+        st.error("Failed to process documents. Please check the input file.")
+        return None
+
+    # Step 2: Initialize vector store
+    vectordb = initialize_vector_store(documents, persist_dir)
+
+    # Step 3: Create retrieval chain
+    retrieval_chain = create_retrieval_chain(vectordb, api_key)
+
+    return retrieval_chain
+
+if __name__ == "__main__":
+    st.set_page_config(
+        page_title="Patent Chat: Google Patents Chat Demo",
+        page_icon="🖊️",
+        layout="wide",
+        initial_sidebar_state="expanded",
+    )
+    st.header("🖊️ Patent Chat: Google Patents Chat Demo")
+
+    # Input for Google Patent Link
+    patent_link = st.text_area(
+        "Enter Google Patent Link:",
+        value="https://patents.google.com/patent/US8676427B1/en",
+        height=90
+    )
+
+    # Initialize session state
+    for key in ["LOADED_PATENT", "pdf_preview", "loaded_pdf_path", "chain", "messages", "loading_complete"]:
+        if key not in st.session_state:
+            st.session_state[key] = None
+
+    # Button to load and process patent
+    if st.button("Load and Process Patent"):
+        if not patent_link:
+            st.warning("Please enter a valid Google patent link.")
+            st.stop()
+
+        # Extract patent number
+        patent_number = extract_patent_number(patent_link)
+        if not patent_number:
+            st.error("Invalid patent link format.")
+            st.stop()
+
+        st.write(f"Patent number: **{patent_number}**")
+
+        # File handling
+        pdf_path = os.path.join(tempfile.gettempdir(), f"{patent_number}.pdf")
+        if not os.path.isfile(pdf_path):
+            with st.spinner("🔐 Downloading patent file..."):
+                try:
+                    pdf_path = download_pdf(patent_number)
+                    st.write(f"✅ File downloaded: {pdf_path}")
+                except Exception as e:
+                    st.error(f"Failed to download patent: {e}")
+                    st.stop()
+        else:
+            st.write("✅ File already downloaded.")
+
+        # Generate PDF preview only if not already displayed
+        if not st.session_state.get("pdf_preview_displayed", False):
+            with st.spinner("🖼️ Generating PDF preview..."):
+                preview_image_path = preview_pdf(pdf_path, scale_factor=0.5)
+                if preview_image_path:
+                    st.session_state.pdf_preview = preview_image_path
+                    st.image(preview_image_path, caption="First Page Preview", use_container_width=False)
+                    st.session_state["pdf_preview_displayed"] = True
+                else:
+                    st.warning("Failed to generate PDF preview.")
+                    st.session_state.pdf_preview = None
+
+        # Load the document into the system
+        st.session_state["loading_complete"] = False
+        with st.spinner("🔄 Loading document into the system..."):
+            try:
+                st.session_state.chain = setup_retrieval_pipeline(
+                    pdf_path, PERSISTED_DIRECTORY, OPENAI_API_KEY
+                )
+                st.session_state.LOADED_PATENT = patent_number
+                st.session_state.loaded_pdf_path = pdf_path
+                st.session_state.messages = [{"role": "assistant", "content": "Hello! How can I assist you with this patent?"}]
+                st.session_state["loading_complete"] = True
+            except Exception as e:
+                st.error(f"Failed to load the document: {e}")
+                st.session_state["loading_complete"] = False
+                st.stop()
+
+        if st.session_state["loading_complete"]:
+            st.success("🚀 Document successfully loaded! You can now start asking questions.")
+
+    # Display previous chat messages
+    if st.session_state.messages:
+        for message in st.session_state.messages:
+            with st.chat_message(message["role"]):
+                st.markdown(message["content"])
+
+    # User input for questions
+    if st.session_state.chain:
+        if user_input := st.chat_input("What is your question?"):
+            # User message
+            st.session_state.messages.append({"role": "user", "content": user_input})
+            with st.chat_message("user"):
+                st.markdown(user_input)
+
+            # Assistant response
+            with st.chat_message("assistant"):
+                message_placeholder = st.empty()
+                full_response = ""
+
+                with st.spinner("Generating response..."):
+                    try:
+                        # Generate response using the chain
+                        assistant_response = st.session_state.chain({"question": user_input})
+                        full_response = assistant_response.get("answer", "I'm sorry, I couldn't process that question.")
+                    except Exception as e:
+                        full_response = f"An error occurred: {e}"
+
+                message_placeholder.markdown(full_response)
+                st.session_state.messages.append({"role": "assistant", "content": full_response})
+    else:
+        st.info("Press the 'Load and Process Patent' button to start processing.")
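
Note: several helpers referenced in this hunk (extract_patent_number, download_pdf, preview_pdf, OPENAI_API_KEY) are defined earlier in app.py and are not part of this diff. As a rough sketch of the assumed contract, extract_patent_number might look like the following (hypothetical illustration, not the code from this commit):

import re

def extract_patent_number(url: str):
    """Pull a patent id such as 'US8676427B1' out of a Google Patents URL.

    Hypothetical sketch; the real helper is defined earlier in app.py.
    """
    # e.g. https://patents.google.com/patent/US8676427B1/en -> "US8676427B1"
    match = re.search(r"patents\.google\.com/patent/([A-Z]{2}\d+[A-Z]?\d*)", url)
    return match.group(1) if match else None

Whatever its exact implementation, the main block only relies on it returning a truthy patent id (used to name the downloaded PDF) or a falsy value for malformed links.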