JBHF committed on
Commit 491411e · verified · 1 Parent(s): 950766d

NonToxicGlazeAdvisor_Chat_with_Docs_Groq_Edition_1 - app.py - 27-03-2024, 10:25 CET

Files changed (1):
  app.py +87 -74
app.py CHANGED
@@ -155,85 +155,98 @@ if "vector" not in st.session_state:
  # docs = loader.load()
  # st.session_state.docs = docs

- # JB:
- # https://python.langchain.com/docs/modules/data_connection/document_loaders/file_directory
- # text_loader_kwargs={'autodetect_encoding': True}
- text_loader_kwargs={'autodetect_encoding': False}
- path = '../'
- # loader = DirectoryLoader(path, glob="**/*.pdf", loader_cls=TextLoader, loader_kwargs=text_loader_kwargs)
- # PyPDFDirectoryLoader (TEST):
- # loader = PyPDFDirectoryLoader(path, glob="**/*.pdf", loader_cls=TextLoader, loader_kwargs=text_loader_kwargs)
- # loader = PyPDFDirectoryLoader(path, glob="**/*.pdf", loader_kwargs=text_loader_kwargs)
- loader = PyPDFDirectoryLoader(path, glob="**/*.pdf")
- docs = loader.load()
- st.session_state.docs = docs
-
- # JB 18-03-2024:
- # https://python.langchain.com/docs/integrations/document_loaders/
- # MICROSOFT WORD:
- # https://python.langchain.com/docs/integrations/document_loaders/microsoft_word
- # 1 - Using Docx2txt
- # Load .docx using Docx2txt into a document.
- # %pip install --upgrade --quiet docx2txt
- # from langchain_community.document_loaders import Docx2txtLoader
- # loader = Docx2txtLoader("example_data/fake.docx")
- # data = loader.load()
- # data
- # [Document(page_content='Lorem ipsum dolor sit amet.', metadata={'source': 'example_data/fake.docx'})]
- #
- # 2A - Using Unstructured
- # from langchain_community.document_loaders import UnstructuredWordDocumentLoader
- # loader = UnstructuredWordDocumentLoader("example_data/fake.docx")
- # data = loader.load()
- # data
- # [Document(page_content='Lorem ipsum dolor sit amet.', lookup_str='', metadata={'source': 'fake.docx'}, lookup_index=0)]
- #
- # 2B - Retain Elements
- # Under the hood, Unstructured creates different “elements” for different chunks of text.
- # By default we combine those together, but you can easily keep that separation by specifying mode="elements".
- # loader = UnstructuredWordDocumentLoader("example_data/fake.docx", mode="elements")
- # data = loader.load()
- # data[0]
- # Document(page_content='Lorem ipsum dolor sit amet.', lookup_str='', metadata={'source': 'fake.docx', 'filename': 'fake.docx', 'category': 'Title'}, lookup_index=0)
- #
- # 2A - Using Unstructured
- # from langchain_community.document_loaders import UnstructuredWordDocumentLoader
- # loader = UnstructuredWordDocumentLoader(path, glob="**/*.docx")
- # docs = loader.load()
- # st.session_state.docs = docs


-
-
- st.session_state.text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
- st.session_state.documents = st.session_state.text_splitter.split_documents(st.session_state.docs)
- # https://python.langchain.com/docs/integrations/vectorstores/faiss
- # docs_and_scores = db.similarity_search_with_score(query)
- # Saving and loading
- # You can also save and load a FAISS index.
- # This is useful so you don’t have to recreate it every time you use it.
- # db.save_local("faiss_index")
- # new_db = FAISS.load_local("faiss_index", embeddings)
- # docs = new_db.similarity_search(query)
- # docs[0]
- # Document(page_content='Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.', metadata={'source': '../../../state_of_the_union.txt'})
- #
- st.session_state.vector = FAISS.from_documents(st.session_state.documents, st.session_state.embeddings) # ORIGINAL

- # st.session_state.vector = FAISS.from_documents(st.session_state.documents, st.session_state.embeddings) # ORIGINAL
- #st.session_state.vector.save_local("faiss_index")
- # The de-serialization relies on loading a pickle file.
- # Pickle files can be modified to deliver a malicious payload that results in execution of arbitrary code on your machine.
- # You will need to set `allow_dangerous_deserialization` to `True` to enable deserialization. If you do this, make sure that you trust the source of the data.

- #st.session_state.vector = FAISS.load_local("faiss_index", st.session_state.embeddings, allow_dangerous_deserialization=True)

- # SEE:
- # SEE FOR AN APP WITH CHROMADB:
- # https://github.com/vndee/local-rag-example/blob/main/rag.py
- # https://raw.githubusercontent.com/vndee/local-rag-example/main/rag.py
- # Chroma.from_documents(documents=chunks, embedding=FastEmbedEmbeddings())
- # st.session_state.vector = Chroma.from_documents(st.session_state.documents, st.session_state.embeddings) # JB
 
+ with st.status("Downloading data...", expanded=True) as status:
+     # st.write("Searching for data...")
+     # time.sleep(2)
+     # st.write("Found URL.")
+     # time.sleep(1)
+     # st.write("Downloading data...")
+     # time.sleep(1)
+     #status.update(label="Download complete!", state="complete", expanded=False)
+
+     st.write("Loading the PDF documents...")
+     # JB:
+     # https://python.langchain.com/docs/modules/data_connection/document_loaders/file_directory
+     # text_loader_kwargs={'autodetect_encoding': True}
+     text_loader_kwargs={'autodetect_encoding': False}
+     path = '../'
+     # loader = DirectoryLoader(path, glob="**/*.pdf", loader_cls=TextLoader, loader_kwargs=text_loader_kwargs)
+     # PyPDFDirectoryLoader (TEST):
+     # loader = PyPDFDirectoryLoader(path, glob="**/*.pdf", loader_cls=TextLoader, loader_kwargs=text_loader_kwargs)
+     # loader = PyPDFDirectoryLoader(path, glob="**/*.pdf", loader_kwargs=text_loader_kwargs)
+     loader = PyPDFDirectoryLoader(path, glob="**/*.pdf")
+     docs = loader.load()
+     st.session_state.docs = docs
+
+     # JB 18-03-2024:
+     # https://python.langchain.com/docs/integrations/document_loaders/
+     # MICROSOFT WORD:
+     # https://python.langchain.com/docs/integrations/document_loaders/microsoft_word
+     # 1 - Using Docx2txt
+     # Load .docx using Docx2txt into a document.
+     # %pip install --upgrade --quiet docx2txt
+     # from langchain_community.document_loaders import Docx2txtLoader
+     # loader = Docx2txtLoader("example_data/fake.docx")
+     # data = loader.load()
+     # data
+     # [Document(page_content='Lorem ipsum dolor sit amet.', metadata={'source': 'example_data/fake.docx'})]
+     #
+     # 2A - Using Unstructured
+     # from langchain_community.document_loaders import UnstructuredWordDocumentLoader
+     # loader = UnstructuredWordDocumentLoader("example_data/fake.docx")
+     # data = loader.load()
+     # data
+     # [Document(page_content='Lorem ipsum dolor sit amet.', lookup_str='', metadata={'source': 'fake.docx'}, lookup_index=0)]
+     #
+     # 2B - Retain Elements
+     # Under the hood, Unstructured creates different “elements” for different chunks of text.
+     # By default we combine those together, but you can easily keep that separation by specifying mode="elements".
+     # loader = UnstructuredWordDocumentLoader("example_data/fake.docx", mode="elements")
+     # data = loader.load()
+     # data[0]
+     # Document(page_content='Lorem ipsum dolor sit amet.', lookup_str='', metadata={'source': 'fake.docx', 'filename': 'fake.docx', 'category': 'Title'}, lookup_index=0)
+     #
+     # 2A - Using Unstructured
+     # from langchain_community.document_loaders import UnstructuredWordDocumentLoader
+     # loader = UnstructuredWordDocumentLoader(path, glob="**/*.docx")
+     # docs = loader.load()
+     # st.session_state.docs = docs
+
+
+     st.write("Splitting / chunking the texts...")
+     st.session_state.text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
+     st.session_state.documents = st.session_state.text_splitter.split_documents(st.session_state.docs)
+
+     st.write("Generating the Vector Store (may take a few minutes)...")
+     # https://python.langchain.com/docs/integrations/vectorstores/faiss
+     # docs_and_scores = db.similarity_search_with_score(query)
+     # Saving and loading
+     # You can also save and load a FAISS index.
+     # This is useful so you don’t have to recreate it every time you use it.
+     # db.save_local("faiss_index")
+     # new_db = FAISS.load_local("faiss_index", embeddings)
+     # docs = new_db.similarity_search(query)
+     # docs[0]
+     # Document(page_content='Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.', metadata={'source': '../../../state_of_the_union.txt'})
+     #
+     st.session_state.vector = FAISS.from_documents(st.session_state.documents, st.session_state.embeddings) # ORIGINAL

+     # st.session_state.vector = FAISS.from_documents(st.session_state.documents, st.session_state.embeddings) # ORIGINAL
+     #st.session_state.vector.save_local("faiss_index")
+     # The de-serialization relies on loading a pickle file.
+     # Pickle files can be modified to deliver a malicious payload that results in execution of arbitrary code on your machine.
+     # You will need to set `allow_dangerous_deserialization` to `True` to enable deserialization. If you do this, make sure that you trust the source of the data.

+     #st.session_state.vector = FAISS.load_local("faiss_index", st.session_state.embeddings, allow_dangerous_deserialization=True)

+     # SEE:
+     # SEE FOR AN APP WITH CHROMADB:
+     # https://github.com/vndee/local-rag-example/blob/main/rag.py
+     # https://raw.githubusercontent.com/vndee/local-rag-example/main/rag.py
+     # Chroma.from_documents(documents=chunks, embedding=FastEmbedEmbeddings())
+     # st.session_state.vector = Chroma.from_documents(st.session_state.documents, st.session_state.embeddings) # JB

  st.write("---------------------------------")
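
The substance of this commit is wrapping the index-building steps in Streamlit's st.status container, so the user sees per-step progress messages while the PDFs load and the FAISS index builds. A condensed sketch of that pattern, assuming st.session_state.embeddings is already initialised earlier in app.py (as it is); the status label texts here are illustrative, not the ones from the diff:

import streamlit as st
from langchain_community.document_loaders import PyPDFDirectoryLoader
from langchain_community.vectorstores import FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter

if "vector" not in st.session_state:
    # st.status renders a collapsible box that live-updates while this block runs
    with st.status("Building the vector store...", expanded=True) as status:
        st.write("Loading the PDF documents...")
        docs = PyPDFDirectoryLoader("../", glob="**/*.pdf").load()

        st.write("Splitting / chunking the texts...")
        splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
        chunks = splitter.split_documents(docs)

        st.write("Generating the Vector Store (may take a few minutes)...")
        # assumes st.session_state.embeddings was set earlier in app.py
        st.session_state.vector = FAISS.from_documents(chunks, st.session_state.embeddings)

        # mark the box done and collapse it (the diff leaves this step commented out)
        status.update(label="Vector store ready.", state="complete", expanded=False)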
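A note on the commented-out Word-document experiments: the last attempt passes a directory and a glob to UnstructuredWordDocumentLoader, but that loader takes a single file path. The usual way to sweep a folder of .docx files is DirectoryLoader with a loader_cls, roughly as below. This is a sketch, not part of the commit; the path and glob are assumptions:

from langchain_community.document_loaders import DirectoryLoader, Docx2txtLoader

# Apply Docx2txtLoader (requires the docx2txt package) to every .docx under
# path; DirectoryLoader handles the glob matching and iteration.
path = '../'  # same folder the PDF loader scans (assumption)
docx_loader = DirectoryLoader(path, glob="**/*.docx", loader_cls=Docx2txtLoader)
docx_docs = docx_loader.load()  # list of Document objects, one per file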
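The commented-out save_local / load_local lines sketch a persistence path that would avoid rebuilding the FAISS index on every app start. Because FAISS.load_local unpickles data from disk, LangChain requires allow_dangerous_deserialization=True, which is only safe for index files you created yourself. A hedged sketch of how those pieces could fit together, reusing the imports and session-state names from the sketch above; INDEX_DIR is an illustrative location:

import os

INDEX_DIR = "faiss_index"  # illustrative path for the saved index

if os.path.isdir(INDEX_DIR):
    # Reload a previously built index; this unpickles, so only trusted files.
    st.session_state.vector = FAISS.load_local(
        INDEX_DIR,
        st.session_state.embeddings,
        allow_dangerous_deserialization=True,
    )
else:
    # First run: build from the split documents, then persist for next time.
    st.session_state.vector = FAISS.from_documents(
        st.session_state.documents, st.session_state.embeddings
    )
    st.session_state.vector.save_local(INDEX_DIR)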