Update app.py
Browse files
app.py
CHANGED
@@ -259,43 +259,78 @@ document_chain = create_stuff_documents_chain(llm, prompt)
|
|
259 |
retriever = st.session_state.vector.as_retriever()
|
260 |
retrieval_chain = create_retrieval_chain(retriever, document_chain)
|
261 |
|
262 |
-
|
263 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
264 |
|
265 |
-
# data = ["input1", "input2", "input3"]
|
266 |
|
267 |
-
#for i, item in enumerate(data):
|
268 |
-
key = f"input_{i}"
|
269 |
-
# text_input = st.text_input(f"Enter value for {item}", key=key)
|
270 |
-
# Access the value directly
|
271 |
-
print(f"Value for key: {key}")
|
272 |
|
273 |
-
i=i+1
|
274 |
-
|
275 |
-
prompt = st.text_input("Input your prompt here", key=key)
|
276 |
-
|
277 |
-
|
278 |
-
# If the user hits enter
|
279 |
-
if prompt:
|
280 |
-
# Then pass the prompt to the LLM
|
281 |
-
start = time.process_time()
|
282 |
-
response = retrieval_chain.invoke({"input": prompt})
|
283 |
-
# print(f"Response time: {time.process_time() - start}")
|
284 |
-
st.write(f"Response time: {time.process_time() - start} seconds")
|
285 |
-
|
286 |
-
st.write(response["answer"])
|
287 |
-
|
288 |
-
# With a streamlit expander
|
289 |
-
with st.expander("Document Similarity Search"):
|
290 |
-
# Find the relevant chunks
|
291 |
-
for i, doc in enumerate(response["context"]):
|
292 |
-
# print(doc)
|
293 |
-
# st.write(f"Source Document # {i+1} : {doc.metadata['source'].split('/')[-1]}")
|
294 |
-
st.write(doc)
|
295 |
-
st.write(f"Source Document # {i+1} : {doc.metadata['source'].split('/')[-1]}")
|
296 |
-
|
297 |
-
|
298 |
-
st.write(doc.page_content)
|
299 |
-
st.write("--------------------------------")
|
300 |
|
301 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# --- Query UI: retrieval-augmented answer over the indexed documents -------
# Build a retriever-backed RAG chain from the vector store created earlier
# in this script (st.session_state.vector) and the document_chain defined
# above from the ChatPromptTemplate.
retriever = st.session_state.vector.as_retriever()
retrieval_chain = create_retrieval_chain(retriever, document_chain)

# NOTE(review): named user_query rather than `prompt` so it does not shadow
# the ChatPromptTemplate `prompt` used to build document_chain above.
user_query = st.text_input("Input your prompt here")

# If the user hits enter
if user_query:
    # perf_counter() measures elapsed wall-clock time; process_time() would
    # exclude the time spent blocked on the network-bound LLM call and
    # drastically under-report the latency shown to the user.
    start = time.perf_counter()
    response = retrieval_chain.invoke({"input": user_query})
    st.write(f"Response time: {time.perf_counter() - start} seconds")

    # The chain returns {"input": ..., "context": [Document, ...], "answer": str}.
    st.write(response["answer"])

    # Show the retrieved chunks that grounded the answer.
    with st.expander("Document Similarity Search"):
        for i, doc in enumerate(response["context"]):
            # assumes each Document carries a filesystem-style 'source' path
            # in its metadata — TODO confirm against the loader used earlier.
            st.write(f"Source Document # {i + 1} : {doc.metadata['source'].split('/')[-1]}")
            st.write(doc.page_content)
            st.write("--------------------------------")
|