JBHF committed on
Commit
7a739f2
1 Parent(s): 632d798

NonToxicGlazeAdvisor_Chat_with_Docs_Groq_Edition_1 - app.py - 27-03-2024, 09:33 CET

Browse files
Files changed (1) hide show
  1. app.py +63 -11
app.py CHANGED
@@ -1,4 +1,4 @@
1
- # NonToxicGlazeAdvisor_Chat_with_Docs_Groq_Edition_1 - app.py - 26-03-2024
2
 
3
  # STREAMLIT:
4
  # https://www.datacamp.com/tutorial/streamlit:
@@ -260,11 +260,59 @@ document_chain = create_stuff_documents_chain(llm, prompt)
260
  retriever = st.session_state.vector.as_retriever()
261
  retrieval_chain = create_retrieval_chain(retriever, document_chain)
262
 
263
- # prompt = st.text_input("Input your prompt here") #, key=key)
264
- prompt = st.text_input("Stel hieronder Uw vraag:") #, key=key)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
265
 
266
- # If the user hits enter
267
- if prompt:
268
  # Then pass the prompt to the LLM
269
  start = time.process_time()
270
  response = retrieval_chain.invoke({"input": prompt})
@@ -273,6 +321,16 @@ if prompt:
273
 
274
  st.write(response["answer"])
275
 
 
 
 
 
 
 
 
 
 
 
276
  # With a streamlit expander
277
  with st.expander("Document Similarity Search"):
278
  # Find the relevant chunks
@@ -290,12 +348,6 @@ st.write("---------------------------------")
290
 
291
 
292
 
293
-
294
-
295
-
296
-
297
-
298
-
299
  #i=0
300
  #while True:
301
  #
 
1
+ # NonToxicGlazeAdvisor_Chat_with_Docs_Groq_Edition_1 - app.py - 27-03-2024
2
 
3
  # STREAMLIT:
4
  # https://www.datacamp.com/tutorial/streamlit:
 
260
  retriever = st.session_state.vector.as_retriever()
261
  retrieval_chain = create_retrieval_chain(retriever, document_chain)
262
 
263
+ ## prompt = st.text_input("Input your prompt here") #, key=key)
264
+ #prompt = st.text_input("Stel hieronder Uw vraag:") #, key=key)
265
+ #
266
+ ## If the user hits enter
267
+ #if prompt:
268
+ # # Then pass the prompt to the LLM
269
+ # start = time.process_time()
270
+ # response = retrieval_chain.invoke({"input": prompt})
271
+ # # print(f"Response time: {time.process_time() - start}")
272
+ # st.write(f"Response time: {time.process_time() - start} seconds")
273
+ #
274
+ # st.write(response["answer"])
275
+ #
276
+ # # With a streamlit expander
277
+ # with st.expander("Document Similarity Search"):
278
+ # # Find the relevant chunks
279
+ # for i, doc in enumerate(response["context"]):
280
+ # # print(doc)
281
+ # # st.write(f"Source Document # {i+1} : {doc.metadata['source'].split('/')[-1]}")
282
+ # st.write(doc)
283
+ # st.write(f"Source Document # {i+1} : {doc.metadata['source'].split('/')[-1]}")
284
+ #
285
+ #
286
+ # st.write(doc.page_content)
287
+ # st.write("--------------------------------")
288
+ #
289
+ #st.write("---------------------------------")
290
+
291
+
292
+ # ZIE:
293
+ # https://raw.githubusercontent.com/streamlit/llm-examples/main/Chatbot.py
294
+ # from openai import OpenAI
295
+ # import streamlit as st
296
+
297
+ with st.sidebar:
298
+ openai_api_key = st.text_input("OpenAI API Key", key="chatbot_api_key", type="password")
299
+ "[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
300
+ "[View the source code](https://github.com/streamlit/llm-examples/blob/main/Chatbot.py)"
301
+ "[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
302
+
303
+ st.title("💬 Chatbot")
304
+ st.caption("🚀 A streamlit chatbot powered by mixtral-8x7b-32768 Groq LLM (VERY FAST !)")
305
+ if "messages" not in st.session_state:
306
+ st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you?"}]
307
+
308
+ for msg in st.session_state.messages:
309
+ st.chat_message(msg["role"]).write(msg["content"])
310
+
311
+ if prompt := st.chat_input():
312
+ #if not openai_api_key:
313
+ # st.info("Please add your OpenAI API key to continue.")
314
+ # st.stop()
315
 
 
 
316
  # Then pass the prompt to the LLM
317
  start = time.process_time()
318
  response = retrieval_chain.invoke({"input": prompt})
 
321
 
322
  st.write(response["answer"])
323
 
324
+
325
+ #client = OpenAI(api_key=openai_api_key)
326
+ st.session_state.messages.append({"role": "user", "content": prompt})
327
+ st.chat_message("user").write(prompt)
328
+ # response = client.chat.completions.create(model="gpt-3.5-turbo", messages=st.session_state.messages)
329
+ # msg = response.choices[0].message.content
330
+ msg = response["answer"]
331
+ st.session_state.messages.append({"role": "assistant", "content": msg})
332
+ st.chat_message("assistant").write(msg)
333
+
334
  # With a streamlit expander
335
  with st.expander("Document Similarity Search"):
336
  # Find the relevant chunks
 
348
 
349
 
350
 
 
 
 
 
 
 
351
  #i=0
352
  #while True:
353
  #