grantjw committed
Commit c334e8e · verified · 1 Parent(s): d93836f

Update app.py

Files changed (1)
  1. app.py +45 -80
app.py CHANGED
@@ -2,6 +2,7 @@ import streamlit as st
 import io
 import fitz
 import requests
+from streamlit_chat import message
 from langchain.llms import LlamaCpp
 from langchain.callbacks.base import BaseCallbackHandler
 from langchain.docstore.document import Document
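
The new import brings in streamlit_chat, a third-party chat-bubble component that ships separately from Streamlit itself (installed with `pip install streamlit-chat`). A minimal standalone sketch of the `message()` API the new UI code relies on (hypothetical file, not part of this commit):

```python
# sketch.py - run with: streamlit run sketch.py
from streamlit_chat import message

message("Hey! 👋", is_user=True, key="u0")             # right-aligned user bubble
message("Hi, ask me about Aesop products.", key="a0")  # left-aligned bot bubble
```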
@@ -65,7 +66,7 @@ def create_chain(_retriever):
     # responses in real time.
     # callback_manager = CallbackManager([stream_handler])
     (repo_id, model_file_name) = ("TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
-                                  "mistral-7b-instruct-v0.1.Q5_0.gguf")
+                                  "mistral-7b-instruct-v0.1.Q4_K_M.gguf")
 
     model_path = hf_hub_download(repo_id=repo_id,
                                  filename=model_file_name,
@@ -77,11 +78,12 @@ def create_chain(_retriever):
     llm = LlamaCpp(
         model_path=model_path,
         n_batch=n_batch,
-        n_ctx=2048,
+        n_ctx=4096,
         max_tokens=2048,
-        temperature=0,
+        temperature=.33,
         # callback_manager=callback_manager,
-        verbose=False,
+        top_p=1,
+        verbose=True,
         streaming=True,
     )
 
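The two hunks above make the model-level changes: the GGUF weights move from Q5_0 to the smaller Q4_K_M quantization, and the LlamaCpp settings double the context window (n_ctx 2048 → 4096) while switching from greedy decoding (temperature=0) to mild sampling (temperature=.33, top_p=1). A standalone smoke test for the new configuration might look like the sketch below (hypothetical script; n_batch is left at its default here, while app.py sets it elsewhere):

```python
# Hypothetical smoke test for the updated model file and sampling settings.
from huggingface_hub import hf_hub_download
from langchain.llms import LlamaCpp

model_path = hf_hub_download(
    repo_id="TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
    filename="mistral-7b-instruct-v0.1.Q4_K_M.gguf",  # new, smaller quantization
)

llm = LlamaCpp(
    model_path=model_path,
    n_ctx=4096,        # doubled context window
    max_tokens=2048,
    temperature=0.33,  # was 0 (deterministic)
    top_p=1,
    verbose=True,
)

print(llm("[INST] In one sentence, what is Aesop? [/INST]"))
```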
@@ -102,85 +104,48 @@ def create_chain(_retriever):
     return qa_chain
 
 
-# Set the webpage title
-st.set_page_config(
-    page_title="Youtube Aesop Product Reviewer"
-)
-
-# Create a header element
-st.header("Youtube Aesop Product Reviewer")
-
-#
-system_prompt = st.text_area(
-    label="System Prompt",
-    value="""
-DOCUMENT:
-(document text)
-
-QUESTION:
-(users question)
-
-INSTRUCTIONS:
-Answer the users QUESTION using the DOCUMENT text above.
-Keep your answer ground in the facts of the DOCUMENT.
-If the DOCUMENT doesn’t contain the facts to answer the QUESTION return {NONE}""",
-    key="system_prompt"
-)
-
-if "base_url" not in st.session_state:
-    st.session_state.base_url = ""
-
-base_url = st.text_input("Enter url for data here. For Aesop product review copy & paste: https://raw.githubusercontent.com/grantjw/product_chatbot_rag/main/data/output_transcripts.csv", key="base_url")
-
-if st.session_state.base_url != "":
-
-    retriever = get_retriever(base_url)
-
-    # We store the conversation in the session state.
-    # This will be used to render the chat conversation.
-    # We initialize it with the first message we want to be greeted with.
-    if "messages" not in st.session_state:
-        st.session_state.messages = [
-            {"role": "assistant", "content": "How may I help you today?"}
-        ]
-
-    if "current_response" not in st.session_state:
-        st.session_state.current_response = ""
-
-    # We loop through each message in the session state and render it as
-    # a chat message.
-    for message in st.session_state.messages:
-        with st.chat_message(message["role"]):
-            st.markdown(message["content"])
-
-    # We initialize the quantized LLM from a local path.
-    # Currently most parameters are fixed but we can make them
-    # configurable.
-    llm_chain = create_chain(retriever)
-
-    # We take questions/instructions from the chat input to pass to the LLM
-    if user_prompt := st.chat_input("Your message here", key="user_input"):
-
-        # Add our input to the session state
-        st.session_state.messages.append(
-            {"role": "user", "content": user_prompt}
-        )
-
-        # Add our input to the chat window
-        with st.chat_message("user"):
-            st.markdown(user_prompt)
-
-        # Pass our input to the llm chain and capture the final responses.
-        # It is worth noting that the Stream Handler is already receiving the
-        # streaming response as the llm is generating. We get our response
-        # here once the llm has finished generating the complete response.
-        response = llm_chain.run(user_prompt)
-
-        # Add the response to the session state
-        st.session_state.messages.append(
-            {"role": "assistant", "content": response}
-        )
-
-        # Add the response to the chat window
-        with st.chat_message("assistant"):
-            st.markdown(response)
+
+def initialize_session_state():
+    if 'history' not in st.session_state:
+        st.session_state['history'] = []
+
+    if 'generated' not in st.session_state:
+        st.session_state['generated'] = ["Hi, I know what Youtubers said about Aesop's products. Ask me!"]
+
+    if 'past' not in st.session_state:
+        st.session_state['past'] = ["Hey! 👋"]
+
+def conversation_chat(query, chain, history):
+    result = chain({"question": query, "chat_history": history})
+    history.append((query, result["answer"]))
+    return result["answer"]
+
+def display_chat_history(chain):
+    reply_container = st.container()
+    container = st.container()
+
+    with container:
+        with st.form(key='my_form', clear_on_submit=True):
+            user_input = st.text_input("Question:", placeholder=" ", key='input')
+            submit_button = st.form_submit_button(label='Send')
+
+        if submit_button and user_input:
+            with st.spinner('Generating response...'):
+                output = conversation_chat(user_input, chain, st.session_state['history'])
+
+            st.session_state['past'].append(user_input)
+            st.session_state['generated'].append(output)
+
+    if st.session_state['generated']:
+        with reply_container:
+            for i in range(len(st.session_state['generated'])):
+                message(st.session_state["past"][i], is_user=True, key=str(i) + '_user', avatar_style="thumbs")
+                message(st.session_state["generated"][i], key=str(i), avatar_style="shapes")
+
+base_url = "https://raw.githubusercontent.com/grantjw/product_chatbot_rag/main/data/output_transcripts.csv"
+retriever = get_retriever(base_url)
+llm_chain = create_chain(retriever)
+initialize_session_state()
+st.title("Aesop Product Reviewer from YouTube Reviews")
+st.image("aesop.png", width=550)
+display_chat_history(llm_chain)
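
The new conversation_chat calls the chain with a `{"question", "chat_history"}` dict and reads `result["answer"]`, which is the interface of LangChain's ConversationalRetrievalChain. create_chain itself is outside this hunk, so the following is only a sketch of the wiring that call signature implies; the `from_llm` line is an assumption, not code from the commit:

```python
# Sketch only: inferred from the {"question", "chat_history"} call signature.
from langchain.chains import ConversationalRetrievalChain

def create_chain(_retriever):
    llm = ...  # the LlamaCpp instance configured in the hunks above
    qa_chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=_retriever)
    return qa_chain

# Contract exercised by conversation_chat:
#   result = qa_chain({"question": "Is the hand cream worth it?", "chat_history": []})
#   result["answer"]  -> str
```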
 
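With the URL text box gone, the app now hard-codes the transcript CSV, builds the retriever and chain at startup, and renders the form-based chat UI. Note that `aesop.png` must sit next to app.py, and the first launch (`streamlit run app.py`) downloads the roughly 4 GB Q4_K_M GGUF file from the Hub before it can answer.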