JasperV13 committed
Commit c4285cd · 1 Parent(s): b82c865

Update app.py

Files changed (1)
  1. app.py +19 -28
app.py CHANGED
@@ -1,4 +1,3 @@
-
 from gradio_client import Client
 from langchain.document_loaders.text import TextLoader
 from langchain.text_splitter import RecursiveCharacterTextSplitter
@@ -14,6 +13,7 @@ from langchain.chains import ConversationalRetrievalChain
 from huggingface_hub import hf_hub_download
 from langchain.llms import LlamaCpp
 from langchain.chains import LLMChain
+
 import time
 import streamlit as st
 
@@ -111,7 +111,7 @@ qa = ConversationalRetrievalChain.from_llm(
     return_source_documents=True,
     verbose=False,
 )
-def translate(text, source, target):
+def translate(text, source="English", target="Moroccan Arabic"):
     client = Client("https://facebook-seamless-m4t-v2-large.hf.space/--replicas/2bmbx/")
     result = client.predict(
         text,
@@ -164,45 +164,36 @@ def clear_chat_history():
 st.sidebar.button('Clear Chat History', on_click=clear_chat_history)
 selected_language = st.sidebar.selectbox("Select Language", ["English", "Darija"], index=0)  # English is the default
 
-
 # Function for generating LLaMA2 response
 def generate_llm_response(prompt_input):
     res = qa(f'''{prompt_input}''')
 
     if selected_language == "Darija":
-        # Translate the model's response from English to Darija
-        translated_response = translate(res['answer'], "English", "Moroccan Darija")
+        translated_response = translate(res['answer'])
         return translated_response
     else:
         return res['answer']
 
 # User-provided prompt
 if prompt := st.chat_input("What is up?"):
-    # Translate the user's input from Darija to English
-    #if selected_language == "Darija":
-    #    translated_prompt = translate(prompt, "Moroccan Darija", "English")
-    # else:
-    translated_prompt = prompt
-
-    st.session_state.messages.append({"role": "user", "content": translated_prompt})
+    st.session_state.messages.append({"role": "user", "content": prompt})
     with st.chat_message("user", avatar="user.png"):
-        st.write(translated_prompt)
+        st.write(prompt)
 
-# Generate a new response if the last message is not from the assistant
+# Generate a new response if last message is not from assistant
 if st.session_state.messages[-1]["role"] != "assistant":
-    with st.chat_message("assistant", avatar="logo.png"):
-        with st.spinner("Thinking..."):
-            response = generate_llm_response(st.session_state.messages[-1]["content"])
-            translated_response = response # No need to translate back to Darija for display
-            placeholder = st.empty()
-            full_response = ''
-            for item in translated_response:
-                full_response += item
-                placeholder.markdown(full_response)
-                time.sleep(0.05)
-            placeholder.markdown(full_response)
-    message = {"role": "assistant", "content": full_response}
-    st.session_state.messages.append(message)
+    with st.chat_message("assistant", avatar="logo.png"):
+        with st.spinner("Thinking..."):
+            response = generate_llm_response(st.session_state.messages[-1]["content"])
+            placeholder = st.empty()
+            full_response = ''
+            for item in response:
+                full_response += item
+                placeholder.markdown(full_response)
+                time.sleep(0.05)
+            placeholder.markdown(full_response)
+    message = {"role": "assistant", "content": full_response}
+    st.session_state.messages.append(message)
 
 # Example prompt
 with st.sidebar :
@@ -231,4 +222,4 @@ st.sidebar.button('What is the estimated amount of money I need to start my comp
 with st.sidebar:
     st.title('Disclaimer ⚠️:')
     st.markdown('may introduce false information')
-    st.markdown('consult with a preofessionel advisor for more specific problems')
+    st.markdown('consult with a professional advisor for more specific problems')
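
For reference, a minimal sketch of the call pattern the new signature enables. The hunk above truncates the original predict() call, so the argument order and the api_name below are assumptions about the Seamless M4T space's API, not something this diff confirms:

# Minimal sketch of the updated helper, assuming the truncated predict()
# call takes the text plus source/target language names; api_name="/t2tt"
# is a hypothetical endpoint name, not confirmed by this diff.
from gradio_client import Client

def translate(text, source="English", target="Moroccan Arabic"):
    client = Client("https://facebook-seamless-m4t-v2-large.hf.space/--replicas/2bmbx/")
    result = client.predict(
        text,    # text to translate
        source,  # source language name
        target,  # target language name
        api_name="/t2tt",  # assumed text-to-text translation endpoint
    )
    return result

# With the defaults baked into the signature, the Darija branch reduces to:
#     translated_response = translate(res['answer'])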