SiraH committed
Commit 6263ce1
Parent: addae57

Update app.py
Files changed (1): app.py (+9 −9)
app.py CHANGED
@@ -160,8 +160,8 @@ class UploadDoc:
 
         return documents
 
-def split_docs(documents,chunk_size=500):
-    text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=100)
+def split_docs(documents,chunk_size=1000):
+    text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=200)
     sp_docs = text_splitter.split_documents(documents)
     return sp_docs
 
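Note: this commit doubles both splitter defaults, chunk_size 500 → 1000 and chunk_overlap 100 → 200, so each retrieved chunk carries more surrounding context and fewer chunks are produced overall. A minimal standalone sketch of the effect, using the same RecursiveCharacterTextSplitter that app.py imports (the sample text is a placeholder):

from langchain.text_splitter import RecursiveCharacterTextSplitter

sample_text = "word " * 2000  # placeholder document text

for size, overlap in [(500, 100), (1000, 200)]:
    splitter = RecursiveCharacterTextSplitter(chunk_size=size, chunk_overlap=overlap)
    chunks = splitter.split_text(sample_text)
    print(f"chunk_size={size}, chunk_overlap={overlap} -> {len(chunks)} chunks")

Larger chunks pair naturally with the bigger n_ctx set below: fewer, longer passages fit into the prompt without truncation.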
@@ -169,14 +169,14 @@ def split_docs(documents,chunk_size=500):
 def load_llama2_llamaCpp():
     core_model_name = "llama-2-7b-chat.Q4_0.gguf"
     #n_gpu_layers = 32
-    n_batch = 32
+    n_batch = 512
     callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
     llm = LlamaCpp(
         model_path=core_model_name,
         #n_gpu_layers=n_gpu_layers,
         n_batch=n_batch,
         callback_manager=callback_manager,
-        verbose=True,n_ctx = 1024, temperature = 0.1, max_tokens = 256
+        verbose=True,n_ctx = 4096, temperature = 0.1, max_tokens = 512
     )
     return llm
 
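Note: n_ctx = 4096 matches Llama 2's native context window (the old 1024 left most of it unused), max_tokens doubles the generation budget per call, and the larger n_batch = 512 only affects prompt-evaluation speed, not output. A sketch of the updated loader in isolation, assuming llama-cpp-python is installed and the GGUF file sits next to the script:

from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms import LlamaCpp

llm = LlamaCpp(
    model_path="llama-2-7b-chat.Q4_0.gguf",
    n_ctx=4096,        # context window; 4096 is Llama 2's native limit
    n_batch=512,       # prompt tokens evaluated per batch (speed only)
    max_tokens=512,    # cap on tokens generated per call
    temperature=0.1,   # near-deterministic answers
    callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
    verbose=True,
)
print(llm("Q: What does a larger n_ctx buy a RAG app? A:"))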
@@ -281,7 +281,7 @@ def main():
 
     response = qa_chain({'query': query})
 
-    # url_list = set([i.metadata['source'] for i in response['source_documents']])
+    #url_list = set([i.metadata['page'] for i in response['source_documents']])
     #print(f"condensed quesion : {question_generator.run({'chat_history': response['chat_history'], 'question' : query})}")
 
     with st.chat_message("assistant"):
@@ -294,10 +294,10 @@ def main():
     # Add assistant response to chat history
     st.session_state.messages.append({"role": "assistant", "content": response['result']})
 
-    # with st.expander("See the related documents"):
-    #     for count, url in enumerate(url_list):
-    #         #url_reg = regex_source(url)
-    #         st.write(str(count+1)+":", url)
+    with st.expander("See the related documents"):
+        for count, url in enumerate(response['source_documents']):
+            #url_reg = regex_source(url)
+            st.write(str(count+1)+":", url)
 
     clear_button = st.button("Start new convo")
     if clear_button :
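Note: with the expander uncommented, the loop variable (still named url, a leftover from the earlier web-source version) is now a LangChain Document, so st.write renders the whole object, page_content plus metadata. A hedged sketch of a slightly friendlier rendering for the same block inside main(), assuming the documents come from a PDF loader that sets a 'page' metadata key (as the commented url_list line above suggests):

with st.expander("See the related documents"):
    for count, doc in enumerate(response['source_documents'], start=1):
        page = doc.metadata.get('page', 'n/a')   # 'page' is set by PDF loaders; fall back if absent
        st.write(f"{count}: (page {page}) {doc.page_content[:300]}")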