3v324v23 committed on
Commit
781ef44
1 Parent(s): 4a49435

修复一些细节

Browse files
request_llm/bridge_all.py CHANGED
@@ -192,7 +192,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
192
 
193
  def mutex_manager(window_mutex, observe_window):
194
  while True:
195
- time.sleep(0.5)
196
  if not window_mutex[-1]: break
197
  # 看门狗(watchdog)
198
  for i in range(n_model):
 
192
 
193
  def mutex_manager(window_mutex, observe_window):
194
  while True:
195
+ time.sleep(0.25)
196
  if not window_mutex[-1]: break
197
  # 看门狗(watchdog)
198
  for i in range(n_model):
request_llm/bridge_newbing.py CHANGED
@@ -406,7 +406,7 @@ class Chatbot:
406
 
407
 
408
 
409
- load_message = ""
410
 
411
  """
412
  ========================================================================
@@ -574,13 +574,16 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
574
  watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可
575
  response = ""
576
  for response in newbing_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
577
- observe_window[0] = response
578
  if len(observe_window) >= 2:
579
  if (time.time()-observe_window[1]) > watch_dog_patience:
580
  raise RuntimeError("程序终止。")
581
- return response
582
-
583
 
 
 
 
 
584
 
585
  def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
586
  """
@@ -609,6 +612,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
609
  for i in range(len(history)//2):
610
  history_feedin.append([history[2*i], history[2*i+1]] )
611
 
 
612
  yield from update_ui(chatbot=chatbot, history=history, msg="NewBing响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。")
613
  for response in newbing_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
614
  chatbot[-1] = (inputs, preprocess_newbing_out(response))
 
406
 
407
 
408
 
409
+ load_message = "等待NewBing响应。"
410
 
411
  """
412
  ========================================================================
 
574
  watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可
575
  response = ""
576
  for response in newbing_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
577
+ observe_window[0] = preprocess_newbing_out_simple(response)
578
  if len(observe_window) >= 2:
579
  if (time.time()-observe_window[1]) > watch_dog_patience:
580
  raise RuntimeError("程序终止。")
581
+ return preprocess_newbing_out_simple(response)
 
582
 
583
+ def preprocess_newbing_out_simple(result):
584
+ if '[1]' in result:
585
+ result += '\n\n```\n' + "\n".join([r for r in result.split('\n') if r.startswith('[')]) + '\n```\n'
586
+ return result
587
 
588
  def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
589
  """
 
612
  for i in range(len(history)//2):
613
  history_feedin.append([history[2*i], history[2*i+1]] )
614
 
615
+ chatbot[-1] = (inputs, preprocess_newbing_out(response))
616
  yield from update_ui(chatbot=chatbot, history=history, msg="NewBing响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。")
617
  for response in newbing_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
618
  chatbot[-1] = (inputs, preprocess_newbing_out(response))