3v324v23 committed
Commit 23c5a77
1 parent: acaf8cd

Fix some details

Files changed (4):
  1. config.py +1 -1
  2. main.py +2 -2
  3. request_llm/bridge_chatgpt.py +7 -5
  4. version +1 -1
config.py CHANGED
@@ -45,7 +45,7 @@ MAX_RETRY = 2
 
 # OpenAI model selection (gpt-4 is currently only open to users whose access application was approved)
 LLM_MODEL = "gpt-3.5-turbo" # alternative: "chatglm"
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "chatglm", "gpt-4", "api2d-gpt-4", "api2d-gpt-3.5-turbo"]
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm"]
 
 # Execution device for local LLM models such as ChatGLM: CPU/GPU
 LOCAL_MODEL_DEVICE = "cpu" # alternative: "cuda"
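
The list is reordered so each OpenAI model sits next to its API2D counterpart, which is also the order the new dropdown in main.py (below) will display. A minimal startup sanity check, assuming config.py is importable as-is (check_model_config is a hypothetical helper, not part of the repo):

```python
# Minimal sketch, assuming config.py is on the import path.
from config import AVAIL_LLM_MODELS, LLM_MODEL

def check_model_config():
    # The default model must appear among the dropdown choices; otherwise
    # gr.Dropdown(value=LLM_MODEL) would start on a value it cannot display.
    assert LLM_MODEL in AVAIL_LLM_MODELS, \
        f"LLM_MODEL={LLM_MODEL!r} missing from AVAIL_LLM_MODELS"

check_model_config()
```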
main.py CHANGED
@@ -95,13 +95,13 @@ def main():
         with gr.Row():
             with gr.Accordion("Click to expand the “file upload area”. Uploaded local files can be used by the red function plugins.", open=False) as area_file_up:
                 file_upload = gr.Files(label="Any file, though compressed archives (zip, tar) are recommended", file_count="multiple")
-            with gr.Accordion("Expand SysPrompt & UI layout & Github address", open=(LAYOUT == "TOP-DOWN")):
+            with gr.Accordion("Switch model & SysPrompt & UI layout", open=(LAYOUT == "TOP-DOWN")):
                 system_prompt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt)
                 top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01, interactive=True, label="Top-p (nucleus sampling)",)
                 temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
                 max_length_sl = gr.Slider(minimum=256, maximum=4096, value=512, step=1, interactive=True, label="MaxLength",)
                 checkboxes = gr.CheckboxGroup(["Basic functions", "Function plugins", "Bottom input area", "Input clear button"], value=["Basic functions", "Function plugins"], label="Show/hide function areas")
-                md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="").style(container=False)
+                md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="Switch LLM model / request source").style(container=False)
 
         gr.Markdown(description)
         with gr.Accordion("Alternative input area", open=True, visible=False) as area_input_secondary:
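
Beyond the new label, the dropdown only takes effect once a change handler routes the selection somewhere; the diff does not show that wiring. A hedged sketch of one way to connect such a dropdown to shared state (the handler name and state layout are assumptions, not the repo's actual code):

```python
import gradio as gr

AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4"]

# Hypothetical handler: remember the chosen model in shared state so that
# later requests can be routed to the matching request bridge.
def on_model_change(model_name, state):
    state["llm_model"] = model_name
    return state

with gr.Blocks() as demo:
    state = gr.State({"llm_model": AVAIL_LLM_MODELS[0]})
    md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=AVAIL_LLM_MODELS[0],
                              label="Switch LLM model / request source")
    md_dropdown.change(on_model_change, [md_dropdown, state], [state])

demo.launch()
```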
request_llm/bridge_chatgpt.py CHANGED
@@ -175,15 +175,17 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
 
         if chunk:
             try:
-                if len(json.loads(chunk.decode()[6:])['choices'][0]["delta"]) == 0:
+                chunk_decoded = chunk.decode()
+                # the former condition is API2D's
+                if ('data: [DONE]' in chunk_decoded) or (len(json.loads(chunk_decoded[6:])['choices'][0]["delta"]) == 0):
                     # the data stream has ended; gpt_replying_buffer is fully written
                     logging.info(f'[response] {gpt_replying_buffer}')
                     break
                 # handle the main body of the data stream
-                chunkjson = json.loads(chunk.decode()[6:])
+                chunkjson = json.loads(chunk_decoded[6:])
                 status_text = f"finish_reason: {chunkjson['choices'][0]['finish_reason']}"
                 # if an exception is raised here, the text is usually too long; see the output of get_full_error for details
-                gpt_replying_buffer = gpt_replying_buffer + json.loads(chunk.decode()[6:])['choices'][0]["delta"]["content"]
+                gpt_replying_buffer = gpt_replying_buffer + json.loads(chunk_decoded[6:])['choices'][0]["delta"]["content"]
                 history[-1] = gpt_replying_buffer
                 chatbot[-1] = (history[-2], history[-1])
                 yield from update_ui(chatbot=chatbot, history=history, msg=status_text) # refresh the UI
@@ -192,7 +194,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
                 traceback.print_exc()
                 yield from update_ui(chatbot=chatbot, history=history, msg="Irregular JSON parse") # refresh the UI
                 chunk = get_full_error(chunk, stream_response)
-                error_msg = chunk.decode()
+                error_msg = chunk_decoded
                 if "reduce the length" in error_msg:
                     chatbot[-1] = (chatbot[-1][0], "[Local Message] Reduce the length. The input, or the accumulated history, is too long. The cached history has been released; please try again.")
                     history = [] # clear the history
@@ -205,7 +207,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
                 else:
                     from toolbox import regular_txt_to_markdown
                     tb_str = '```\n' + traceback.format_exc() + '```'
-                    chatbot[-1] = (chatbot[-1][0], f"[Local Message] Exception \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk.decode()[4:])}")
+                    chatbot[-1] = (chatbot[-1][0], f"[Local Message] Exception \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk_decoded[4:])}")
                     yield from update_ui(chatbot=chatbot, history=history, msg="JSON exception" + error_msg) # refresh the UI
                     return

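The substantive change is the stream-end test: API2D terminates its stream with an explicit `data: [DONE]` sentinel, while the native OpenAI endpoint signals completion with an empty `delta` object, so both conditions are now accepted. A standalone toy re-implementation of that termination logic (not the repo's actual helper):

```python
import json

def parse_sse_chunk(chunk: bytes):
    """Return the text delta carried by one SSE chunk, or None when the
    stream has ended. Toy re-implementation of the logic in the diff."""
    chunk_decoded = chunk.decode()
    # API2D ends the stream with an explicit sentinel line...
    if 'data: [DONE]' in chunk_decoded:
        return None
    # ...while the native OpenAI endpoint sends a final, empty delta.
    payload = json.loads(chunk_decoded[len('data: '):])  # len('data: ') == 6
    delta = payload['choices'][0]['delta']
    if len(delta) == 0:
        return None
    return delta.get('content', '')

# Both terminators map to None; a content chunk yields its text.
assert parse_sse_chunk(b'data: [DONE]') is None
assert parse_sse_chunk(b'data: {"choices": [{"delta": {}}]}') is None
assert parse_sse_chunk(
    b'data: {"choices": [{"delta": {"content": "hi"}}]}') == 'hi'
```

Hoisting `chunk_decoded = chunk.decode()` also avoids decoding the same chunk three times per iteration.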
version CHANGED
@@ -1,5 +1,5 @@
 {
     "version": 3.1,
     "show_feature": true,
-    "new_feature": "Added support for Tsinghua ChatGLM and GPT-4 <-> Added support for API2D (no proxy needed inside China) <-> Improved architecture: chat with several LLM models at once <-> Multi-API-KEY load balancing (list several keys, comma-separated) <-> Added a clear button for the input area"
+    "new_feature": "Added support for Tsinghua ChatGLM and GPT-4 <-> Improved architecture: chat with several LLM models at once <-> Added support for API2D (inside China, gpt4 supported) <-> Multi-API-KEY load balancing (list several keys, comma-separated) <-> Added a clear button for the input area"
 }
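
The file is named `version` but its contents are JSON. A minimal sketch of how a client might consume it, assuming it sits at the repo root (the banner logic is an assumption; the field names come from the diff above):

```python
import json

# Read the version metadata and, if flagged, surface the release notes.
with open('version', 'r', encoding='utf-8') as f:
    meta = json.load(f)

if meta.get('show_feature'):
    print(f"v{meta['version']}: {meta['new_feature']}")
```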