3v324v23 committed on
Commit
5b9de09
1 Parent(s): 01265c5

+异常处理

Browse files
Files changed (3) hide show
  1. main.py +4 -3
  2. predict.py +6 -4
  3. toolbox.py +21 -2
main.py CHANGED
@@ -12,7 +12,8 @@ PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
12
  if not AUTHENTICATION: AUTHENTICATION = None
13
 
14
  initial_prompt = "Serve me as a writing and programming assistant."
15
- title_html = """<h1 align="center">ChatGPT 学术优化</h1>"""
 
16
 
17
  # 问询记录, python 版本建议3.9+(越新越好)
18
  import logging
@@ -78,12 +79,12 @@ with gr.Blocks(theme=set_theme, analytics_enabled=False, css=advanced_css) as de
78
  with gr.Row():
79
  with gr.Accordion("点击展开“文件上传区”。上传本地文件可供红色函数插件调用。", open=False) as area_file_up:
80
  file_upload = gr.Files(label="任何文件, 但推荐上传压缩文件(zip, tar)", file_count="multiple")
81
- with gr.Accordion("展开SysPrompt & GPT参数 & 交互界面布局", open=False):
82
  system_prompt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt)
83
  top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
84
  temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
85
  checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区")
86
-
87
  # 功能区显示开关与功能区的互动
88
  def fn_area_visibility(a):
89
  ret = {}
 
12
  if not AUTHENTICATION: AUTHENTICATION = None
13
 
14
  initial_prompt = "Serve me as a writing and programming assistant."
15
+ title_html = "<h1 align=\"center\">ChatGPT 学术优化</h1>"
16
+ description = """代码开源和更新[地址🚀](https://github.com/binary-husky/chatgpt_academic),感谢热情的[开发者们❤️](https://github.com/binary-husky/chatgpt_academic/graphs/contributors)"""
17
 
18
  # 问询记录, python 版本建议3.9+(越新越好)
19
  import logging
 
79
  with gr.Row():
80
  with gr.Accordion("点击展开“文件上传区”。上传本地文件可供红色函数插件调用。", open=False) as area_file_up:
81
  file_upload = gr.Files(label="任何文件, 但推荐上传压缩文件(zip, tar)", file_count="multiple")
82
+ with gr.Accordion("展开SysPrompt & 交互界面布局 & Github地址", open=False):
83
  system_prompt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt)
84
  top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
85
  temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
86
  checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区")
87
+ gr.Markdown(description)
88
  # 功能区显示开关与功能区的互动
89
  def fn_area_visibility(a):
90
  ret = {}
predict.py CHANGED
@@ -186,14 +186,16 @@ def predict(inputs, top_p, temperature, chatbot=[], history=[], system_prompt=''
186
  error_msg = chunk.decode()
187
  if "reduce the length" in error_msg:
188
  chatbot[-1] = (chatbot[-1][0], "[Local Message] Input (or history) is too long, please reduce input or clear history by refreshing this page.")
189
- history = []
190
  elif "Incorrect API key" in error_msg:
191
  chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key provided.")
 
 
192
  else:
193
  from toolbox import regular_txt_to_markdown
194
- tb_str = regular_txt_to_markdown(traceback.format_exc())
195
- chatbot[-1] = (chatbot[-1][0], f"[Local Message] Json Error \n\n {tb_str} \n\n {regular_txt_to_markdown(chunk.decode()[4:])}")
196
- yield chatbot, history, "Json解析不合常规" + error_msg
197
  return
198
 
199
  def generate_payload(inputs, top_p, temperature, history, system_prompt, stream):
 
186
  error_msg = chunk.decode()
187
  if "reduce the length" in error_msg:
188
  chatbot[-1] = (chatbot[-1][0], "[Local Message] Input (or history) is too long, please reduce input or clear history by refreshing this page.")
189
+ history = [] # 清除历史
190
  elif "Incorrect API key" in error_msg:
191
  chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key provided.")
192
+ elif "exceeded your current quota" in error_msg:
193
+ chatbot[-1] = (chatbot[-1][0], "[Local Message] You exceeded your current quota. OpenAI以账户额度不足为由,拒绝服务.")
194
  else:
195
  from toolbox import regular_txt_to_markdown
196
+ tb_str = '```\n' + traceback.format_exc() + '```'
197
+ chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk.decode()[4:])}")
198
+ yield chatbot, history, "Json异常" + error_msg
199
  return
200
 
201
  def generate_payload(inputs, top_p, temperature, history, system_prompt, stream):
toolbox.py CHANGED
@@ -115,8 +115,9 @@ def CatchException(f):
115
  from check_proxy import check_proxy
116
  from toolbox import get_conf
117
  proxies, = get_conf('proxies')
118
- tb_str = regular_txt_to_markdown(traceback.format_exc())
119
- chatbot[-1] = (chatbot[-1][0], f"[Local Message] 实验性函数调用出错: \n\n {tb_str} \n\n 当前代理可用性: \n\n {check_proxy(proxies)}")
 
120
  yield chatbot, history, f'异常 {e}'
121
  return decorated
122
 
@@ -164,6 +165,23 @@ def markdown_convertion(txt):
164
  else:
165
  return pre + markdown.markdown(txt,extensions=['fenced_code','tables']) + suf
166
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
167
 
168
  def format_io(self, y):
169
  """
@@ -172,6 +190,7 @@ def format_io(self, y):
172
  if y is None or y == []: return []
173
  i_ask, gpt_reply = y[-1]
174
  i_ask = text_divide_paragraph(i_ask) # 输入部分太自由,预处理一波
 
175
  y[-1] = (
176
  None if i_ask is None else markdown.markdown(i_ask, extensions=['fenced_code','tables']),
177
  None if gpt_reply is None else markdown_convertion(gpt_reply)
 
115
  from check_proxy import check_proxy
116
  from toolbox import get_conf
117
  proxies, = get_conf('proxies')
118
+ tb_str = '```\n' + traceback.format_exc() + '```'
119
+ if len(chatbot) == 0: chatbot.append(["插件调度异常","异常原因"])
120
+ chatbot[-1] = (chatbot[-1][0], f"[Local Message] 实验性函数调用出错: \n\n{tb_str} \n\n当前代理可用性: \n\n{check_proxy(proxies)}")
121
  yield chatbot, history, f'异常 {e}'
122
  return decorated
123
 
 
165
  else:
166
  return pre + markdown.markdown(txt,extensions=['fenced_code','tables']) + suf
167
 
168
+ def close_up_code_segment_during_stream(gpt_reply):
169
+ """
170
+ 在gpt输出代码的中途(输出了前面的```,但还没输出完后面的```),补上后面的```
171
+ """
172
+ if '```' not in gpt_reply: return gpt_reply
173
+ if gpt_reply.endswith('```'): return gpt_reply
174
+
175
+ # 排除了以上两个情况,我们
176
+ segments = gpt_reply.split('```')
177
+ n_mark = len(segments) - 1
178
+ if n_mark % 2 == 1:
179
+ print('输出代码片段中!')
180
+ return gpt_reply+'\n```'
181
+ else:
182
+ return gpt_reply
183
+
184
+
185
 
186
  def format_io(self, y):
187
  """
 
190
  if y is None or y == []: return []
191
  i_ask, gpt_reply = y[-1]
192
  i_ask = text_divide_paragraph(i_ask) # 输入部分太自由,预处理一波
193
+ gpt_reply = close_up_code_segment_during_stream(gpt_reply) # 当代码输出半截的时候,试着补上后个```
194
  y[-1] = (
195
  None if i_ask is None else markdown.markdown(i_ask, extensions=['fenced_code','tables']),
196
  None if gpt_reply is None else markdown_convertion(gpt_reply)