jclian91 committed on
Commit
bf18fd7
·
verified ·
1 Parent(s): 42edbdd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +73 -56
app.py CHANGED
@@ -1,63 +1,80 @@
 
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
 
 
 
 
 
 
 
3
 
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
 
 
 
8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
 
10
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion for *message*, yielding the growing reply.

    Args:
        message: current user input.
        history: prior (user, assistant) turn pairs; empty strings are skipped.
        system_message: system prompt placed first in the message list.
        max_tokens, temperature, top_p: sampling parameters forwarded to
            ``client.chat_completion``.

    Yields:
        The accumulated assistant reply after each streamed chunk.
    """
    messages = [{"role": "system", "content": system_message}]

    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    response = ""

    # Bug fix: the original iterated `for message in client.chat_completion(...)`,
    # shadowing the `message` parameter; use a distinct name for stream chunks.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # Bug fix: delta.content can be None on some stream chunks; the
        # original did `response += token` unconditionally → TypeError.
        if token:
            response += token
        yield response
41
-
42
- """
43
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
44
- """
45
- demo = gr.ChatInterface(
46
- respond,
47
- additional_inputs=[
48
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
49
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
50
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
51
- gr.Slider(
52
- minimum=0.1,
53
- maximum=1.0,
54
- value=0.95,
55
- step=0.05,
56
- label="Top-p (nucleus sampling)",
57
- ),
58
- ],
59
  )
60
 
61
-
62
- if __name__ == "__main__":
63
- demo.launch()
 
1
+ # -*- coding: utf-8 -*-
2
  import gradio as gr
3
+ import os
4
+ import re
5
+ import json
6
+ import subprocess
7
+ from openai import OpenAI
8
+ from retry import retry
9
+ from random import choices
10
+ from datetime import datetime
11
 
12
+ os.environ["OPENAI_BASE_URL"] = "http://117.50.185.39:50080/v1"
13
+ os.environ["OPENAI_API_KEY"] = "0"
14
+ client = OpenAI()
15
+ execution_desc = ["运行以上代码,输出会是: ",
16
+ "现在将上面的代码复制到Python环境中运行,运行结果为:",
17
+ "执行上述Python代码,运行结果将是:",
18
+ "上面的Python代码执行结果为:",
19
+ "运行上述代码,我们可以得到题目要求的答案。输出结果将是:"]
20
 
21
@retry(exceptions=Exception, tries=3, delay=2)
def question_answer(query):
    """Solve a math problem with the LLM, executing any Python it emits.

    Flow: stream a first completion for *query*; if the reply embeds a
    fenced python code block, write that code to a temp file, run it with
    python3 (10 s timeout), feed the captured stdout back into the
    conversation, and stream a follow-up completion. On success, append one
    JSON line {query, answer, time} to collect.json.

    Args:
        query: the math problem text.

    Returns:
        The complete reply text (solution + code + execution output + final
        conclusion).

    Raises:
        RuntimeError: the generated code exited non-zero.
        ValueError: no extractable code block, or empty execution output.
        (Any exception is retried up to 3 times by @retry.)
    """
    messages = [{"role": "system", "content": "你是一个数学解题大师,请解决以下数学题,务必详细说明解题思路,并在必要时提供Python代码来支持你的推理。答案中的数值应使用\\boxed{}包围,最后的答案以“因此”开头并直接给出结论,不要添加任何多余的内容。"}]
    messages.append({"role": "user", "content": f"题目:{query}"})
    result = client.chat.completions.create(messages=messages,
                                            model="gpt-3.5-turbo",
                                            temperature=0.2,
                                            stream=True)
    reply_message = ""
    for chunk in result:
        if hasattr(chunk, "choices") and chunk.choices[0].delta.content:
            reply_message += chunk.choices[0].delta.content

    # Find python code in the reply, execute it, and let the model continue
    # from the real output.
    if '```python' in reply_message:
        # Drop anything after the final fence and force the opening fence onto
        # its own line so the extraction regex below matches reliably.
        reply_message = '```'.join(reply_message.split('```')[:-1]).replace('```python', '\n```python') + '```'
        messages.append({"role": "assistant", "content": reply_message})
        code_matches = re.findall(r'```python\n(.*?)\n```', reply_message, re.S)
        if not code_matches:
            # Bug fix: the original indexed [0] unconditionally and crashed
            # with a bare IndexError on a malformed fence.
            raise ValueError("未能从回复中提取Python代码!")
        python_code_string = code_matches[0]
        python_file_path = 'temp.py'
        # Explicit encoding so Chinese text in the generated code cannot break
        # the write on non-UTF-8 locales.
        with open(python_file_path, 'w', encoding='utf-8') as f:
            f.write(python_code_string)
        try:
            python_code_run = subprocess.run(['python3', python_file_path],
                                             stdout=subprocess.PIPE, timeout=10)
            if python_code_run.returncode:
                print("生成的Python代码无法运行!")
                raise RuntimeError("生成的Python代码无法运行!")
            python_code_execution = python_code_run.stdout.decode('utf-8')
        finally:
            # Bug fix: the original removed temp.py only after a successful
            # run, leaking the file on failure/timeout; always clean up.
            os.remove(python_file_path)
        if not python_code_execution.strip():
            # Bug fix: the original tested `"``````" in python_code_execution`,
            # which can never match raw stdout; per the error message the
            # intent is to reject empty execution output.
            raise ValueError("执行Python代码结果为空!")
        code_reply_str = choices(execution_desc, k=1)[0]
        code_reply = f"\n{code_reply_str}```{python_code_execution.strip()}```\n"
        reply_message += code_reply
        messages.append({"role": "user", "content": code_reply})
        result = client.chat.completions.create(messages=messages,
                                                model="gpt-3.5-turbo",
                                                temperature=0.2,
                                                stream=True)

        for chunk in result:
            if hasattr(chunk, "choices") and chunk.choices[0].delta.content:
                reply_message += chunk.choices[0].delta.content
    print(reply_message)
    # Bug fix: the original opened collect.json at function entry and leaked
    # the handle whenever any exception (and @retry) fired before g.close();
    # open it only on success, under a context manager.
    with open("collect.json", "a", encoding="utf-8") as g:
        g.write(json.dumps({"query": query,
                            "answer": reply_message,
                            "time": datetime.now().strftime('%Y-%m-%d %H:%M:%S %f')
                            }, ensure_ascii=False) + "\n")
    return reply_message
72
 
 
 
 
 
 
 
 
 
 
73
 
74
# Single-turn UI: one multi-line textbox in, a Markdown-rendered answer out.
question_box = gr.Textbox(lines=3, placeholder="题目", label="数学题目")
answer_view = gr.Markdown()
demo = gr.Interface(
    fn=question_answer,
    inputs=question_box,
    outputs=answer_view,
)
79
 
80
+ demo.launch(server_name="0.0.0.0", server_port=8001, share=True)