Upload app.py
app.py
CHANGED

@@ -3,17 +3,17 @@ import os
import json
import requests

# streaming endpoint
API_URL = "https://ai.fakeopen.com/v1/chat/completions"  # users need to provide their own OPENAI_API_KEY

# inference function
def predict(openai_gpt4_key, system_msg, inputs, top_p, temperature, chat_counter, chatbot=[], history=[]):

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {openai_gpt4_key}"  # users will provide their own OPENAI_API_KEY
    }
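    # Note: this assumes the fakeopen endpoint mirrors the OpenAI Chat Completions API,
    # so authentication uses the standard "Authorization: Bearer <key>" header built above.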
    print(f"系统消息是 ^^ {system_msg}")
    if system_msg.strip() == '':
        initial_message = [{"role": "user", "content": f"{inputs}"},]
        multi_turn_message = []

@@ -33,9 +33,9 @@ def predict(openai_gpt4_key, system_msg, inputs, top_p, temperature, chat_counte
            "presence_penalty":0,
            "frequency_penalty":0,
        }
        print(f"聊天计数器 - {chat_counter}")
    else:  # if chat_counter is not equal to 0:
        messages=multi_turn_message  # of the type - [{"role": "system", "content": system_msg},]
        for data in chatbot:
            user = {}
            user["role"] = "user"

@@ -49,12 +49,12 @@ def predict(openai_gpt4_key, system_msg, inputs, top_p, temperature, chat_counte
        temp["role"] = "user"
        temp["content"] = inputs
        messages.append(temp)
        # messages
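        # The request body assembled below follows the OpenAI Chat Completions schema, roughly:
        #   {"model": "gpt-4", "messages": [{"role": "user", "content": "..."}],
        #    "temperature": 1.0, "top_p": 1.0, "n": 1, "stream": true, ...}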
        payload = {
            "model": "gpt-4",
            "messages": messages,  # of the type [{"role": "user", "content": f"{inputs}"}],
            "temperature" : temperature,  # 1.0,
            "top_p": top_p,  # 1.0,
            "n" : 1,
            "stream": True,
            "presence_penalty":0,

@@ -63,91 +63,97 @@ def predict(openai_gpt4_key, system_msg, inputs, top_p, temperature, chat_counte
    chat_counter+=1

    history.append(inputs)
    print(f"日志记录:负载为 - {payload}")
    # make a POST request to the API endpoint using requests.post, passing stream=True
    response = requests.post(API_URL, headers=headers, json=payload, stream=True)
    print(f"日志记录:响应代码 - {response}")
    token_counter = 0
    partial_words = ""

    counter=0
    for chunk in response.iter_lines():
        # skip the first chunk
        if counter == 0:
            counter+=1
            continue
        # check whether each line is non-empty
        if chunk.decode() :
            chunk = chunk.decode()
            # decode each line, since the response data comes back as bytes
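            # Each streamed line is a server-sent event of the form `data: {json}`; the
            # `chunk[6:]` slice strips the 6-character "data: " prefix before parsing, and the
            # length check skips empty keep-alives and the final `data: [DONE]` sentinel.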
            if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']:
                partial_words = partial_words + json.loads(chunk[6:])['choices'][0]["delta"]["content"]
                if token_counter == 0:
                    history.append(" " + partial_words)
                else:
                    history[-1] = partial_words
                chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2) ]  # convert the flat history list into (user, bot) tuples
                token_counter+=1
                yield chat, history, chat_counter, response  # resembles {chatbot: chat, state: history}
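                # Because predict is a generator, Gradio re-renders the Chatbot, State and
                # counter outputs on every yield, which is what produces the streaming effect.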

# reset the textbox
def reset_textbox():
    return gr.update(value='')

# set a component to visible=False
def set_visible_false():
    return gr.update(visible=False)

# set a component to visible=True
def set_visible_true():
    return gr.update(visible=True)
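
# These helpers return gr.update(...) objects; they are wired to the submit/click events
# further down to clear the input box and to toggle visibility of the system-message controls
# once a conversation has started.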

title = """<h1 align="center">🔥 使用 Chat-Completions API 和 🚀 Gradio-Streaming 的 GPT4</h1>"""
# display message for the themes feature
theme_addon_msg = """<center>🌟 这个演示还向你介绍了 Gradio 主题。在 Gradio 网站上使用我们的 <a href="https://gradio.app/theming-guide/" target="_blank">主题指南🎨</a>,了解更多吧!你可以从头开始开发,修改现有的 Gradio 主题,并通过简单地上传到 huggingface-hub 来与社区分享你的主题。<code>theme.push_to_hub()</code>。</center>
"""

# use the info field to add additional details about the GPT4 system message
system_msg_info = """对话可以从系统消息开始,以轻松地指导助手的行为。
系统消息有助于设置 AI 助手的行为。例如,可以用 '你是一个有帮助的助手。' 来指示助手。"""

# modify an existing Gradio theme
theme = gr.themes.Soft(primary_hue="zinc", secondary_hue="green", neutral_hue="green",
                       text_size=gr.themes.sizes.text_lg)
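# Only the color hues and the base text size are overridden here; every other attribute
# keeps its gr.themes.Soft() default.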

with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;} #chatbot {height: 520px; overflow: auto;}""",
               theme=theme) as demo:
    gr.HTML(title)
    gr.HTML("""<h3 align="center">🔥 这个 Huggingface Gradio 演示为你提供了使用 GPT4 API 的访问权限,还支持系统消息。请注意,你需要提供自己的 OPENAI API 密钥以访问 GPT4 🙌</h1>""")
    gr.HTML(theme_addon_msg)
    gr.HTML('''<center><a href="https://huggingface.co/spaces/ysharma/ChatGPT4?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>复制这个 Space 并使用你的 OpenAI API 密钥安全运行</center>''')

    with gr.Column(elem_id = "col_container"):
        # users need to provide their own GPT4 API key; it is no longer supplied by Huggingface
        with gr.Row():
            openai_gpt4_key = gr.Textbox(
                label="OpenAI GPT4 密钥",
                value="pk-this-is-a-real-free-pool-token-for-everyone",
                type="password",
                placeholder="sk-...",
                info="您可以提供自己的 GPT4 密钥,以使此程序正常运行",
            )
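            # type="password" masks the key in the UI; the pre-filled value is a default token
            # shipped with this Space, and users are expected to replace it with their own
            # OpenAI key (see the note above this Row).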
        with gr.Accordion(label="系统消息:", open=False):
            system_msg = gr.Textbox(label="指示 AI 助手设置其行为", info=system_msg_info, value="", placeholder="在这里输入..")
            accordion_msg = gr.HTML(value="🚧 要设置系统消息,你必须刷新程序", visible=False)

        chatbot = gr.Chatbot(label='GPT4', elem_id="chatbot")
        inputs = gr.Textbox(placeholder="嗨!", label="输入文本并按 Enter 键")
        state = gr.State([])
        with gr.Row():
            with gr.Column(scale=7):
                b1 = gr.Button().style(full_width=True)
            with gr.Column(scale=3):
                server_status_code = gr.Textbox(label="来自 OpenAI 服务器的状态代码", )

        # top_p, temperature
        with gr.Accordion("参数", open=False):
            top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (核心采样)",)
            temperature = gr.Slider(minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="采样温度",)
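            # Top-p (nucleus sampling) keeps only the smallest token set whose cumulative
            # probability reaches p, while temperature rescales the logits (higher = more random).
            # The OpenAI API documents temperature only up to 2.0, so the 5.0 maximum here
            # exceeds the documented range.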
        chat_counter = gr.Number(value=0, visible=False, precision=0)

    # event handling
    inputs.submit(predict, [openai_gpt4_key, system_msg, inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],)  # openai_api_key
    b1.click(predict, [openai_gpt4_key, system_msg, inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],)  # openai_api_key
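    # The input list is passed positionally to predict(openai_gpt4_key, system_msg, inputs,
    # top_p, temperature, chat_counter, chatbot, history), and the four yielded values map
    # back onto [chatbot, state, chat_counter, server_status_code].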

    inputs.submit(set_visible_false, [], [system_msg])
    b1.click(set_visible_false, [], [system_msg])

@@ -157,35 +163,38 @@ with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;}
    b1.click(reset_textbox, [], [inputs])
    inputs.submit(reset_textbox, [], [inputs])

    # examples
    with gr.Accordion(label="系统消息示例:", open=False):
        gr.Examples(
            examples = [
                ["""你是一个 AI 编程助手。

- 仔细并准确地遵循用户的要求。
- 首先逐步思考 - 详细描述你在伪代码中要构建的计划。
- 然后将代码以单个代码块的形式输出。
- 最小化其他的散文。"""],
                ["你是一位幽默的助手,名叫 ComedianGPT。你的回答都带有笑话和机智的回复。"],
                ["你是 ChefGPT,一位乐于助人的助手,用烹饪专业知识和一点点幽默来回答问题。"],
                ["你是 FitnessGuruGPT,一位健身专家,以轻松的方式分享锻炼技巧和动力。"],
                ["你是 SciFiGPT,一位科幻话题的 AI 助手,以知识和机智的方式讨论科幻话题。"],
                ["你是 PhilosopherGPT,一位深思熟虑的助手,以哲学的见解和一点点幽默来回应问题。"],
                ["你是 EcoWarriorGPT,一位乐于助人的助手,以轻松的方式分享环保建议。"],
                ["你是 MusicMaestroGPT,一位知识渊博的 AI,以事实和俏皮的玩笑讨论音乐和其历史。"],
                ["你是 SportsFanGPT,一位兴致勃勃的助手,谈论体育并分享有趣的轶事。"],
                ["你是 TechWhizGPT,一位精通科技的 AI,可以帮助用户解决问题并回答与设备和软件相关的问题。"],
                ["你是 FashionistaGPT,一位时尚专家 AI,以幽默的方式分享时尚建议和潮流趋势。"],
                ["你是 ArtConnoisseurGPT,一位 AI 助手,以知识和俏皮的评论讨论艺术及其历史。"],
                ["你是一位提供详细准确信息的乐于助人的助手。"],
                ["你是一位讲莎士比亚语言的助手。"],
                ["你是一位友好的助手,使用非正式的语言和幽默。"],
                ["你是一位金融顾问,为投资和预算提供专业建议。"],
                ["你是一位健康和健身专家,提供营养和锻炼建议。"],
                ["你是一位旅行顾问,为目的地、住宿和景点提供建议。"],
                ["你是一位电影评论家,分享有关电影和其主题的深刻见解。"],
                ["你是一位热爱历史的助手,喜欢讨论历史事件和人物。"],
                ["你是一位精通科技的助手,可以帮助用户解决有关设备和软件的问题。"],
                ["你是一位能够在任何给定主题上创作富有创意和感染力的诗歌的 AI 诗人。"],
            ],
            inputs = system_msg,)

demo.queue(max_size=99, concurrency_count=20).launch(debug=True)
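
# Note: queue() is required for the generator-based predict function to stream partial results;
# concurrency_count=20 allows up to 20 requests to be processed in parallel and max_size=99
# caps how many requests may wait in the queue.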