More model switching
- config.py +2 -4
- main.py +3 -3
- request_llm/bridge_all.py +41 -51
- request_llm/bridge_chatgpt.py +13 -11
- toolbox.py +88 -53
config.py
CHANGED
@@ -46,14 +46,12 @@ WEB_PORT = -1
 MAX_RETRY = 2
 
 # OpenAI model selection (gpt-4 is currently only open to users whose access application has been approved)
-LLM_MODEL = "gpt-3.5-turbo" # alternative: "chatglm"
+LLM_MODEL = "gpt-3.5-turbo" # alternative: "chatglm"
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "chatglm", "gpt-4", "api2d-gpt-4", "api2d-gpt-3.5-turbo"]
 
 # Execution device (CPU/GPU) for local LLM models such as ChatGLM
 LOCAL_MODEL_DEVICE = "cpu" # alternative: "cuda"
 
-# OpenAI API_URL
-API_URL = "https://api.openai.com/v1/chat/completions"
-
 # Number of parallel gradio threads (no need to modify)
 CONCURRENT_COUNT = 100
 
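The new AVAIL_LLM_MODELS whitelist is shared by the UI dropdown in main.py and the dispatcher in request_llm/bridge_all.py. Below is a minimal sketch of how such a whitelist is typically consumed; validate_model is a hypothetical helper for illustration, not part of this commit:

    # Hypothetical illustration; only AVAIL_LLM_MODELS and LLM_MODEL mirror config.py.
    AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "chatglm", "gpt-4", "api2d-gpt-4", "api2d-gpt-3.5-turbo"]
    LLM_MODEL = "gpt-3.5-turbo"  # default selection shown in the dropdown

    def validate_model(name: str) -> str:
        # Reject anything the UI does not offer, falling back to the default.
        return name if name in AVAIL_LLM_MODELS else LLM_MODEL

    assert validate_model("api2d-gpt-4") == "api2d-gpt-4"
    assert validate_model("text-davinci-003") == "gpt-3.5-turbo"  # not whitelisted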
main.py
CHANGED
@@ -5,8 +5,8 @@ def main():
 from request_llm.bridge_all import predict
 from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith
 # It is recommended that you copy your secrets, such as the API key and proxy URL, into a config_private.py, so they are not accidentally pushed to github for others to see
-proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \
-    get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY')
+proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY, AVAIL_LLM_MODELS = \
+    get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY', 'AVAIL_LLM_MODELS')
 
 # If WEB_PORT is -1, pick a random web port
 PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
@@ -101,7 +101,7 @@ def main():
         temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
         max_length_sl = gr.Slider(minimum=256, maximum=4096, value=512, step=1, interactive=True, label="MaxLength",)
         checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "底部输入区", "输入清除键"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区")
-        md_dropdown = gr.Dropdown(
+        md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="").style(container=False)
 
     gr.Markdown(description)
     with gr.Accordion("备选输入区", open=True, visible=False) as area_input_secondary:
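The hunk above only shows the dropdown being filled from AVAIL_LLM_MODELS; how its selection reaches llm_kwargs is not part of this hunk. A hedged sketch of the usual gradio 3.x wiring, where on_md_dropdown_changed is an illustrative handler and not code from this commit:

    import gradio as gr

    AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "chatglm", "gpt-4"]
    LLM_MODEL = "gpt-3.5-turbo"

    with gr.Blocks() as demo:
        md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="").style(container=False)
        chosen_model = gr.State(LLM_MODEL)

        def on_md_dropdown_changed(model):
            # Whatever is selected here ends up as llm_kwargs['llm_model'] downstream.
            return model

        md_dropdown.change(on_md_dropdown_changed, inputs=[md_dropdown], outputs=[chosen_model])

    # demo.launch()  # left out; this is only a wiring sketch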
request_llm/bridge_all.py
CHANGED
@@ -21,38 +21,42 @@ from .bridge_chatglm import predict as chatglm_ui
 from .bridge_tgui import predict_no_ui_long_connection as tgui_noui
 from .bridge_tgui import predict as tgui_ui
 
-methods = {
-    "openai-no-ui": chatgpt_noui,
-    "openai-ui": chatgpt_ui,
-
-    "chatglm-no-ui": chatglm_noui,
-    "chatglm-ui": chatglm_ui,
-
-    "tgui-no-ui": tgui_noui,
-    "tgui-ui": tgui_ui,
-}
+colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']
 
 model_info = {
     # openai
     "gpt-3.5-turbo": {
+        "fn_with_ui": chatgpt_ui,
+        "fn_without_ui": chatgpt_noui,
+        "endpoint": "https://api.openai.com/v1/chat/completions",
         "max_token": 4096,
         "tokenizer": tiktoken.encoding_for_model("gpt-3.5-turbo"),
         "token_cnt": lambda txt: len(tiktoken.encoding_for_model("gpt-3.5-turbo").encode(txt, disallowed_special=())),
     },
 
     "gpt-4": {
+        "fn_with_ui": chatgpt_ui,
+        "fn_without_ui": chatgpt_noui,
+        "endpoint": "https://api.openai.com/v1/chat/completions",
         "max_token": 4096,
         "tokenizer": tiktoken.encoding_for_model("gpt-4"),
         "token_cnt": lambda txt: len(tiktoken.encoding_for_model("gpt-4").encode(txt, disallowed_special=())),
     },
+
     # api_2d
-    "gpt-3.5-turbo
+    "api2d-gpt-3.5-turbo": {
+        "fn_with_ui": chatgpt_ui,
+        "fn_without_ui": chatgpt_noui,
+        "endpoint": "https://openai.api2d.net/v1/chat/completions",
         "max_token": 4096,
         "tokenizer": tiktoken.encoding_for_model("gpt-3.5-turbo"),
         "token_cnt": lambda txt: len(tiktoken.encoding_for_model("gpt-3.5-turbo").encode(txt, disallowed_special=())),
     },
 
-    "gpt-4
+    "api2d-gpt-4": {
+        "fn_with_ui": chatgpt_ui,
+        "fn_without_ui": chatgpt_noui,
+        "endpoint": "https://openai.api2d.net/v1/chat/completions",
         "max_token": 4096,
         "tokenizer": tiktoken.encoding_for_model("gpt-4"),
         "token_cnt": lambda txt: len(tiktoken.encoding_for_model("gpt-4").encode(txt, disallowed_special=())),
@@ -60,18 +64,20 @@ model_info = {
 
     # chatglm
     "chatglm": {
+        "fn_with_ui": chatglm_ui,
+        "fn_without_ui": chatglm_noui,
+        "endpoint": None,
        "max_token": 1024,
         "tokenizer": tiktoken.encoding_for_model("gpt-3.5-turbo"),
         "token_cnt": lambda txt: len(tiktoken.encoding_for_model("gpt-3.5-turbo").encode(txt, disallowed_special=())),
     },
 
-
 }
 
 
 def LLM_CATCH_EXCEPTION(f):
     """
-
+    Decorator function that displays errors
     """
     def decorated(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience):
         try:
@@ -85,21 +91,20 @@ def LLM_CATCH_EXCEPTION(f):
         return tb_str
     return decorated
 
-colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']
 
 def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False):
     """
-
-
-
-
-
-
-
-
-
-
-
+    Send to the LLM and wait for the reply, completing in one go without showing intermediate progress. Internally, however, the stream method is used to keep the connection from being cut off midway.
+    inputs:
+        the input of this query
+    sys_prompt:
+        the silent system prompt
+    llm_kwargs:
+        internal tuning parameters of the LLM
+    history:
+        the list of previous dialogue turns
+    observe_window = None:
+        used to pass the already-generated output across threads; most of the time it is only for a fancy visual effect and can be left empty. observe_window[0]: observation window. observe_window[1]: watchdog
     """
     import threading, time, copy
 
@@ -109,12 +114,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False):
         assert not model.startswith("tgui"), "TGUI不支持函数插件的实现"
 
         # If only one LLM is queried:
-        if model.startswith('gpt'):
-            method = methods['openai-no-ui']
-        elif model == 'chatglm':
-            method = methods['chatglm-no-ui']
-        elif model.startswith('tgui'):
-            method = methods['tgui-no-ui']
+        method = model_info[model]["fn_without_ui"]
         return method(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
     else:
         # If several LLMs are queried at the same time:
@@ -129,12 +129,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False):
         futures = []
         for i in range(n_model):
             model = models[i]
-            if model.startswith('gpt'):
-                method = methods['openai-no-ui']
-            elif model == 'chatglm':
-                method = methods['chatglm-no-ui']
-            elif model.startswith('tgui'):
-                method = methods['tgui-no-ui']
+            method = model_info[model]["fn_without_ui"]
             llm_kwargs_feedin = copy.deepcopy(llm_kwargs)
             llm_kwargs_feedin['llm_model'] = model
             future = executor.submit(LLM_CATCH_EXCEPTION(method), inputs, llm_kwargs_feedin, history, sys_prompt, window_mutex[i], console_slience)
@@ -176,20 +171,15 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False):
 
 def predict(inputs, llm_kwargs, *args, **kwargs):
     """
-
-
-
-
-
-
-
+    Send to the LLM, receiving the output as a stream.
+    Used for the basic dialogue feature.
+    inputs is the input of this query
+    top_p, temperature are internal tuning parameters of the LLM
+    history is the list of previous dialogue turns (note that, for both inputs and history, content that is too long will trigger a token-overflow error)
+    chatbot is the dialogue list shown in the WebUI; modify it and then yield it out to update the dialogue interface directly
+    additional_fn indicates which button was clicked; see functional.py for the buttons
    """
-    if llm_kwargs['llm_model'].startswith('gpt'):
-        method = methods['openai-ui']
-    elif llm_kwargs['llm_model'] == 'chatglm':
-        method = methods['chatglm-ui']
-    elif llm_kwargs['llm_model'].startswith('tgui'):
-        method = methods['tgui-ui']
 
+    method = model_info[llm_kwargs['llm_model']]["fn_with_ui"]
     yield from method(inputs, llm_kwargs, *args, **kwargs)
 
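The point of this refactor is that dispatch no longer walks if/elif chains keyed on model-name prefixes: everything a model needs (UI handler, non-UI handler, endpoint, token budget, tokenizer) lives in one model_info entry, so adding a model means adding one dict entry. A self-contained sketch of the pattern; the stub handlers stand in for chatgpt_ui/chatgpt_noui and friends, and nothing below is code from the commit:

    def stub_ui(inputs, *args, **kwargs):
        # Stands in for a streaming handler such as chatgpt_ui.
        yield f"[ui] {inputs}"

    def stub_noui(inputs, *args, **kwargs):
        # Stands in for a blocking handler such as chatgpt_noui.
        return f"[noui] {inputs}"

    model_info = {
        "gpt-3.5-turbo": {"fn_with_ui": stub_ui, "fn_without_ui": stub_noui,
                          "endpoint": "https://api.openai.com/v1/chat/completions"},
        "chatglm":       {"fn_with_ui": stub_ui, "fn_without_ui": stub_noui,
                          "endpoint": None},  # local model, no HTTP endpoint
    }

    # Dispatch becomes a dict lookup instead of an if/elif chain:
    method = model_info["chatglm"]["fn_without_ui"]
    print(method("hello"))  # -> [noui] hello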
request_llm/bridge_chatgpt.py
CHANGED
@@ -21,9 +21,9 @@ import importlib
 
 # Put your secrets, such as the API key and proxy URL, in config_private.py
 # When reading the config, first check whether a private config_private file exists (not tracked by git); if it does, it overrides the original config file
-from toolbox import get_conf, update_ui
-proxies,
-    get_conf('proxies', '
+from toolbox import get_conf, update_ui, is_any_api_key, select_api_key
+proxies, API_KEY, TIMEOUT_SECONDS, MAX_RETRY = \
+    get_conf('proxies', 'API_KEY', 'TIMEOUT_SECONDS', 'MAX_RETRY')
 
 timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
                   '网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。'
@@ -60,7 +60,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
     while True:
         try:
             # make a POST request to the API endpoint, stream=False
-            response = requests.post(
+            response = requests.post(llm_kwargs['endpoint'], headers=headers, proxies=proxies,
                                      json=payload, stream=True, timeout=TIMEOUT_SECONDS); break
         except requests.exceptions.ReadTimeout as e:
             retry += 1
@@ -113,14 +113,14 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
     chatbot is the dialogue list shown in the WebUI; modify it and then yield it out to update the dialogue interface directly
     additional_fn indicates which button was clicked; see functional.py for the buttons
     """
-    if
+    if is_any_api_key(inputs):
         chatbot._cookies['api_key'] = inputs
         chatbot.append(("输入已识别为openai的api_key", "api_key已导入"))
         yield from update_ui(chatbot=chatbot, history=history, msg="api_key已导入") # refresh the UI
         return
-    elif
+    elif not is_any_api_key(chatbot._cookies['api_key']):
         chatbot.append((inputs, "缺少api_key。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案:在config.py中配置。"))
-        yield from update_ui(chatbot=chatbot, history=history, msg="api_key
+        yield from update_ui(chatbot=chatbot, history=history, msg="缺少api_key") # refresh the UI
         return
 
     if additional_fn is not None:
@@ -143,7 +143,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
     while True:
         try:
             # make a POST request to the API endpoint, stream=True
-            response = requests.post(
+            response = requests.post(llm_kwargs['endpoint'], headers=headers, proxies=proxies,
                                      json=payload, stream=True, timeout=TIMEOUT_SECONDS);break
         except:
             retry += 1
@@ -202,12 +202,14 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
     """
     Integrate all the information, select the LLM model, and generate the http request in preparation for sending it
     """
-    if
+    if not is_any_api_key(llm_kwargs['api_key']):
         raise AssertionError("你提供了错误的API_KEY。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案:在config.py中配置。")
 
+    api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
+
     headers = {
         "Content-Type": "application/json",
-        "Authorization": f"Bearer {
+        "Authorization": f"Bearer {api_key}"
     }
 
    conversation_cnt = len(history) // 2
@@ -235,7 +237,7 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
     messages.append(what_i_ask_now)
 
     payload = {
-        "model": llm_kwargs['llm_model'],
+        "model": llm_kwargs['llm_model'].strip('api2d-'),
         "messages": messages,
         "temperature": llm_kwargs['temperature'], # 1.0,
         "top_p": llm_kwargs['top_p'], # 1.0,
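One detail of the payload change above is easy to misread: str.strip takes a set of characters, not a prefix, so .strip('api2d-') removes any of the characters a, p, i, 2, d, - from both ends of the string. For the model names this commit whitelists, that happens to coincide with removing the "api2d-" prefix:

    # All three hold for the model names used in this commit:
    assert "api2d-gpt-4".strip('api2d-') == "gpt-4"
    assert "api2d-gpt-3.5-turbo".strip('api2d-') == "gpt-3.5-turbo"
    assert "gpt-3.5-turbo".strip('api2d-') == "gpt-3.5-turbo"  # no leading/trailing match, unchanged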
toolbox.py
CHANGED
@@ -1,13 +1,10 @@
 import markdown
-import mdtex2html
-import threading
 import importlib
 import traceback
 import inspect
 import re
 from latex2mathml.converter import convert as tex2mathml
 from functools import wraps, lru_cache
-
 ############################### Plugin input/output docking area #######################################
 class ChatBotWithCookies(list):
     def __init__(self, cookie):
@@ -25,9 +22,10 @@ class ChatBotWithCookies(list):
 
 def ArgsGeneralWrapper(f):
     """
-
+    Decorator function that reorganizes the input parameters, changing their order and structure.
     """
     def decorated(cookies, max_length, llm_model, txt, txt2, top_p, temperature, chatbot, history, system_prompt, *args):
+        from request_llm.bridge_all import model_info
         txt_passon = txt
         if txt == "" and txt2 != "": txt_passon = txt2
         # Bring in a chatbot that carries cookies
@@ -38,6 +36,7 @@ def ArgsGeneralWrapper(f):
         llm_kwargs = {
             'api_key': cookies['api_key'],
             'llm_model': llm_model,
+            'endpoint': model_info[llm_model]['endpoint'],
             'top_p':top_p,
             'max_length': max_length,
             'temperature':temperature,
@@ -56,8 +55,47 @@ def update_ui(chatbot, history, msg='正常', **kwargs): # refresh the UI
     """
     assert isinstance(chatbot, ChatBotWithCookies), "在传递chatbot的过程中不要将其丢弃。必要时,可用clear将其清空,然后用for+append循环重新赋值。"
     yield chatbot.get_cookies(), chatbot, history, msg
-
-
+
+def CatchException(f):
+    """
+    Decorator function that catches exceptions in function f, wraps them into a generator to return, and displays them in the chat.
+    """
+    @wraps(f)
+    def decorated(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
+        try:
+            yield from f(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT)
+        except Exception as e:
+            from check_proxy import check_proxy
+            from toolbox import get_conf
+            proxies, = get_conf('proxies')
+            tb_str = '```\n' + traceback.format_exc() + '```'
+            if chatbot is None or len(chatbot) == 0:
+                chatbot = [["插件调度异常", "异常原因"]]
+            chatbot[-1] = (chatbot[-1][0],
+                           f"[Local Message] 实验性函数调用出错: \n\n{tb_str} \n\n当前代理可用性: \n\n{check_proxy(proxies)}")
+            yield from update_ui(chatbot=chatbot, history=history, msg=f'异常 {e}') # refresh the UI
+    return decorated
+
+
+def HotReload(f):
+    """
+    Decorator function for HotReload, used to hot-update Python function plugins.
+    Hot-updating a function means updating its code without stopping the running program, so the new behavior takes effect immediately.
+    Inside the decorator, wraps(f) is used to preserve the function's metadata, and an inner function named decorated is defined.
+    The inner function reloads and fetches the function's module by using importlib's reload function and inspect's getmodule function,
+    then fetches the function by name with getattr and reloads it in the new module.
+    Finally, it uses a yield from statement to return the reloaded function and executes it in place of the decorated one.
+    In the end, the decorator returns the inner function. This inner function updates the function's original definition to the latest version and executes the new version.
+    """
+    @wraps(f)
+    def decorated(*args, **kwargs):
+        fn_name = f.__name__
+        f_hot_reload = getattr(importlib.reload(inspect.getmodule(f)), fn_name)
+        yield from f_hot_reload(*args, **kwargs)
+    return decorated
+
+
+####################################### Other small utilities #####################################
 
 def get_reduce_token_percent(text):
     """
@@ -80,7 +118,7 @@ def get_reduce_token_percent(text):
 
 def write_results_to_file(history, file_name=None):
     """
-
+    Write the dialogue record history to a file in Markdown format. If no file name is specified, generate one from the current time.
     """
     import os
     import time
@@ -108,7 +146,7 @@ def write_results_to_file(history, file_name=None):
 
 def regular_txt_to_markdown(text):
     """
-
+    Convert plain text into Markdown-formatted text.
     """
     text = text.replace('\n', '\n\n')
     text = text.replace('\n\n\n', '\n\n')
@@ -116,48 +154,11 @@ def regular_txt_to_markdown(text):
     return text
 
 
-def CatchException(f):
-    """
-    Decorator function that catches exceptions in function f, wraps them into a generator to return, and displays them in the chat.
-    """
-    @wraps(f)
-    def decorated(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
-        try:
-            yield from f(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT)
-        except Exception as e:
-            from check_proxy import check_proxy
-            from toolbox import get_conf
-            proxies, = get_conf('proxies')
-            tb_str = '```\n' + traceback.format_exc() + '```'
-            if chatbot is None or len(chatbot) == 0:
-                chatbot = [["插件调度异常", "异常原因"]]
-            chatbot[-1] = (chatbot[-1][0],
-                           f"[Local Message] 实验性函数调用出错: \n\n{tb_str} \n\n当前代理可用性: \n\n{check_proxy(proxies)}")
-            yield from update_ui(chatbot=chatbot, history=history, msg=f'异常 {e}') # refresh the UI
-    return decorated
-
-
-def HotReload(f):
-    """
-    Decorator function for HotReload, used to hot-update Python function plugins.
-    Hot-updating a function means updating its code without stopping the running program, so the new behavior takes effect immediately.
-    Inside the decorator, wraps(f) is used to preserve the function's metadata, and an inner function named decorated is defined.
-    The inner function reloads and fetches the function's module by using importlib's reload function and inspect's getmodule function,
-    then fetches the function by name with getattr and reloads it in the new module.
-    Finally, it uses a yield from statement to return the reloaded function and executes it in place of the decorated one.
-    In the end, the decorator returns the inner function. This inner function updates the function's original definition to the latest version and executes the new version.
-    """
-    @wraps(f)
-    def decorated(*args, **kwargs):
-        fn_name = f.__name__
-        f_hot_reload = getattr(importlib.reload(inspect.getmodule(f)), fn_name)
-        yield from f_hot_reload(*args, **kwargs)
-    return decorated
 
 
 def report_execption(chatbot, history, a, b):
     """
-
+    Append the error message to the chatbot
     """
     chatbot.append((a, b))
     history.append(a)
@@ -166,7 +167,7 @@ def report_execption(chatbot, history, a, b):
 
 def text_divide_paragraph(text):
     """
-
+    Split the text by paragraph separators and generate HTML code with paragraph tags.
     """
     if '```' in text:
         # careful input
@@ -182,7 +183,7 @@ def text_divide_paragraph(text):
 
 def markdown_convertion(txt):
     """
-
+    Convert Markdown-formatted text into HTML. If it contains math formulas, first convert the formulas into HTML.
     """
     pre = '<div class="markdown-body">'
     suf = '</div>'
@@ -274,7 +275,7 @@ def close_up_code_segment_during_stream(gpt_reply):
 
 def format_io(self, y):
     """
-
+    Parse the input and output into HTML format. Paragraphize the input part of the last item in y, and convert the Markdown and math formulas in the output part into HTML.
     """
     if y is None or y == []:
         return []
@@ -290,7 +291,7 @@ def format_io(self, y):
 
 def find_free_port():
     """
-
+    Return an unused port that is currently available on the system.
     """
     import socket
     from contextlib import closing
@@ -410,9 +411,43 @@ def on_report_generated(files, chatbot):
     return report_files, chatbot
 
 def is_openai_api_key(key):
-    # A correct API_KEY is "sk-" plus a combination of 48 alphanumeric characters
     API_MATCH = re.match(r"sk-[a-zA-Z0-9]{48}$", key)
-    return API_MATCH
+    return bool(API_MATCH)
+
+def is_api2d_key(key):
+    if key.startswith('fk') and len(key) == 41:
+        return True
+    else:
+        return False
+
+def is_any_api_key(key):
+    if ',' in key:
+        keys = key.split(',')
+        for k in keys:
+            if is_any_api_key(k): return True
+        return False
+    else:
+        return is_openai_api_key(key) or is_api2d_key(key)
+
+
+def select_api_key(keys, llm_model):
+    import random
+    avail_key_list = []
+    key_list = keys.split(',')
+
+    if llm_model.startswith('gpt-'):
+        for k in key_list:
+            if is_openai_api_key(k): avail_key_list.append(k)
+
+    if llm_model.startswith('api2d-'):
+        for k in key_list:
+            if is_api2d_key(k): avail_key_list.append(k)
+
+    if len(avail_key_list) == 0:
+        raise RuntimeError(f"您提供的api-key不满足要求,不包含任何可用于{llm_model}的api-key。")
+
+    api_key = random.choice(avail_key_list) # random load balancing
+    return api_key
 
 @lru_cache(maxsize=128)
 def read_single_conf_with_lru_cache(arg):
@@ -423,7 +458,7 @@ def read_single_conf_with_lru_cache(arg):
     r = getattr(importlib.import_module('config'), arg)
     # When reading API_KEY, check whether the user forgot to edit the config
     if arg == 'API_KEY':
-        if
+        if is_any_api_key(r):
            print亮绿(f"[API_KEY] 您的 API_KEY 是: {r[:15]}*** API_KEY 导入成功")
         else:
            print亮红( "[API_KEY] 正确的 API_KEY 是 'sk-' + '48 位大小写字母数字' 的组合,请在config文件中修改API密钥, 添加海外代理之后再运行。" + \
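A usage sketch for the new key helpers, assuming they are imported from toolbox as shown in the bridge_chatgpt.py hunk above; the keys below are fake, format-matching placeholders, not real credentials:

    from toolbox import is_openai_api_key, is_api2d_key, is_any_api_key, select_api_key

    openai_key = "sk-" + "a" * 48   # "sk-" plus 48 alphanumeric characters
    api2d_key = "fk" + "b" * 39     # starts with 'fk', 41 characters in total
    mixed = openai_key + "," + api2d_key

    assert is_openai_api_key(openai_key)
    assert is_api2d_key(api2d_key)
    assert is_any_api_key(mixed)    # comma-separated key lists are accepted

    # select_api_key filters by model family, then picks at random (load balancing):
    assert select_api_key(mixed, "gpt-3.5-turbo") == openai_key
    assert select_api_key(mixed, "api2d-gpt-4") == api2d_key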