3v324v23 committed
Commit 01a377d
1 Parent(s): b7d4ade

Restore the API_URL setting

Files changed (2)
  1. config.py +4 -0
  2. request_llm/bridge_all.py +26 -6
config.py CHANGED
@@ -56,3 +56,7 @@ CONCURRENT_COUNT = 100
 # Set a username and password (no need to modify) (this feature is unstable; it is sensitive to the gradio version and the network, and is not recommended for local use)
 # [("username", "password"), ("username2", "password2"), ...]
 AUTHENTICATION = []
+
+# URL redirection, which swaps out the effective API_URL (under normal circumstances, do NOT modify!!)
+# Format: {"https://api.openai.com/v1/chat/completions": "the redirected URL"}
+API_URL_REDIRECT = {}
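
As a usage sketch (not part of this commit; the proxy host below is a hypothetical placeholder), a user who wants to route OpenAI traffic through their own reverse proxy would now write in config.py:

# Hypothetical example: map the official endpoint to a self-hosted proxy.
# The key must be the full original endpoint URL; the value is where
# requests are actually sent. "my-proxy.example.com" is a placeholder.
API_URL_REDIRECT = {
    "https://api.openai.com/v1/chat/completions": "https://my-proxy.example.com/v1/chat/completions",
}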
request_llm/bridge_all.py CHANGED
@@ -9,8 +9,9 @@
 2. predict_no_ui_long_connection: in testing we found that when predict_no_ui handles long documents, the connection to openai tends to drop; this function solves that problem with streaming, and likewise supports multithreading
 """
 import tiktoken
-from functools import wraps, lru_cache
+from functools import lru_cache
 from concurrent.futures import ThreadPoolExecutor
+from toolbox import get_conf
 
 from .bridge_chatgpt import predict_no_ui_long_connection as chatgpt_noui
 from .bridge_chatgpt import predict as chatgpt_ui
@@ -42,18 +43,37 @@ class LazyloadTiktoken(object):
     def decode(self, *args, **kwargs):
         encoder = self.get_encoder(self.model)
         return encoder.decode(*args, **kwargs)
-
+
+# Endpoint redirection
+API_URL_REDIRECT, = get_conf("API_URL_REDIRECT")
+openai_endpoint = "https://api.openai.com/v1/chat/completions"
+api2d_endpoint = "https://openai.api2d.net/v1/chat/completions"
+# Backward compatibility with the legacy config
+try:
+    API_URL, = get_conf("API_URL")
+    if API_URL != "https://api.openai.com/v1/chat/completions":
+        openai_endpoint = API_URL
+        print("Warning! The API_URL config option is about to be deprecated; please switch to the API_URL_REDIRECT config.")
+except:
+    pass
+# New-style config
+if openai_endpoint in API_URL_REDIRECT: openai_endpoint = API_URL_REDIRECT[openai_endpoint]
+if api2d_endpoint in API_URL_REDIRECT: api2d_endpoint = API_URL_REDIRECT[api2d_endpoint]
+
+
+# Fetch the tokenizers
 tokenizer_gpt35 = LazyloadTiktoken("gpt-3.5-turbo")
 tokenizer_gpt4 = LazyloadTiktoken("gpt-4")
 get_token_num_gpt35 = lambda txt: len(tokenizer_gpt35.encode(txt, disallowed_special=()))
 get_token_num_gpt4 = lambda txt: len(tokenizer_gpt4.encode(txt, disallowed_special=()))
 
+
 model_info = {
     # openai
     "gpt-3.5-turbo": {
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
-        "endpoint": "https://api.openai.com/v1/chat/completions",
+        "endpoint": openai_endpoint,
         "max_token": 4096,
         "tokenizer": tokenizer_gpt35,
         "token_cnt": get_token_num_gpt35,
@@ -62,7 +82,7 @@ model_info = {
     "gpt-4": {
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
-        "endpoint": "https://api.openai.com/v1/chat/completions",
+        "endpoint": openai_endpoint,
         "max_token": 8192,
         "tokenizer": tokenizer_gpt4,
         "token_cnt": get_token_num_gpt4,
@@ -72,7 +92,7 @@ model_info = {
     "api2d-gpt-3.5-turbo": {
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
-        "endpoint": "https://openai.api2d.net/v1/chat/completions",
+        "endpoint": api2d_endpoint,
         "max_token": 4096,
         "tokenizer": tokenizer_gpt35,
         "token_cnt": get_token_num_gpt35,
@@ -81,7 +101,7 @@ model_info = {
     "api2d-gpt-4": {
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
-        "endpoint": "https://openai.api2d.net/v1/chat/completions",
+        "endpoint": api2d_endpoint,
         "max_token": 8192,
         "tokenizer": tokenizer_gpt4,
         "token_cnt": get_token_num_gpt4,