lihuayong committed
Commit d5e6cde · 1 Parent(s): d85193e

Update models

Files changed (1):
  modules/presets.py  +15 −40
modules/presets.py CHANGED
@@ -45,45 +45,20 @@ INDEX_QUERY_TEMPRATURE = 1.0
 CHUANHU_TITLE = i18n("川虎Chat 🚀")
 
 CHUANHU_DESCRIPTION = i18n(
-    "由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536)、[明昭MZhao](https://space.bilibili.com/24807452) 和 [Keldos](https://github.com/Keldos-Li) 开发<br />访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本")
+    "由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536)、[明昭MZhao](https://space.bilibili.com/24807452) 和 [Keldos](https://github.com/Keldos-Li) 开发<br />访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本"
+)
 
 ONLINE_MODELS = [
-    "gpt-3.5-turbo-1106",
-    "gpt-4-1106-preview",
+    "gpt-4-turbo",
     "gpt-3.5-turbo",
-    "gpt-3.5-turbo-16k",
-    "gpt-3.5-turbo-0301",
-    "gpt-3.5-turbo-0613",
-    "gpt-4",
-    "gpt-4-0314",
-    "gpt-4-0613",
-    "gpt-4-32k",
-    "gpt-4-32k-0314",
-    "gpt-4-32k-0613",
-    "川虎助理",
-    "川虎助理 Pro",
-    "xmchat",
-    "yuanai-1.0-base_10B",
-    "yuanai-1.0-translate",
-    "yuanai-1.0-dialog",
-    "yuanai-1.0-rhythm_poems",
-    "minimax-abab4-chat",
-    "minimax-abab5-chat",
+    "gpt-4-turbo-2024-04-09",
+    "gpt-4-1106-preview",
+    "gpt-4-0125-preview",
 ]
 
-LOCAL_MODELS = [
-    "chatglm-6b",
-    "chatglm-6b-int4",
-    "chatglm-6b-int4-qe",
-    "StableLM",
-    "MOSS",
-    "llama-7b-hf",
-    "llama-13b-hf",
-    "llama-30b-hf",
-    "llama-65b-hf",
-]
+LOCAL_MODELS = ["chatglm-6b"]
 
-if os.environ.get('HIDE_LOCAL_MODELS', 'false') == 'true':
+if os.environ.get("HIDE_LOCAL_MODELS", "false") == "true":
     MODELS = ONLINE_MODELS
 else:
     MODELS = ONLINE_MODELS + LOCAL_MODELS
@@ -110,7 +85,12 @@ MODEL_TOKEN_LIMIT = {
     "gpt-4-0613": 8192,
     "gpt-4-32k": 32768,
     "gpt-4-32k-0314": 32768,
-    "gpt-4-32k-0613": 32768
+    "gpt-4-32k-0613": 32768,
+    "gpt-4-turbo": 128000,
+    "gpt-3.5-turbo": 16385,
+    "gpt-4-turbo-2024-04-09": 128000,
+    "gpt-4-1106-preview": 128000,
+    "gpt-4-0125-preview": 128000,
 }
 
 TOKEN_OFFSET = 1000 # 模型的token上限减去这个值,得到软上限。到达软上限之后,自动尝试减少token占用。
@@ -119,13 +99,8 @@ REDUCE_TOKEN_FACTOR = 0.5 # 与模型token上限想乘,得到目标token数
 
 REPLY_LANGUAGES = [
     "简体中文",
-    "繁體中文",
     "English",
-    "日本語",
-    "Español",
-    "Français",
-    "Deutsch",
-    "跟随问题语言(不稳定)"
+    "跟随问题语言(不稳定)",
 ]
 
 WEBSEARCH_PTOMPT_TEMPLATE = """\
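
For context, below is a minimal sketch (not part of the commit) of how the touched presets interact after this change. The HIDE_LOCAL_MODELS environment toggle, the model lists, and the token-limit constants come from the diff above; per the comments in presets.py, the hard token limit minus TOKEN_OFFSET gives a soft limit at which the app starts trimming token usage, and REDUCE_TOKEN_FACTOR times the hard limit gives the trim target. The helper soft_limit_for is hypothetical, added here only for illustration.

    import os

    ONLINE_MODELS = [
        "gpt-4-turbo",
        "gpt-3.5-turbo",
        "gpt-4-turbo-2024-04-09",
        "gpt-4-1106-preview",
        "gpt-4-0125-preview",
    ]
    LOCAL_MODELS = ["chatglm-6b"]

    # Local models can be hidden from the model dropdown via an environment variable.
    if os.environ.get("HIDE_LOCAL_MODELS", "false") == "true":
        MODELS = ONLINE_MODELS
    else:
        MODELS = ONLINE_MODELS + LOCAL_MODELS

    MODEL_TOKEN_LIMIT = {"gpt-4-turbo": 128000, "gpt-3.5-turbo": 16385}
    TOKEN_OFFSET = 1000        # hard limit minus this value gives the soft limit
    REDUCE_TOKEN_FACTOR = 0.5  # trim target = hard limit * this factor

    def soft_limit_for(model: str) -> int:
        # Hypothetical helper: once usage crosses this soft limit, the app
        # tries to reduce history toward REDUCE_TOKEN_FACTOR * hard limit.
        return MODEL_TOKEN_LIMIT[model] - TOKEN_OFFSET

    # e.g. gpt-4-turbo: soft limit 127000 tokens, trim target 64000 tokens
    print(soft_limit_for("gpt-4-turbo"),
          int(MODEL_TOKEN_LIMIT["gpt-4-turbo"] * REDUCE_TOKEN_FACTOR))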