Spaces:
Runtime error
Runtime error
JohnSmith9982
committed on
Commit
•
064635f
1
Parent(s):
2414c23
Upload 38 files
Browse files- ChuanhuChatbot.py +1 -1
- modules/base_model.py +1 -1
- modules/models.py +1 -1
- modules/presets.py +5 -1
ChuanhuChatbot.py
CHANGED
@@ -247,7 +247,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
|
|
247 |
lines=1,
|
248 |
)
|
249 |
|
250 |
-
with gr.Accordion(i18n("网络设置"), open=False
|
251 |
# 优先展示自定义的api_host
|
252 |
apihostTxt = gr.Textbox(
|
253 |
show_label=True,
|
|
|
247 |
lines=1,
|
248 |
)
|
249 |
|
250 |
+
with gr.Accordion(i18n("网络设置"), open=False):
|
251 |
# 优先展示自定义的api_host
|
252 |
apihostTxt = gr.Textbox(
|
253 |
show_label=True,
|
modules/base_model.py
CHANGED
@@ -41,7 +41,7 @@ class ModelType(Enum):
|
|
41 |
model_type = ModelType.ChatGLM
|
42 |
elif "llama" in model_name_lower or "alpaca" in model_name_lower:
|
43 |
model_type = ModelType.LLaMA
|
44 |
-
elif "
|
45 |
model_type = ModelType.XMBot
|
46 |
else:
|
47 |
model_type = ModelType.Unknown
|
|
|
41 |
model_type = ModelType.ChatGLM
|
42 |
elif "llama" in model_name_lower or "alpaca" in model_name_lower:
|
43 |
model_type = ModelType.LLaMA
|
44 |
+
elif "xmchat" in model_name_lower:
|
45 |
model_type = ModelType.XMBot
|
46 |
else:
|
47 |
model_type = ModelType.Unknown
|
modules/models.py
CHANGED
@@ -393,7 +393,7 @@ class LLaMA_Client(BaseLLMModel):
|
|
393 |
|
394 |
class XMBot_Client(BaseLLMModel):
|
395 |
def __init__(self, api_key):
|
396 |
-
super().__init__(model_name="
|
397 |
self.api_key = api_key
|
398 |
self.session_id = None
|
399 |
self.reset()
|
|
|
393 |
|
394 |
class XMBot_Client(BaseLLMModel):
|
395 |
def __init__(self, api_key):
|
396 |
+
super().__init__(model_name="xmchat")
|
397 |
self.api_key = api_key
|
398 |
self.session_id = None
|
399 |
self.reset()
|
modules/presets.py
CHANGED
@@ -67,7 +67,7 @@ ONLINE_MODELS = [
|
|
67 |
"gpt-4-0314",
|
68 |
"gpt-4-32k",
|
69 |
"gpt-4-32k-0314",
|
70 |
-
"
|
71 |
]
|
72 |
|
73 |
LOCAL_MODELS = [
|
@@ -75,8 +75,12 @@ LOCAL_MODELS = [
|
|
75 |
"chatglm-6b-int4",
|
76 |
"chatglm-6b-int4-qe",
|
77 |
"llama-7b-hf",
|
|
|
|
|
78 |
"llama-13b-hf",
|
|
|
79 |
"llama-30b-hf",
|
|
|
80 |
"llama-65b-hf"
|
81 |
]
|
82 |
|
|
|
67 |
"gpt-4-0314",
|
68 |
"gpt-4-32k",
|
69 |
"gpt-4-32k-0314",
|
70 |
+
"xmchat",
|
71 |
]
|
72 |
|
73 |
LOCAL_MODELS = [
|
|
|
75 |
"chatglm-6b-int4",
|
76 |
"chatglm-6b-int4-qe",
|
77 |
"llama-7b-hf",
|
78 |
+
"llama-7b-hf-int4",
|
79 |
+
"llama-7b-hf-int8",
|
80 |
"llama-13b-hf",
|
81 |
+
"llama-13b-hf-int4",
|
82 |
"llama-30b-hf",
|
83 |
+
"llama-30b-hf-int4",
|
84 |
"llama-65b-hf"
|
85 |
]
|
86 |
|