Commit: 0fc9653
Parent(s): 473fa15
Author: Yarik
Message: Update space

Files changed:
- apis/chat_api.py (+2, -2)
- messagers/message_composer.py (+18, -23)
- networks/message_streamer.py (+3, -5)
apis/chat_api.py
CHANGED
@@ -55,8 +55,8 @@ class ChatAPIApp:
                 "owned_by": "NousResearch",
             },
             {
-                "id": "
-                "description": "[
+                "id": "zephyr-7b-beta",
+                "description": "[HuggingFaceH4/zephyr-7b-beta]: https://huggingface.co/HuggingFaceH4/zephyr-7b-beta",
                 "object": "model",
                 "created": 1700000000,
                 "owned_by": "TheBloke",
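This hunk swaps one entry in the model list that ChatAPIApp exposes, so clients now see "zephyr-7b-beta" alongside the other models. A minimal client-side sketch follows; the base URL and the OpenAI-style /v1/models route are assumptions for illustration, not something this commit defines.

import requests

base_url = "http://localhost:8000"  # assumption: wherever the Space's API server listens
resp = requests.get(f"{base_url}/v1/models")  # assumption: OpenAI-compatible listing route
data = resp.json()
models = data["data"] if isinstance(data, dict) else data  # response envelope shape is an assumption
zephyr = next(m for m in models if m["id"] == "zephyr-7b-beta")
print(zephyr["description"])
# [HuggingFaceH4/zephyr-7b-beta]: https://huggingface.co/HuggingFaceH4/zephyr-7b-beta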
messagers/message_composer.py
CHANGED
@@ -8,7 +8,7 @@ class MessageComposer:
     AVALAIBLE_MODELS = [
         "mixtral-8x7b",
         "mistral-7b",
-        "
+        "zephyr-7b-beta",
         "nous-mixtral-8x7b",
     ]
 
@@ -95,27 +95,21 @@ class MessageComposer:
             self.merged_str_list.append("<|im_start|>assistant")
             self.merged_str = "\n".join(self.merged_str_list)
         # https://huggingface.co/openchat/openchat-3.5-0106
-        elif self.model in ["
+        elif self.model in ["zephyr-7b-beta"]:
             self.messages = self.concat_messages_by_role(messages)
-            self.
-            self.end_of_turn = "<|end_of_turn|>"
+            self.cached_str = ""
             for message in self.messages:
                 role = message["role"]
                 content = message["content"]
                 if role in self.inst_roles:
-                    self.
-                        f"GPT4 Correct User:\n{content}{self.end_of_turn}"
-                    )
+                    self.cached_str = f"[INST] {content} [/INST]"
                 elif role in self.answer_roles:
-                    self.
-
-                    )
+                    self.merged_str += f"<s> {self.cached_str} {content} </s>\n"
+                    self.cached_str = ""
                 else:
-                    self.
-
-
-            self.merged_str_list.append(f"GPT4 Correct Assistant:\n")
-            self.merged_str = "\n".join(self.merged_str_list)
+                    self.cached_str = f"[INST] {content} [/INST]"
+            if self.cached_str:
+                self.merged_str += f"{self.cached_str}"
         else:
             self.merged_str = "\n".join(
                 [
@@ -191,18 +185,19 @@ class MessageComposer:
             role = match.group("role")
             content = match.group("content")
             self.messages.append({"role": role, "content": content.strip()})
-        elif self.model in ["
-            pair_pattern =
-
-                pair_pattern, self.merged_str, flags=re.MULTILINE | re.IGNORECASE
+        elif self.model in ["zephyr-7b-beta"]:
+            pair_pattern = (
+                r"<s>\s*\[INST\](?P<inst>[\s\S]*?)\[/INST\](?P<answer>[\s\S]*?)</s>"
             )
+            pair_matches = re.finditer(pair_pattern, self.merged_str, re.MULTILINE)
             pair_matches_list = list(pair_matches)
+
             self.messages = self.convert_pair_matches_to_messages(pair_matches_list)
-
-
-
-            )
+
+            inst_pattern = r"\[INST\](?P<inst>[\s\S]*?)\[/INST\]"
+            inst_matches = re.finditer(inst_pattern, self.merged_str, re.MULTILINE)
             inst_matches_list = list(inst_matches)
+
             self.append_last_instruction_to_messages(
                 inst_matches_list, pair_matches_list
             )
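The merge hunk buffers each instruction in self.cached_str and only flushes it into self.merged_str once an assistant answer arrives, wrapping every completed exchange as <s> [INST] ... [/INST] answer </s> and leaving a trailing bare [INST] block for the turn that still needs a reply. A standalone sketch of that logic (the real method also runs concat_messages_by_role first; the inst_roles/answer_roles buckets below are assumptions, not copied from the class):

# Simplified re-implementation of the zephyr-7b-beta merge branch, for illustration only.
inst_roles = ["user", "system", "inst"]        # assumption: the class's actual role buckets
answer_roles = ["assistant", "bot", "answer"]  # assumption

def merge_zephyr(messages):
    merged_str = ""
    cached_str = ""
    for message in messages:
        role, content = message["role"], message["content"]
        if role in inst_roles:
            cached_str = f"[INST] {content} [/INST]"            # hold until an answer arrives
        elif role in answer_roles:
            merged_str += f"<s> {cached_str} {content} </s>\n"  # flush a completed pair
            cached_str = ""
        else:
            cached_str = f"[INST] {content} [/INST]"            # unknown roles treated as instructions
    if cached_str:                                              # trailing, unanswered instruction
        merged_str += f"{cached_str}"
    return merged_str

print(merge_zephyr([
    {"role": "user", "content": "Hi"},
    {"role": "assistant", "content": "Hello!"},
    {"role": "user", "content": "What is your name?"},
]))
# <s> [INST] Hi [/INST] Hello! </s>
# [INST] What is your name? [/INST]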
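Going the other direction, the decompose hunk splits a merged zephyr-style string back into messages with two regexes: pair_pattern recovers completed user/assistant exchanges and inst_pattern catches the final unanswered instruction. A self-contained sketch that inlines what convert_pair_matches_to_messages and append_last_instruction_to_messages are expected to do (the role strings assigned below are assumptions):

import re

merged_str = (
    "<s> [INST] Hi [/INST] Hello! </s>\n"
    "[INST] What is your name? [/INST]"
)

# Same patterns as the diff above.
pair_pattern = r"<s>\s*\[INST\](?P<inst>[\s\S]*?)\[/INST\](?P<answer>[\s\S]*?)</s>"
inst_pattern = r"\[INST\](?P<inst>[\s\S]*?)\[/INST\]"

messages = []
pair_matches_list = list(re.finditer(pair_pattern, merged_str, re.MULTILINE))
for m in pair_matches_list:
    messages.append({"role": "user", "content": m.group("inst").strip()})
    messages.append({"role": "assistant", "content": m.group("answer").strip()})

# A bare [INST] ... [/INST] with no answer yet becomes the trailing user turn,
# which is what append_last_instruction_to_messages() handles in the real class.
inst_matches_list = list(re.finditer(inst_pattern, merged_str, re.MULTILINE))
if len(inst_matches_list) > len(pair_matches_list):
    messages.append({"role": "user", "content": inst_matches_list[-1].group("inst").strip()})

print(messages)
# [{'role': 'user', 'content': 'Hi'},
#  {'role': 'assistant', 'content': 'Hello!'},
#  {'role': 'user', 'content': 'What is your name?'}]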
networks/message_streamer.py
CHANGED
@@ -12,8 +12,7 @@ class MessageStreamer:
         "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1", # 72.62, fast [Recommended]
         "mistral-7b": "mistralai/Mistral-7B-Instruct-v0.2", # 65.71, fast
         "nous-mixtral-8x7b": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
-        "
-        # "zephyr-7b-beta": "HuggingFaceH4/zephyr-7b-beta", # ❌ Too Slow
+        "zephyr-7b-beta": "HuggingFaceH4/zephyr-7b-beta", # ❌ Too Slow
         # "llama-70b": "meta-llama/Llama-2-70b-chat-hf", # ❌ Require Pro User
         # "codellama-34b": "codellama/CodeLlama-34b-Instruct-hf", # ❌ Low Score
         # "falcon-180b": "tiiuae/falcon-180B-chat", # ❌ Require Pro User
@@ -23,15 +22,14 @@ class MessageStreamer:
         "mixtral-8x7b": "</s>",
         "mistral-7b": "</s>",
         "nous-mixtral-8x7b": "<|im_end|>",
-        "
+        "zephyr-7b-beta": "<|end_of_turn|>",
 
     }
     TOKEN_LIMIT_MAP = {
         "mixtral-8x7b": 32768,
         "mistral-7b": 32768,
         "nous-mixtral-8x7b": 32768,
-        "
-
+        "zephyr-7b-beta": 4096,
     }
     TOKEN_RESERVED = 100
 
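With these entries in place, MessageStreamer can resolve "zephyr-7b-beta" to its Hugging Face repo id, stop generation at "<|end_of_turn|>", and budget the 4096-token context window minus TOKEN_RESERVED. A rough sketch of how such maps are typically consumed when building a streaming text-generation request; the endpoint URL and parameter names follow common HF Inference API conventions and are assumptions, not part of this commit:

MODEL_MAP = {"zephyr-7b-beta": "HuggingFaceH4/zephyr-7b-beta"}
STOP_SEQUENCE_MAP = {"zephyr-7b-beta": "<|end_of_turn|>"}
TOKEN_LIMIT_MAP = {"zephyr-7b-beta": 4096}
TOKEN_RESERVED = 100

def build_request(model: str, prompt: str, prompt_token_count: int):
    model_id = MODEL_MAP[model]
    # Leave room for the reply: context window minus prompt minus a safety margin.
    max_new_tokens = TOKEN_LIMIT_MAP[model] - prompt_token_count - TOKEN_RESERVED
    url = f"https://api-inference.huggingface.co/models/{model_id}"  # assumed endpoint
    payload = {
        "inputs": prompt,
        "parameters": {
            "max_new_tokens": max_new_tokens,
            "stop": [STOP_SEQUENCE_MAP[model]],  # e.g. "<|end_of_turn|>" per the map above
            "return_full_text": False,
        },
        "stream": True,
    }
    return url, payload

Keeping the per-model constants in flat dicts like this makes adding a model a three-line change, which is exactly what this commit does for zephyr-7b-beta.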