openaccess ai open chat

- dropdown default
- fix dropdown class
- rp needs model name
- expects the dropdown, not a string (see the Gradio sketch below)
- gradio wonkiness
- app.py +143 -16
- calculate_elo.py +2 -1
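The dropdown notes above come down to how Gradio wires components into event handlers: the component object goes into the inputs list, and the handler receives the component's current value, a plain string, at call time. A minimal standalone sketch of that pattern (the component and handler names here are illustrative, not from app.py):

import gradio as gr

MODELS = ["manticore-13b-chat"]

with gr.Blocks() as demo:
    # Pass the Dropdown *component* into inputs=[...]; Gradio then calls
    # the handler with the selected value (a str), not the component.
    model_choice = gr.Dropdown(label="Model", choices=MODELS, value=MODELS[0])
    echo = gr.Textbox(label="Selected")
    model_choice.change(fn=lambda name: f"selected: {name}", inputs=[model_choice], outputs=[echo])

demo.launch()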
app.py
CHANGED
@@ -12,7 +12,7 @@ import itertools
 
 from collections import defaultdict
 from time import sleep
-from typing import Generator, Tuple
+from typing import Generator, Tuple, List, Dict
 
 import boto3
 import gradio as gr
@@ -40,6 +40,12 @@ def prompt_chat(system_msg, history):
                      for item in history])
 
 
+def prompt_roleplay(system_msg, history):
+    return "<|system|>" + system_msg.strip() + "\n" + \
+        "\n".join(["\n".join(["<|user|>"+item[0], "<|model|>"+item[1]])
+                  for item in history])
+
+
 class Pipeline:
     prefer_async = True
 
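The new prompt_roleplay helper flattens a list of (user, assistant) pairs into one tagged transcript. A quick check of its output, using the function as defined above:

history = [["Hi there", "Hello!"]]
print(prompt_roleplay("You are a pirate.", history))
# <|system|>You are a pirate.
# <|user|>Hi there
# <|model|>Hello!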
@@ -61,8 +67,11 @@ class Pipeline:
             "stop": ["</s>", "USER:", "### Instruction:"] + stop_tokens,
         }
 
-    def __call__(self, prompt) -> Generator[str, None, None]:
-        input = self.generation_config.copy()
+    def get_generation_config(self):
+        return self.generation_config.copy()
+
+    def __call__(self, prompt, config=None) -> Generator[List[Dict[str, str]], None, None]:
+        input = config if config else self.generation_config.copy()
         input["prompt"] = prompt
 
         if self.prefer_async:
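get_generation_config hands back a copy of the pipeline's defaults, so a caller can adjust sampling settings per request and pass them into __call__ without mutating shared state. The intended calling pattern, sketched (the model name and values are illustrative):

pipe = get_model_pipeline("manticore-13b-chat")
config = pipe.get_generation_config()  # a copy; safe to mutate
config["temperature"] = 0.8
config["max_new_tokens"] = 300
stream = pipe("USER: hi\nASSISTANT:", config=config)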
@@ -79,7 +88,7 @@ class Pipeline:
         task_id = data.get('id')
         return self.stream_output(task_id)
 
-    def stream_output(self,task_id) -> Generator[str, None, None]:
+    def stream_output(self,task_id) -> Generator[List[Dict[str, str]], None, None]:
         url = f"https://api.runpod.ai/v2/{self.endpoint_id}/stream/{task_id}"
         headers = {
             "Authorization": f"Bearer {os.environ['RUNPOD_AI_API_KEY']}"
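The annotation change on stream_output (Generator[str, ...] to Generator[List[Dict[str, str]], ...]) says each yielded item is now a list of dicts rather than a bare string, matching how open_chat below reads res[0]['generated_text']. A consumer, assuming that shape:

for res in pipe.stream_output(task_id):
    # assumed shape per the annotation: [{"generated_text": "Hello"}]
    print(res[0]["generated_text"], end="", flush=True)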
@@ -127,15 +136,23 @@ AVAILABLE_MODELS = {
     "guanaco-13b": ("yxl8w98z017mw2", prompt_instruct),
 }
 
+OAAIC_MODELS = ["manticore-13b-chat"]
+OAAIC_MODELS_ROLEPLAY = {
+    "manticore-13b-chat": ("u6tv84bpomhfei", prompt_roleplay),
+}
+
 _memoized_models = defaultdict()
 
 
 def get_model_pipeline(model_name):
     if not _memoized_models.get(model_name):
         kwargs = {}
-        if len(AVAILABLE_MODELS[model_name]) >= 3:
-            kwargs["stop_tokens"] = AVAILABLE_MODELS[model_name][2]
-        _memoized_models[model_name] = Pipeline(AVAILABLE_MODELS[model_name][0], model_name, AVAILABLE_MODELS[model_name][1], **kwargs)
+        if model_name in AVAILABLE_MODELS:
+            if len(AVAILABLE_MODELS[model_name]) >= 3:
+                kwargs["stop_tokens"] = AVAILABLE_MODELS[model_name][2]
+            _memoized_models[model_name] = Pipeline(AVAILABLE_MODELS[model_name][0], model_name, AVAILABLE_MODELS[model_name][1], **kwargs)
+        elif model_name in OAAIC_MODELS_ROLEPLAY:
+            _memoized_models[model_name] = Pipeline(OAAIC_MODELS_ROLEPLAY[model_name][0], model_name, OAAIC_MODELS_ROLEPLAY[model_name][1], **kwargs)
     return _memoized_models.get(model_name)
 
 start_message = """- The Assistant is helpful and transparent.
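get_model_pipeline now routes names through both tables and memoizes the constructed Pipeline, so each endpoint is built once per process. Note that defaultdict() with no factory behaves exactly like a plain dict, which is why the code guards with .get(). The expected behavior, sketched:

pipe_a = get_model_pipeline("manticore-13b-chat")
pipe_b = get_model_pipeline("manticore-13b-chat")
assert pipe_a is pipe_b  # constructed once, then reused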
@@ -167,15 +184,20 @@ def token_generator(generator1, generator2, mapping_fn=None, fillvalue=None):
         yield token1, token2
 
 
-def chat(history1, history2, system_msg):
+def chat(history1, history2, system_msg, state):
     history1 = history1 or []
     history2 = history2 or []
 
-    arena_bots = list(AVAILABLE_MODELS.keys())
-    random.shuffle(arena_bots)
-    battle = arena_bots[0:2]
-    model1 = get_model_pipeline(battle[0])
-    model2 = get_model_pipeline(battle[1])
+    arena_bots = None
+    if state and "models" in state and state['models']:
+        arena_bots = state['models']
+    if not arena_bots:
+        arena_bots = list(AVAILABLE_MODELS.keys())
+        random.shuffle(arena_bots)
+
+    battle = arena_bots[0:2]
+    model1 = get_model_pipeline(battle[0])
+    model2 = get_model_pipeline(battle[1])
 
     messages1 = model1.transform_prompt(system_msg, history1)
     messages2 = model2.transform_prompt(system_msg, history2)
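Threading arena_state through chat() pins the same two models for the length of a conversation instead of reshuffling every turn: the state dict the function yields comes back in as state on the next submit. The selection logic, distilled into a standalone sketch (names are illustrative):

import random

def pick_models(state, available):
    # Reuse the pair recorded in state, if any; otherwise shuffle fresh.
    models = (state or {}).get("models")
    if not models:
        models = list(available)
        random.shuffle(models)
    return models[:2]

print(pick_models({}, ["a", "b", "c"]))                      # random pair
print(pick_models({"models": ["b", "a"]}, ["a", "b", "c"]))  # ['b', 'a']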
@@ -194,7 +216,8 @@ def chat(history1, history2, system_msg):
             if t2 is not None:
                 history2[-1][1] += t2
             # stream the response
-            yield history1, history2, "", gr.update(value=battle[0]), gr.update(value=battle[1]), {"models": [battle[0], battle[1]]}
+            # [arena_chatbot1, arena_chatbot2, arena_message, reveal1, reveal2, arena_state]
+            yield history1, history2, "", gr.update(value=battle[0]), gr.update(value=battle[1]), {"models": [model1.name, model2.name]}
             sleep(0.2)
 
 
@@ -263,6 +286,56 @@ def dataset_to_markdown():
     return markdown_string
 
 
+"""
+OpenAccess AI Chatbots chat
+"""
+
+def open_clear_chat(chat_history_state, chat_message, nudge_msg):
+    chat_history_state = []
+    chat_message = ''
+    nudge_msg = ''
+    return chat_history_state, chat_message, nudge_msg
+
+
+def open_user(message, nudge_msg, history):
+    history = history or []
+    # Append the user's message to the conversation history
+    history.append([message, nudge_msg])
+    return "", nudge_msg, history
+
+
+def open_chat(model_name, history, system_msg, max_new_tokens, temperature, top_p, top_k, repetition_penalty, roleplay=False):
+    history = history or []
+
+    model = get_model_pipeline(model_name)
+    config = model.get_generation_config()
+    config["max_new_tokens"] = max_new_tokens
+    config["temperature"] = temperature
+    config["temperature"] = temperature
+    config["top_p"] = top_p
+    config["top_k"] = top_k
+    config["repetition_penalty"] = repetition_penalty
+
+    messages = model.transform_prompt(system_msg, history)
+
+    # remove last space from assistant, some models output a ZWSP if you leave a space
+    messages = messages.rstrip()
+
+    model_res = model(messages, config=config)  # type: Generator[List[Dict[str, str]], None, None]
+    for res in model_res:
+        tokens = re.findall(r'\s*\S+\s*', res[0]['generated_text'])
+        for s in tokens:
+            answer = s
+            history[-1][1] += answer
+            # stream the response
+            yield history, history, ""
+            sleep(0.01)
+
+
+def open_rp_chat(*args):
+    return open_chat(*args, roleplay=True)[0]
+
+
 with gr.Blocks() as arena:
     with gr.Row():
         with gr.Column():
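Two details in this hunk are worth noting: the temperature assignment is duplicated (harmless, since the second write is identical), and open_rp_chat subscripts the generator returned by open_chat, which will raise TypeError when invoked ('generator' object is not subscriptable). The re.findall pattern itself re-chunks each streamed generated_text into whitespace-preserving word chunks:

import re

print(re.findall(r'\s*\S+\s*', "Hello  world!\nBye"))
# ['Hello  ', 'world!\n', 'Bye']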
@@ -311,12 +384,14 @@ with gr.Blocks() as arena:
         with gr.Row():
             arena_submit = gr.Button(value="Send message", variant="secondary").style(full_width=True)
             arena_clear = gr.Button(value="New topic", variant="secondary").style(full_width=False)
+            # arena_regenerate = gr.Button(value="Regenerate", variant="secondary").style(full_width=False)
         arena_state = gr.State({})
 
         arena_clear.click(lambda: None, None, arena_chatbot1, queue=False)
         arena_clear.click(lambda: None, None, arena_chatbot2, queue=False)
         arena_clear.click(lambda: None, None, arena_message, queue=False)
         arena_clear.click(lambda: None, None, arena_nudge_msg, queue=False)
+        arena_clear.click(lambda: None, None, arena_state, queue=False)
 
         submit_click_event = arena_submit.click(
             lambda *args: (
@@ -328,7 +403,7 @@ with gr.Blocks() as arena:
         ).then(
             fn=user, inputs=[arena_message, arena_nudge_msg, arena_chatbot1, arena_chatbot2], outputs=[arena_message, arena_nudge_msg, arena_chatbot1, arena_chatbot2], queue=True
         ).then(
-            fn=chat, inputs=[arena_chatbot1, arena_chatbot2, arena_system_msg], outputs=[arena_chatbot1, arena_chatbot2, arena_message, reveal1, reveal2, arena_state], queue=True
+            fn=chat, inputs=[arena_chatbot1, arena_chatbot2, arena_system_msg, arena_state], outputs=[arena_chatbot1, arena_chatbot2, arena_message, reveal1, reveal2, arena_state], queue=True
         ).then(
             lambda *args: (
                 gr.update(visible=False, interactive=False),
@@ -412,8 +487,16 @@ with gr.Blocks() as arena:
                 gr.update(visible=False),
                 None,
                 None,
+                None,
             ),
-            inputs=[], outputs=[arena_message, dismiss_reveal, arena_clear, arena_submit, reveal1, reveal2, arena_chatbot1, arena_chatbot2], queue=True
+            inputs=[], outputs=[
+                arena_message,
+                dismiss_reveal,
+                arena_clear, arena_submit,
+                reveal1, reveal2,
+                arena_chatbot1, arena_chatbot2,
+                arena_state,
+            ], queue=True
         )
     with gr.Tab("Leaderboard"):
         with gr.Column():
@@ -422,5 +505,49 @@ with gr.Blocks() as arena:
             """)
             leaderboad_refresh = gr.Button(value="Refresh Leaderboard", variant="secondary").style(full_width=True)
             leaderboad_refresh.click(fn=refresh_md, inputs=[], outputs=[leaderboard_markdown])
+    with gr.Tab("OAAIC Chatbots"):
+        gr.Markdown("# GGML Spaces Chatbot Demo")
+        open_model_choice = gr.Dropdown(label="Model", choices=OAAIC_MODELS, value=OAAIC_MODELS[0])
+        open_chatbot = gr.Chatbot()
+        with gr.Row():
+            open_message = gr.Textbox(
+                label="What do you want to chat about?",
+                placeholder="Ask me anything.",
+                lines=3,
+            )
+        with gr.Row():
+            open_submit = gr.Button(value="Send message", variant="secondary").style(full_width=True)
+            open_roleplay = gr.Button(value="Roleplay", variant="secondary").style(full_width=True)
+            open_clear = gr.Button(value="New topic", variant="secondary").style(full_width=False)
+            open_stop = gr.Button(value="Stop", variant="secondary").style(full_width=False)
+        with gr.Row():
+            with gr.Column():
+                open_max_tokens = gr.Slider(20, 1000, label="Max Tokens", step=20, value=300)
+                open_temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=0.8)
+                open_top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.95)
+                open_top_k = gr.Slider(0, 100, label="Top K", step=1, value=40)
+                open_repetition_penalty = gr.Slider(0.0, 2.0, label="Repetition Penalty", step=0.1, value=1.1)
+
+        open_system_msg = gr.Textbox(
+            start_message, label="System Message", interactive=True, visible=True, placeholder="system prompt, useful for RP", lines=5)
+
+        open_nudge_msg = gr.Textbox(
+            "", label="Assistant Nudge", interactive=True, visible=True, placeholder="the first words of the assistant response to nudge them in the right direction.", lines=1)
+
+        open_chat_history_state = gr.State()
+        open_clear.click(open_clear_chat, inputs=[open_chat_history_state, open_message, open_nudge_msg], outputs=[open_chat_history_state, open_message, open_nudge_msg], queue=False)
+        open_clear.click(lambda: None, None, open_chatbot, queue=False)
+
+        open_submit_click_event = open_submit.click(
+            fn=open_user, inputs=[open_message, open_nudge_msg, open_chat_history_state], outputs=[open_message, open_nudge_msg, open_chat_history_state], queue=True
+        ).then(
+            fn=open_chat, inputs=[open_model_choice, open_chat_history_state, open_system_msg, open_max_tokens, open_temperature, open_top_p, open_top_k, open_repetition_penalty], outputs=[open_chatbot, open_chat_history_state, open_message], queue=True
+        )
+        open_roleplay_click_event = open_roleplay.click(
+            fn=open_user, inputs=[open_message, open_nudge_msg, open_chat_history_state], outputs=[open_message, open_nudge_msg, open_chat_history_state], queue=True
+        ).then(
+            fn=open_rp_chat, inputs=[open_model_choice, open_chat_history_state, open_system_msg, open_max_tokens, open_temperature, open_top_p, open_top_k, open_repetition_penalty], outputs=[open_chatbot, open_chat_history_state, open_message], queue=True
+        )
+        open_stop.click(fn=None, inputs=None, outputs=None, cancels=[open_submit_click_event, open_roleplay_click_event], queue=False)
 
 arena.queue(concurrency_count=5, max_size=16).launch(debug=True, server_name="0.0.0.0", server_port=7860)
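The new tab wires Send and Roleplay through the same two-step chain (open_user appends the message, then open_chat or open_rp_chat streams the reply) and registers both chains with the Stop button via cancels=[...]. A minimal self-contained sketch of that Gradio pattern (handler names are illustrative):

import gradio as gr
from time import sleep

def add_user(msg, history):
    history = history or []
    history.append([msg, ""])
    return "", history

def stream_bot(history):
    for ch in "streaming reply":  # stand-in for a model token stream
        history[-1][1] += ch
        yield history
        sleep(0.05)

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    send = gr.Button("Send")
    stop = gr.Button("Stop")

    click_event = send.click(add_user, [msg, chatbot], [msg, chatbot], queue=True).then(
        stream_bot, [chatbot], [chatbot], queue=True
    )
    # cancels= aborts the in-flight generator when Stop is pressed
    stop.click(fn=None, inputs=None, outputs=None, cancels=[click_event], queue=False)

demo.queue().launch()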
calculate_elo.py
CHANGED
@@ -15,7 +15,7 @@ dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
 
 
 def _create_arena_table():
-    table = dynamodb.create_table(
+    dynamodb.create_table(
         TableName='oaaic_chatbot_arena',
         KeySchema=[
             {
@@ -267,6 +267,7 @@ def main():
     elo_scores = {}
 
     for battle in battles:
+        print(repr(battle))
         if battle['label'] in {-1, 0, 1, 2}:
             outcome = battle['label']
             for chatbot_name in [battle['choice1_name'], battle['choice2_name']]:
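The print(repr(battle)) line is debug logging for the Elo aggregation loop, which folds each recorded battle (the label values -1, 0, 1, 2 encode the vote outcome) into per-model ratings. The script's exact constants aren't shown in this hunk, but the standard Elo update such a loop applies looks like this (the K-factor and score mapping are illustrative):

def elo_update(r_a, r_b, score_a, k=32):
    # score_a: 1.0 = A wins, 0.5 = draw, 0.0 = B wins
    expected_a = 1 / (1 + 10 ** ((r_b - r_a) / 400))
    r_a += k * (score_a - expected_a)
    r_b += k * ((1 - score_a) - (1 - expected_a))
    return r_a, r_b

print(elo_update(1000, 1000, 1.0))  # (1016.0, 984.0)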