Spaces:
Build error
Build error
Fix examples and generation: give each example values for all six inputs (message plus slider defaults) and forward repetition_penalty to model.generate
Browse files
app.py
CHANGED
@@ -2,13 +2,10 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
|
|
2 |
import gradio as gr
|
3 |
|
4 |
prologue = """quality: high
|
5 |
-
|
6 |
[System]
|
7 |
Assistant is a distilled language model trained by the community.<|STK_SP|>
|
8 |
-
|
9 |
[System]
|
10 |
<|STK_SP|>
|
11 |
-
|
12 |
[User]"""
|
13 |
|
14 |
tokenizer = AutoTokenizer.from_pretrained("mrsteyk/openchatgpt-neo-125m", use_fast=True)
|
@@ -16,7 +13,7 @@ model = AutoModelForCausalLM.from_pretrained("mrsteyk/openchatgpt-neo-125m")
|
|
16 |
|
17 |
def chat(inpt, max_new_tokens, top_k, top_p, temperature, repetition_penalty):
    """Generate one assistant reply for the user message *inpt*.

    The message is wrapped in the module-level ``prologue`` prompt template,
    tokenized, and sampled from the model; the prompt tokens are stripped from
    the output before decoding so only the assistant's reply is returned.

    All remaining parameters are standard ``model.generate`` sampling controls.
    """
    # Full prompt: system prologue, the user turn, then the assistant cue.
    inputs = tokenizer(f"{prologue}\n{inpt}<|STK_SP|>\n\n[Assistant]\n", return_tensors="pt").input_ids
    # BUG FIX: repetition_penalty was accepted but never forwarded to
    # generate(), so the corresponding UI slider silently did nothing.
    outputs = model.generate(inputs, max_new_tokens=max_new_tokens, do_sample=True, top_k=top_k, top_p=top_p, eos_token_id=tokenizer.sep_token_id, temperature=temperature, repetition_penalty=repetition_penalty)
    # Drop the prompt prefix from each generated sequence, then decode the first.
    return tokenizer.batch_decode([i[len(inputs[0]):] for i in outputs], skip_special_tokens=True)[0]
|
21 |
|
22 |
gr.Interface(
|
@@ -24,21 +21,21 @@ gr.Interface(
|
|
24 |
inputs=["textbox", gr.Slider(767, 2048 + 1), gr.Slider(0, 100, value=50), gr.Slider(0, 1, step=0.01, value=0.95), gr.Slider(0.01, 1, step=0.01, value=1), gr.Slider(1, 100, step=0.5)],
|
25 |
outputs=[gr.Textbox(label="Assistant says")],
|
26 |
examples=[
|
27 |
-
["Hi!"],
|
28 |
-
["Hello, I am trying to use the fft function in Python, but I am not sure how to interpret the results. Can you explain how to interpret the output of the fft function?"],
|
29 |
-
["Hello, I have a question about American history. Who is the current Vice President of the United States?"],
|
30 |
-
["Hello, I have a question about quantum computing. Can quantum computers solve NP-complete problems in polynomial time?"],
|
31 |
-
["I'm wondering how to make an apple pie?"],
|
32 |
-
["Hey, what are some pros and cons of using a neural network for image recognition?"],
|
33 |
-
["Hi! I want to build a website using python and Flask. I just want to know what are the requirements for building a website using Flask?"],
|
34 |
-
["Hi, I want to know about the GPT-3 model. Could you provide me some information about it?"],
|
35 |
-
["Please, help me with GPT-2 training"],
|
36 |
-
["Please, help me understand LLMs!"],
|
37 |
-
["What are the health benefits of ginger?"],
|
38 |
-
["What is the fundamental theorem of algebra?"],
|
39 |
-
["What is the meaning of life?"],
|
40 |
-
["What is the origin of the word 'sushi'?"],
|
41 |
-
["What's the difference between a chatbot and an AI?"],
|
42 |
-
["What's the difference between a monad and a functor in functional programming?"],
|
43 |
]
|
44 |
).launch()
|
|
|
2 |
import gradio as gr
|
3 |
|
4 |
prologue = """quality: high
|
|
|
5 |
[System]
|
6 |
Assistant is a distilled language model trained by the community.<|STK_SP|>
|
|
|
7 |
[System]
|
8 |
<|STK_SP|>
|
|
|
9 |
[User]"""
|
10 |
|
11 |
tokenizer = AutoTokenizer.from_pretrained("mrsteyk/openchatgpt-neo-125m", use_fast=True)
|
|
|
13 |
|
14 |
def chat(inpt, max_new_tokens, top_k, top_p, temperature, repetition_penalty):
    """Run a single chat turn: embed *inpt* in the prompt template, sample a
    completion from the model, and return the decoded assistant reply with the
    prompt tokens and special tokens removed.

    The non-message parameters are passed straight through to
    ``model.generate`` as sampling controls.
    """
    prompt = f"{prologue}\n{inpt}<|STK_SP|>\n\n[Assistant]\n"
    prompt_ids = tokenizer(prompt, return_tensors="pt").input_ids
    generated = model.generate(
        prompt_ids,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        top_k=top_k,
        top_p=top_p,
        eos_token_id=tokenizer.sep_token_id,
        temperature=temperature,
        repetition_penalty=repetition_penalty,
    )
    # Strip the prompt prefix from every returned sequence before decoding.
    prompt_len = len(prompt_ids[0])
    completions = [sequence[prompt_len:] for sequence in generated]
    return tokenizer.batch_decode(completions, skip_special_tokens=True)[0]
|
18 |
|
19 |
gr.Interface(
|
|
|
21 |
inputs=["textbox", gr.Slider(767, 2048 + 1), gr.Slider(0, 100, value=50), gr.Slider(0, 1, step=0.01, value=0.95), gr.Slider(0.01, 1, step=0.01, value=1), gr.Slider(1, 100, step=0.5)],
|
22 |
outputs=[gr.Textbox(label="Assistant says")],
|
23 |
examples=[
|
24 |
+
["Hi!", 767, 50, 0.95, 1, 1],
|
25 |
+
["Hello, I am trying to use the fft function in Python, but I am not sure how to interpret the results. Can you explain how to interpret the output of the fft function?", 767, 50, 0.95, 1, 1],
|
26 |
+
["Hello, I have a question about American history. Who is the current Vice President of the United States?", 767, 50, 0.95, 1, 1],
|
27 |
+
["Hello, I have a question about quantum computing. Can quantum computers solve NP-complete problems in polynomial time?", 767, 50, 0.95, 1, 1],
|
28 |
+
["I'm wondering how to make an apple pie?", 767, 50, 0.95, 1, 1],
|
29 |
+
["Hey, what are some pros and cons of using a neural network for image recognition?", 767, 50, 0.95, 1, 1],
|
30 |
+
["Hi! I want to build a website using python and Flask. I just want to know what are the requirements for building a website using Flask?", 767, 50, 0.95, 1, 1],
|
31 |
+
["Hi, I want to know about the GPT-3 model. Could you provide me some information about it?", 767, 50, 0.95, 1, 1],
|
32 |
+
["Please, help me with GPT-2 training", 767, 50, 0.95, 1, 1],
|
33 |
+
["Please, help me understand LLMs!", 767, 50, 0.95, 1, 1],
|
34 |
+
["What are the health benefits of ginger?", 767, 50, 0.95, 1, 1],
|
35 |
+
["What is the fundamental theorem of algebra?", 767, 50, 0.95, 1, 1],
|
36 |
+
["What is the meaning of life?", 767, 50, 0.95, 1, 1],
|
37 |
+
["What is the origin of the word 'sushi'?", 767, 50, 0.95, 1, 1],
|
38 |
+
["What's the difference between a chatbot and an AI?", 767, 50, 0.95, 1, 1],
|
39 |
+
["What's the difference between a monad and a functor in functional programming?", 767, 50, 0.95, 1, 1],
|
40 |
]
|
41 |
).launch()
|