Update app.py
app.py CHANGED
@@ -36,15 +36,15 @@ def generate(
     max_new_tokens: int = 1024,
     temperature: float = 0.6,
     top_p: float = 0.9,
-    top_k: int = 50,
-    repetition_penalty: float = 1.2,
 ) -> Iterator[str]:
 
     conversation = []
     if system_prompt:
         conversation.append({"role": "system", "content": system_prompt})
+
     for user, assistant in chat_history:
         conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
+
     conversation.append({"role": "user", "content": message})
 
     input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")

@@ -60,10 +60,7 @@ def generate(
         max_new_tokens=max_new_tokens,
         do_sample=True,
         top_p=top_p,
-        top_k=top_k,
         temperature=temperature,
-        num_beams=1,
-        repetition_penalty=repetition_penalty,
     )
     t = Thread(target=model.generate, kwargs=generate_kwargs)
     t.start()
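The hunk above keeps only max_new_tokens, do_sample, top_p and temperature in generate_kwargs, and the call still runs model.generate on a background Thread while generate() is typed as Iterator[str]. That strongly suggests the surrounding code, which this diff does not show, streams partial text back through a transformers TextIteratorStreamer. A minimal sketch of that pattern, with the helper name, streamer settings and timeout being assumptions rather than part of this commit:

```python
from collections.abc import Iterator
from threading import Thread

from transformers import TextIteratorStreamer


def stream_generate(model, tokenizer, input_ids,
                    max_new_tokens: int = 1024,
                    temperature: float = 0.6,
                    top_p: float = 0.9) -> Iterator[str]:
    # Hypothetical helper mirroring the trimmed-down kwargs in the hunk above.
    streamer = TextIteratorStreamer(
        tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True
    )
    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        top_p=top_p,
        temperature=temperature,
    )
    # model.generate runs in a background thread; the streamer yields decoded
    # text chunks as they are produced, so the UI can update incrementally.
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)
```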
@@ -114,9 +111,9 @@ def respond(
 chat_interface = gr.ChatInterface(
     fn=generate,
     additional_inputs=[
-        gr.Textbox(value="Ets un chatbot amigable.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.
+        gr.Textbox(value="Ets un chatbot amigable. Responeu preguntes i ajudeu els usuaris", label="System message"),
+        gr.Slider(minimum=1, maximum=2048, value=1024, step=1, label="Max new tokens"),
+        gr.Slider(minimum=0.1, maximum=4.0, value=0.6, step=0.1, label="Temperature"),
         gr.Slider(
             minimum=0.1,
             maximum=1.0,
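gr.ChatInterface passes each entry in additional_inputs to fn positionally, after the message and the chat history, so the widget order above has to mirror the trailing parameters of generate(); dropping the top_k and repetition_penalty sliders therefore matches the signature change in the first hunk. A sketch of the assumed mapping, where the message, chat_history and system_prompt parameter names are inferred from the function body rather than shown in this diff:

```python
from collections.abc import Iterator


def generate(
    message: str,
    chat_history: list[tuple[str, str]],
    system_prompt: str,          # gr.Textbox(value="Ets un chatbot amigable. ...")
    max_new_tokens: int = 1024,  # gr.Slider(minimum=1, maximum=2048, value=1024)
    temperature: float = 0.6,    # gr.Slider(minimum=0.1, maximum=4.0, value=0.6)
    top_p: float = 0.9,          # gr.Slider(minimum=0.1, maximum=1.0, ...)
) -> Iterator[str]:
    ...
```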
@@ -125,12 +122,17 @@ chat_interface = gr.ChatInterface(
             label="Top-p (nucleus sampling)",
         ),
     ],
+    examples=[
+        ["A quina velocitat poden volar els cocodrils?"],
+        ["Explica pas a pas com resoldre l'equació següent: 2x + 10 = 0"],
+        ["Pot Donald Trump sopar amb Juli Cèsar?"],
+    ],
 )
 
-with gr.Blocks() as demo:
+with gr.Blocks(css="style.css") as demo:
     gr.Markdown(DESCRIPTION)
     chat_interface.render()
 
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.queue(max_size=20).launch()
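The new launch line turns on Gradio's request queue: streaming (generator) handlers such as generate generally need the queue to push partial responses to the browser, and max_size=20 caps how many requests may wait at once, with later arrivals rejected until the queue drains. The css="style.css" argument likewise presumes a style.css file is checked into the Space alongside app.py.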