Update app.py
app.py
CHANGED
```diff
@@ -53,7 +53,6 @@ def generate(
     chatbot,
     history,
     temperature,
-    top_k,
     top_p,
     max_new_tokens,
     repetition_penalty,
@@ -86,7 +85,6 @@ def generate(
 
     generate_kwargs = {
         "temperature": temperature,
-        "top_k": top_k,
         "top_p": top_p,
         "max_new_tokens": max_new_tokens,
     }
@@ -167,10 +165,6 @@ with gr.Blocks(analytics_enabled=False, css=custom_css) as demo:
     gr.Markdown(
         """
         π» This demo showcases the Guanaco 33B model, released together with the paper [QLoRA](https://arxiv.org/abs/2305.14314)
-
-
-        The model can produce factually incorrect output, and should not be relied on to produce factually accurate information.
-        The model was trained on various public datasets; while great efforts have been taken to clean the pretraining data, it is possible that this model could generate lewd, biased, or otherwise offensive outputs.
         """
     )
 
@@ -190,25 +184,16 @@ with gr.Blocks(analytics_enabled=False, css=custom_css) as demo:
     with gr.Accordion(label="Parameters", open=False, elem_id="parameters-accordion"):
         temperature = gr.Slider(
             label="Temperature",
-            value=0.
+            value=0.7,
             minimum=0.0,
             maximum=1.0,
             step=0.1,
             interactive=True,
             info="Higher values produce more diverse outputs",
         )
-        top_k = gr.Slider(
-            label="Top-k",
-            value=50,
-            minimum=0.0,
-            maximum=100,
-            step=1,
-            interactive=True,
-            info="Sample from a shortlist of top-k tokens",
-        )
         top_p = gr.Slider(
             label="Top-p (nucleus sampling)",
-            value=0.
+            value=0.9,
             minimum=0.0,
             maximum=1,
             step=0.05,
@@ -217,7 +202,7 @@ with gr.Blocks(analytics_enabled=False, css=custom_css) as demo:
         )
         max_new_tokens = gr.Slider(
             label="Max new tokens",
-            value=
+            value=1024,
             minimum=0,
             maximum=1024,
             step=4,
@@ -252,7 +237,6 @@ with gr.Blocks(analytics_enabled=False, css=custom_css) as demo:
         chatbot,
         history,
         temperature,
-        top_k,
         top_p,
         max_new_tokens,
         repetition_penalty,
@@ -267,7 +251,6 @@ with gr.Blocks(analytics_enabled=False, css=custom_css) as demo:
         chatbot,
         history,
         temperature,
-        top_k,
         top_p,
         max_new_tokens,
         repetition_penalty,
@@ -277,4 +260,4 @@ with gr.Blocks(analytics_enabled=False, css=custom_css) as demo:
 
     clear_chat_button.click(clear_chat, outputs=[chatbot, history])
 
-demo.queue(concurrency_count=16).launch(debug=True)
+demo.queue(concurrency_count=16).launch(debug=True)
```
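In short, the commit drops Top-k sampling entirely (the `top_k` slider, the `top_k` entry in `generate_kwargs`, and the `top_k` argument threaded through `generate` and both event handlers), trims the disclaimer paragraphs from the intro Markdown, and pins explicit defaults on the remaining sliders: temperature 0.7, top-p 0.9, and 1024 max new tokens. As a rough illustration of where the trimmed `generate_kwargs` typically ends up, here is a minimal sketch of a streaming generation call; the endpoint URL and the `stream_completion` helper are assumptions for illustration, not the Space's actual wiring.

```python
# Minimal sketch: streaming generation with the trimmed sampling parameters.
# Assumes a text-generation-inference backend reachable via huggingface_hub's
# InferenceClient; the endpoint URL and helper name below are hypothetical.
from huggingface_hub import InferenceClient

client = InferenceClient("https://example-endpoint")  # hypothetical endpoint


def stream_completion(prompt, temperature=0.7, top_p=0.9, max_new_tokens=1024):
    # Defaults mirror the values the commit pins on the Gradio sliders.
    # top_k is intentionally absent, matching its removal from generate_kwargs.
    generate_kwargs = {
        "temperature": temperature,
        "top_p": top_p,
        "max_new_tokens": max_new_tokens,
    }
    # stream=True yields tokens incrementally, the way a chat UI consumes them.
    for token in client.text_generation(prompt, stream=True, **generate_kwargs):
        yield token
```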
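For completeness, the parameter accordion after this change assembles to roughly the following; keyword arguments that fall outside the visible hunk context (for example, any trailing `interactive` or `info` settings on the last two sliders) are omitted rather than guessed.

```python
# The "Parameters" accordion as it stands after this commit: no Top-k slider,
# and explicit defaults on the three remaining controls.
import gradio as gr

with gr.Blocks() as demo:
    with gr.Accordion(label="Parameters", open=False, elem_id="parameters-accordion"):
        temperature = gr.Slider(
            label="Temperature",
            value=0.7,
            minimum=0.0,
            maximum=1.0,
            step=0.1,
            interactive=True,
            info="Higher values produce more diverse outputs",
        )
        top_p = gr.Slider(
            label="Top-p (nucleus sampling)",
            value=0.9,
            minimum=0.0,
            maximum=1,
            step=0.05,
            # remaining keyword arguments lie outside the diff context
        )
        max_new_tokens = gr.Slider(
            label="Max new tokens",
            value=1024,
            minimum=0,
            maximum=1024,
            step=4,
            # remaining keyword arguments lie outside the diff context
        )
```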