Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -14,8 +14,11 @@ def format_prompt(message, history):
|
|
14 |
return prompt
|
15 |
|
16 |
def generate(
|
17 |
-
prompt, history,
|
18 |
):
|
|
|
|
|
|
|
19 |
temperature = float(temperature)
|
20 |
if temperature < 1e-2:
|
21 |
temperature = 1e-2
|
@@ -48,7 +51,11 @@ demo = gr.ChatInterface(fn=generate,
|
|
48 |
chatbot=mychatbot,
|
49 |
title="fast ai aaaaa!!",
|
50 |
retry_btn="Regenerate",
|
51 |
-
undo_btn="Undo"
|
|
|
|
|
|
|
|
|
52 |
)
|
53 |
|
54 |
demo.queue().launch(show_api=True)
|
|
|
14 |
return prompt
|
15 |
|
16 |
def generate(
|
17 |
+
prompt, history, system_prompt="", max_new_tokens=1024, temperature=1.2, top_p=0.95, repetition_penalty=1.0,
|
18 |
):
|
19 |
+
print(history)
|
20 |
+
print(system_prompt)
|
21 |
+
print(max_new_tokens)
|
22 |
temperature = float(temperature)
|
23 |
if temperature < 1e-2:
|
24 |
temperature = 1e-2
|
|
|
51 |
chatbot=mychatbot,
|
52 |
title="fast ai aaaaa!!",
|
53 |
retry_btn="Regenerate",
|
54 |
+
undo_btn="Undo",
|
55 |
+
additional_inputs=[
|
56 |
+
gr.Textbox(label="System prompt", lines=3),
|
57 |
+
gr.Slider(label="Max new tokens", maximum=2048, value=512)
|
58 |
+
]
|
59 |
)
|
60 |
|
61 |
demo.queue().launch(show_api=True)
|