Update app.py
app.py
CHANGED
@@ -33,4 +33,129 @@ for r in stream:
         break
     # yield the generated token
     print(r.token.text, end = "")
-    # yield r.token.text
+    # yield r.token.text
+
+
+#######################################################################
+# Display with Gradio
+
+with open("custom.css", "r", encoding="utf-8") as f:
+    customCSS = f.read()
+
+with gr.Blocks(theme=small_and_beautiful_theme) as demo:
+    history = gr.State([])
+    user_question = gr.State("")
+    gr.Markdown("KIs am LI - wähle aus, was du bzgl. KI-Bots ausprobieren möchtest!")
+    with gr.Tabs():
+        with gr.TabItem("LI-Chat"):
+            with gr.Row():
+                gr.HTML(title)
+                status_display = gr.Markdown("Erfolg", elem_id="status_display")
+            gr.Markdown(description_top)
+            with gr.Row(scale=1).style(equal_height=True):
+                with gr.Column(scale=5):
+                    with gr.Row(scale=1):
+                        chatbotGr = gr.Chatbot(elem_id="LI_chatbot").style(height="100%")
+                    with gr.Row(scale=1):
+                        with gr.Column(scale=12):
+                            user_input = gr.Textbox(
+                                show_label=False, placeholder="Gib deinen Text / Frage ein."
+                            ).style(container=False)
+                        with gr.Column(min_width=100, scale=1):
+                            submitBtn = gr.Button("Absenden")
+                        with gr.Column(min_width=100, scale=1):
+                            cancelBtn = gr.Button("Stoppen")
+                    with gr.Row(scale=1):
+                        emptyBtn = gr.Button(
+                            "🧹 Neuer Chat",
+                        )
+                with gr.Column():
+                    with gr.Column(min_width=50, scale=1):
+                        with gr.Tab(label="Nur zum Testen:"):
+                            gr.Markdown("# Parameter")
+                            top_p = gr.Slider(
+                                minimum=-0,
+                                maximum=1.0,
+                                value=0.95,
+                                step=0.05,
+                                interactive=True,
+                                label="Top-p",
+                            )
+                            temperature = gr.Slider(
+                                minimum=0.1,
+                                maximum=2.0,
+                                value=1,
+                                step=0.1,
+                                interactive=True,
+                                label="Temperature",
+                            )
+                            max_length_tokens = gr.Slider(
+                                minimum=0,
+                                maximum=512,
+                                value=512,
+                                step=8,
+                                interactive=True,
+                                label="Max Generation Tokens",
+                            )
+                            max_context_length_tokens = gr.Slider(
+                                minimum=0,
+                                maximum=4096,
+                                value=2048,
+                                step=128,
+                                interactive=True,
+                                label="Max History Tokens",
+                            )
+            gr.Markdown(description)
+
+        with gr.TabItem("Übersetzungen"):
+            with gr.Row():
+                gr.Textbox(
+                    show_label=False, placeholder="Ist noch in Arbeit..."
+                ).style(container=False)
+        with gr.TabItem("Code-Generierungen"):
+            with gr.Row():
+                gr.Textbox(
+                    show_label=False, placeholder="Ist noch in Arbeit..."
+                ).style(container=False)
+
+    predict_args = dict(
+        fn=predict,
+        inputs=[
+            user_question,
+            chatbotGr,
+            history,
+            top_p,
+            temperature,
+            max_length_tokens,
+            max_context_length_tokens,
+        ],
+        outputs=[chatbotGr, history, status_display],
+        show_progress=True,
+    )
+
+    # new chat
+    reset_args = dict(
+        #fn=reset_chat, inputs=[], outputs=[user_input, status_display]
+        fn=reset_textbox, inputs=[], outputs=[user_input, status_display]
+    )
+
+    # chatbot
+    transfer_input_args = dict(
+        fn=transfer_input, inputs=[user_input], outputs=[user_question, user_input, submitBtn], show_progress=True
+    )
+
+    # listeners for start: submit button click or Return key
+    predict_event1 = user_input.submit(**transfer_input_args).then(**predict_args)
+    predict_event2 = submitBtn.click(**transfer_input_args).then(**predict_args)
+
+    # listener when reset is clicked...
+    emptyBtn.click(
+        reset_state,
+        outputs=[chatbotGr, history, status_display],
+        show_progress=True,
+    )
+    emptyBtn.click(**reset_args)
+
+demo.title = "LI Chat"
+#demo.queue(concurrency_count=1).launch(share=True)
+demo.queue(concurrency_count=1).launch(debug=True)
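The hunk wires four callbacks (predict, transfer_input, reset_textbox, reset_state) into the Blocks events but does not include their definitions; they live elsewhere in app.py. Below is a minimal sketch of what such helpers could look like, inferred only from the inputs/outputs lists in the diff; the bodies (including the echo placeholder for predict) are assumptions for illustration, not the Space's actual code.

import gradio as gr

def transfer_input(user_input):
    # outputs=[user_question, user_input, submitBtn]: copy the typed text into
    # the user_question state, clear the textbox, leave the button unchanged
    return user_input, gr.update(value=""), gr.update()

def reset_textbox():
    # outputs=[user_input, status_display]: clear the textbox and report status
    return gr.update(value=""), "Textbox geleert"

def reset_state():
    # outputs=[chatbotGr, history, status_display]: start an empty conversation
    return [], [], "Erfolg"

def predict(user_question, chatbot, history, top_p, temperature,
            max_length_tokens, max_context_length_tokens):
    # outputs=[chatbotGr, history, status_display]; placeholder echo only,
    # the real function streams tokens from the model as in the
    # `for r in stream:` loop at the top of the hunk
    chatbot = chatbot + [(user_question, f"(echo) {user_question}")]
    history = history + [user_question]
    return chatbot, history, "Erfolg"

Note that emptyBtn registers two click handlers: one clears the chat and history via reset_state, the other clears the textbox via reset_args; Gradio runs both when the button is pressed.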