Yuan2Z committed on
Commit
eb93171
·
verified ·
1 Parent(s): 6cced7f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -15
app.py CHANGED
@@ -1,3 +1,4 @@
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
@@ -20,6 +21,9 @@ def respond(
20
  temperature,
21
  top_p,
22
  ):
 
 
 
23
  messages = [{"role": "system", "content": system_message}]
24
 
25
  for val in history:
@@ -30,27 +34,34 @@ def respond(
30
 
31
  messages.append({"role": "user", "content": message})
32
 
 
33
  response = ""
34
 
35
- for message in client.text_generation(
36
- prompt="\n".join([m["content"] for m in messages]),
37
- max_new_tokens=max_tokens,
38
- stream=True,
39
- temperature=temperature,
40
- top_p=top_p,
41
- ):
42
- #token = message.token.text
43
- token = message
44
-
45
- response += token
46
- yield response
 
 
 
47
 
48
  def update_textbox(prompt):
49
- return gr.Textbox.update(value=prompt)
50
 
51
  with gr.Blocks() as demo:
52
  chatbot = gr.Chatbot()
53
- msg = gr.Textbox()
 
 
 
54
  clear = gr.ClearButton([msg, chatbot])
55
 
56
  with gr.Accordion("Advanced options", open=False):
@@ -59,9 +70,9 @@ with gr.Blocks() as demo:
59
  temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
60
  top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
61
 
62
- prompt_dropdown = gr.Dropdown(choices=prompts, label="Select a premade prompt")
63
  prompt_dropdown.change(update_textbox, inputs=[prompt_dropdown], outputs=[msg])
64
 
 
65
  msg.submit(respond, [msg, chatbot, system, max_tokens, temperature, top_p], chatbot)
66
  clear.click(lambda: None, None, chatbot, queue=False)
67
 
 
1
+
2
  import gradio as gr
3
  from huggingface_hub import InferenceClient
4
 
 
21
  temperature,
22
  top_p,
23
  ):
24
+ if not message:
25
+ return []
26
+
27
  messages = [{"role": "system", "content": system_message}]
28
 
29
  for val in history:
 
34
 
35
  messages.append({"role": "user", "content": message})
36
 
37
+ prompt = "\n".join([m["content"] for m in messages])
38
  response = ""
39
 
40
+ try:
41
+ for chunk in client.text_generation(
42
+ prompt,
43
+ max_new_tokens=max_tokens,
44
+ stream=True,
45
+ temperature=temperature,
46
+ top_p=top_p,
47
+ ):
48
+ if isinstance(chunk, str):
49
+ response += chunk
50
+ else:
51
+ response += chunk.token.text if hasattr(chunk, 'token') else chunk.generated_text
52
+ yield [(message, response)]
53
+ except Exception as e:
54
+ yield [(message, f"An error occurred: {str(e)}")]
55
 
56
def update_textbox(prompt):
    """Return a Gradio component update that copies the selected premade
    prompt into the message textbox.

    Wired as the ``change`` handler of ``prompt_dropdown`` with the ``msg``
    textbox as output, so picking a dropdown entry pre-fills the textbox.
    Uses ``gr.update`` (the Gradio 4.x API) rather than the removed
    ``gr.Textbox.update``.
    """
    return gr.update(value=prompt)
58
 
59
  with gr.Blocks() as demo:
60
  chatbot = gr.Chatbot()
61
+ msg = gr.Textbox(label="Type your message or select a prompt")
62
+ with gr.Row():
63
+ prompt_dropdown = gr.Dropdown(choices=[""] + prompts, label="Select a premade prompt", value="")
64
+ submit = gr.Button("Submit")
65
  clear = gr.ClearButton([msg, chatbot])
66
 
67
  with gr.Accordion("Advanced options", open=False):
 
70
  temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
71
  top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
72
 
 
73
  prompt_dropdown.change(update_textbox, inputs=[prompt_dropdown], outputs=[msg])
74
 
75
+ submit.click(respond, [msg, chatbot, system, max_tokens, temperature, top_p], chatbot)
76
  msg.submit(respond, [msg, chatbot, system, max_tokens, temperature, top_p], chatbot)
77
  clear.click(lambda: None, None, chatbot, queue=False)
78