Spaces:
Runtime error
Runtime error
update theme for slider
Browse files
app.py
CHANGED
@@ -11,41 +11,28 @@ model_glm = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True
|
|
11 |
model_glm = model_glm.eval()
|
12 |
|
13 |
# Load pre-trained model and tokenizer for Chinese to English translator
|
14 |
-
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
|
15 |
-
model_chtoen = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
|
16 |
-
tokenizer_chtoen = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
|
17 |
|
18 |
-
def translate_Chinese_English(chinese_text):
    """Translate *chinese_text* from Chinese to English using the M2M100 model.

    Returns the first decoded translation string for the single input.
    """
    # The tokenizer must know the source language before encoding.
    tokenizer_chtoen.src_lang = "zh"
    model_inputs = tokenizer_chtoen(chinese_text, return_tensors="pt")
    # Force English as the generation target language.
    output_ids = model_chtoen.generate(
        **model_inputs,
        forced_bos_token_id=tokenizer_chtoen.get_lang_id("en"),
    )
    decoded = tokenizer_chtoen.batch_decode(output_ids, skip_special_tokens=True)
    return decoded[0]
|
25 |
|
26 |
|
27 |
# Define function to generate model predictions and update the history
|
28 |
-
def predict_glm_stream(input, top_p, temperature, history=None):
    """Stream ChatGLM-6B replies for *input*, yielding the updated chat history.

    Args:
        input: The user's message text.
        top_p: Nucleus-sampling probability mass forwarded to ``stream_chat``.
        temperature: Sampling temperature forwarded to ``stream_chat``.
        history: Prior (query, response) pairs; Gradio delivers lists, so
            each pair is normalized to a tuple below. Defaults to empty.

    Yields:
        The updated history after each incremental generation step, which
        Gradio renders as the streaming chatbot transcript.
    """
    # None-sentinel instead of a mutable default argument.
    history = [] if history is None else history
    history = list(map(tuple, history))
    # Fix: the previous body printed and yielded `response`/`updates`
    # without ever binding them (NameError at runtime). The generation
    # loop below is what binds both on every streamed step.
    for response, updates in model_glm.stream_chat(
        tokenizer_glm, input, history, top_p=top_p, temperature=temperature
    ):
        yield updates
|
39 |
-
|
40 |
|
41 |
def reset_textbox():
    """Clear the input textbox (wired to a button's .click elsewhere in the app)."""
    # Returning gr.update lets Gradio patch the component's value in place.
    return gr.update(value="")
|
43 |
|
44 |
-
def
|
45 |
-
#
|
46 |
-
|
47 |
-
|
48 |
-
|
|
|
|
|
49 |
|
50 |
|
51 |
title = """<h1 align="center"> 🚀CHatGLM-6B - A Streaming Chatbot with Gradio</h1>
|
@@ -60,7 +47,7 @@ However, due to the small size of ChatGLM-6B, it is currently known to have cons
|
|
60 |
theme = gr.themes.Default(#color constructors
|
61 |
primary_hue="violet",
|
62 |
secondary_hue="indigo",
|
63 |
-
neutral_hue="purple")
|
64 |
|
65 |
with gr.Blocks(css="""#col_container {margin-left: auto; margin-right: auto;}
|
66 |
#chatglm {height: 520px; overflow: auto;} """, theme=theme ) as demo:
|
@@ -80,7 +67,7 @@ with gr.Blocks(css="""#col_container {margin-left: auto; margin-right: auto;}
|
|
80 |
with gr.Box():
|
81 |
chatbot_glm = gr.Chatbot(elem_id="chatglm", label='THUDM-ChatGLM6B')
|
82 |
|
83 |
-
with gr.
|
84 |
gr.HTML("Parameters for ChatGLM-6B", visible=True)
|
85 |
top_p = gr.Slider(minimum=-0, maximum=1.0,value=1, step=0.05,interactive=True, label="Top-p", visible=True)
|
86 |
temperature = gr.Slider(minimum=-0, maximum=5.0, value=1, step=0.1, interactive=True, label="Temperature", visible=True)
|
@@ -95,7 +82,6 @@ with gr.Blocks(css="""#col_container {margin-left: auto; margin-right: auto;}
|
|
95 |
[chatbot_glm],)
|
96 |
b1.click(reset_textbox, [], [inputs])
|
97 |
|
98 |
-
#b2.click(reset_chat, [chatbot_glm, state_glm], [chatbot_glm, state_glm])
|
99 |
b2.click(lambda: None, None, chatbot_glm, queue=False)
|
100 |
|
101 |
gr.HTML('''<center><a href="https://huggingface.co/spaces/ysharma/ChatGLM-6b_Gradio_Streaming?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key</center>''')
|
|
|
11 |
model_glm = model_glm.eval()
|
12 |
|
13 |
# Load pre-trained model and tokenizer for Chinese to English translator
|
14 |
+
#from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
|
15 |
+
#model_chtoen = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
|
16 |
+
#tokenizer_chtoen = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
|
17 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
18 |
|
19 |
|
20 |
# Define function to generate model predictions and update the history
|
21 |
+
def predict_glm_stream(input, top_p, temperature, history=None):
    """Stream ChatGLM-6B replies for *input*, yielding the updated chat history.

    Args:
        input: The user's message text.
        top_p: Nucleus-sampling probability mass forwarded to ``stream_chat``.
        temperature: Sampling temperature forwarded to ``stream_chat``.
        history: Prior (query, response) pairs; Gradio delivers lists, so
            each pair is normalized to a tuple below. Defaults to empty.

    Yields:
        The updated history after each incremental generation step, which
        Gradio renders as the streaming chatbot transcript.
    """
    # Replace the mutable default argument with a None sentinel.
    if history is None:
        history = []
    history = list(map(tuple, history))
    for response, updates in model_glm.stream_chat(
        tokenizer_glm, input, history, top_p=top_p, temperature=temperature
    ):
        yield updates
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
25 |
|
26 |
def reset_textbox():
    """Clear the input textbox (wired to a button's .click elsewhere in the app)."""
    # gr.update(value="") tells Gradio to overwrite the component's value.
    return gr.update(value="")
|
28 |
|
29 |
+
def translate_Chinese_English(chinese_text):
    """Translate *chinese_text* from Chinese to English via M2M100.

    Returns the first decoded translation for the single input string.

    NOTE(review): the module-level loads of ``model_chtoen`` and
    ``tokenizer_chtoen`` appear to be commented out in this revision, so
    calling this function would raise NameError — confirm before use.
    """
    # Declare the source language before tokenizing.
    tokenizer_chtoen.src_lang = "zh"
    encoded = tokenizer_chtoen(chinese_text, return_tensors="pt")
    # forced_bos_token_id pins English as the target language.
    generated = model_chtoen.generate(
        **encoded,
        forced_bos_token_id=tokenizer_chtoen.get_lang_id("en"),
    )
    texts = tokenizer_chtoen.batch_decode(generated, skip_special_tokens=True)
    return texts[0]
|
36 |
|
37 |
|
38 |
title = """<h1 align="center"> 🚀CHatGLM-6B - A Streaming Chatbot with Gradio</h1>
|
|
|
47 |
theme = gr.themes.Default(#color constructors
|
48 |
primary_hue="violet",
|
49 |
secondary_hue="indigo",
|
50 |
+
neutral_hue="purple").set(slider_color="#800080")
|
51 |
|
52 |
with gr.Blocks(css="""#col_container {margin-left: auto; margin-right: auto;}
|
53 |
#chatglm {height: 520px; overflow: auto;} """, theme=theme ) as demo:
|
|
|
67 |
with gr.Box():
|
68 |
chatbot_glm = gr.Chatbot(elem_id="chatglm", label='THUDM-ChatGLM6B')
|
69 |
|
70 |
+
with gr.Accordion(label="Parameters for ChatGLM-6B", open=False):
|
71 |
gr.HTML("Parameters for ChatGLM-6B", visible=True)
|
72 |
top_p = gr.Slider(minimum=-0, maximum=1.0,value=1, step=0.05,interactive=True, label="Top-p", visible=True)
|
73 |
temperature = gr.Slider(minimum=-0, maximum=5.0, value=1, step=0.1, interactive=True, label="Temperature", visible=True)
|
|
|
82 |
[chatbot_glm],)
|
83 |
b1.click(reset_textbox, [], [inputs])
|
84 |
|
|
|
85 |
b2.click(lambda: None, None, chatbot_glm, queue=False)
|
86 |
|
87 |
gr.HTML('''<center><a href="https://huggingface.co/spaces/ysharma/ChatGLM-6b_Gradio_Streaming?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key</center>''')
|