Spaces:
Runtime error
Runtime error
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os

import gradio as gr
from gradio_client import Client

# Read the Hugging Face token from the environment. The original code
# referenced an undefined name HF_TOKEN, which raised NameError at import
# time (the Space's visible "Runtime error"). Client accepts hf_token=None,
# so a missing env var still works for public Spaces.
HF_TOKEN = os.environ.get("HF_TOKEN")

# Gradio Python clients for the two hosted Llama chat Spaces being compared.
client_llama3_1 = Client("ysharma/Chat_with_Meta_llama3_1_8b_dupe", hf_token=HF_TOKEN)
client_llama3 = Client("ysharma/Chat_with_Meta_llama3_8b_dupe", hf_token=HF_TOKEN)
7 |
+
|
8 |
+
|
# Page-level CSS: keeps the H1 title compact and lets it flex inside its row.
css = """
h1 {
  margin: 0;
  flex-grow: 1;
  font-size: 24px;
  min-width: 200px;
}
"""

# Page heading, rendered via gr.HTML at the top of the Blocks layout.
TITLE = """<h1 style="text-align: center;">Meta Llama3.1 8B V/s Meta Llama3 8B</h1>"""

# Empty-state HTML for the Llama3 chatbot panel (Meta logo + prompt hint).
# NOTE(review): only referenced by commented-out placeholder= args further
# down — confirm whether it should be wired into gr.Chatbot(placeholder=...).
PLACEHOLDER_LLAMA3 = """
<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
   <img src="https://ysharma-dummy-chat-app.hf.space/file=/tmp/gradio/8e75e61cc9bab22b7ce3dec85ab0e6db1da5d107/Meta_lockup_positive%20primary_RGB.jpg" style="width: 80%; max-width: 550px; height: auto; opacity: 0.55; ">
   <h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">Meta Llama3</h1>
   <p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Ask me anything...</p>
</div>
"""

# Empty-state HTML for the Llama3.1 chatbot panel; same layout, different title.
# NOTE(review): also currently unused — see note above PLACEHOLDER_LLAMA3.
PLACEHOLDER_LLAMA3_1 = """
<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
   <img src="https://ysharma-dummy-chat-app.hf.space/file=/tmp/gradio/8e75e61cc9bab22b7ce3dec85ab0e6db1da5d107/Meta_lockup_positive%20primary_RGB.jpg" style="width: 80%; max-width: 550px; height: auto; opacity: 0.55; ">
   <h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">Meta Llama3.1</h1>
   <p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Ask me anything...</p>
</div>
"""
35 |
+
|
36 |
+
|
37 |
+
# Inference functions for Chatbots
|
def user_llama3(user_message, history_llama3):
    """Record the submitted message as a new (user, pending-bot) turn.

    Returns an empty string (clears the textbox) and the Llama3 history
    extended with the new turn; the bot slot stays None until streaming
    fills it in.
    """
    updated_history = [*history_llama3, [user_message, None]]
    return "", updated_history
40 |
+
|
def user_llama3_1(user_message, history_llama3_1):
    """Record the submitted message as a new (user, pending-bot) turn.

    Mirrors user_llama3 but operates on the Llama3.1 chat history: clears
    the textbox and appends [message, None] as the newest turn.
    """
    updated_history = [*history_llama3_1, [user_message, None]]
    return "", updated_history
43 |
+
|
def chat_llama3(history_llama3, temp, max_tokens):
    """Stream the Llama3 reply for the newest user turn into the history.

    Submits only the latest user message to the remote Space and yields the
    mutated history each time a usable partial completion arrives.
    """
    latest_prompt = history_llama3[-1][0]
    history_llama3[-1][1] = ""
    for partial in client_llama3.submit(latest_prompt, temp, max_tokens):
        # Presumably the stream echoes the chat template; only yield once an
        # 'assistant' marker appears, keeping just the text after the last one.
        if "assistant" not in partial:
            continue
        history_llama3[-1][1] = partial.split('assistant')[-1]
        yield history_llama3
51 |
+
|
def chat_llama3_1(history_llama3_1, temp, max_tokens):
    """Stream the Llama3.1 reply for the newest user turn into the history.

    Unlike chat_llama3, each partial result from the remote Space is used
    verbatim as the bot message (no 'assistant' marker splitting).
    """
    history_llama3_1[-1][1] = ""
    job = client_llama3_1.submit(history_llama3_1[-1][0], temp, max_tokens)
    for partial in job:
        # Replace the bot slot wholesale with the latest partial completion.
        history_llama3_1[-1][1] = partial
        yield history_llama3_1
57 |
+
|
58 |
+
|
# Gradio UI components. Created up-front (render=False where supported) and
# placed into the Blocks layout later via .render().
chatbot_llama3 = gr.Chatbot(height=450, label='Llama3 8b Chat',) #placeholder=PLACEHOLDER,
chatbot_llama3_1 = gr.Chatbot(height=450, label='Llama3.1 8b Chat',) #placeholder=PLACEHOLDER,
# Shared input box driving both chatbots. Fix: label typo "User Mesaages".
textbox = gr.Textbox(placeholder="Type your text and press Enter", scale=7, label="User Messages")
additional_inputs_accordion = gr.Accordion(label="⚙️ Parameters", open=False, render=False)
# Sampling temperature slider, shared by both models.
temperature = gr.Slider(minimum=0,
                        maximum=1,
                        step=0.1,
                        value=0.95,
                        label="Temperature",
                        render=False)
# Generation length cap, shared by both models.
max_tokens = gr.Slider(minimum=128,
                       maximum=4096,
                       step=1,
                       value=512,
                       label="Max new tokens",
                       render=False)

# Example prompts; clicking one fills the textbox (it does not auto-submit).
examples = [
    ["There's a llama in my garden 😱 What should I do?"],
    ["What is the best way to open a can of worms?"],
    ["The odd numbers in this group add up to an even number: 15, 32, 5, 13, 82, 7, 1. "],
    ['How to setup a human base on Mars? Give short answer.'],
    ['Explain theory of relativity to me like I’m 8 years old.'],
    ['What is 9,000 * 9,000?'],
    ['Write a pun-filled happy birthday message to my friend Alex.'],
    ['Justify why a penguin might make a good king of the jungle.']
]
|
87 |
+
|
88 |
+
|
# Layout and event wiring. The components defined above are placed here via
# .render(); the single textbox drives both chatbots so one submission is
# answered side by side by both models.
with gr.Blocks(fill_height=True,css=css ) as demo:
    gr.HTML(TITLE)
    with gr.Row():
        # Llama3.1 panel on the left, Llama3 on the right.
        chatbot_llama3_1.render()
        chatbot_llama3.render()
    with gr.Row():
        textbox.render()
        clear = gr.Button("Clear")

    # Collapsible parameters section holding both sliders.
    additional_inputs_accordion.render()
    with additional_inputs_accordion:
        temperature.render()
        max_tokens.render()

    # NOTE(review): rebinds the module-level `examples` list to a gr.Examples
    # instance — harmless here, but the name is shadowed from this point on.
    examples = gr.Examples(examples, textbox)

    # One Enter/submit event fans out to both models: first append the user
    # turn to each history and clear the box (queue=False for a snappy echo),
    # then stream each model's reply into its chatbot.
    textbox.submit(user_llama3, [textbox, chatbot_llama3], [textbox, chatbot_llama3], queue=False).then(
        chat_llama3, [chatbot_llama3, temperature, max_tokens], chatbot_llama3)
    textbox.submit(user_llama3_1, [textbox, chatbot_llama3_1], [textbox, chatbot_llama3_1], queue=False).then(
        chat_llama3_1, [chatbot_llama3_1, temperature, max_tokens], chatbot_llama3_1)

    # Clear wipes both chat histories (None resets the Chatbot value).
    clear.click(lambda: None, None, chatbot_llama3, queue=False)
    clear.click(lambda: None, None, chatbot_llama3_1, queue=False)

if __name__ == "__main__":
    demo.launch(debug=True, )