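"""Side-by-side chat demo: Meta Llama3.1 8B vs. Meta Llama3 8B.

Both models run in separate Hugging Face Spaces and are queried here through
gradio_client; this app renders the two chat UIs and streams their replies.
"""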
import os

import gradio as gr
from gradio_client import Client

# HF token for the duped Spaces; assumed to be supplied as a Space secret /
# environment variable (it was referenced but never defined in the original).
HF_TOKEN = os.getenv("HF_TOKEN")

# Initialize Gradio Python Clients for the two Llama chatbots
client_llama3_1 = Client("ysharma/Chat_with_Meta_llama3_1_8b_dupe", hf_token=HF_TOKEN)
client_llama3 = Client("ysharma/Chat_with_Meta_llama3_8b_dupe", hf_token=HF_TOKEN)
css = """ | |
h1 { | |
margin: 0; | |
flex-grow: 1; | |
font-size: 24px; | |
min-width: 200px; | |
} | |
""" | |
TITLE = """<h1 style="text-align: center;">Meta Llama3.1 8B V/s Meta Llama3 8B</h1>""" | |
PLACEHOLDER_LLAMA3 = """ | |
<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;"> | |
<img src="https://ysharma-dummy-chat-app.hf.space/file=/tmp/gradio/8e75e61cc9bab22b7ce3dec85ab0e6db1da5d107/Meta_lockup_positive%20primary_RGB.jpg" style="width: 80%; max-width: 550px; height: auto; opacity: 0.55; "> | |
<h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">Meta Llama3</h1> | |
<p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Ask me anything...</p> | |
</div> | |
""" | |
PLACEHOLDER_LLAMA3_1 = """ | |
<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;"> | |
<img src="https://ysharma-dummy-chat-app.hf.space/file=/tmp/gradio/8e75e61cc9bab22b7ce3dec85ab0e6db1da5d107/Meta_lockup_positive%20primary_RGB.jpg" style="width: 80%; max-width: 550px; height: auto; opacity: 0.55; "> | |
<h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">Meta Llama3.1</h1> | |
<p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Ask me anything...</p> | |
</div> | |
""" | |
# Inference functions for the chatbots

def user_llama3(user_message, history_llama3):
    return "", history_llama3 + [[user_message, None]]


def user_llama3_1(user_message, history_llama3_1):
    return "", history_llama3_1 + [[user_message, None]]
def chat_llama3(history_llama3, temp, max_tokens):
    history_llama3[-1][1] = ""
    for result in client_llama3.submit(history_llama3[-1][0], temp, max_tokens):
        # This Space streams the full prompt template back, so keep only the
        # text after the last 'assistant' marker.
        if "assistant" in result:
            history_llama3[-1][1] = result.split("assistant")[-1]
        yield history_llama3

def chat_llama3_1(history_llama3_1, temp, max_tokens):
    history_llama3_1[-1][1] = ""
    for result in client_llama3_1.submit(history_llama3_1[-1][0], temp, max_tokens):
        history_llama3_1[-1][1] = result
        yield history_llama3_1
# Gradio components (rendered later inside the Blocks layout)
chatbot_llama3 = gr.Chatbot(height=450, label="Llama3 8b Chat", placeholder=PLACEHOLDER_LLAMA3)
chatbot_llama3_1 = gr.Chatbot(height=450, label="Llama3.1 8b Chat", placeholder=PLACEHOLDER_LLAMA3_1)
textbox = gr.Textbox(placeholder="Type your text and press Enter", scale=7, label="User Messages")
additional_inputs_accordion = gr.Accordion(label="⚙️ Parameters", open=False, render=False)
temperature = gr.Slider(
    minimum=0,
    maximum=1,
    step=0.1,
    value=0.95,
    label="Temperature",
    render=False,
)
max_tokens = gr.Slider(
    minimum=128,
    maximum=4096,
    step=1,
    value=512,
    label="Max new tokens",
    render=False,
)
examples = [
    ["There's a llama in my garden 😱 What should I do?"],
    ["What is the best way to open a can of worms?"],
    ["The odd numbers in this group add up to an even number: 15, 32, 5, 13, 82, 7, 1."],
    ["How to set up a human base on Mars? Give a short answer."],
    ["Explain the theory of relativity to me like I'm 8 years old."],
    ["What is 9,000 * 9,000?"],
    ["Write a pun-filled happy birthday message to my friend Alex."],
    ["Justify why a penguin might make a good king of the jungle."],
]
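
# Layout: title, the two chatbots side by side, a shared textbox with a Clear
# button, a collapsed parameters accordion, and clickable example prompts.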
with gr.Blocks(fill_height=True, css=css) as demo:
    gr.HTML(TITLE)
    with gr.Row():
        chatbot_llama3_1.render()
        chatbot_llama3.render()
    with gr.Row():
        textbox.render()
        clear = gr.Button("Clear")
    additional_inputs_accordion.render()
    with additional_inputs_accordion:
        temperature.render()
        max_tokens.render()
    gr.Examples(examples, textbox)

    # Each submit first records the user turn (unqueued, so the textbox clears
    # immediately), then streams the corresponding bot reply into its chatbot.
    textbox.submit(user_llama3, [textbox, chatbot_llama3], [textbox, chatbot_llama3], queue=False).then(
        chat_llama3, [chatbot_llama3, temperature, max_tokens], chatbot_llama3)
    textbox.submit(user_llama3_1, [textbox, chatbot_llama3_1], [textbox, chatbot_llama3_1], queue=False).then(
        chat_llama3_1, [chatbot_llama3_1, temperature, max_tokens], chatbot_llama3_1)
    clear.click(lambda: None, None, chatbot_llama3, queue=False)
    clear.click(lambda: None, None, chatbot_llama3_1, queue=False)
if __name__ == "__main__":
    demo.launch(debug=True)