import gradio as gr
from huggingface_hub import InferenceClient

# Initialize the Hugging Face inference client
client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
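# Note: gated models such as Meta-Llama-3 require accepting the license and
# authenticating; pass token="hf_..." to InferenceClient or set the HF_TOKEN
# environment variable (assumed setup, not shown in the original script).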
css = """
body, html {
height: 100%;
margin: 0;
font-family: Arial, Helvetica, sans-serif;
}
.container {
min-height: 100%;
background-image: url('./favicon.ico/'); /* Remplacez 'path_to_your_image.jpg' par le chemin de votre image */
background-position: center;
background-repeat: no-repeat;
background-size: cover;
}
h1 {
background: radial-gradient(circle, red, black);
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
font-size: 2em;
text-align: center;
margin-top: 0;
}
"""

# Define the response function
def respond(message, history, system_message, max_tokens, temperature, top_p):
    # Start from the system prompt, then replay the prior turns
    messages = [{"role": "system", "content": system_message}]
    for user_msg, bot_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if bot_msg:
            messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": message})

    response = client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=False,
        temperature=temperature,
        top_p=top_p,
    )
    # chat_completion returns a ChatCompletionOutput; the reply text
    # lives in choices[0].message.content
    return response.choices[0].message.content
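
# Optional sketch (not wired into the UI below): the same call with
# stream=True yields chunks whose incremental text lives in
# chunk.choices[0].delta.content, so a generator version of respond() lets
# Gradio render the reply as it arrives. respond_stream is a hypothetical
# name, not part of the original app.
def respond_stream(message, history, system_message, max_tokens, temperature, top_p):
    messages = [{"role": "system", "content": system_message}]
    for user_msg, bot_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if bot_msg:
            messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": message})

    partial = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:
            partial += token
            yield partial  # Gradio re-renders each yielded string as it grows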

# Build the Gradio interface
demo = gr.Interface(
    fn=respond,
    inputs=[
        gr.Textbox(label="Message"),
        # type="array" delivers the history as a list of [user, assistant]
        # pairs, which is what respond() unpacks
        gr.Dataframe(headers=["User", "Assistant"], datatype=["str", "str"],
                     value=[["Hello", "Hi there!"]], type="array", label="History"),
        gr.Textbox(label="System message", value="You are a helpful assistant."),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max tokens"),
        gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
    ],
    outputs=gr.Textbox(label="Response"),
    title="Chat with Meta-Llama",
    description="This is a Gradio interface for chatting with a model hosted on Hugging Face.",
    css=css,
)
if __name__ == "__main__":
demo.launch()
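    # demo.launch(share=True) would additionally expose a temporary public URL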