# PacManAI-LL / app.py
import gradio as gr
from transformers import pipeline, set_seed
import os
# Hugging Face access token (stored as a Space secret) for the gated LLaMA 2 weights
api_key = os.environ.get("LLama2Secret")

# Initialize the pipeline with the LLaMA 2-7B chat model.
# The Transformers-compatible checkpoint is the "-hf" variant, and LLaMA 2 is a
# causal (decoder-only) model, so the task is "text-generation" rather than
# "text2text-generation". `token` replaces the deprecated `use_auth_token`.
model_name = "meta-llama/Llama-2-7b-chat-hf"
chat_model = pipeline("text-generation", model=model_name, token=api_key)
def ask_llama2(question):
    # Fix the seed so repeated questions give reproducible answers
    set_seed(42)
    # max_new_tokens bounds only the generated continuation (max_length would also
    # count the prompt); return_full_text=False strips the echoed prompt
    responses = chat_model(question, max_new_tokens=50, return_full_text=False)
    return responses[0]['generated_text']
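
# Optional helper (not wired into the interface below): LLaMA 2 chat checkpoints
# were fine-tuned on the [INST] ... [/INST] prompt template, so wrapping the
# question in that format tends to produce better chat-style answers. This is a
# minimal sketch; the default system prompt is a placeholder, not part of the
# original app.
def format_llama2_prompt(question, system_prompt="You are a helpful assistant."):
    return f"[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n{question} [/INST]"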
iface = gr.Interface(fn=ask_llama2, inputs="text", outputs="text", title="Chat with LLaMA 2")
iface.launch()