# sample/app.py
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Model repo on the Hugging Face Hub. Note that this repo ships GGUF
# quantizations, which a plain from_pretrained() call cannot read; see the
# hedged GGUF-loading sketch after the try/except below.
model_name = "QuantFactory/Meta-Llama-3-8B-Instruct-GGUF"

try:
    model = AutoModelForCausalLM.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
except OSError as e:
    # Fail fast: without the re-raise, generate_response would later hit a
    # NameError on the undefined `model`.
    print(f"Error loading the model: {e}")
    raise
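
# A minimal sketch of loading the GGUF weights directly, assuming a recent
# transformers (>= 4.41) that supports the gguf_file argument; the filename
# below is a hypothetical placeholder, check the repo for the actual .gguf file:
#
#   gguf_file = "Meta-Llama-3-8B-Instruct.Q4_K_M.gguf"  # hypothetical name
#   model = AutoModelForCausalLM.from_pretrained(model_name, gguf_file=gguf_file)
#   tokenizer = AutoTokenizer.from_pretrained(model_name, gguf_file=gguf_file)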

def generate_response(prompt):
    # Use the tokenizer instance; the original called the AutoTokenizer class
    # directly, which raises a TypeError.
    inputs = tokenizer(prompt, return_tensors="pt")
    # Cap generation length; without max_new_tokens, generate() falls back to
    # a very short default max_length.
    outputs = model.generate(**inputs, max_new_tokens=256)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response

interface = gr.Interface(fn=generate_response, inputs="text", outputs="text")
interface.launch()
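
# Note: launch() blocks until the server stops; on a hosted Space no extra
# arguments are needed, while for local runs share=True (a standard Gradio
# option) exposes a temporary public URL.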