import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Replace with the actual model path or identifier
model_name = "meta-llama/Llama-8B"

model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

def generate_text(prompt):
    # Tokenize the prompt and generate a continuation
    inputs = tokenizer(prompt, return_tensors="pt")
    # Cap the number of generated tokens so the response isn't cut off at the default length
    outputs = model.generate(**inputs, max_new_tokens=256)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Create a Gradio interface with a single text box for input and output
iface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="Llama 8B Text Generation",
)
iface.launch()