import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
# Load model and tokenizer. Note: the "-gguf" repo holds GGUF-quantized files,
# which a plain from_pretrained() call cannot load, so use the full-precision repo.
model_id = "anthracite-org/magnum-v4-12b"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")  # adjust dtype/device_map for your hardware
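
# If the GGUF checkpoint is specifically wanted, transformers (>= 4.41) can
# dequantize it via the gguf_file argument. A minimal sketch; the filename
# below is an assumption and must match a .gguf file actually present in the repo:
# gguf_repo = "anthracite-org/magnum-v4-12b-gguf"
# tokenizer = AutoTokenizer.from_pretrained(gguf_repo, gguf_file="magnum-v4-12b-Q4_K_M.gguf")
# model = AutoModelForCausalLM.from_pretrained(gguf_repo, gguf_file="magnum-v4-12b-Q4_K_M.gguf")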
# Gradio passes the inputs positionally, so the slider-backed parameters must
# come first; in the original signature num_return_sequences sat second, which
# silently received the Temperature slider's value.
def generate_text(prompt, max_length=100, temperature=0.7, top_p=0.95, top_k=50, num_return_sequences=1):
    try:
        # Ensure inputs are on the same device as the model
        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
        outputs = model.generate(
            **inputs,
            max_new_tokens=max_length,  # max_new_tokens counts only generated tokens (Transformers >= 4.30)
            do_sample=True,
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            num_return_sequences=num_return_sequences,
        )
        generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        return "\n\n".join(generated_text)
    except Exception as e:  # Surface errors in the UI instead of crashing the app
        return f"Error: {e}"
iface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(lines=2, placeholder="Enter a prompt...", label="Prompt"),
        gr.Slider(50, 200, value=100, step=1, label="Max New Tokens"),
        gr.Slider(0.1, 1, value=0.7, label="Temperature"),  # a minimum of 0.1: temperature 0 is invalid with do_sample=True
        gr.Slider(0, 1, value=0.95, label="Top p"),
        gr.Slider(0, 50, value=50, step=1, label="Top k"),
    ],
    outputs="text",
    title="Text Generation with magnum-v4-12b",
    description="Generate text using the magnum model. Please be patient, as generation can take time.",
)
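
# Optional (an addition, not in the original app): Gradio's built-in queue
# serializes requests so long generations don't time out for concurrent users.
# iface.queue()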
iface.launch(share=True)  # share=True creates a temporary public link when running locally; Hugging Face Spaces ignores it