# lora_model/app.py
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import gradio as gr

# Load the fine-tuned model and its tokenizer.
# AutoModelForCausalLM (rather than the bare AutoModel) is required here:
# the plain base model exposes only hidden states, not vocabulary logits,
# so it cannot be used for text generation.
model_name = "abdfajar707/rkp_llama3_lora_model"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
model.eval()
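# Note (assumption, not from the original app): on memory-constrained
# hardware the checkpoint can instead be loaded in half precision, e.g.
#   model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16)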
# Generate a continuation of the prompt with the causal LM.
# (Taking the argmax over last_hidden_state, as before, yields indices into
# the hidden dimension, not token IDs; autoregressive generate() is the fix.)
def generate_text(prompt):
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        output_ids = model.generate(
            **inputs,
            max_new_tokens=256,          # generation budget; adjust as needed
            do_sample=True,
            temperature=0.7,
            pad_token_id=tokenizer.eos_token_id,
        )
    # Decode only the newly generated tokens, skipping the echoed prompt.
    new_tokens = output_ids[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)
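# Optional sanity check (illustrative only; assumes the checkpoint downloads
# and fits in memory). Uncomment to test generation before launching the UI:
# print(generate_text("Hello, how are you?"))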
# Create the Gradio interface. gr.Textbox replaces the removed
# gr.inputs.Textbox API from older Gradio releases.
iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
    outputs="text",
)
# Launch the Gradio interface
iface.launch()