Sh3rlockhomes's picture
Create app.py
b60996a verified
raw
history blame
936 Bytes
import gradio as gr
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
# Load the model and tokenizer from the repository.
# NOTE: this runs at import time and downloads weights on first use, so
# startup can be slow and requires network access.
model_name = "Dumele/autotrain-shhsb-57a2l"
# SECURITY NOTE(review): trust_remote_code=True executes arbitrary Python
# shipped with the model repo -- only keep this for repositories you trust.
model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
# Define the text generation function.
_pipe = None  # lazily-built generation pipeline, shared across calls


def generate_text(prompt):
    """Generate a text continuation of *prompt* (up to 200 tokens total).

    Returns the pipeline's 'generated_text' string, which (per the
    transformers text-generation pipeline default) includes the prompt.
    """
    global _pipe
    # Build the pipeline once and reuse it: constructing a pipeline wraps
    # the model/tokenizer and is far more expensive than one generation
    # call, so rebuilding it per request wasted time on every invocation.
    if _pipe is None:
        _pipe = pipeline(
            task="text-generation",
            model=model,
            tokenizer=tokenizer,
            max_length=200,
        )
    result = _pipe(prompt)
    return result[0]['generated_text']
# Create the Gradio interface.
# NOTE: gr.inputs.Textbox was deprecated in Gradio 3.x and removed in 4.x;
# components are now referenced directly off the gr namespace.
iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
    outputs="text",
    title="Text Generation with Mistral-7B",
    description="Generate text using the fine-tuned Mistral-7B model from the Dumele repository."
)
# Launch the Gradio interface (blocks and serves the web UI; runs at
# import time, which is the convention for Hugging Face Space app.py files).
iface.launch()