import gradio as gr
from transformers import GPT2LMHeadModel, GPT2Tokenizer
# Load the GPT-2 model and tokenizer
model_name = "gpt2-xl"
model = GPT2LMHeadModel.from_pretrained(model_name)
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
def generate_text(prompt, max_tokens=100, temperature=0.7):
    input_ids = tokenizer.encode(prompt, return_tensors='pt')
    # Generate text (do_sample=True so the temperature setting actually takes effect;
    # max_new_tokens caps only the newly generated tokens, not prompt + output)
    output = model.generate(input_ids, max_new_tokens=max_tokens, temperature=temperature, do_sample=True, num_return_sequences=1, pad_token_id=tokenizer.eos_token_id)
    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
    return generated_text
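
# A quick sanity check outside the web UI (the prompt and settings below are
# illustrative examples, not part of the original app); uncomment to try it:
# print(generate_text("Once upon a time", max_tokens=50, temperature=0.9))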
# Create a Gradio interface
demo = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(label="Input Prompt"),
        # GPT-2's context window is 1024 tokens, so cap the slider there
        gr.Slider(minimum=1, maximum=1024, value=100, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
    ],
    outputs="text",
    title="GPT-2 Text Generation",
    description="Enter a prompt to generate text using the GPT-2 model.",
)
if __name__ == "__main__":
    demo.launch()
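
# Note: on Hugging Face Spaces, demo.launch() as written above is sufficient; when
# running locally, one could optionally pass share=True to launch() to get a
# temporary public link (an optional Gradio feature, not part of the original file).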