|
import numpy as np |
|
import gradio as gr |
|
from transformers import GPT2Tokenizer, TrainingArguments, Trainer, GPT2LMHeadModel |
|
|
|
# Tokenizer for the fine-tuned checkpoint, configured with the same special
# tokens (bos/eos/pad) that were used during fine-tuning so encoding matches
# the training format. NOTE: downloads the model files on first run.
tokenizer = GPT2Tokenizer.from_pretrained('mindwrapped/gpt2-lotr-fellowship', bos_token='<|startoftext|>',

                                          eos_token='<|endoftext|>', pad_token='<|pad|>')

# GPT-2 with a language-modeling head, fine-tuned on "The Fellowship of the Ring".
model = GPT2LMHeadModel.from_pretrained('mindwrapped/gpt2-lotr-fellowship')
|
|
|
|
|
def generate_text(text, temperature):
    """Generate LOTR-style text by sampling from the fine-tuned GPT-2 model.

    The model output is fed back in as the prompt and generation is repeated
    until the result is longer than 150 characters, so short samples get
    extended rather than returned as-is.

    Args:
        text: Seed prompt (may be empty, letting the model free-generate).
        temperature: Sampling temperature; higher values give wilder text.
            Accepted as str/float (Gradio sliders may pass either).

    Returns:
        The generated text with special tokens stripped.
    """
    out = text
    # Cap the number of regeneration rounds so a model that keeps producing
    # short outputs cannot loop forever (the original `while not stop` had
    # no upper bound).
    for _ in range(10):
        input_ids = tokenizer("<|startoftext|> " + out, return_tensors="pt").input_ids
        # pad_token_id is passed explicitly: the tokenizer defines '<|pad|>',
        # and omitting it makes generate() warn and fall back to eos.
        sample_outputs = model.generate(
            input_ids,
            do_sample=True,
            top_k=50,
            max_length=300,
            top_p=0.95,
            temperature=float(temperature),
            num_return_sequences=1,
            pad_token_id=tokenizer.pad_token_id,
        )
        # num_return_sequences=1, so only the single sequence needs decoding
        # (the original looped over sample_outputs for no reason).
        out = tokenizer.decode(sample_outputs[0], skip_special_tokens=True)
        if len(out) > 150:
            break
    return out
|
|
|
|
|
# Gradio UI: a text prompt plus a temperature slider feeding generate_text.
demo = gr.Interface(

    fn=generate_text,

    # Inputs map positionally to generate_text(text, temperature).
    inputs=[gr.Text(),gr.Slider(minimum=0.0, maximum=5.0, value=1.0, step=0.1)],

    outputs='text',

    # Clickable example rows: [prompt, temperature]. The first shows
    # free-generation from an empty prompt at high temperature.
    examples=[['', 1.9],['Frodo and Sam moved quietly through the night.', 2.0], ['Frodo and Sam went to the pub. ', 3.0]],

    title='LOTR Generator',

    description='This space uses GPT2 model fine-tuned on the "The Fellowship of the Rings" to generate text. Try inputting no text to the model and messing around with the temperature.',

    article=' ![visitor badge](https://visitor-badge.glitch.me/badge?page_id=mindwrapped.gpt2-lotr-fellowship-generator-space)',

    # Only generate on submit, not on every keystroke (generation is slow).
    live=False,

)

# Blocks here serving the app; debug=True surfaces errors in the console.
demo.launch(debug=True)