from transformers import GPT2Tokenizer, TFGPT2LMHeadModel, pipeline
import gradio as gr

# Load the fine-tuned fantasy model and the base GPT-2 tokenizer.
model = TFGPT2LMHeadModel.from_pretrained("egosumkira/gpt2-fantasy")
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")

story = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device=0
)
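# Note (assumption about the runtime): device=0 pins the pipeline to the first GPU;
# on a CPU-only machine, passing device=-1 to pipeline() would run generation on CPU.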
def generate(tags_text, temp, n_beams, max_len):
    # Build the conditioning prefix the model was fine-tuned on: "~^tag1^tag2~@".
    tags = tags_text.split(", ")
    prefix = f"~^{'^'.join(tags)}~@"
    g_text = story(prefix, temperature=float(temp), repetition_penalty=7.0,
                   num_beams=int(n_beams), max_length=int(max_len))[0]["generated_text"]
    # Return only the story, i.e. everything after the "@" marker.
    return g_text[g_text.find("@") + 1:]
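
# Usage sketch (not part of the original app): calling
# generate("time travel, magic", 1.0, 3, 64) conditions the model on the
# prefix "~^time travel^magic~@" and returns the text generated after it.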
title = "GPT-2 fantasy story generator" | |
description = 'This is fine-tuned GPT-2 model for "conditional" generation. The model was trained on a custom-made dataset of IMDB plots & keywords.\n' \ | |
'Model page: https://huggingface.co/egosumkira/gpt2-fantasy \n' \ | |
'Notebooks: https://github.com/Agniwald/GPT-2-Fantasy' | |
iface = gr.Interface(
    generate,
    inputs=[
        gr.Textbox(label="Keywords (comma separated)"),
        gr.Slider(0, 2, value=1.0, step=0.05, label="Temperature"),
        gr.Slider(1, 10, value=3, step=1, label="Number of beams"),
        gr.Number(label="Max length", value=128)
    ],
    outputs=gr.Textbox(label="Output"),
    title=title,
    description=description,
    examples=[
        ["time travel, magic, rescue", 1.0, 3, 128],
        ["airplane crash", 1.0, 3, 128]
    ]
)
iface.queue()
iface.launch()