import gradio as gr
from transformers import AutoTokenizer, pipeline

# Base Spanish GPT-2 checkpoint; its tokenizer is reused for the fine-tuned model below.
sonnets_pretrained_model = "datificate/gpt2-small-spanish"
sonnets_tokenizer = AutoTokenizer.from_pretrained(sonnets_pretrained_model, use_fast=True)

# GPT-2 fine-tuned for Spanish poetry generation. It is a causal LM, so the
# pipeline task is "text-generation" rather than "text2text-generation".
sonnets_tuned_model = "hackathon-pln-es/gpt2-small-spanish-disco-poetry"
sonnets_pipe = pipeline("text-generation", model=sonnets_tuned_model, tokenizer=sonnets_tokenizer)

def make_new_sonnet(prompt, max_length):
    # Beam search with a strong repetition penalty to keep the verse from looping;
    # only one sequence is requested.
    outputs = sonnets_pipe(prompt,
                           max_length=max_length,
                           num_beams=5,
                           early_stopping=True,
                           repetition_penalty=20.0,
                           num_return_sequences=1)
    return outputs[0]['generated_text']

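# Illustrative quick check (assumed usage, not part of the original app); the
# prompt string here is only an example:
#     poem = make_new_sonnet("Dulce amor mío", 140)
#     print(poem)
# The pipeline returns a list of dicts with a 'generated_text' key, so the call
# above yields a single generated string.
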
iface = gr.Interface(fn=make_new_sonnet,
                     inputs=[
                         gr.Textbox(lines=2, placeholder="Escribe algo para comenzar",
                                    label="Escribe algo para comenzar"),
                         gr.Slider(minimum=60, maximum=200, value=140, step=10,
                                   label="Longitud máxima (tokens)"),
                     ],
                     outputs=[
                         gr.Textbox(label="Tu poema"),
                     ])

iface.queue().launch()