import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the T5-based paraphrasing model and its tokenizer
model = AutoModelForSeq2SeqLM.from_pretrained("ramsrigouthamg/t5-large-paraphraser-diverse-high-quality")
tokenizer = AutoTokenizer.from_pretrained("ramsrigouthamg/t5-large-paraphraser-diverse-high-quality")

# Run on GPU when available, otherwise fall back to CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
def generate_text(inp):
    # Prepend the task prefix the model was fine-tuned with
    context = inp
    text = "paraphrase: " + context + " </s>"
    encoding = tokenizer.encode_plus(text, max_length=128, padding=True, return_tensors="pt")
    input_ids, attention_mask = encoding["input_ids"].to(device), encoding["attention_mask"].to(device)
    model.eval()
    # Diverse beam search: 5 beams split into 5 groups, returning 5 candidates
    diverse_beam_outputs = model.generate(
        input_ids=input_ids,
        attention_mask=attention_mask,
        max_length=128,
        early_stopping=True,
        num_beams=5,
        num_beam_groups=5,
        num_return_sequences=5,
        diversity_penalty=0.70,
    )
    # Return the highest-ranked paraphrase
    sent = tokenizer.decode(diverse_beam_outputs[0], skip_special_tokens=True, clean_up_tokenization_spaces=True)
    return sent
# Simple Gradio interface: free-text input, paraphrased text output
output_text = gr.outputs.Textbox()
gr.Interface(generate_text, "textbox", output_text).launch(inline=False)