"""Gradio demo: Telugu abstractive summarization with the mBART-TeSum model."""
import gradio as gr
import torch
from transformers import MBartForConditionalGeneration, MBart50TokenizerFast

description = "Telugu Abstractive Summarization"
title = "TeSum"

# Load model once at startup and pin it to the best available device.
model = MBartForConditionalGeneration.from_pretrained("ashokurlana/mBART-TeSum")
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
model.eval()  # inference only — disable dropout
tokenizer = MBart50TokenizerFast.from_pretrained(
    "ashokurlana/mBART-TeSum", src_lang="te_IN", tgt_lang="te_IN"
)


def summarize(text):
    """Return an abstractive Telugu summary of *text*.

    Fixes over the original:
    - used undefined names ``src_text``/``tgt_text`` instead of the
      ``text`` parameter (NameError on every call);
    - returned the training forward pass (loss/logits) rather than a
      generated summary — inference must use ``model.generate`` and
      decode the token ids back to a string;
    - inputs were never moved to ``device`` even though the model was.
    """
    model_inputs = tokenizer(text, return_tensors="pt", truncation=True).to(device)
    with torch.no_grad():  # no gradients needed at inference time
        generated_ids = model.generate(
            **model_inputs,
            # mBART-50 requires forcing the target-language BOS token.
            forced_bos_token_id=tokenizer.lang_code_to_id["te_IN"],
            num_beams=4,
            max_length=256,
        )
    return tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]


# Fix: the original passed the undefined name `transcribe` to gr.Interface.
# Also surface the (previously unused) title/description in the UI.
interface = gr.Interface(
    summarize,
    inputs="text",
    outputs="text",
    title=title,
    description=description,
)
interface.launch(share=True)
# gr.Interface.load("models/ashokurlana/mBART-TeSum").launch()