# -*- coding: utf-8 -*-
"""
Streamlit app: abstractive text summarization with Pegasus.

Loads the ``google/pegasus-xsum`` checkpoint once at module import,
takes free text from a text input widget, and displays the model's
abstractive summary.

Created on Fri Nov 26 16:16:37 2021
@author: PC
"""
import streamlit as st
# NOTE(review): PegasusTokenizer and AutoModelForSeq2SeqLM are currently
# unused (AutoTokenizer / PegasusForConditionalGeneration are used instead);
# kept for compatibility in case other code relies on them.
from transformers import PegasusForConditionalGeneration, PegasusTokenizer
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch

# Prefer GPU when present; CPU generation works but is slow.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

model_name_xsum = 'google/pegasus-xsum'
tokenizer = AutoTokenizer.from_pretrained(model_name_xsum)
model = PegasusForConditionalGeneration.from_pretrained(model_name_xsum).to(device)


def pegasus_abs_summarize(src_text):
    """Return abstractive summaries for *src_text*.

    Parameters
    ----------
    src_text : str or list[str]
        Text (or batch of texts) to summarize; over-long inputs are
        truncated to the model's maximum length.

    Returns
    -------
    list[str]
        One decoded summary per input text, special tokens stripped.
    """
    batch = tokenizer(src_text, truncation=True, padding='longest',
                      return_tensors="pt").to(device)
    # Inference only: no_grad skips autograd bookkeeping, saving memory/time.
    with torch.no_grad():
        translated = model.generate(**batch)
    return tokenizer.batch_decode(translated, skip_special_tokens=True)


st.text_input("Input:", key="input")
# Guard against the empty first-render value: the original ran a full
# (slow) model generation on "" and printed a meaningless summary.
if st.session_state.input:
    abs_output = pegasus_abs_summarize(st.session_state.input)
    st.write('Abstractive output', abs_output)