""" app.py - the main module for the gradio app Usage: python app.py """ import contextlib import logging import os import random import re import time from pathlib import Path os.environ["USE_TORCH"] = "1" os.environ[ "TOKENIZERS_PARALLELISM" ] = "false" # parallelism on tokenizers is buggy with gradio logging.basicConfig( level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s" ) import gradio as gr import nltk import torch from cleantext import clean from doctr.models import ocr_predictor from pdf2text import convert_PDF_to_Text from summarize import load_model_and_tokenizer, summarize_via_tokenbatches from utils import load_example_filenames, saves_summary, truncate_word_count _here = Path(__file__).parent nltk.download("stopwords", quiet=True) MODEL_OPTIONS = [ "pszemraj/long-t5-tglobal-base-16384-book-summary", "pszemraj/long-t5-tglobal-base-sci-simplify", "pszemraj/long-t5-tglobal-base-sci-simplify-elife", "pszemraj/long-t5-tglobal-base-16384-booksci-summary-v1", "pszemraj/pegasus-x-large-book-summary", ] # models users can choose from def predict( input_text: str, model_name: str, token_batch_length: int = 1024, empty_cache: bool = True, **settings, ) -> list: """ predict - helper fn to support multiple models for summarization at once :param str input_text: the input text to summarize :param str model_name: model name to use :param int token_batch_length: the length of the token batches to use :param bool empty_cache: whether to empty the cache before loading a new= model :return: list of dicts with keys "summary" and "score" """ if torch.cuda.is_available() and empty_cache: torch.cuda.empty_cache() model, tokenizer = load_model_and_tokenizer(model_name) summaries = summarize_via_tokenbatches( input_text, model, tokenizer, batch_length=token_batch_length, **settings, ) return summaries def proc_submission( input_text: str, model_name: str, num_beams, token_batch_length, length_penalty, repetition_penalty, no_repeat_ngram_size, max_input_length: int = 1024, ): """ proc_submission - a helper function for the gradio module to process submissions Args: input_text (str): the input text to summarize model_name (str): the hf model tag of the model to use num_beams (int): the number of beams to use token_batch_length (int): the length of the token batches to use length_penalty (float): the length penalty to use repetition_penalty (float): the repetition penalty to use no_repeat_ngram_size (int): the no repeat ngram size to use max_input_length (int, optional): the maximum input length to use. Defaults to 768. Returns: str in HTML format, string of the summary, str of score """ settings = { "length_penalty": float(length_penalty), "repetition_penalty": float(repetition_penalty), "no_repeat_ngram_size": int(no_repeat_ngram_size), "encoder_no_repeat_ngram_size": 4, "num_beams": int(num_beams), "min_length": 4, "max_length": int(token_batch_length // 4), "early_stopping": True, "do_sample": False, } st = time.perf_counter() history = {} clean_text = clean(input_text, lower=False) max_input_length = 2048 if "base" in model_name.lower() else max_input_length processed = truncate_word_count(clean_text, max_input_length) if processed["was_truncated"]: tr_in = processed["truncated_text"] # create elaborate HTML warning input_wc = re.split(r"\s+", input_text) msg = f"""
def proc_submission(
    input_text: str,
    model_name: str,
    num_beams,
    token_batch_length,
    length_penalty,
    repetition_penalty,
    no_repeat_ngram_size,
    max_input_length: int = 1024,
):
    """
    proc_submission - a helper function for the gradio module to process submissions

    Args:
        input_text (str): the input text to summarize
        model_name (str): the hf model tag of the model to use
        num_beams (int): the number of beams to use
        token_batch_length (int): the length of the token batches to use
        length_penalty (float): the length penalty to use
        repetition_penalty (float): the repetition penalty to use
        no_repeat_ngram_size (int): the no repeat ngram size to use
        max_input_length (int, optional): the maximum input length to use. Defaults to 1024.

    Returns:
        tuple: (HTML status message, summary text, summary scores, path to saved summary file)
    """
    settings = {
        "length_penalty": float(length_penalty),
        "repetition_penalty": float(repetition_penalty),
        "no_repeat_ngram_size": int(no_repeat_ngram_size),
        "encoder_no_repeat_ngram_size": 4,
        "num_beams": int(num_beams),
        "min_length": 4,
        "max_length": int(token_batch_length // 4),
        "early_stopping": True,
        "do_sample": False,
    }

    st = time.perf_counter()
    history = {}
    clean_text = clean(input_text, lower=False)
    # the "base" models can handle longer inputs, so raise the cap for them
    max_input_length = 2048 if "base" in model_name.lower() else max_input_length
    processed = truncate_word_count(clean_text, max_input_length)

    if processed["was_truncated"]:
        tr_in = processed["truncated_text"]
        # create elaborate HTML warning
        input_wc = re.split(r"\s+", input_text)
        msg = f"""
        <p><b>Warning:</b> Input text was truncated to {max_input_length} words.
        That's about {100*max_input_length/len(input_wc):.2f}% of the submission.</p>
        """
        logging.warning(msg)
        history["WARNING"] = msg
    else:
        tr_in = input_text
        msg = None
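
    # guard clause: inputs that are too short produce useless summaries, so
    # return a warning early instead of invoking the model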
    if len(input_text) < 50:  # minimum character count to attempt summarization (assumed floor)
        msg = f"""
        <p><b>Warning:</b> Input text is too short to summarize. Detected {len(input_text)} characters.
        Please load text by selecting an example from the dropdown menu or by pasting text into the text box.</p>
        """
        logging.warning(msg)
        history["WARNING"] = msg
        return msg, "", "", None

    _summaries = predict(
        input_text=tr_in,
        model_name=model_name,
        token_batch_length=token_batch_length,
        **settings,
    )
    # each entry has "summary" and "score" keys (see predict)
    sum_text = [f"Section {i}: {s['summary']}" for i, s in enumerate(_summaries)]
    sum_scores = [
        f" - Section {i}: {round(float(s['score']), 4)}"
        for i, s in enumerate(_summaries)
    ]
    sum_text_out = "\n".join(sum_text)
    scores_out = "\n".join(sum_scores)

    rt = round((time.perf_counter() - st) / 60, 2)
    logging.info(f"Runtime: {rt} minutes")
    html = f"<p>Runtime: {rt} minutes with model: {model_name}</p>"
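    # surface any truncation / short-input warning alongside the runtime info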
" if msg is not None: html += msg html += "" # save to file saved_file = saves_summary(_summaries) return html, sum_text_out, scores_out, saved_file def load_single_example_text( example_path: str or Path, max_pages=20, ) -> str: """ load_single_example_text - loads a single example text file :param strorPath example_path: name of the example to load :param int max_pages: the maximum number of pages to load from a PDF :return str: the text of the example """ global name_to_path full_ex_path = name_to_path[example_path] full_ex_path = Path(full_ex_path) if full_ex_path.suffix == ".txt": with open(full_ex_path, "r", encoding="utf-8", errors="ignore") as f: raw_text = f.read() text = clean(raw_text, lower=False) elif full_ex_path.suffix == ".pdf": logging.info(f"Loading PDF file {full_ex_path}") conversion_stats = convert_PDF_to_Text( full_ex_path, ocr_model=ocr_model, max_pages=max_pages, ) text = conversion_stats["converted_text"] else: logging.error(f"Unknown file type {full_ex_path.suffix}") text = "ERROR - check example path" return text def load_uploaded_file(file_obj, max_pages: int = 20, lower: bool = False) -> str: """ load_uploaded_file - loads a file uploaded by the user :param file_obj (POTENTIALLY list): Gradio file object inside a list :param int max_pages: the maximum number of pages to load from a PDF :param bool lower: whether to lowercase the text :return str: the text of the file """ # check if mysterious file object is a list if isinstance(file_obj, list): file_obj = file_obj[0] file_path = Path(file_obj.name) try: logging.info(f"Loading file:\t{file_path}") if file_path.suffix == ".txt": with open(file_path, "r", encoding="utf-8", errors="ignore") as f: raw_text = f.read() text = clean(raw_text, lower=lower) elif file_path.suffix == ".pdf": logging.info(f"loading as PDF file {file_path}") conversion_stats = convert_PDF_to_Text( file_path, ocr_model=ocr_model, max_pages=max_pages, ) text = conversion_stats["converted_text"] else: logging.error(f"Unknown file type {file_path.suffix}") text = "ERROR - check file - unknown file type" return text except Exception as e: logging.error(f"Trying to load file:\t{file_path},\nerror:\t{e}") return "Error: Could not read file. Ensure that it is a valid text file with encoding UTF-8 if text, and a PDF if PDF." if __name__ == "__main__": logging.info("Starting app instance") logging.info("Loading OCR model") with contextlib.redirect_stdout(None): ocr_model = ocr_predictor( "db_resnet50", "crnn_mobilenet_v3_large", pretrained=True, assume_straight_pages=True, ) name_to_path = load_example_filenames(_here / "examples") logging.info(f"Loaded {len(name_to_path)} examples") demo = gr.Blocks() _examples = list(name_to_path.keys()) with demo: gr.Markdown("# Document Summarization with Long-Document Transformers") gr.Markdown( "This is an example use case for fine-tuned long document transformers. The model is trained on book summaries (via the BookSum dataset). The models in this demo are [LongT5-base](https://huggingface.co/pszemraj/long-t5-tglobal-base-16384-book-summary) and [Pegasus-X-Large](https://huggingface.co/pszemraj/pegasus-x-large-book-summary)." ) with gr.Column(): gr.Markdown("## Load Inputs & Select Parameters") gr.Markdown( "Enter text below in the text area. The text will be summarized [using the selected parameters](https://huggingface.co/blog/how-to-generate). Optionally load an example below or upload a file. 
if __name__ == "__main__":
    logging.info("Starting app instance")
    logging.info("Loading OCR model")
    with contextlib.redirect_stdout(None):
        ocr_model = ocr_predictor(
            "db_resnet50",
            "crnn_mobilenet_v3_large",
            pretrained=True,
            assume_straight_pages=True,
        )
    name_to_path = load_example_filenames(_here / "examples")
    logging.info(f"Loaded {len(name_to_path)} examples")

    demo = gr.Blocks()
    _examples = list(name_to_path.keys())
    with demo:
        gr.Markdown("# Document Summarization with Long-Document Transformers")
        gr.Markdown(
            "This is an example use case for fine-tuned long-document transformers. "
            "The models are trained on book summaries (via the BookSum dataset). "
            "The models in this demo are [LongT5-base](https://huggingface.co/pszemraj/long-t5-tglobal-base-16384-book-summary) "
            "and [Pegasus-X-Large](https://huggingface.co/pszemraj/pegasus-x-large-book-summary)."
        )
        with gr.Column():
            gr.Markdown("## Load Inputs & Select Parameters")
            gr.Markdown(
                "Enter text below in the text area. The text will be summarized "
                "[using the selected parameters](https://huggingface.co/blog/how-to-generate). "
                "Optionally load an example below or upload a file "
                "(`.txt` or `.pdf` - _[link to guide](https://i.imgur.com/c6Cs9ly.png)_)."
            )
            with gr.Row(variant="compact"):
                with gr.Column(scale=0.5, variant="compact"):
                    model_name = gr.Dropdown(
                        choices=MODEL_OPTIONS,
                        value=MODEL_OPTIONS[0],
                        label="Model",
                    )
                    num_beams = gr.Radio(
                        choices=[2, 3, 4],
                        label="Beam Search: # of Beams",
                        value=2,
                    )
                with gr.Column(variant="compact"):
                    example_name = gr.Dropdown(
                        _examples,
                        label="Examples",
                        value=random.choice(_examples),
                    )
                    uploaded_file = gr.File(
                        label="File Upload",
                        file_count="single",
                        type="file",
                    )
            with gr.Row():
                input_text = gr.Textbox(
                    lines=4,
                    label="Input Text (for summarization)",
                    placeholder="Enter text to summarize; it will be cleaned and truncated on Spaces. Narrative, academic (both papers and lecture transcriptions), and article text work well. Generation may take a bit depending on the input text :)",
                )
                with gr.Column(min_width=100, scale=0.5):
                    load_examples_button = gr.Button(
                        "Load Example",
                    )
                    load_file_button = gr.Button("Upload File")
        with gr.Column():
            gr.Markdown("## Generate Summary")
            gr.Markdown(
                "Summarization should take ~1-2 minutes for most settings, "
                "but may extend up to 5-10 minutes in some scenarios."
            )
            summarize_button = gr.Button(
                "Summarize!",
                variant="primary",
            )
") gr.Markdown("### Summary Output") summary_text = gr.Textbox( label="Summary", placeholder="The generated summary will appear here" ) gr.Markdown( "The summary scores can be thought of as representing the quality of the summary. less-negative numbers (closer to 0) are better:" ) summary_scores = gr.Textbox( label="Summary Scores", placeholder="Summary scores will appear here" ) text_file = gr.File( label="Download Summary as Text File", file_count="single", type="file", interactive=False, ) gr.Markdown("---") with gr.Column(): gr.Markdown("### Advanced Settings") with gr.Row(variant="compact"): length_penalty = gr.inputs.Slider( minimum=0.5, maximum=1.0, label="length penalty", default=0.7, step=0.05, ) token_batch_length = gr.Radio( choices=[512, 768, 1024, 1536], label="token batch length", value=1024, ) with gr.Row(variant="compact"): repetition_penalty = gr.inputs.Slider( minimum=1.0, maximum=5.0, label="repetition penalty", default=3.5, step=0.1, ) no_repeat_ngram_size = gr.Radio( choices=[2, 3, 4], label="no repeat ngram size", value=3, ) with gr.Column(): gr.Markdown("### About") gr.Markdown( "These models are fine-tuned on the [BookSum dataset](https://arxiv.org/abs/2105.08209).The goal was to create a model that can generalize well and is useful in summarizing lots of text in academic and daily usage." ) gr.Markdown("---") load_examples_button.click( fn=load_single_example_text, inputs=[example_name], outputs=[input_text] ) load_file_button.click( fn=load_uploaded_file, inputs=uploaded_file, outputs=[input_text] ) summarize_button.click( fn=proc_submission, inputs=[ input_text, model_name, num_beams, token_batch_length, length_penalty, repetition_penalty, no_repeat_ngram_size, ], outputs=[output_text, summary_text, summary_scores, text_file], ) demo.launch(enable_queue=True)