Peter committed on
Commit
53cfd2d
1 Parent(s): c0a9b19

:lipstick: add runtime, clean up display

Browse files
Files changed (1) hide show
  1. app.py +14 -5
app.py CHANGED
@@ -1,7 +1,7 @@
1
  import logging
2
  import re
3
  from pathlib import Path
4
-
5
  import gradio as gr
6
  import nltk
7
  from cleantext import clean
@@ -53,7 +53,7 @@ def proc_submission(
53
  "early_stopping": True,
54
  "do_sample": False,
55
  }
56
-
57
  history = {}
58
  clean_text = clean(input_text, lower=False)
59
  max_input_length = 1024 if model_size == "base" else max_input_length
@@ -65,7 +65,6 @@ def proc_submission(
65
  history["WARNING"] = msg
66
  else:
67
  tr_in = input_text
68
- history["was_truncated"] = False
69
 
70
  _summaries = summarize_via_tokenbatches(
71
  tr_in,
@@ -84,7 +83,9 @@ def proc_submission(
84
  history["Summary Scores"] = "\n".join(sum_scores)
85
  history["Input"] = tr_in
86
  html = ""
87
-
 
 
88
  for name, item in history.items():
89
  html += (
90
  f"<h2>{name}:</h2><hr><b>{item}</b><br><br>"
@@ -94,6 +95,7 @@ def proc_submission(
94
 
95
  html += ""
96
 
 
97
  return html
98
 
99
 
@@ -104,8 +106,15 @@ if __name__ == "__main__":
104
  model, tokenizer = load_model_and_tokenizer("pszemraj/led-large-book-summary")
105
  model_sm, tokenizer_sm = load_model_and_tokenizer("pszemraj/led-base-book-summary")
106
  title = "Long-Form Summarization: LED & BookSum"
107
- description = "A simple demo of how to use a fine-tuned LED model to summarize long-form text. [This model](https://huggingface.co/pszemraj/led-large-book-summary) is a fine-tuned version of [allenai/led-large-16384](https://huggingface.co/allenai/led-large-16384) on the [BookSum dataset](https://arxiv.org/abs/2105.08209). The goal was to create a model that can generalize well and is useful in summarizing lots of text in academic and daily usage."
 
 
 
 
 
 
108
 
 
109
  gr.Interface(
110
  proc_submission,
111
  inputs=[
 
1
  import logging
2
  import re
3
  from pathlib import Path
4
+ import time
5
  import gradio as gr
6
  import nltk
7
  from cleantext import clean
 
53
  "early_stopping": True,
54
  "do_sample": False,
55
  }
56
+ st = time.perf_counter()
57
  history = {}
58
  clean_text = clean(input_text, lower=False)
59
  max_input_length = 1024 if model_size == "base" else max_input_length
 
65
  history["WARNING"] = msg
66
  else:
67
  tr_in = input_text
 
68
 
69
  _summaries = summarize_via_tokenbatches(
70
  tr_in,
 
83
  history["Summary Scores"] = "\n".join(sum_scores)
84
  history["Input"] = tr_in
85
  html = ""
86
+ rt = round((time.perf_counter() - st)/60, 2)
87
+ print(f"Runtime: {rt} minutes")
88
+ html += f"<p>Runtime: {rt} minutes on CPU</p>"
89
  for name, item in history.items():
90
  html += (
91
  f"<h2>{name}:</h2><hr><b>{item}</b><br><br>"
 
95
 
96
  html += ""
97
 
98
+
99
  return html
100
 
101
 
 
106
  model, tokenizer = load_model_and_tokenizer("pszemraj/led-large-book-summary")
107
  model_sm, tokenizer_sm = load_model_and_tokenizer("pszemraj/led-base-book-summary")
108
  title = "Long-Form Summarization: LED & BookSum"
109
+ description = """
110
+ A simple demo of how to use a fine-tuned LED model to summarize long-form text.
111
+ - [This model](https://huggingface.co/pszemraj/led-large-book-summary) is a fine-tuned version of [allenai/led-large-16384](https://huggingface.co/allenai/led-large-16384) on the [BookSum dataset](https://arxiv.org/abs/2105.08209).
112
+ - The goal was to create a model that can generalize well and is useful in summarizing lots of text in academic and daily usage.
113
+ - See [model card](https://huggingface.co/pszemraj/led-large-book-summary) for a notebook with GPU inference (much faster) on Colab.
114
+
115
+ ---
116
 
117
+ """
118
  gr.Interface(
119
  proc_submission,
120
  inputs=[