pszemraj committed
Commit 11d8f98 • 1 Parent(s): 724c5ad

๐Ÿ“ add words


Signed-off-by: peter szemraj <peterszemraj@gmail.com>

Files changed (1)
  1. app.py +5 -11
app.py CHANGED
@@ -86,8 +86,8 @@ def proc_submission(
 
     _summaries = summarize_via_tokenbatches(
         tr_in,
-        model_sm if model_size == "base" else model,
-        tokenizer_sm if model_size == "base" else tokenizer,
+        model_sm if "base" in model_size.lower() else model,
+        tokenizer_sm if "base" in model_size.lower() else tokenizer,
         batch_length=token_batch_length,
         **settings,
     )
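This hunk swaps an exact string match for a substring check, since the radio values later in the diff become full model names rather than bare "base"/"large". A minimal sketch of the routing logic, with hypothetical placeholder values standing in for the app's loaded models and tokenizers:

```python
# Sketch of the selection logic above; the model/tokenizer arguments are
# hypothetical placeholders, not the objects the app actually loads.
def pick_model(model_size: str, model_sm, model, tokenizer_sm, tokenizer):
    # "LongT5-base".lower() contains "base", so the substring check still
    # routes to the small model after the radio labels were renamed.
    use_base = "base" in model_size.lower()
    return (model_sm, tokenizer_sm) if use_base else (model, tokenizer)

assert pick_model("LongT5-base", "sm", "lg", "tok_sm", "tok_lg") == ("sm", "tok_sm")
assert pick_model("Pegasus-X-large", "sm", "lg", "tok_sm", "tok_lg") == ("lg", "tok_lg")
```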
@@ -211,7 +211,7 @@ if __name__ == "__main__":
 
     gr.Markdown("# Document Summarization with Long-Document Transformers")
     gr.Markdown(
-        "TODO: Add a description of the model and how it works, and a link to the paper"
+        "This is an example use case for fine-tuned long document transformers. The model is trained on book summaries (via the BookSum dataset). The models in this demo are [LongT5-base](https://huggingface.co/pszemraj/long-t5-tglobal-base-16384-book-summary) and [Pegasus-X-Large](https://huggingface.co/pszemraj/pegasus-x-large-book-summary)."
     )
     with gr.Column():
 
@@ -223,7 +223,7 @@ if __name__ == "__main__":
     with gr.Column(scale=0.5, variant='compact'):
 
         model_size = gr.Radio(
-            choices=["base", "large"], label="Model Variant", value="base"
+            choices=["LongT5-base", "Pegasus-X-large"], label="Model Variant", value="base"
         )
         num_beams = gr.Radio(
             choices=[2, 3, 4],
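Worth noting in the new radio: `value="base"` is kept, but "base" is no longer one of the choices, so the component may start with no selection. A small sketch of a variant that preselects a valid default (assuming the same Gradio layout; `MODEL_CHOICES` is an illustrative name, not from the app):

```python
import gradio as gr

MODEL_CHOICES = ["LongT5-base", "Pegasus-X-large"]

with gr.Blocks() as demo:
    model_size = gr.Radio(
        choices=MODEL_CHOICES,
        label="Model Variant",
        # default to the first real choice so the radio starts selected and
        # the "base" substring check in proc_submission is True on first run
        value=MODEL_CHOICES[0],
    )
```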
@@ -308,13 +308,7 @@ if __name__ == "__main__":
     with gr.Column():
         gr.Markdown("### About the Model")
         gr.Markdown(
-            "- [This model](https://huggingface.co/pszemraj/led-large-book-summary) is a fine-tuned checkpoint of [allenai/led-large-16384](https://huggingface.co/allenai/led-large-16384) on the [BookSum dataset](https://arxiv.org/abs/2105.08209). The goal was to create a model that can generalize well and is useful in summarizing lots of text in academic and daily usage."
-        )
-        gr.Markdown(
-            "- The two most important parameters, empirically, are `num_beams` and `token_batch_length`. However, increasing these will also increase the amount of time it takes to generate a summary. The `length_penalty` and `repetition_penalty` parameters are also important for the model to generate good summaries."
-        )
-        gr.Markdown(
-            "- The model can be used with tag [pszemraj/led-large-book-summary](https://huggingface.co/pszemraj/led-large-book-summary). See the model card for details on usage & a notebook for a tutorial."
+            "These models are fine-tuned on the [BookSum dataset](https://arxiv.org/abs/2105.08209). The goal was to create a model that can generalize well and is useful in summarizing lots of text in academic and daily usage."
         )
         gr.Markdown("---")
 
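The `**settings` unpacked in the first hunk is not defined anywhere in this diff. As context only, a hypothetical shape for it, built from the generation parameters the removed About text mentions (`num_beams`, `length_penalty`, `repetition_penalty`); the values are illustrative placeholders, not the app's defaults:

```python
# Hypothetical example of the dict passed as **settings to
# summarize_via_tokenbatches; keys follow the knobs named in the removed
# About text, and every value here is an assumed placeholder.
settings = {
    "num_beams": 4,             # wider beam search: better summaries, slower
    "length_penalty": 0.8,      # values < 1.0 nudge summaries shorter
    "repetition_penalty": 2.5,  # discourage repeated phrases
}

# summaries = summarize_via_tokenbatches(
#     tr_in, model, tokenizer, batch_length=token_batch_length, **settings
# )
```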
 