Update app.py
app.py
CHANGED
@@ -148,7 +148,7 @@ def load_uploaded_file(file_obj):
     try:
         with open(file_path, "r", encoding="utf-8", errors="ignore") as f:
             raw_text = f.read()
-        text = clean(raw_text,
+        text = clean(raw_text, extra_spaces=True, lowercase=True, reg="\s(?=[\,.':;!?])", reg_replace="")
         return text
     except Exception as e:
         logging.info(f"Trying to load file with path {file_path}, error: {e}")
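The new `clean` call (presumably the `cleantext` package's helper; the import is outside this hunk) collapses extra whitespace, lowercases the text, and applies a custom regex that deletes a whitespace character sitting directly before punctuation, a common artifact of text extracted from PDFs. A minimal standard-library sketch of the same normalization, for illustration only:

import re

# Same pattern as in the diff: a whitespace character that is followed
# (via lookahead) by a punctuation mark gets removed.
PUNCT_SPACING = re.compile(r"\s(?=[,.':;!?])")

def normalize(raw_text: str) -> str:
    text = PUNCT_SPACING.sub("", raw_text)    # "word ." -> "word."
    text = re.sub(r"\s+", " ", text).strip()  # collapse extra whitespace
    return text.lower()                       # lowercase, as in the diff

print(normalize("Hello ,  world !"))          # -> "hello, world!"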
@@ -197,9 +197,9 @@ if __name__ == "__main__":
|
|
197 |
step=0.05,
|
198 |
)
|
199 |
token_batch_length = gr.Radio(
|
200 |
-
choices=[
|
201 |
label="token batch length",
|
202 |
-
value=
|
203 |
)
|
204 |
with gr.Row():
|
205 |
example_name = gr.Dropdown(
|
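For context, these radio values presumably control how the app splits a long document into fixed-size token batches before each batch is summarized; the chunking code itself is outside this hunk, so the helper below is purely illustrative:

from transformers import AutoTokenizer

def split_into_token_batches(text: str, token_batch_length: int = 1024) -> list[str]:
    # Hypothetical helper: cut the input into chunks of at most
    # `token_batch_length` tokens, matching the Radio choices 768/1024/2048.
    tokenizer = AutoTokenizer.from_pretrained("Blaise-g/longt5_tglobal_large_sumpubmed")
    ids = tokenizer(text, truncation=False)["input_ids"]
    return [
        tokenizer.decode(ids[i : i + token_batch_length], skip_special_tokens=True)
        for i in range(0, len(ids), token_batch_length)
    ]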
@@ -254,7 +254,7 @@ if __name__ == "__main__":
|
|
254 |
"- [Blaise-g/longt5_tglobal_large_sumpubmed](https://huggingface.co/Blaise-g/longt5_tglobal_large_sumpubmed) is a fine-tuned checkpoint of [Stancld/longt5-tglobal-large-16384-pubmed-3k_steps](https://huggingface.co/Stancld/longt5-tglobal-large-16384-pubmed-3k_steps) on the [SumPubMed dataset](https://aclanthology.org/2021.acl-srw.30/). [Blaise-g/longt5_tglobal_large_scitldr](https://huggingface.co/Blaise-g/longt5_tglobal_large_scitldr) is a fine-tuned checkpoint of [Blaise-g/longt5_tglobal_large_sumpubmed](https://huggingface.co/Blaise-g/longt5_tglobal_large_sumpubmed) on the [Scitldr dataset](https://arxiv.org/abs/2004.15011). The goal was to create two models capable of handling the complex information contained in long biomedical documents and subsequently producing scientific summaries according to one of the two possible levels of conciseness: 1) A long explanatory synopsis that retains the majority of domain-specific language used in the original source text. 2)A one sentence long, TLDR style summary."
|
255 |
)
|
256 |
gr.Markdown(
|
257 |
-
"- The two most important text generation parameters are the `num_beams` and 'length_penalty' :
|
258 |
)
|
259 |
gr.Markdown("---")
|
260 |
|
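The added note describes standard Hugging Face `transformers` generation arguments. A short sketch of how they would typically be passed at inference time (the model name is taken from the Space above; the specific values here are arbitrary):

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_name = "Blaise-g/longt5_tglobal_large_sumpubmed"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

inputs = tokenizer("...long biomedical text...", return_tensors="pt", truncation=True)
summary_ids = model.generate(
    **inputs,
    num_beams=4,         # wider beam search: higher-probability summaries, more time and memory
    length_penalty=2.0,  # exponential length penalty on beam scores; > 0.0 favours longer output
    max_new_tokens=512,
)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))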