Files changed (4)
  1. app.py +13 -40
  2. controlled_summarization.py +17 -95
  3. description.py +1 -9
  4. requirements.txt +1 -2
app.py CHANGED
@@ -5,13 +5,12 @@ from reference_string_parsing import *
from controlled_summarization import *
from dataset_extraction import *

- from controlled_summarization import recommended_kw
import requests

# Example Usage
- # url = "https://arxiv.org/pdf/2305.14996.pdf"
- # dest_folder = "./examples/"
- # download_pdf(url, dest_folder)
+ #url = "https://arxiv.org/pdf/2305.14996.pdf"
+ #dest_folder = "./examples/"
+ #download_pdf(url, dest_folder)


with gr.Blocks(css="#htext span {white-space: pre-line}") as demo:
@@ -29,23 +28,20 @@ with gr.Blocks(css="#htext span {white-space: pre-line}") as demo:
ctrlsum_file = gr.File(label="Input File")
ctrlsum_str = gr.TextArea(label="Input String", max_lines=5)
with gr.Column():
-     gr.Markdown("* Set the length of text used for summarization. Length 0 will exert no control over length.")
+     gr.Markdown("* Length 0 will exert no control over length.")
    # ctrlsum_file_beams = gr.Number(label="Number of beams for beam search", value=1, precision=0)
    # ctrlsum_file_sequences = gr.Number(label="Number of generated summaries", value=1, precision=0)
-     ctrlsum_file_length = gr.Radio(label="Length", value=0, choices=[0, 50, 100, 200])
-     kw = gr.Radio(visible=False)
-     ctrlsum_file_keywords = gr.Textbox(label="Keywords", max_lines=1)
+     ctrlsum_file_length = gr.Slider(0,300,step=50, label="Length")
+     ctrlsum_file_keywords = gr.Textbox(label="Keywords",max_lines=1)
with gr.Row():
    ctrlsum_file_btn = gr.Button("Generate")
    ctrlsum_file_output = gr.Textbox(
        elem_id="htext",
        label="Summary",
    )
-     ctrlsum_file_examples = gr.Examples(
-         examples=[["examples/H01-1042_body.txt", 50, "automatic evaluation technique", "", ""],
-                   ["examples/H01-1042.pdf", 0, "automatic evaluation technique", "", ""]],
-         inputs=[ctrlsum_file, ctrlsum_file_length, ctrlsum_file_keywords, ctrlsum_str, ctrlsum_url
-                 ])
+     ctrlsum_file_examples = gr.Examples(examples=[["examples/H01-1042_body.txt", 50, "automatic evaluation technique", "",""],["examples/H01-1042.pdf", 0, "automatic evaluation technique","",""]],
+                                         inputs=[ctrlsum_file, ctrlsum_file_length, ctrlsum_file_keywords, ctrlsum_str, ctrlsum_url])
+


@@ -55,37 +51,15 @@ with gr.Blocks(css="#htext span {white-space: pre-line}") as demo:
    outputs=[ctrlsum_file_output, ctrlsum_str, ctrlsum_file]
)
def clear():
-     return None, 0, None, None, gr.Radio(visible=False)
+     return None,0,None, None
-
-
- def update_url(url):
-     if url in recommended_kw.keys():
-         keywords = recommended_kw[url]
-         if keywords != None:
-             return None, None, gr.Radio(choices=keywords[:3], label="Recommended Keywords", visible=True,
-                                         interactive=True)
-
-     return None, None, gr.Radio(visible=False)
-
-
- ctrlsum_file.upload(clear, inputs=None,
-                     outputs=[ctrlsum_str, ctrlsum_file_length, ctrlsum_file_keywords, ctrlsum_url, kw])
- ctrlsum_url.input(update_url, inputs=ctrlsum_url, outputs=[ctrlsum_str, ctrlsum_file, kw])
+ ctrlsum_file.upload(clear, inputs=None,outputs=[ctrlsum_str,ctrlsum_file_length,ctrlsum_file_keywords, ctrlsum_url])
+ ctrlsum_url.input(clear, inputs=None, outputs=[ctrlsum_str, ctrlsum_file_length, ctrlsum_file_keywords, ctrlsum_file])

ctrlsum_str.input(clear, inputs=None,
-                   outputs=[ctrlsum_url, ctrlsum_file_length, ctrlsum_file_keywords, ctrlsum_file, kw])
+                   outputs=[ctrlsum_url, ctrlsum_file_length, ctrlsum_file_keywords, ctrlsum_file])
-
-
-
- def select_kw(env: gr.SelectData):
-     return env.value
-
-
- kw.select(select_kw, None, ctrlsum_file_keywords)
-
# Reference String Parsing
with gr.TabItem("Reference String Parsing"):
-     gr.Markdown(rsp_title_md)
    with gr.Box():
        gr.Markdown(rsp_str_md)
        with gr.Row():
@@ -139,7 +113,6 @@ with gr.Blocks(css="#htext span {white-space: pre-line}") as demo:

# Dataset Extraction
with gr.TabItem("Dataset Mentions Extraction"):
-     gr.Markdown(de_title_md)
    with gr.Box():
        gr.Markdown(de_str_md)
        with gr.Row():
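For orientation when reading this diff: the reworked app.py drops the recommended-keyword `gr.Radio` and the `update_url`/`select_kw` handlers, replaces the length `gr.Radio` with a `gr.Slider`, and binds one shared `clear` callback to all three input channels, so whichever input the user touches last resets the others. A minimal, self-contained sketch of that wiring pattern is shown below; the component names are illustrative placeholders, not the identifiers used in app.py, and the snippet only assumes the Gradio APIs already visible in the diff (`gr.Slider`, `gr.File`, `.upload()`, `.input()`).

```python
# Hypothetical sketch of the simplified input wiring -- not part of the PR itself.
import gradio as gr

with gr.Blocks() as demo:
    url_box = gr.Textbox(label="Input URL", max_lines=1)
    file_box = gr.File(label="Input File")
    text_box = gr.TextArea(label="Input String", max_lines=5)
    length = gr.Slider(0, 300, step=50, label="Length")  # 0 means "no length control"
    keywords = gr.Textbox(label="Keywords", max_lines=1)

    def clear():
        # Reset the competing inputs and the length slider to their defaults.
        return None, 0, None, None

    # Each input channel, when used, clears the other two plus length and keywords.
    file_box.upload(clear, inputs=None, outputs=[text_box, length, keywords, url_box])
    url_box.input(clear, inputs=None, outputs=[text_box, length, keywords, file_box])
    text_box.input(clear, inputs=None, outputs=[url_box, length, keywords, file_box])

demo.launch()
```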
controlled_summarization.py CHANGED
@@ -3,71 +3,9 @@ import torch
from SciAssist import Summarization
import os
import requests
- from datasets import load_dataset
-
- print(f"Is CUDA available: {torch.cuda.is_available()}")
- # True
- if torch.cuda.is_available():
-     print(f"CUDA device: {torch.cuda.get_device_name(torch.cuda.current_device())}")
-     device = 'gpu'
-     ctrlsum_pipeline = Summarization(os_name="nt",model_name="flan-t5-xl",checkpoint="dyxohjl666/flant5-xl-cocoscisum",device=device)
- else:
-     device = 'cpu'
-     ctrlsum_pipeline = Summarization(os_name="nt",device=device)
-
-
- acl_dict = {}
- recommended_kw = {}
- acl_data = load_dataset("dyxohjl666/CocoScisum_ACL", revision="refs/convert/parquet")
-
-
- def convert_to_dict(data):
-     """ Dict:
-         { url:
-             {length:
-                 {keywords: summary};
-              raw_text:
-                 str;
-             }
-         }
-     """
-     url = data["url"]
-     text = data["text"]
-     keywords = data["keywords"]
-     length = data["length"]
-     summary = data["summary"]
-     for u, t, k, l, s in zip(url, text, keywords, length, summary):
-         if len(u) < 5:
-             continue
-         u = u + ".pdf"
-         if k == None:
-             k = ""
-         if l == None:
-             l = ""
-         k = str(k).strip()
-         l = str(l).strip()
-         if u in acl_dict.keys():
-             if k in acl_dict[u][l].keys():
-                 continue
-             else:
-                 acl_dict[u][l][k] = s
-         else:
-             acl_dict[u] = {"": {}, "50": {}, "100": {}, "200": {}, "raw_text": t}
-
-         # kws
-         if u in recommended_kw.keys():
-             if k == "" or k in recommended_kw[u]:
-                 continue
-             else:
-                 recommended_kw[u].append(k)
-         else:
-             recommended_kw[u] = []
-     return 1
+ device = "gpu" if torch.cuda.is_available() else "cpu"

-
- for i in acl_data.keys():
-     signal = convert_to_dict(acl_data[i])
+ ctrlsum_pipeline = Summarization(os_name="nt",checkpoint="google/flan-t5-base",device=device)


def download_pdf(url, dest_folder):
@@ -92,15 +30,16 @@ def download_pdf(url, dest_folder):
    return filename


- def ctrlsum_for_str(input, length=None, keywords=None) -> List[Tuple[str, str]]:
+ def ctrlsum_for_str(input,length=None, keywords=None) -> List[Tuple[str, str]]:
+
    if keywords is not None:
        keywords = keywords.strip().split(",")
        if keywords[0] == "":
            keywords = None
-     if length == 0 or length is None:
+     if length==0 or length is None:
        length = None
    results = ctrlsum_pipeline.predict(input, type="str",
-                                        length=length, keywords=keywords, num_beams=1)
+                                        length=length, keywords=keywords)

    output = []
    for res in results["summary"]:
@@ -110,49 +49,31 @@ def ctrlsum_for_str(input, length=None, keywords=None) -> List[Tuple[str, str]]:

def ctrlsum_for_file(input=None, length=None, keywords="", text="", url="") -> List[Tuple[str, str, str]]:
    if input == None and url == "":
-         if text == "":
-             return None, "Input cannot be left blank.", None
+         if text=="":
+             return None,"Input cannot be left blank.",None
        else:
-             return ctrlsum_for_str(text, length, keywords), text, None
+             return ctrlsum_for_str(text,length,keywords),text, None
    else:
-         filename = ""
-         url = url.strip()
+         filename=""
        if url != "":
-             if len(url) > 4 and url[-3:] == "pdf":
+             if len(url) > 4:
-                 if url.strip() in acl_dict.keys():
-                     raw_text = acl_dict[url]["raw_text"]
-                     l = str(length)
-                     if length == 0:
-                         l = ""
-                     if l in acl_dict[url].keys():
-                         if keywords.strip() in acl_dict[url][l].keys():
-                             summary = acl_dict[url][l][keywords]
-                             return summary, raw_text, None
-                     if keywords.strip() == "":
-                         keywords = None
-                     if l == "":
-                         l = None
-                     return ctrlsum_for_str(raw_text, int(l), keywords), raw_text, None
-
                filename = download_pdf(url, './cache/')
-             else:
-                 "Invalid url(Not PDF)!", None, None
        else:
            filename = input.name
        if keywords != "":
            keywords = keywords.strip().split(",")
            if keywords[0] == "":
                keywords = None
-         if length == 0:
+         if length==0:
            length = None
        # Identify the format of input and parse reference strings
        if filename[-4:] == ".txt":
            results = ctrlsum_pipeline.predict(filename, type="txt",
-                                                 save_results=False,
-                                                 length=length, keywords=keywords, num_beams=1)
+                                                 save_results=False,
+                                                 length=length, keywords=keywords)
        elif filename[-4:] == ".pdf":
            results = ctrlsum_pipeline.predict(filename,
-                                                 save_results=False, length=length, keywords=keywords, num_beams=1)
+                                                 save_results=False, length=length, keywords=keywords)
        else:
            return "File Format Error !", None, filename

@@ -162,4 +83,5 @@ def ctrlsum_for_file(input=None, length=None, keywords="", text="", url="") -> List[Tuple[str, str, str]]:
    return "".join(output), results["raw_text"], filename


- ctrlsum_str_example = "Language model pre-training has been shown to be effective for improving many natural language processing tasks ( Dai and Le , 2015 ; Peters et al. , 2018a ; Radford et al. , 2018 ; Howard and Ruder , 2018 ) . These include sentence-level tasks such as natural language inference ( Bowman et al. , 2015 ; Williams et al. , 2018 ) and paraphrasing ( Dolan and Brockett , 2005 ) , which aim to predict the relationships between sentences by analyzing them holistically , as well as token-level tasks such as named entity recognition and question answering , where models are required to produce fine-grained output at the token level ( Tjong Kim Sang and De Meulder , 2003 ; Rajpurkar et al. , 2016 ) . There are two existing strategies for applying pre-trained language representations to downstream tasks : feature-based and fine-tuning . The feature-based approach , such as ELMo ( Peters et al. , 2018a ) , uses task-specific architectures that include the pre-trained representations as additional features . The fine-tuning approach , such as the Generative Pre-trained Transformer ( OpenAI GPT ) ( Radford et al. , 2018 ) , introduces minimal task-specific parameters , and is trained on the downstream tasks by simply fine-tuning all pretrained parameters . The two approaches share the same objective function during pre-training , where they use unidirectional language models to learn general language representations . We argue that current techniques restrict the power of the pre-trained representations , especially for the fine-tuning approaches . The major limitation is that standard language models are unidirectional , and this limits the choice of architectures that can be used during pre-training . For example , in OpenAI GPT , the authors use a left-toright architecture , where every token can only attend to previous tokens in the self-attention layers of the Transformer ( Vaswani et al. , 2017 ) . Such restrictions are sub-optimal for sentence-level tasks , and could be very harmful when applying finetuning based approaches to token-level tasks such as question answering , where it is crucial to incorporate context from both directions . In this paper , we improve the fine-tuning based approaches by proposing BERT : Bidirectional Encoder Representations from Transformers . BERT alleviates the previously mentioned unidirectionality constraint by using a `` masked language model '' ( MLM ) pre-training objective , inspired by the Cloze task ( Taylor , 1953 ) . The masked language model randomly masks some of the tokens from the input , and the objective is to predict the original vocabulary id of the masked arXiv:1810.04805v2 [ cs.CL ] 24 May 2019 word based only on its context . Unlike left-toright language model pre-training , the MLM objective enables the representation to fuse the left and the right context , which allows us to pretrain a deep bidirectional Transformer . In addition to the masked language model , we also use a `` next sentence prediction '' task that jointly pretrains text-pair representations . The contributions of our paper are as follows : • We demonstrate the importance of bidirectional pre-training for language representations . Unlike Radford et al . ( 2018 ) , which uses unidirectional language models for pre-training , BERT uses masked language models to enable pretrained deep bidirectional representations . This is also in contrast to Peters et al . 
( 2018a ) , which uses a shallow concatenation of independently trained left-to-right and right-to-left LMs . • We show that pre-trained representations reduce the need for many heavily-engineered taskspecific architectures . BERT is the first finetuning based representation model that achieves state-of-the-art performance on a large suite of sentence-level and token-level tasks , outperforming many task-specific architectures . • BERT advances the state of the art for eleven NLP tasks . The code and pre-trained models are available at https : //github.com/ google-research/bert . "
+
+ ctrlsum_str_example = "Language model pre-training has been shown to be effective for improving many natural language processing tasks ( Dai and Le , 2015 ; Peters et al. , 2018a ; Radford et al. , 2018 ; Howard and Ruder , 2018 ) . These include sentence-level tasks such as natural language inference ( Bowman et al. , 2015 ; Williams et al. , 2018 ) and paraphrasing ( Dolan and Brockett , 2005 ) , which aim to predict the relationships between sentences by analyzing them holistically , as well as token-level tasks such as named entity recognition and question answering , where models are required to produce fine-grained output at the token level ( Tjong Kim Sang and De Meulder , 2003 ; Rajpurkar et al. , 2016 ) . There are two existing strategies for applying pre-trained language representations to downstream tasks : feature-based and fine-tuning . The feature-based approach , such as ELMo ( Peters et al. , 2018a ) , uses task-specific architectures that include the pre-trained representations as additional features . The fine-tuning approach , such as the Generative Pre-trained Transformer ( OpenAI GPT ) ( Radford et al. , 2018 ) , introduces minimal task-specific parameters , and is trained on the downstream tasks by simply fine-tuning all pretrained parameters . The two approaches share the same objective function during pre-training , where they use unidirectional language models to learn general language representations . We argue that current techniques restrict the power of the pre-trained representations , especially for the fine-tuning approaches . The major limitation is that standard language models are unidirectional , and this limits the choice of architectures that can be used during pre-training . For example , in OpenAI GPT , the authors use a left-toright architecture , where every token can only attend to previous tokens in the self-attention layers of the Transformer ( Vaswani et al. , 2017 ) . Such restrictions are sub-optimal for sentence-level tasks , and could be very harmful when applying finetuning based approaches to token-level tasks such as question answering , where it is crucial to incorporate context from both directions . In this paper , we improve the fine-tuning based approaches by proposing BERT : Bidirectional Encoder Representations from Transformers . BERT alleviates the previously mentioned unidirectionality constraint by using a `` masked language model '' ( MLM ) pre-training objective , inspired by the Cloze task ( Taylor , 1953 ) . The masked language model randomly masks some of the tokens from the input , and the objective is to predict the original vocabulary id of the masked arXiv:1810.04805v2 [ cs.CL ] 24 May 2019 word based only on its context . Unlike left-toright language model pre-training , the MLM objective enables the representation to fuse the left and the right context , which allows us to pretrain a deep bidirectional Transformer . In addition to the masked language model , we also use a `` next sentence prediction '' task that jointly pretrains text-pair representations . The contributions of our paper are as follows : • We demonstrate the importance of bidirectional pre-training for language representations . Unlike Radford et al . ( 2018 ) , which uses unidirectional language models for pre-training , BERT uses masked language models to enable pretrained deep bidirectional representations . This is also in contrast to Peters et al . 
( 2018a ) , which uses a shallow concatenation of independently trained left-to-right and right-to-left LMs . • We show that pre-trained representations reduce the need for many heavily-engineered taskspecific architectures . BERT is the first finetuning based representation model that achieves state-of-the-art performance on a large suite of sentence-level and token-level tasks , outperforming many task-specific architectures . • BERT advances the state of the art for eleven NLP tasks . The code and pre-trained models are available at https : //github.com/ google-research/bert . "
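For a quick sense of how the slimmed-down module is used, here is a hypothetical driver snippet (not part of the PR); it assumes this repository layout and that importing the module builds `ctrlsum_pipeline` with the `google/flan-t5-base` checkpoint as shown above.

```python
# Hypothetical usage sketch: summarize the bundled example passage with both
# controls set; length=0/None or an empty keyword string would disable control.
from controlled_summarization import ctrlsum_for_str, ctrlsum_str_example

summary = ctrlsum_for_str(ctrlsum_str_example, length=100, keywords="BERT")
print(summary)
```

On a machine without CUDA the module now simply falls back to `device = "cpu"` instead of loading the larger flan-t5-xl checkpoint, so a snippet like this should also run (slowly) on CPU.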
description.py CHANGED
@@ -1,8 +1,4 @@
# Reference string parsing Markdown
- rsp_title_md = '''
- ## Reference String Parsing parses a citation string, extracting information such as the title, authors, and publication date.
- '''
-
rsp_str_md = '''
To **test on strings**, simply input one or more strings.
'''
@@ -46,8 +42,6 @@ To **test on strings**, simply input a string.
ctrlsum_file_md = '''
This is the demo for **CocoSciSum**.

- ## Controlled Summarization uses FLAN-T5 to generate user-customised summaries from your input file or URL link.
-
To **test on a file**, the input can be:

- A txt file which contains the content to be summarized.
@@ -58,9 +52,7 @@ To **test on a file**, the input can be:

'''

- de_title_md = '''
- ## Dataset Extraction detects dataset mentions from the input text.
- '''
+

de_str_md = '''
To **test on strings**, please input your sentences or paragraphs.
requirements.txt CHANGED
@@ -1,5 +1,4 @@
pip==23.2.1
torch==1.12.0
- SciAssist==0.1.4
+ SciAssist==0.0.41
nltk~=3.7
- pytest