jskim committed on
Commit
2fad322
1 Parent(s): 05a7bdc

added text instructions at the top; input now accepts semantic scholar profile link instead of the numeric id

Browse files
Files changed (4) hide show
  1. app.py +29 -1
  2. input_format.py +6 -4
  3. requirements.txt +1 -0
  4. score.py +2 -1
app.py CHANGED
@@ -150,6 +150,34 @@ def change_paper(selected_papers_radio):
150
 
151
  with gr.Blocks() as demo:
152
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
153
  ### INPUT
154
  with gr.Row() as input_row:
155
  with gr.Column():
@@ -158,7 +186,7 @@ with gr.Blocks() as demo:
158
  pdf_file_input = gr.File(label='OR upload a submission PDF File')
159
  with gr.Column():
160
  with gr.Row():
161
- author_id_input = gr.Textbox(label='Reviewer ID (Semantic Scholar)')
162
  with gr.Row():
163
  name = gr.Textbox(label='Confirm Reviewer Name', interactive=False)
164
  author_id_input.change(fn=update_name, inputs=author_id_input, outputs=name)
 
150
 
151
  with gr.Blocks() as demo:
152
 
153
+ # TODO Text description about the app and disclaimer
154
+ ### TEXT Description
155
+ gr.Markdown(
156
+ """
157
+ # Paper Matching Helper
158
+
159
+ This is a tool designed to help match an academic paper (submission) to a potential peer reviewer, by presenting information that may be relevant to the users.
160
+ Below we describe how to use the tool. Also feel free to check out the [video]() for a more detailed rundown.
161
+
162
+ ##### Input
163
+ - The tool requires two inputs: (1) an academic paper's abstract in text format, and (2) a potential reviewer's [Semantic Scholar](https://www.semanticscholar.org/) profile link. Once you put in a valid profile link, the reviewer's name will be displayed.
164
+ - Once the name is confirmed, press the "Search Similar Papers" button.
165
+ ##### Search Similar Papers
166
+ - Based on the input information above, the tool will search for similar papers from the reviewer's previous publications using [Semantic Scholar API](https://www.semanticscholar.org/product/api).
167
+ - It will list the top 10 similar papers along with the affinity score (ranging from 0 to 1), computed using text representations from a [language model](https://github.com/allenai/specter/tree/master/specter).
168
+ - You can click on different papers to see title, abstract, and affinity scores in detail.
169
+ ##### Show Relevant Parts
170
+ - Once you have retrieved the similar papers above, and selected a paper that you are interested in, you will have an option to see what parts of the selected paper may be relevant to the submission abstract. Click on the "Show Relevant Parts" button.
171
+ - On the left, you will see individual sentences from the submission abstract you can select from.
172
+ - On the right, you will see the abstract of the selected paper, with highlights.
173
+ - <span style="color:red">Red</span> highlights: sentences from the reviewer's paper abstract with high semantic similarity to the selected sentence.
174
+ - <span style="color:blue">Blue</span> highlights: matching phrases from the reviewer's paper abstract that are included in the selected sentence.
175
+ - To see relevant parts in a different paper from the reviewer, select the paper above and re-click "Show Relevant Parts" to refresh.
176
+
177
+ **Disclaimer.** This tool and its output should not serve as a sole justification for confirming a match for the submission. It is intended as a supplementary tool that the user may use at their discretion; the correctness of the output of the tool is not guaranteed. This may be improved by updating the internal models used to compute the affinity scores and sentence relevance, which may require additional research independently. The tool does not compromise the privacy of the reviewers as it relies only on their publicly-available information (e.g., names and list of previously published papers).
178
+ """
179
+ )
180
+
181
  ### INPUT
182
  with gr.Row() as input_row:
183
  with gr.Column():
 
186
  pdf_file_input = gr.File(label='OR upload a submission PDF File')
187
  with gr.Column():
188
  with gr.Row():
189
+ author_id_input = gr.Textbox(label='Reviewer Link or ID (Semantic Scholar)')
190
  with gr.Row():
191
  name = gr.Textbox(label='Confirm Reviewer Name', interactive=False)
192
  author_id_input.change(fn=update_name, inputs=author_id_input, outputs=name)
input_format.py CHANGED
@@ -69,12 +69,14 @@ def download_pdf(url, file_name):
69
  def get_text_from_author_id(author_id, max_count=100):
70
  if author_id is None:
71
  raise ValueError('Input valid author ID')
72
- author_id = str(author_id)
73
- # author_id = '1737249'
74
- url = "https://api.semanticscholar.org/graph/v1/author/%s?fields=url,name,paperCount,papers,papers.title,papers.abstract"%author_id
 
 
75
  r = requests.get(url)
76
  if r.status_code == 404:
77
- raise ValueError('Input valid author ID')
78
  data = r.json()
79
  papers = data['papers'][:max_count]
80
  name = data['name']
 
69
  def get_text_from_author_id(author_id, max_count=100):
70
  if author_id is None:
71
  raise ValueError('Input valid author ID')
72
+ aid = str(author_id)
73
+ if 'http' in aid: # handle semantic scholar url input
74
+ aid = aid.split('/')
75
+ aid = aid[aid.index('author')+2]
76
+ url = "https://api.semanticscholar.org/graph/v1/author/%s?fields=url,name,paperCount,papers,papers.title,papers.abstract"%aid
77
  r = requests.get(url)
78
  if r.status_code == 404:
79
+ raise ValueError('Author link not found.')
80
  data = r.json()
81
  papers = data['papers'][:max_count]
82
  name = data['name']
requirements.txt CHANGED
@@ -13,3 +13,4 @@ sentence-transformers==2.2.0
13
  torch==1.9.0
14
  transformers
15
  urllib3==1.26.6
 
 
13
  torch==1.9.0
14
  transformers
15
  urllib3==1.26.6
16
+ tqdm
score.py CHANGED
@@ -3,6 +3,7 @@ from nltk.tokenize import sent_tokenize
3
  from nltk import word_tokenize, pos_tag
4
  import torch
5
  import numpy as np
 
6
 
7
  def compute_sentencewise_scores(model, query_sents, candidate_sents):
8
  # TODO make this more general for different types of models
@@ -161,7 +162,7 @@ def predict_docscore(doc_model, tokenizer, query, titles, abstracts, batch=20):
161
  scores = []
162
  with torch.no_grad():
163
  # batch
164
- for i in range(no_iter):
165
  # preprocess the input
166
  inputs = tokenizer(
167
  [query] + title_abs[i*batch:(i+1)*batch],
 
3
  from nltk import word_tokenize, pos_tag
4
  import torch
5
  import numpy as np
6
+ import tqdm
7
 
8
  def compute_sentencewise_scores(model, query_sents, candidate_sents):
9
  # TODO make this more general for different types of models
 
162
  scores = []
163
  with torch.no_grad():
164
  # batch
165
+ for i in tqdm.tqdm(range(no_iter)):
166
  # preprocess the input
167
  inputs = tokenizer(
168
  [query] + title_abs[i*batch:(i+1)*batch],