Spaces:
Running
Running
shaocongma
committed on
Commit
•
a7f1695
1
Parent(s):
a6a7f17
Update prompts to support citep and citet.
Browse files- README.md +1 -1
- app.py +2 -0
- utils/prompts.py +7 -2
- utils/references.py +14 -9
README.md
CHANGED
@@ -20,7 +20,7 @@ python_version: 3.10.10
|
|
20 |
# 体验地址
|
21 |
以下链接提供简单功能的免费体验. 如果需要更定制化的功能, 请参照*使用方法*进行本地部署和自行修改.
|
22 |
|
23 |
-
https://huggingface.co/spaces/auto-academic/auto-draft
|
24 |
|
25 |
# 使用方法
|
26 |
1. 克隆此仓库:
|
|
|
20 |
# 体验地址
|
21 |
以下链接提供简单功能的免费体验. 如果需要更定制化的功能, 请参照*使用方法*进行本地部署和自行修改.
|
22 |
|
23 |
+
https://huggingface.co/spaces/auto-academic/auto-draft
|
24 |
|
25 |
# 使用方法
|
26 |
1. 克隆此仓库:
|
app.py
CHANGED
@@ -100,7 +100,9 @@ with gr.Blocks(theme=theme) as demo:
|
|
100 |
本Demo提供对[Auto-Draft](https://github.com/CCCBora/auto-draft)的auto_draft功能的测试。通过输入想要生成的论文名称(比如Playing atari with deep reinforcement learning),即可由AI辅助生成论文模板.
|
101 |
|
102 |
***2023-05-03 Update***: 在公开版本中为大家提供了输入OpenAI API Key的地址, 如果有GPT-4的API KEY的话可以在这里体验!
|
|
|
103 |
在这个Huggingface Organization里也提供一定额度的免费体验: [AUTO-ACADEMIC](https://huggingface.co/organizations/auto-academic/share/HPjgazDSlkwLNCWKiAiZoYtXaJIatkWDYM).
|
|
|
104 |
如果有更多想法和建议欢迎加入QQ群里交流, 如果我在Space里更新了Key我会第一时间通知大家. 群号: ***249738228***.
|
105 |
|
106 |
## 用法
|
|
|
100 |
本Demo提供对[Auto-Draft](https://github.com/CCCBora/auto-draft)的auto_draft功能的测试。通过输入想要生成的论文名称(比如Playing atari with deep reinforcement learning),即可由AI辅助生成论文模板.
|
101 |
|
102 |
***2023-05-03 Update***: 在公开版本中为大家提供了输入OpenAI API Key的地址, 如果有GPT-4的API KEY的话可以在这里体验!
|
103 |
+
|
104 |
在这个Huggingface Organization里也提供一定额度的免费体验: [AUTO-ACADEMIC](https://huggingface.co/organizations/auto-academic/share/HPjgazDSlkwLNCWKiAiZoYtXaJIatkWDYM).
|
105 |
+
|
106 |
如果有更多想法和建议欢迎加入QQ群里交流, 如果我在Space里更新了Key我会第一时间通知大家. 群号: ***249738228***.
|
107 |
|
108 |
## 用法
|
utils/prompts.py
CHANGED
@@ -53,9 +53,14 @@ def generate_paper_prompts(paper_info, section):
|
|
53 |
|
54 |
fundamental_subprompt = f"I am writing a machine learning paper with the title '{title}'. {description}\n"
|
55 |
instruction_subprompt = f"You need to write the {section} section. {INSTRUCTIONS[section]}\n"
|
|
|
|
|
|
|
|
|
56 |
references_subprompt = f"Please read the following references: \n{references}\n"\
|
57 |
-
f"Every time you use information from the references, you need to cite its id after the sentence; " \
|
58 |
-
f"for example, the sentence where you use information from 1905.09788 \cite{{1905.09788}}. " \
|
|
|
59 |
f"Please avoid citing the same reference in the same paragraph. \n"
|
60 |
self_subprompt = f"Here is the paper that I have written: {paper}.\n"
|
61 |
output_subprompt = r"Put your response (do not include \section{...}) in the following Python script:" \
|
|
|
53 |
|
54 |
fundamental_subprompt = f"I am writing a machine learning paper with the title '{title}'. {description}\n"
|
55 |
instruction_subprompt = f"You need to write the {section} section. {INSTRUCTIONS[section]}\n"
|
56 |
+
# references_subprompt = f"Please read the following references: \n{references}\n"\
|
57 |
+
# f"Every time you use information from the references, you need to cite its id after the sentence; " \
|
58 |
+
# f"for example, the sentence where you use information from 1905.09788 \cite{{1905.09788}}. " \
|
59 |
+
# f"Please avoid citing the same reference in the same paragraph. \n"
|
60 |
references_subprompt = f"Please read the following references: \n{references}\n"\
|
61 |
+
f"Every time you use information from the references, you need to appropriately cite it (using \citep or \citet)." \
|
62 |
+
f"For example of \citep, the sentence where you use information from lei2022adaptive \citep{{lei2022adaptive}}. " \
|
63 |
+
f"For example of \citet, \citet{{lei2022adaptive}} claims some information. \n" \
|
64 |
f"Please avoid citing the same reference in the same paragraph. \n"
|
65 |
self_subprompt = f"Here is the paper that I have written: {paper}.\n"
|
66 |
output_subprompt = r"Put your response (do not include \section{...}) in the following Python script:" \
|
utils/references.py
CHANGED
@@ -9,9 +9,9 @@ import requests
|
|
9 |
import re
|
10 |
|
11 |
|
12 |
-
|
13 |
# Some basic tools
|
14 |
-
|
15 |
def remove_newlines(serie):
|
16 |
serie = serie.replace('\n', ' ')
|
17 |
serie = serie.replace('\\n', ' ')
|
@@ -20,9 +20,9 @@ def remove_newlines(serie):
|
|
20 |
return serie
|
21 |
|
22 |
|
23 |
-
|
24 |
# Semantic Scholar (SS) API
|
25 |
-
|
26 |
def ss_search(keywords, limit=20, fields=None):
|
27 |
# space between the query to be removed and replaced with +
|
28 |
if fields is None:
|
@@ -69,7 +69,12 @@ def _collect_papers_ss(keyword, counts=3, tldr=False):
|
|
69 |
authors = [author['name'] for author in raw_authors]
|
70 |
|
71 |
authors_str = " and ".join(authors)
|
72 |
-
|
|
|
|
|
|
|
|
|
|
|
73 |
return authors_str, last_name
|
74 |
|
75 |
def parse_search_results(search_results_ss):
|
@@ -113,9 +118,9 @@ def _collect_papers_ss(keyword, counts=3, tldr=False):
|
|
113 |
return results
|
114 |
|
115 |
|
116 |
-
|
117 |
# ArXiv API
|
118 |
-
|
119 |
def _collect_papers_arxiv(keyword, counts=3, tldr=False):
|
120 |
# Build the arXiv API query URL with the given keyword and other parameters
|
121 |
def build_query_url(keyword, results_limit=3, sort_by="relevance", sort_order="descending"):
|
@@ -183,9 +188,9 @@ def _collect_papers_arxiv(keyword, counts=3, tldr=False):
|
|
183 |
return results
|
184 |
|
185 |
|
186 |
-
|
187 |
# References Class
|
188 |
-
|
189 |
|
190 |
# Each `paper` is a dictionary containing (1) paper_id (2) title (3) authors (4) year (5) link (6) abstract (7) journal
|
191 |
class References:
|
|
|
9 |
import re
|
10 |
|
11 |
|
12 |
+
######################################################################################################################
|
13 |
# Some basic tools
|
14 |
+
######################################################################################################################
|
15 |
def remove_newlines(serie):
|
16 |
serie = serie.replace('\n', ' ')
|
17 |
serie = serie.replace('\\n', ' ')
|
|
|
20 |
return serie
|
21 |
|
22 |
|
23 |
+
######################################################################################################################
|
24 |
# Semantic Scholar (SS) API
|
25 |
+
######################################################################################################################
|
26 |
def ss_search(keywords, limit=20, fields=None):
|
27 |
# space between the query to be removed and replaced with +
|
28 |
if fields is None:
|
|
|
69 |
authors = [author['name'] for author in raw_authors]
|
70 |
|
71 |
authors_str = " and ".join(authors)
|
72 |
+
try:
|
73 |
+
last_name = authors[0].split()[-1]
|
74 |
+
except:
|
75 |
+
last_name = "ma"
|
76 |
+
# pattern = r'^\w+'
|
77 |
+
# last_name = re.findall(pattern, authors[0])
|
78 |
return authors_str, last_name
|
79 |
|
80 |
def parse_search_results(search_results_ss):
|
|
|
118 |
return results
|
119 |
|
120 |
|
121 |
+
######################################################################################################################
|
122 |
# ArXiv API
|
123 |
+
######################################################################################################################
|
124 |
def _collect_papers_arxiv(keyword, counts=3, tldr=False):
|
125 |
# Build the arXiv API query URL with the given keyword and other parameters
|
126 |
def build_query_url(keyword, results_limit=3, sort_by="relevance", sort_order="descending"):
|
|
|
188 |
return results
|
189 |
|
190 |
|
191 |
+
######################################################################################################################
|
192 |
# References Class
|
193 |
+
######################################################################################################################
|
194 |
|
195 |
# Each `paper` is a dictionary containing (1) paper_id (2) title (3) authors (4) year (5) link (6) abstract (7) journal
|
196 |
class References:
|