tosanoob committed
Commit 58754c2
Parent: e8b9514

Update database
arxiv_records_sql CHANGED
Binary files a/arxiv_records_sql and b/arxiv_records_sql differ
 
arxivdb/chroma.sqlite3 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:0c48f817474996b45a3f4da1e127a2fde083db4bfeddb71893d598b8200fb056
+ oid sha256:12ccbdc4e184a9a32b70f35d7f9b7e67a5371b0f7f5514258652eb9e8d71a055
 size 123736064
mainflow_single_question.ipynb CHANGED
@@ -2,9 +2,18 @@
  "cells": [
  {
  "cell_type": "code",
- "execution_count": 4,
+ "execution_count": 1,
  "metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "C:\\Users\\Admin\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python310\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
+ " from .autonotebook import tqdm as notebook_tqdm\n"
+ ]
+ }
+ ],
  "source": [
  "import google.generativeai as genai\n",
  "import utils\n",
@@ -15,7 +24,7 @@
  },
  {
  "cell_type": "code",
- "execution_count": null,
+ "execution_count": 6,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -30,16 +39,18 @@
  " \"Gemini API Key not provided. Please provide GEMINI_API_KEY as an environment variable\"\n",
  " )\n",
  " genai.configure(api_key=gemini_api_key)\n",
- " model = genai.GenerativeModel(\"gemini-pro\")\n",
+ " config = genai.GenerationConfig(max_output_tokens=1024,\n",
+ " temperature=0.5)\n",
+ " model = genai.GenerativeModel(\"gemini-pro\",\n",
+ " generation_config=config)\n",
  " return model, sqldb, db\n",
  "\n",
- "\n",
  "model, sqldb, db = configure()"
  ]
  },
  {
  "cell_type": "code",
- "execution_count": 2,
+ "execution_count": 12,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -47,7 +58,7 @@
  " \"\"\"A prompt that return a JSON block as arguments for querying database\"\"\"\n",
  "\n",
  " prompt = (\n",
- " \"\"\"[INST] You are an assistant that choose only one action below based on guest question.\n",
+ " \"\"\"[INST] SYSTEM: You are an assistant that choose only one action below based on guest question.\n",
  " 1. If the guest question is asking for some specific document or article, you need to respond the information in JSON format with 2 keys \"title\", \"author\" if found any above. The authors are separated with the word 'and'. \n",
  " 2. If the guest question is asking for relevant informations about a topic, you need to respond the information in JSON format with 2 keys \"keywords\", \"description\", include a list of keywords represent the main academic topic, \\\n",
  " and a description about the main topic. You may paraphrase the keywords to add more. \\\n",
@@ -66,7 +77,7 @@
  " prompt = (\n",
  " \"\"\"[INST] You are an library assistant that help to search articles and documents based on user's question.\n",
  " From guest's question, you have found some records and documents that may help. Now you need to answer the guest with the information found.\n",
- " You should answer in a conversational form politely.\n",
+ " If no information found in the database, you may generate some other recommendation related to user's question using your own knowledge. You should answer in a conversational form politely.\n",
  " QUESTION: '{input}'\n",
  " INFORMATION: '{contexts}'\n",
  " [/INST]\n",
@@ -79,7 +90,7 @@
  },
  {
  "cell_type": "code",
- "execution_count": 3,
+ "execution_count": 8,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -96,6 +107,15 @@
  " results = db.query_relevant(keywords=keywords, query_texts=query_texts)\n",
  " # print(results)\n",
  " ids = results['metadatas'][0]\n",
+ " if len(ids) == 0:\n",
+ " # go crawl some\n",
+ " new_records = utils.crawl_arxiv(keyword_list=keywords)\n",
+ " if type(new_records) == str:\n",
+ " return \"Error occured, information not found\", new_records\n",
+ " db.add(new_records)\n",
+ " sqldb.add(new_records)\n",
+ " results = db.query_relevant(keywords=keywords, query_texts=query_texts)\n",
+ " ids = results['metadatas'][0]\n",
  " paper_id = [id['paper_id'] for id in ids]\n",
  " paper_info = sqldb.query_id(paper_id)\n",
  " # print(paper_info)\n",
@@ -109,16 +129,20 @@
  " # invoke llm and return result\n",
  "\n",
  " if \"title\" in keys:\n",
- " results = sqldb.query(title = args['title'],author = args['author'])\n",
- " print(results)\n",
- " paper_info = sqldb.query(title = args['title'],author = args['author'])\n",
+ " title = args['title']\n",
+ " authors = utils.process_authors_list(args['author'])\n",
+ " paper_info = sqldb.query(title = title,author = authors)\n",
  " # if query not found then go crawl brh\n",
+ " # print(paper_info)\n",
  "\n",
  " if len(paper_info) == 0:\n",
- " new_records = utils.crawl_exact_paper(title=args['title'],author=args['author'])\n",
+ " new_records = utils.crawl_exact_paper(title=title,author=authors)\n",
+ " if type(new_records) == str:\n",
+ " # print(new_records)\n",
+ " return \"Error occured, information not found\", \"Information not found\"\n",
  " db.add(new_records)\n",
  " sqldb.add(new_records)\n",
- " paper_info = sqldb.query(title = args['title'],author = args['author'])\n",
+ " paper_info = sqldb.query(title = title,author = authors)\n",
  " # -------------------------------------\n",
  " records = [] # get title (2), author (3), link (6)\n",
  " result_string = \"\"\n",
@@ -127,14 +151,14 @@
  " records.append([paper_info[i][2],paper_info[i][3],paper_info[i][6]])\n",
  " # process results:\n",
  " if len(result_string) == 0:\n",
- " return \"Information not found\", None\n",
+ " return \"Information not found\", \"Information not found\"\n",
  " return result_string, records\n",
  " # invoke llm and return result"
  ]
  },
  {
  "cell_type": "code",
- "execution_count": 8,
+ "execution_count": 5,
  "metadata": {},
  "outputs": [
  {
@@ -143,27 +167,27 @@
  "text": [
  "--------------------------\n",
  "{\n",
- " \"keywords\": [\"Video action recognition\", \"Long Short-Term Memory\", \"Computer vision\", \"Deep learning\", \"Time series analysis\"],\n",
- " \"description\": \"Video action recognition is a challenging task in computer vision. It requires the ability to understand the temporal dynamics of a video and to identify the actions being performed. Long Short-Term Memory (LSTM) networks are a type of recurrent neural network that is well-suited for learning long-term dependencies. This makes them a good choice for video action recognition tasks. LSTM networks have been shown to achieve state-of-the-art results on a variety of video action recognition datasets using deep learning techniques.\"\n",
+ " \"keywords\": [\"Long short-term memory\", \"Video action recognition\", \"Convolutional neural networks\"],\n",
+ " \"description\": \"Long short-term memory (LSTM) is a type of recurrent neural network (RNN) that is used for processing sequential data. It is particularly well-suited for tasks such as video action recognition, where the order of the frames in the video is important. Convolutional neural networks (CNNs) are another type of neural network that is often used for image and video processing. They are particularly good at learning the spatial features of images and videos. LSTM networks and CNNs can be used together to create powerful models for video action recognition.\"\n",
  "}\n"
  ]
  },
  {
  "data": {
  "text/plain": [
- "('Title: Applications of Deep Neural Networks with Keras, Author: Jeff Heato, Link: http://arxiv.org/pdf/2009.05673v5Title: DeepDrum: An Adaptive Conditional Neural Network, Author: Dimos Makris, Maximos Kaliakatsos-Papakostas, Katia Lida Kermanidi, Link: http://arxiv.org/pdf/1809.06127v2Title: A Video Recognition Method by using Adaptive Structural Learning of Long Short Term Memory based Deep Belief Network, Author: Shin Kamada, Takumi Ichimur, Link: http://arxiv.org/pdf/1909.13480v1',\n",
- " [['Applications of Deep Neural Networks with Keras',\n",
- " 'Jeff Heato',\n",
- " 'http://arxiv.org/pdf/2009.05673v5'],\n",
- " ['DeepDrum: An Adaptive Conditional Neural Network',\n",
- " 'Dimos Makris, Maximos Kaliakatsos-Papakostas, Katia Lida Kermanidi',\n",
- " 'http://arxiv.org/pdf/1809.06127v2'],\n",
- " ['A Video Recognition Method by using Adaptive Structural Learning of Long Short Term Memory based Deep Belief Network',\n",
- " 'Shin Kamada, Takumi Ichimur',\n",
- " 'http://arxiv.org/pdf/1909.13480v1']])"
+ "('Title: LiteLSTM Architecture for Deep Recurrent Neural Networks, Author: Nelly Elsayed, Zag ElSayed, Anthony S. Maid, Link: http://arxiv.org/pdf/2201.11624v2Title: AdderNet and its Minimalist Hardware Design for Energy-Efficient Artificial Intelligence, Author: Yunhe Wang, Mingqiang Huang, Kai Han, Hanting Chen, Wei Zhang, Chunjing Xu, Dacheng Ta, Link: http://arxiv.org/pdf/2101.10015v2Title: Audio Tagging on an Embedded Hardware Platform, Author: Gabriel Bibbo, Arshdeep Singh, Mark D. Plumble, Link: http://arxiv.org/pdf/2306.09106v1',\n",
+ " [['LiteLSTM Architecture for Deep Recurrent Neural Networks',\n",
+ " 'Nelly Elsayed, Zag ElSayed, Anthony S. Maid',\n",
+ " 'http://arxiv.org/pdf/2201.11624v2'],\n",
+ " ['AdderNet and its Minimalist Hardware Design for Energy-Efficient Artificial Intelligence',\n",
+ " 'Yunhe Wang, Mingqiang Huang, Kai Han, Hanting Chen, Wei Zhang, Chunjing Xu, Dacheng Ta',\n",
+ " 'http://arxiv.org/pdf/2101.10015v2'],\n",
+ " ['Audio Tagging on an Embedded Hardware Platform',\n",
+ " 'Gabriel Bibbo, Arshdeep Singh, Mark D. Plumble',\n",
+ " 'http://arxiv.org/pdf/2306.09106v1']])"
  ]
  },
- "execution_count": 8,
+ "execution_count": 5,
  "metadata": {},
  "output_type": "execute_result"
  }
@@ -187,68 +211,51 @@
  },
  {
  "cell_type": "code",
- "execution_count": 10,
+ "execution_count": 17,
  "metadata": {},
  "outputs": [
  {
  "name": "stdout",
  "output_type": "stream",
  "text": [
- "For recognizing actions in videos using Long Short-Term Memory (LSTM) models, you may find the following papers insightful. These studies explore various aspects of action recognition using LSTM: (1) 'Action Recognition with 3D Convolutional Neural Networks and Long Short-Term Memory' by Tran et al., 2015; (2) 'ConvLSTM: A Convolutional Neural Network for Video Recognition' by Shi et al., 2015; (3) 'Using Long-Short Term Memory for Action Recognition in Videos' by Graves, 2013.\n",
- "['LSTM model', 'video analysis', 'action recognition']\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "Llama.generate: prefix-match hit\n",
- "\n",
- "llama_print_timings: load time = 139768.70 ms\n",
- "llama_print_timings: sample time = 140.16 ms / 412 runs ( 0.34 ms per token, 2939.41 tokens per second)\n",
- "llama_print_timings: prompt eval time = 0.00 ms / 1 tokens ( 0.00 ms per token, inf tokens per second)\n",
- "llama_print_timings: eval time = 91570.34 ms / 412 runs ( 222.26 ms per token, 4.50 tokens per second)\n",
- "llama_print_timings: total time = 93048.98 ms / 413 tokens\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "------------------------\n",
- "Hello there! I see that you're working on an LSTM model for recognizing actions in videos. I have found some related papers that might be of interest to you.\n",
- "\n",
- " 1. The first one is titled \"Action in Mind: A Neural Network Approach to Action Recognition and Segmentation\" by Zahra Gharae. This paper proposes a neural network approach for action recognition and segmentation, which could provide some insights into your work with LSTM models. You can find it at this link: <http://arxiv.org/pdf/2104.14870v1>\n",
- "\n",
- " 2. Another interesting paper is \"Deep Neural Networks in Video Human Action Recognition: A Review\" by Zihan Wang, Yang Yang, Zhi Liu, and Yifan Zhen. This review discusses the application of deep neural networks for video human action recognition. It could give you a broader perspective on the current state-of-the-art methods in this field. You can access it here: <http://arxiv.org/pdf/2305.15692v1>\n",
- "\n",
- " 3. Lastly, there's \"3D Convolutional Neural Networks for Ultrasound-Based Silent Speech Interfaces\" by László Tóth and Amin Honarmandi Shandi. Although the title might not seem directly related to your work on LSTM models for action recognition, it does involve the use of 3D convolutional neural networks in video processing, which could still provide valuable insights. You can read it at this link: <http://arxiv.org/pdf/2104.11532v1>\n",
- "\n",
- " I hope you find these resources helpful! Let me know if there's anything else I can assist you with. Have a great day!\n"
+ "near \")\": syntax error\n",
+ "Error query: select * from arxivsql where id in )\n",
+ "\n"
  ]
  }
  ],
  "source": [
  "# test response, second step\n",
- "input_prompt = \"I'm working on a LSTM model to recognize actions in a video, recommend me some related papers\"\n",
- "args = {\n",
- " \"keywords\": [\"LSTM model\", \"video analysis\", \"action recognition\"],\n",
- " \"description\": \"For recognizing actions in videos using Long Short-Term Memory (LSTM) models, you may find the following papers insightful. These studies explore various aspects of action recognition using LSTM: (1) 'Action Recognition with 3D Convolutional Neural Networks and Long Short-Term Memory' by Tran et al., 2015; (2) 'ConvLSTM: A Convolutional Neural Network for Video Recognition' by Shi et al., 2015; (3) 'Using Long-Short Term Memory for Action Recognition in Videos' by Graves, 2013.\"\n",
- " }\n",
+ "input_prompt = \"Can you suggest some key papers on model predictive control for nonlinear systems, and are there any recent reviews on the application of control theory to robotics?\"\n",
+ "args = \"{\\n \\\"keywords\\\": [\\n \\\"Power Electronics\\\",\\n \\\"Renewable Energy Systems\\\",\\n \\\"High-Frequency Power Converters\\\",\\n \\\"Power Electronics Applications\\\",\\n \\\"Renewable Energy Sources\\\"\\n ],\\n \\\"description\\\": \\\"Power electronics is a branch of electrical engineering that deals with the conversion, control, and conditioning of electrical power. It is used in a wide variety of applications, including power generation, transmission, distribution, and utilization. Renewable energy systems are systems that generate electricity from renewable sources, such as solar, wind, and hydro power. Power electronics is used in renewable energy systems to convert the electrical output of the renewable source into a form that can be used by the grid or by consumers.\\\"\\n}\"\n",
+ "args = json.loads(args)\n",
  "contexts, results = response(args)\n",
  "if not results:\n",
  " # direct answer\n",
- " print(contexts)\n",
+ " print(contexts)\n",
  "else:\n",
  " output_prompt = make_answer_prompt(input_prompt,contexts)\n",
- " # answer = model.invoke(output_prompt,\n",
- " # temperature=0.3) # ctrans\n",
- " answer = model(prompt=output_prompt,\n",
- " temperature=0.0,\n",
- " max_tokens=1024,\n",
- " ) # llama\n",
- " print(\"------------------------\")\n",
- " print(answer['choices'][0]['text'])"
+ " answer = model.generate_content(output_prompt).text\n",
+ " print(answer)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "{'desired': 'Natural Language Processing (Computer Science)', 'question': 'What are some recent papers on deep learning architectures for text classification, and can you recommend any surveys or reviews on the topic?'}\n"
+ ]
+ }
+ ],
+ "source": [
+ "with open(\"test_questions.txt\",\"r\") as infile:\n",
+ " data = json.load(infile)\n",
+ "print(data[0])"
  ]
  },
  {
@@ -287,8 +294,6 @@
  ],
  "source": [
  "# full chain\n",
- "# inference time depends on hardware ability :')\n",
- "# with CPU i7-8750H, 16GB RAM and no GPU cuda, expect inference time up to 5-6 min\n",
  "# input_prompt = input()\n",
  "input_prompt = \"I'm working on a LSTM model to recognize actions in a video, recommend me some related papers\"\n",
  "first_prompt = extract_keyword_prompt(input_prompt)\n",
@@ -315,7 +320,61 @@
  },
  {
  "cell_type": "code",
- "execution_count": 17,
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def full_chain_single_question(input_prompt):\n",
+ " try:\n",
+ " first_prompt = extract_keyword_prompt(input_prompt)\n",
+ " temp_answer = model.generate_content(first_prompt).text\n",
+ "\n",
+ " args = json.loads(utils.trimming(temp_answer))\n",
+ " contexts, results = response(args)\n",
+ " if not results:\n",
+ " print(contexts)\n",
+ " else:\n",
+ " output_prompt = make_answer_prompt(input_prompt,contexts)\n",
+ " answer = model.generate_content(output_prompt).text\n",
+ " return temp_answer, answer\n",
+ " except Exception as e:\n",
+ " return temp_answer, \"Error occured: \" + str(e)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[]\n",
+ "[]\n",
+ "Error: timed out\n",
+ "[]\n",
+ "near \")\": syntax error\n",
+ "Error query: select * from arxivsql where id in )\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "test_log = []\n",
+ "for t in data:\n",
+ " temp_answer, answer = full_chain_single_question(t['question'])\n",
+ " test_log.append({'desired topic':t['desired'],\n",
+ " 'question':t['question'],\n",
+ " 'first answer':temp_answer,\n",
+ " 'final answer':answer})\n",
+ "with open(\"test_results.json\",\"w\") as outfile:\n",
+ " json.dump(test_log,outfile)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
  "metadata": {},
  "outputs": [],
  "source": [
utils.py CHANGED
@@ -27,7 +27,7 @@ class ArxivSQL:
         else:
             query_title = "True"
         if len(author)>0:
-            query_author = 'author like '
+            query_author = 'authors like '
             for auth in author:
                 query_author += "'%{}%' or ".format(auth)
             query_author = query_author[:-4]
@@ -38,12 +38,16 @@ class ArxivSQL:
         return result.fetchall()
 
     def query_id(self, ids=[]):
-        query = "select * from {} where id in (".format(self.table)
-        for id in ids:
-            query+="'"+id+"',"
-        query = query[:-1] + ")"
-        result = self.cur.execute(query)
-        return result.fetchall()
+        try:
+            query = "select * from {} where id in (".format(self.table)
+            for id in ids:
+                query+="'"+id+"',"
+            query = query[:-1] + ")"
+            result = self.cur.execute(query)
+            return result.fetchall()
+        except Exception as e:
+            print(e)
+            print("Error query: ",query)
 
     def add(self, crawl_records):
         """
@@ -71,6 +75,7 @@ class ArxivSQL:
             result+="\n" + query + "\n"
         finally:
             return results
+
 class ArxivChroma:
     """
     Create an interface to arxivdb, which only support query and addition.
@@ -161,7 +166,9 @@ def chunk_text_with_overlap(text, max_char=400, overlap=100):
 def trimming(txt):
     start = txt.find("{")
     end = txt.rfind("}")
-    return txt[start:end+1]
+    return txt[start:end+1].replace("\n"," ")
+
+# crawl data
 
 def extract_tag(txt,tagname):
     return txt[txt.find("<"+tagname+">")+len(tagname)+2:txt.find("</"+tagname+">")]
@@ -194,7 +201,7 @@ def choose_topic(summary):
                                               cache_dir='models')
     embed = model_embedding.encode(summary)
     cos_sim = lambda a,b: (a @ b.T) / (norm(a)*norm(b))
-    descriptions = json.load(open("topic_descriptions"))
+    descriptions = json.load(open("topic_descriptions.txt"))
     topic = ""
     max_sim = 0.
     for key in descriptions:
@@ -203,21 +210,59 @@ def choose_topic(summary):
             topic = key
             max_sim = sim
     return topic
-
+
+class TopicClassifier:
+    def __init__(self):
+        self.model_embedding = AutoModel.from_pretrained('jinaai/jina-embeddings-v2-base-en',
+                                                         trust_remote_code=True,
+                                                         cache_dir='models')
+        topic_descriptions = json.load(open("topic_descriptions.txt"))
+        self.topics = list(dict.keys(topic_descriptions))
+        self.embeddings = [self.model_embedding.encode(topic_descriptions[key]) for key in topic_descriptions]
+        self.cos_sim = lambda a,b: (a @ b.T) / (norm(a)*norm(b))
+
+    def classifier(self,description):
+        embed = self.model_embedding.encode(description)
+        max_sim = 0.
+        topic = ""
+        for i, key in enumerate(self.topics):
+            sim = self.cos_sim(embed,self.embeddings[i])
+            if sim > max_sim:
+                topic = key
+                max_sim = sim
+        return topic
+
+def crawl_exact_paper(title,author,max_results=3):
+    authors = process_authors_str(author)
+    records = []
+    url = 'http://export.arxiv.org/api/query?search_query=ti:{title}+AND+au:{author}&max_results={max_results}'.format(title=title,author=authors,max_results=max_results)
+    url = url.replace(" ","%20")
+    try:
+        arxiv_page = urllib.request.urlopen(url,timeout=100).read()
+        xml = str(arxiv_page,encoding="utf-8")
+        while xml.find("<entry>") != -1:
+            extract = xml[xml.find("<entry>")+7:xml.find("</entry>")]
+            xml = xml[xml.find("</entry>")+8:]
+            extract = get_record(extract)
+            topic = choose_topic(extract[6])
+            records.append([topic,*extract])
+        return records
+    except Exception as e:
+        return "Error: "+str(e)
+
 def crawl_arxiv(keyword_list, max_results=100):
     baseurl = 'http://export.arxiv.org/api/query?search_query='
     records = []
-    for keyword in keyword_list:
+    for i,keyword in enumerate(keyword_list):
         if i ==0:
             url = baseurl + 'all:' + keyword
-            i = i + 1
         else:
             url = url + '+OR+' + 'all:' + keyword
     url = url+ '&max_results=' + str(max_results)
     url = url.replace(' ', '%20')
     try:
         arxiv_page = urllib.request.urlopen(url,timeout=100).read()
-        arxiv_page = str(arxiv_page,encoding="utf-8")
+        xml = str(arxiv_page,encoding="utf-8")
         while xml.find("<entry>") != -1:
             extract = xml[xml.find("<entry>")+7:xml.find("</entry>")]
             xml = xml[xml.find("</entry>")+8:]
@@ -238,8 +283,8 @@ def process_authors_str(authors):
 def process_authors_list(string):
     """input a string of authors, return a list of authors"""
     authors = []
-    list_auth = string.split("and").strip()
+    list_auth = string.split("and")
    for author in list_auth:
        if author != "et al.":
-            authors.append(author)
+            authors.append(author.strip())
    return authors
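
Note on the repeated near ")": syntax error seen in the notebook outputs: query_id builds its SQL by string concatenation, so an empty ids list renders "select * from arxivsql where id in )", and the try/except added in this commit logs that failure but still returns None. A minimal sketch of an early-exit variant using sqlite3 placeholders (query_id_safe is a hypothetical name, not part of this commit):

def query_id_safe(cur, table, ids):
    """Fetch rows whose id is in ids, returning [] for empty input
    instead of emitting the malformed '... where id in )' query."""
    if not ids:
        return []
    placeholders = ",".join("?" * len(ids))  # one ? per id
    query = "select * from {} where id in ({})".format(table, placeholders)
    return cur.execute(query, ids).fetchall()

Binding the ids as parameters also avoids quoting issues that the manual "'"+id+"'" concatenation is prone to; the table name still comes from trusted code, since placeholders cannot stand in for identifiers.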