SouthpawIN committed
Commit e6dfd23
1 Parent(s): 2669095

Upload arxivddl.py with huggingface_hub

Files changed (1)
  1. arxivddl.py +219 -0
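The commit message says this file was pushed with the huggingface_hub client. As a minimal sketch of what such an upload can look like (the repo id, repo type, and local path below are placeholders, not taken from this commit):

    from huggingface_hub import HfApi

    api = HfApi()  # token picked up from `huggingface-cli login` or the HF_TOKEN env var
    api.upload_file(
        path_or_fileobj="arxivddl.py",       # local file to push (placeholder path)
        path_in_repo="arxivddl.py",          # destination path inside the repo
        repo_id="SouthpawIN/example-repo",   # hypothetical repo id
        repo_type="dataset",                 # assumption; could also be "model" or "space"
        commit_message="Upload arxivddl.py with huggingface_hub",
    )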
arxivddl.py ADDED
@@ -0,0 +1,219 @@
+ import json
+ import requests
+ import sys
+ import os
+ from datetime import datetime
+ from dateutil.relativedelta import relativedelta
+ import logging
+ from multiprocessing import Pool, cpu_count
+ from tqdm import tqdm
+ import re
+ import feedparser
+ import time
+
+ # Configuration Variables
+ MONTHS_BACK = 99  # Default number of months back to download papers
+ OUTPUT_DIR = 'arxiv_dumps'
+ JSONL_FILE = 'downloaded_papers.jsonl'
+ CACHE_DIR = 'cache'
+ SEARCH_CACHE_FILE = os.path.join(CACHE_DIR, 'search_cache.jsonl')
+ MAX_RESULTS_PER_PAGE = 100
+ MAX_BACKOFF = 120  # Maximum backoff time in seconds
+ CONCURRENT_DOWNLOADS = min(32, cpu_count())  # Number of concurrent downloads
+
+ def setup_logging(enable_logging: bool):
+     if enable_logging:
+         logging.basicConfig(filename='arxivdump.log', filemode='a',
+                             format='%(asctime)s - %(levelname)s - %(message)s',
+                             level=logging.DEBUG)
+     else:
+         logging.basicConfig(level=logging.CRITICAL)
+         logging.disable(logging.CRITICAL)
+
+ def sanitize_filename(name):
+     # Keep only word characters, underscores, and hyphens so titles are safe directory names.
+     return re.sub(r'[^\w\-]', '', name.replace(' ', '_'))
+
+ def load_cache(file_path):
+     # Return the set of paper ids already recorded in the JSONL download log.
+     if not os.path.exists(file_path):
+         return set()
+     with open(file_path, 'r', encoding='utf-8') as f:
+         return set(json.loads(line).get('id') for line in f)
+
+ def update_cache(file_path, metadata):
+     # os.makedirs('') raises, so only create a directory when the path actually has one.
+     cache_dir = os.path.dirname(file_path)
+     if cache_dir:
+         os.makedirs(cache_dir, exist_ok=True)
+     with open(file_path, 'a', encoding='utf-8') as f:
+         f.write(json.dumps(metadata, ensure_ascii=False) + '\n')
+
+ def fetch_paper_source(entry_id, headers):
+     # API entry ids may use either scheme, so normalize both to export.arxiv.org.
+     src_url = entry_id.replace("/abs/", "/src/")
+     src_url = src_url.replace("http://arxiv.org/", "https://export.arxiv.org/")
+     src_url = src_url.replace("https://arxiv.org/", "https://export.arxiv.org/")
+     backoff = 1
+
+     # Retry indefinitely with exponential backoff, capped at MAX_BACKOFF seconds.
+     while True:
+         try:
+             response = requests.get(src_url, headers=headers, allow_redirects=True, timeout=60)
+             content_type = response.headers.get('Content-Type', '').lower()
+             content_disposition = response.headers.get('Content-Disposition', '')
+
+             if 'filename=' in content_disposition:
+                 filename = content_disposition.split('filename=')[-1].strip('"')
+             else:
+                 filename = f"{entry_id.split('/')[-1]}.unlabeled_file_type"
+
+             return (response.content, content_type, dict(response.headers), filename)
+
+         except requests.exceptions.RequestException as e:
+             logging.warning(f"Request failed for {entry_id}: {e}. Retrying in {backoff} seconds.")
+             time.sleep(backoff)
+             backoff = min(backoff * 2, MAX_BACKOFF)
+
+ def save_archive_and_metadata(content, content_type, response_headers, filename, paper_title, index, jsonl_file, paper):
+     sanitized_title = sanitize_filename(paper_title)
+     paper_dir = os.path.join(OUTPUT_DIR, f"{index}_{sanitized_title}")
+     os.makedirs(paper_dir, exist_ok=True)
+
+     if content and filename:
+         archive_path = os.path.join(paper_dir, filename)
+         with open(archive_path, 'wb') as f:
+             f.write(content)
+         logging.info(f"Saved archive to: {archive_path}")
+     else:
+         logging.warning(f"No archive content to save for paper {index}_{sanitized_title}.")
+
+     metadata = {
+         "id": paper['id'],
+         "title": paper['title'],
+         "authors": [author['name'] for author in paper['authors']],
+         "summary": paper['summary'],
+         "categories": [tag['term'] for tag in paper['tags']],
+         "published": paper['published'],
+         "updated": paper['updated'],
+         "links": paper['links'],
+         "source_response_headers": response_headers,
+         "downloaded_at": datetime.utcnow().isoformat() + 'Z'
+     }
+
+     metadata_path = os.path.join(paper_dir, 'metadata.json')
+     with open(metadata_path, 'w', encoding='utf-8') as f:
+         json.dump(metadata, f, ensure_ascii=False, indent=4)
+
+     update_cache(jsonl_file, metadata)
+
+ def cache_search_results(results):
+     os.makedirs(CACHE_DIR, exist_ok=True)
+     with open(SEARCH_CACHE_FILE, 'a', encoding='utf-8') as f:
+         for paper in results:
+             f.write(json.dumps({
+                 'id': paper['id'],
+                 'title': paper['title'],
+                 'published': paper['published'],
+                 'updated': paper['updated'],
+                 'summary': paper['summary'],
+                 'authors': [author['name'] for author in paper['authors']],
+                 'categories': [tag['term'] for tag in paper['tags']],
+                 'links': paper['links']
+             }, ensure_ascii=False) + '\n')
+
+ def parse_atom_feed(feed_content):
+     parsed_feed = feedparser.parse(feed_content)
+     results = []
+
+     for entry in parsed_feed.entries:
+         # Name each Atom link by its title when present (e.g. 'pdf'), otherwise by its rel.
+         links_dict = {link.get('title', link.get('rel', 'unknown')): link.get('href') for link in entry.links}
+         results.append({
+             'id': entry.id,
+             'title': entry.title,
+             'summary': entry.summary,
+             'published': entry.published,
+             'updated': entry.updated,
+             'tags': [{'term': tag.term} for tag in entry.get('tags', [])],
+             'authors': [{'name': author.name} for author in entry.authors],
+             'links': links_dict
+         })
+
+     return results
+
+ def process_paper(args):
+     index, paper, headers, jsonl_file, cached_ids = args
+     entry_id = paper['id']
+     paper_id = entry_id.split('/')[-1]
+
+     # The cache stores the full entry URL as the id, so compare against that.
+     if entry_id in cached_ids:
+         return None
+
+     try:
+         content, content_type, response_headers, filename = fetch_paper_source(entry_id, headers)
+         if content and content_type:
+             save_archive_and_metadata(content, content_type, response_headers, filename, paper['title'], index, jsonl_file, paper)
+             return paper['published']
+     except Exception as e:
+         logging.error(f"Exception occurred while processing paper {paper_id}: {e}")
+
+     return None
+
+ def main():
+     setup_logging(True)  # Always enable logging for now
+
+     print(f"Starting arXiv paper download for the past {MONTHS_BACK} months")
+     logging.info(f"Starting arXiv paper download for the past {MONTHS_BACK} months")
+
+     query = "cat:cs.*"
+     sort_by = "submittedDate"
+     sort_order = "descending"
+     print(f"Search query: {query} | sortBy={sort_by} | sortOrder={sort_order}")
+     logging.info(f"Search query: {query} | sortBy={sort_by} | sortOrder={sort_order}")
+
+     start = 0
+     papers = []
+     cutoff_date = datetime.now() - relativedelta(months=MONTHS_BACK)
+     reached_cutoff = False
+
+     while not reached_cutoff:
+         search_url = f"http://export.arxiv.org/api/query?search_query={query}&sortBy={sort_by}&sortOrder={sort_order}&start={start}&max_results={MAX_RESULTS_PER_PAGE}"
+         print(f"Fetching page starting from {start}: {search_url}")
+         logging.info(f"Fetching page starting from {start}: {search_url}")
+
+         response = requests.get(search_url)
+         parsed_results = parse_atom_feed(response.content)
+         if not parsed_results:
+             break
+
+         for paper in parsed_results:
+             published_date = datetime.strptime(paper['published'], "%Y-%m-%dT%H:%M:%SZ")
+             if published_date < cutoff_date:
+                 reached_cutoff = True
+                 break
+             papers.append(paper)
+
+         cache_search_results(parsed_results)
+         start += len(parsed_results)
+
+         if len(parsed_results) < MAX_RESULTS_PER_PAGE:
+             break
+
+         time.sleep(3)  # Respectful delay between API calls
+
+     headers = {
+         "User-Agent": "Mozilla/5.0 (compatible; ArXivDownloader/1.0; +https://github.com/yourusername/arxiv-downloader)",
+         "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
+         "Accept-Language": "en-US,en;q=0.5",
+         "Referer": "https://arxiv.org/",
+         "Connection": "keep-alive",
+         "Upgrade-Insecure-Requests": "1"
+     }
+
+     cached_ids = load_cache(JSONL_FILE)
+
+     print(f"Starting to process {len(papers)} papers with multiprocessing...")
+     logging.info(f"Starting to process {len(papers)} papers with multiprocessing.")
+     os.makedirs(OUTPUT_DIR, exist_ok=True)
+
+     args_list = [(index, paper, headers, JSONL_FILE, cached_ids) for index, paper in enumerate(papers, start=1)]
+
+     with Pool(processes=CONCURRENT_DOWNLOADS) as pool:
+         list(tqdm(pool.imap_unordered(process_paper, args_list), total=len(papers), desc="Downloading papers"))
+
+     print("Processing complete.")
+     logging.info("Processing complete.")
+
+ if __name__ == "__main__":
+     main()