shchoi1019 committed
Commit a9bccda · verified · 1 Parent(s): 389ac74

Create arxiv.py

Files changed (1)
  1. arxiv.py +127 -0
arxiv.py ADDED
@@ -0,0 +1,127 @@
+# coding=utf-8
+# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""arXiv Dataset."""
+
+
+import json
+import os
+
+import datasets
+
+
+_CITATION = """\
+@misc{clement2019arxiv,
+    title={On the Use of ArXiv as a Dataset},
+    author={Colin B. Clement and Matthew Bierbaum and Kevin P. O'Keeffe and Alexander A. Alemi},
+    year={2019},
+    eprint={1905.00075},
+    archivePrefix={arXiv},
+    primaryClass={cs.IR}
+}
+"""
+
+_DESCRIPTION = """\
+A dataset of 1.7 million arXiv articles for applications like trend analysis, paper recommender engines, category prediction, co-citation networks, knowledge graph construction and semantic search interfaces.
+"""
+
+_HOMEPAGE = "https://www.kaggle.com/Cornell-University/arxiv"
+_LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/"
+
+_ID = "id"
+_SUBMITTER = "submitter"
+_AUTHORS = "authors"
+_TITLE = "title"
+_COMMENTS = "comments"
+_JOURNAL_REF = "journal-ref"
+_DOI = "doi"
+_REPORT_NO = "report-no"
+_CATEGORIES = "categories"
+_LICENSE = "license"
+_ABSTRACT = "abstract"
+_UPDATE_DATE = "update_date"
+
+_FILENAME = "arxiv-metadata-oai-snapshot.json"
+
+
+class ArxivDataset(datasets.GeneratorBasedBuilder):
+    """arXiv Dataset: arXiv dataset and metadata of 1.7M+ scholarly papers across STEM"""
+
+    VERSION = datasets.Version("1.1.0")
+
+    @property
+    def manual_download_instructions(self):
+        return """\
+    You need to go to https://www.kaggle.com/Cornell-University/arxiv
+    and manually download the dataset. Once the download is complete,
+    a zip file named archive.zip will appear in your Downloads folder
+    or whichever folder your browser saves files to. Extract that archive
+    and you will get an arxiv-metadata-oai-snapshot.json file.
+    You can then move that file under <path/to/folder>.
+    The <path/to/folder> can e.g. be "~/manual_data".
+    arxiv_dataset can then be loaded using the following command `datasets.load_dataset("arxiv_dataset", data_dir="<path/to/folder>")`.
+    """
+
+    def _info(self):
+        feature_names = [
+            _ID,
+            _SUBMITTER,
+            _AUTHORS,
+            _TITLE,
+            _COMMENTS,
+            _JOURNAL_REF,
+            _DOI,
+            _REPORT_NO,
+            _CATEGORIES,
+            _LICENSE,
+            _ABSTRACT,
+            _UPDATE_DATE,
+        ]
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features({k: datasets.Value("string") for k in feature_names}),
+            supervised_keys=None,
+            homepage=_HOMEPAGE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        """Returns SplitGenerators."""
+        path_to_manual_file = os.path.join(os.path.abspath(os.path.expanduser(dl_manager.manual_dir)), _FILENAME)
+        if not os.path.exists(path_to_manual_file):
+            raise FileNotFoundError(
+                f"{path_to_manual_file} does not exist. Make sure you insert a manual dir via `datasets.load_dataset('arxiv_dataset', data_dir=...)` that includes a file named {_FILENAME}. Manual download instructions: {self.manual_download_instructions}"
+            )
+        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"path": path_to_manual_file})]
+
+    def _generate_examples(self, path=None, title_set=None):
+        """Yields examples."""
+        with open(path, encoding="utf8") as f:
+            for i, entry in enumerate(f):
+                data = dict(json.loads(entry))
+                yield i, {
+                    _ID: data["id"],
+                    _SUBMITTER: data["submitter"],
+                    _AUTHORS: data["authors"],
+                    _TITLE: data["title"],
+                    _COMMENTS: data["comments"],
+                    _JOURNAL_REF: data["journal-ref"],
+                    _DOI: data["doi"],
+                    _REPORT_NO: data["report-no"],
+                    _CATEGORIES: data["categories"],
+                    _LICENSE: data["license"],
+                    _ABSTRACT: data["abstract"],
+                    _UPDATE_DATE: data["update_date"],
+                }
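
For reference, a minimal usage sketch (not part of the committed file), assuming the Kaggle snapshot has already been extracted so that arxiv-metadata-oai-snapshot.json sits under ~/manual_data, and that the script is loaded under the name its own instructions use (arxiv_dataset):

import datasets

# Point data_dir at the folder holding arxiv-metadata-oai-snapshot.json;
# the builder reads the JSON-lines file and exposes a single "train" split
# in which every field is a plain string.
ds = datasets.load_dataset("arxiv_dataset", data_dir="~/manual_data", split="train")

# Each example mirrors one JSON line of the snapshot.
example = ds[0]
print(example["title"], example["categories"], example["update_date"])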