feradauto committed on
Commit
dd3dac1
1 Parent(s): 4a5135e

dataset script

NLP4SGPapers.py ADDED
@@ -0,0 +1,150 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """NLP4SGPapers dataset: a scientific dataset with three associated tasks that help identify NLP4SG papers."""
+
+
+ import csv
+
+ import datasets
+
+
+
+ _CITATION = """
+ """
+
+ _DESCRIPTION = """\
+ NLP4SGPapers dataset: a scientific dataset with three associated tasks that help identify NLP4SG papers.
+ """
+
+
+ _HOMEPAGE = ""
+
+
+ _LICENSE = ""
+
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+ # Note: the URLs use `resolve` (not `blob`) so the raw CSV files are downloaded rather than their HTML pages.
+ _URLS = {
+     "train": "https://huggingface.co/datasets/feradauto/nlp4sgpapers/resolve/main/train_set_final.csv",
+     "test": "https://huggingface.co/datasets/feradauto/nlp4sgpapers/resolve/main/test_set_final.csv",
+     "dev": "https://huggingface.co/datasets/feradauto/nlp4sgpapers/resolve/main/dev_set_final.csv",
+ }
+
+
+ class NLP4SGPapers(datasets.GeneratorBasedBuilder):
+     """A scientific dataset with three associated tasks that help identify NLP4SG papers."""
+
+     VERSION = datasets.Version("1.1.0")
+
+     # This dataset has a single configuration, "data", which can be loaded with
+     # data = datasets.load_dataset('feradauto/nlp4sgpapers', 'data')
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="data", version=VERSION, description="data"),
+     ]
+
+     DEFAULT_CONFIG_NAME = "data"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
+
+     def _info(self):
+         # This method specifies the datasets.DatasetInfo object, which contains the information and typings for the dataset.
+         features = datasets.Features(
+             {
+                 "ID": datasets.Value("string"),
+                 "url": datasets.Value("string"),
+                 "title": datasets.Value("string"),
+                 "abstract": datasets.Value("string"),
+                 "label_nlp4sg": datasets.Value("bool"),
+                 "task": datasets.Sequence(feature=datasets.Value("string")),
+                 "method": datasets.Sequence(feature=datasets.Value("string")),
+                 "goal1": datasets.Value("string"),
+                 "goal2": datasets.Value("string"),
+                 "goal3": datasets.Value("string"),
+                 "acknowledgments": datasets.Value("string"),
+                 "year": datasets.Value("int32"),
+                 "test": datasets.ClassLabel(num_classes=3, names=["0", "1", "2"]),
+                 "sdg1": datasets.Value("bool"),
+                 "sdg2": datasets.Value("bool"),
+                 "sdg3": datasets.Value("bool"),
+                 "sdg4": datasets.Value("bool"),
+                 "sdg5": datasets.Value("bool"),
+                 "sdg6": datasets.Value("bool"),
+                 "sdg7": datasets.Value("bool"),
+                 "sdg8": datasets.Value("bool"),
+                 "sdg9": datasets.Value("bool"),
+                 "sdg10": datasets.Value("bool"),
+                 "sdg11": datasets.Value("bool"),
+                 "sdg12": datasets.Value("bool"),
+                 "sdg13": datasets.Value("bool"),
+                 "sdg14": datasets.Value("bool"),
+                 "sdg15": datasets.Value("bool"),
+                 "sdg16": datasets.Value("bool"),
+                 "sdg17": datasets.Value("bool"),
+             }
+         )
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types.
+             features=features,
+             # If there's a common (input, target) tuple from the features, uncomment the supervised_keys
+             # line below and specify them. They'll be used if as_supervised=True in builder.as_dataset.
+             # supervised_keys=("sentence", "label"),
+             # Homepage of the dataset, for documentation.
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # This method downloads/extracts the data and defines the splits. If several configurations were
+         # possible (listed in BUILDER_CONFIGS), the configuration selected by the user would be available
+         # in self.config.name; this dataset has only the "data" configuration.
+
+         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs.
+         # It accepts any nested list/dict of URLs and returns the same structure with each URL replaced by
+         # a path to the local file; archives are extracted by default and the cached folder path is returned.
+         downloaded_files = dl_manager.download_and_extract(_URLS)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
+         ]
+
+
+     # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`; since only
+     # `filepath` is passed there, the signature takes no `split` argument.
+     def _generate_examples(self, filepath):
+         # Yields (key, example) tuples. The `key` is for legacy reasons (tfds) and is not important
+         # in itself, but must be unique for each example.
+         with open(filepath, encoding="utf-8", newline="") as f:
+             # The split files are CSVs, so parse them with csv.DictReader rather than json.loads.
+             reader = csv.DictReader(f)
+             for key, row in enumerate(reader):
+                 yield key, {
+                     "ID": row["ID"],
+                     "url": row["url"],
+                     "title": row["title"],
+                     "abstract": row["abstract"],
+                     # TODO: also populate the remaining features declared in `_info` (label_nlp4sg, task,
+                     # method, goal1-goal3, acknowledgments, year, test, sdg1-sdg17) from the CSV columns.
+                 }
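
For reference, once this commit is in place the dataset should be loadable through the standard datasets API. A minimal sketch, assuming the repository id feradauto/nlp4sgpapers and the single "data" configuration defined in the script above:

    from datasets import load_dataset

    # Downloads the three CSV splits and builds train/validation/test via the script above.
    dataset = load_dataset("feradauto/nlp4sgpapers", "data")
    print(dataset)                       # split names and sizes
    print(dataset["train"][0]["title"])  # inspect one example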
dev_set_final.csv ADDED
The diff for this file is too large to render. See raw diff
 
nlp4sg_papers.csv → test_set_final.csv RENAMED
The diff for this file is too large to render. See raw diff
 
train_set_final.csv ADDED
The diff for this file is too large to render. See raw diff
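
Since the split CSVs are too large to render inline, a quick local check is the easiest way to confirm that their headers contain the columns `_generate_examples` reads. A minimal sketch, assuming the files sit in the working directory and have a header row:

    import csv

    # Check each split file for the columns the loading script expects.
    expected = {"ID", "url", "title", "abstract"}
    for path in ("train_set_final.csv", "dev_set_final.csv", "test_set_final.csv"):
        with open(path, encoding="utf-8", newline="") as f:
            header = set(next(csv.reader(f)))
        print(path, "missing:", expected - header or "none")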