dibyaaaaax committed
Commit
d955afd
1 Parent(s): 410276e

Upload kptimes.py

Files changed (1)
  1. kptimes.py +155 -0
kptimes.py ADDED
@@ -0,0 +1,155 @@
import json
import datasets

# _SPLIT = ['train', 'test', 'valid']
_CITATION = """\
@inproceedings{gallina2019kptimes,
title={KPTimes: A Large-Scale Dataset for Keyphrase Generation on News Documents},
author={Gallina, Ygor and Boudin, Florian and Daille, B{\\'e}atrice},
booktitle={Proceedings of the 12th International Conference on Natural Language Generation},
pages={130--135},
year={2019}
}
"""

_DESCRIPTION = """\
KPTimes is a large-scale dataset of news articles paired with editor-curated
keyphrases, for keyphrase extraction and generation (Gallina et al., 2019).
"""

_HOMEPAGE = "https://github.com/ygorg/KPTimes"

_LICENSE = "Apache License 2.0"

_URLS = {
    "test": "test.jsonl",
    "train": "train.jsonl",
    "valid": "valid.jsonl"
}


class KPTimes(datasets.GeneratorBasedBuilder):
    """KPTimes: keyphrase extraction and generation on news documents."""

    VERSION = datasets.Version("0.0.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="extraction", version=VERSION,
                               description="Documents with token-level BIO keyphrase tags"),
        datasets.BuilderConfig(name="generation", version=VERSION,
                               description="Documents with extractive and abstractive keyphrase targets"),
        datasets.BuilderConfig(name="raw", version=VERSION,
                               description="All available fields, including other metadata"),
    ]

    DEFAULT_CONFIG_NAME = "extraction"

    def _info(self):
        if self.config.name == "extraction":  # the configuration selected in BUILDER_CONFIGS above
            features = datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "document": datasets.features.Sequence(datasets.Value("string")),
                    "doc_bio_tags": datasets.features.Sequence(datasets.Value("string"))
                }
            )
        elif self.config.name == "generation":
            features = datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "document": datasets.features.Sequence(datasets.Value("string")),
                    "extractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
                    "abstractive_keyphrases": datasets.features.Sequence(datasets.Value("string"))
                }
            )
        else:
            features = datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "document": datasets.features.Sequence(datasets.Value("string")),
                    "doc_bio_tags": datasets.features.Sequence(datasets.Value("string")),
                    "extractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
                    "abstractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
                    "other_metadata": datasets.features.Sequence(
                        {
                            "text": datasets.features.Sequence(datasets.Value("string")),
                            "bio_tags": datasets.features.Sequence(datasets.Value("string"))
                        }
                    )
                }
            )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_dir['train'],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_dir['test'],
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_dir['valid'],
                    "split": "valid",
                },
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath, split):
        with open(filepath, encoding="utf-8") as f:
            for key, row in enumerate(f):
                data = json.loads(row)
                if self.config.name == "extraction":
                    # Yields examples as (key, example) tuples
                    yield key, {
                        "id": data['paper_id'],
                        "document": data["document"],
                        "doc_bio_tags": data.get("doc_bio_tags")
                    }
                elif self.config.name == "generation":
                    yield key, {
                        "id": data['paper_id'],
                        "document": data["document"],
                        "extractive_keyphrases": data.get("extractive_keyphrases"),
                        "abstractive_keyphrases": data.get("abstractive_keyphrases")
                    }
                else:
                    yield key, {
                        "id": data['paper_id'],
                        "document": data["document"],
                        "doc_bio_tags": data.get("doc_bio_tags"),
                        "extractive_keyphrases": data.get("extractive_keyphrases"),
                        "abstractive_keyphrases": data.get("abstractive_keyphrases"),
                        "other_metadata": data["other_metadata"]
                    }
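
For the script to work, each line of test.jsonl/train.jsonl/valid.jsonl must be a standalone JSON object carrying the keys that _generate_examples reads. A sketch of one such record; the keys come from the script, the values are invented for illustration:

import json

record = {
    "paper_id": 0,                                      # becomes the "id" feature
    "document": ["Apple", "unveils", "new", "iPhone"],  # pre-tokenized text
    "doc_bio_tags": ["B", "O", "O", "B"],               # one tag per token
    "extractive_keyphrases": ["iphone"],                # phrases present in the text
    "abstractive_keyphrases": ["product launch"],       # phrases absent from the text
    "other_metadata": {"text": [], "bio_tags": []},     # extra aligned annotations
}
print(json.dumps(record))  # one such object per line in the .jsonl file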
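With the script and the three .jsonl files in one dataset repository, the data loads through the standard datasets API. A minimal sketch: the repo id "dibyaaaaax/kptimes" is assumed from the committer's namespace rather than confirmed, and recent datasets releases may additionally require trust_remote_code=True for script-based datasets:

from datasets import load_dataset

# "dibyaaaaax/kptimes" is an assumed repo id, not confirmed by the commit.
ds = load_dataset("dibyaaaaax/kptimes", "extraction")  # or "generation" / "raw"

print(ds)                           # DatasetDict with train/test/validation splits
sample = ds["train"][0]
print(sample["document"][:10])      # first ten tokens of the first article
print(sample["doc_bio_tags"][:10])  # the matching BIO tags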
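For the "extraction" config, gold phrases can be rebuilt from the tags. A minimal sketch assuming the tags are the plain "B"/"I"/"O" strings the feature schema suggests (the exact tag inventory is not shown in the script):

def bio_to_phrases(tokens, tags):
    """Collect token spans tagged B(I)* into phrase strings."""
    phrases, current = [], []
    for token, tag in zip(tokens, tags):
        if tag == "B":                # a new phrase starts here
            if current:
                phrases.append(" ".join(current))
            current = [token]
        elif tag == "I" and current:  # continue the open phrase
            current.append(token)
        else:                         # "O" (or a stray "I") closes any open phrase
            if current:
                phrases.append(" ".join(current))
            current = []
    if current:
        phrases.append(" ".join(current))
    return phrases

# e.g. bio_to_phrases(["Apple", "unveils", "iPhone"], ["B", "O", "B"]) -> ["Apple", "iPhone"]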