Francesco De Toni committed
Commit 7595e0e
1 Parent(s): a904b77

Update HIPE2020_sent-split.py

Files changed (1)
  1. HIPE2020_sent-split.py +463 -463
HIPE2020_sent-split.py CHANGED
@@ -1,464 +1,464 @@
 # coding=utf-8
 # Copyright 2022 HuggingFace Datasets Authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
 #     http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.

 # Lint as: python3
 """TODO"""

 from datetime import datetime
 from typing import Optional
 import datasets
 import re


 _CITATION = """\
 TODO
 """

 _DESCRIPTION = """\
 TODO
 """

 _BASE_URL_TRAIN_DEV = "https://raw.githubusercontent.com/impresso/CLEF-HIPE-2020/master/data/v1.4/"


 _URLs = {
     "EN": {
         "dev": _BASE_URL_TRAIN_DEV + "en/HIPE-data-v1.4-dev-en.tsv?raw=true",
-        "test": _BASE_URL_TRAIN_DEV + "en/HIPE-data-v1.4-test-masked-en.tsv?raw=true"
+        "test": _BASE_URL_TRAIN_DEV + "en/HIPE-data-v1.4-test-en.tsv?raw=true"
     }, # English only no train
     "DE": {
         "dev": _BASE_URL_TRAIN_DEV + "de/HIPE-data-v1.4-dev-de.tsv?raw=true",
         "train": _BASE_URL_TRAIN_DEV + "de/HIPE-data-v1.4-train-de.tsv?raw=true",
-        "test": _BASE_URL_TRAIN_DEV + "de/HIPE-data-v1.4-test-masked-de.tsv?raw=true"
+        "test": _BASE_URL_TRAIN_DEV + "de/HIPE-data-v1.4-test-de.tsv?raw=true"
     },
     "FR": {
         "dev": _BASE_URL_TRAIN_DEV + "fr/HIPE-data-v1.4-dev-fr.tsv?raw=true",
         "train": _BASE_URL_TRAIN_DEV + "fr/HIPE-data-v1.4-train-fr.tsv?raw=true",
-        "test": _BASE_URL_TRAIN_DEV + "fr/HIPE-data-v1.4-test-masked-fr.tsv?raw=true"
+        "test": _BASE_URL_TRAIN_DEV + "fr/HIPE-data-v1.4-test-fr.tsv?raw=true"
     },
 }


 class HIPE2020Config(datasets.BuilderConfig):
     """BuilderConfig for HIPE2020"""

     def __init__(self, data_urls,**kwargs):
         """BuilderConfig for HIPE2020.
         Args:
             **kwargs: keyword arguments forwarded to super.
         """
         super(HIPE2020Config, self).__init__(**kwargs)
         self.data_urls = data_urls


 class HIPE2020(datasets.GeneratorBasedBuilder):
     """HIPE2020 dataset."""

     BUILDER_CONFIGS = [
         HIPE2020Config(
             name="en",
             data_urls=_URLs["EN"],
             version=datasets.Version("1.0.0"),
             description="HIPE dataset covering English",
         ),
         HIPE2020Config(
             name="de",
             data_urls=_URLs["DE"],
             version=datasets.Version("1.0.0"),
             description="HIPE dataset covering German",
         ),
         HIPE2020Config(
             name="fr",
             data_urls=_URLs["FR"],
             version=datasets.Version("1.0.0"),
             description="HIPE dataset covering French",
         ),
     ]

     def _info(self):
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
             features=datasets.Features(
                 {
                     "id": datasets.Value("string"),
                     "tokens": datasets.Sequence(datasets.Value("string")),
                     "NE_COARSE_LIT": datasets.Sequence(
                         datasets.features.ClassLabel(
                             names=[
                                 "O",
                                 "B-comp",
                                 "B-loc",
                                 "B-org",
                                 "B-pers",
                                 "B-prod",
                                 "B-time",
                                 "I-loc",
                                 "I-org",
                                 "I-pers",
                                 "I-prod",
                                 "I-time",
                                 "_",
                             ]
                         )
                     ),
                     "NE_COARSE_METO_tags": datasets.Sequence(
                         datasets.features.ClassLabel(
                             names=[
                                 "O",
                                 "B-loc",
                                 "B-org",
                                 "B-pers",
                                 "B-prod",
                                 "I-loc",
                                 "I-org",
                                 "I-pers",
                                 "_",
                             ]
                         )
                     ),
                     "NE_FINE_LIT_tags": datasets.Sequence(
                         datasets.features.ClassLabel(
                             names=[
                                 "O",
                                 "B-comp.name",
                                 "B-loc",
                                 "B-loc.add.elec",
                                 "B-loc.add.phys",
                                 "B-loc.adm.nat",
                                 "B-loc.adm.reg",
                                 "B-loc.adm.sup",
                                 "B-loc.adm.town",
                                 "B-loc.fac",
                                 "B-loc.oro",
                                 "B-loc.phys.astro",
                                 "B-loc.phys.geo",
                                 "B-loc.phys.hydro",
                                 "B-loc.unk",
                                 "B-org",
                                 "B-org.adm",
                                 "B-org.ent",
                                 "B-org.ent.pressagency",
                                 "B-pers",
                                 "B-pers.coll",
                                 "B-pers.ind",
                                 "B-pers.ind.articleauthor",
                                 "B-prod",
                                 "B-prod.doctr",
                                 "B-prod.media",
                                 "B-time",
                                 "B-time.date.abs",
                                 "I-loc",
                                 "I-loc.add.elec",
                                 "I-loc.add.phys",
                                 "I-loc.adm.nat",
                                 "I-loc.adm.reg",
                                 "I-loc.adm.sup",
                                 "I-loc.adm.town",
                                 "I-loc.fac",
                                 "I-loc.oro",
                                 "I-loc.phys.geo",
                                 "I-loc.phys.hydro",
                                 "I-loc.unk",
                                 "I-org",
                                 "I-org.adm",
                                 "I-org.ent",
                                 "I-org.ent.pressagency",
                                 "I-pers",
                                 "I-pers.coll",
                                 "I-pers.ind",
                                 "I-pers.ind.articleauthor",
                                 "I-prod",
                                 "I-prod.doctr",
                                 "I-prod.media",
                                 "I-time",
                                 "I-time.date.abs",
                                 "_",
                             ]
                         )
                     ),
                     "NE_FINE_METO_tags": datasets.Sequence(
                         datasets.features.ClassLabel(
                             names=[
                                 "O",
                                 "B-loc",
                                 "B-loc.adm.reg",
                                 "B-loc.adm.town",
                                 "B-loc.fac",
                                 "B-loc.oro",
                                 "B-org",
                                 "B-org.adm",
                                 "B-org.ent",
                                 "B-pers.coll",
                                 "B-pers.ind",
                                 "B-prod.media",
                                 "I-loc",
                                 "I-loc.adm.reg",
                                 "I-loc.fac",
                                 "I-loc.oro",
                                 "I-org",
                                 "I-org.adm",
                                 "I-org.ent",
                                 "I-pers",
                                 "I-pers.ind",
                                 "_",
                             ]
                         )
                     ),
                     "NE_FINE_COMP_tags": datasets.Sequence(
                         datasets.features.ClassLabel(
                             names=[
                                 "O",
                                 "B-comp.demonym",
                                 "B-comp.function",
                                 "B-comp.name",
                                 "B-comp.qualifier",
                                 "B-comp.title",
                                 "I-comp.demonym",
                                 "I-comp.function",
                                 "I-comp.name",
                                 "I-comp.qualifier",
                                 "I-comp.title",
                                 "_",
                             ]
                         )
                     ),
                     "NE_NESTED_tags": datasets.Sequence(
                         datasets.features.ClassLabel(
                             names=[
                                 "O",
                                 "B-loc",
                                 "B-loc.adm.nat",
                                 "B-loc.adm.reg",
                                 "B-loc.adm.sup",
                                 "B-loc.adm.town",
                                 "B-loc.fac",
                                 "B-loc.oro",
                                 "B-loc.phys.geo",
                                 "B-loc.phys.hydro",
                                 "B-org",
                                 "B-org.adm",
                                 "B-org.ent",
                                 "B-pers.coll",
                                 "B-pers.ind",
                                 "B-prod.media",
                                 "B-time.date.abs",
                                 "I-loc",
                                 "I-loc.adm.nat",
                                 "I-loc.adm.reg",
                                 "I-loc.adm.town",
                                 "I-loc.fac",
                                 "I-loc.oro",
                                 "I-loc.phys.geo",
                                 "I-loc.phys.hydro",
                                 "I-org",
                                 "I-org.adm",
                                 "I-org.ent",
                                 "I-pers.ind",
                                 "_",
                             ]
                         )
                     ),
                     "NEL_LIT_ID": datasets.Sequence(datasets.Value("string")),
                     "NEL_METO_ID": datasets.Sequence(datasets.Value("string")),
                     "no_space_after": datasets.Sequence(datasets.Value("bool")),
                     "end_of_line": datasets.Sequence(datasets.Value("bool")),
                     "PySBDSegment":datasets.Sequence(datasets.Value("bool")),
                     "date": datasets.Value("timestamp[s]"),
                     "title": datasets.Value("string"),
                     "document_id": datasets.Value("string"),
                 }
             ),
             supervised_keys=None,
             homepage="TODO",
             citation=_CITATION,
         )

     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
         downloaded_files = dl_manager.download_and_extract(self.config.data_urls)
         if self.config.name != "en":
             data_files = {
                 "train": downloaded_files["train"],
                 "dev": downloaded_files["dev"],
             }
         else:
             data_files = {"dev": downloaded_files["dev"]}
         if self.config.name == "en":
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.VALIDATION,
                     gen_kwargs={"filepath": data_files["dev"]},
                 ),
                 # datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}), # TODO add test splits
             ]

         else:
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.TRAIN,
                     gen_kwargs={"filepath": data_files["train"]},
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.VALIDATION,
                     gen_kwargs={"filepath": data_files["dev"]},
                 ),
             ]

     def _generate_examples(self, filepath):
         date_re = re.compile(r"# date = (\d{4}-\d{2}-\d{02})")
         title_re = re.compile(r"newspaper = (\w{3})")
         document_id_re = re.compile(r"document_id = (.*)")
         with open(filepath, encoding="utf-8") as f:
             guid = 0
             tokens = []
             NE_COARSE_LIT_tags = []
             NE_COARSE_METO_tags = []
             NE_FINE_LIT_tags = []
             NE_FINE_METO_tags = []
             NE_FINE_COMP_tags = []
             NE_NESTED_tags = []
             NEL_LIT_ID = []
             NEL_METO_ID = []
             no_space_after = []
             end_of_line = []
             pysdbsegment = []

             new_sentence = False

             for line in f:
                 if line.startswith(
                     "TOKEN NE-COARSE-LIT NE-COARSE-METO NE-FINE-LIT NE-FINE-METO NE-FINE-COMP NE-NESTED NEL-LIT NEL-METO MISC"
                 ):
                     continue
                 if line.startswith("#") or line == "\n":
                     date_match = re.search(date_re, line)
                     if date_match:
                         date = date_match.group(1)
                         date = datetime.strptime(date, "%Y-%m-%d")
                     title_match = re.search(title_re, line)
                     if title_match:
                         title = title_match.group(1)
                     document_id_match = re.search(document_id_re, line)
                     if document_id_match:
                         document_id = document_id_match.group(1)
                     if tokens:
                         yield guid, {
                             "id": str(guid),
                             "tokens": tokens,
                             "NE_COARSE_LIT": NE_COARSE_LIT_tags,
                             "NE_COARSE_METO_tags": NE_COARSE_METO_tags,
                             "NE_FINE_LIT_tags": NE_FINE_LIT_tags,
                             "NE_FINE_METO_tags": NE_FINE_METO_tags,
                             "NE_FINE_COMP_tags": NE_FINE_COMP_tags,
                             "NE_NESTED_tags": NE_NESTED_tags,
                             "NEL_LIT_ID": NEL_LIT_ID,
                             "NEL_METO_ID": NEL_METO_ID,
                             "no_space_after": no_space_after,
                             "end_of_line": end_of_line,
                             "PySBDSegment":pysdbsegment,
                             "date": date,
                             "title": title,
                             "document_id": document_id,
                         }
                         guid += 1
                         tokens = []
                         NE_COARSE_LIT_tags = []
                         NE_COARSE_METO_tags = []
                         NE_FINE_LIT_tags = []
                         NE_FINE_METO_tags = []
                         NE_FINE_COMP_tags = []
                         NE_NESTED_tags = []
                         NEL_LIT_ID = []
                         NEL_METO_ID = []
                         no_space_after = []
                         end_of_line = []
                         pysdbsegment = []
                 else:
                     # New row if there is a new sentence
                     if new_sentence == True:
                         yield guid, {
                             "id": str(guid),
                             "tokens": tokens,
                             "NE_COARSE_LIT": NE_COARSE_LIT_tags,
                             "NE_COARSE_METO_tags": NE_COARSE_METO_tags,
                             "NE_FINE_LIT_tags": NE_FINE_LIT_tags,
                             "NE_FINE_METO_tags": NE_FINE_METO_tags,
                             "NE_FINE_COMP_tags": NE_FINE_COMP_tags,
                             "NE_NESTED_tags": NE_NESTED_tags,
                             "NEL_LIT_ID": NEL_LIT_ID,
                             "NEL_METO_ID": NEL_METO_ID,
                             "no_space_after": no_space_after,
                             "end_of_line": end_of_line,
                             "PySBDSegment":pysdbsegment,
                             "date": date,
                             "title": title,
                             "document_id": document_id,
                         }
                         guid += 1
                         tokens = []
                         NE_COARSE_LIT_tags = []
                         NE_COARSE_METO_tags = []
                         NE_FINE_LIT_tags = []
                         NE_FINE_METO_tags = []
                         NE_FINE_COMP_tags = []
                         NE_NESTED_tags = []
                         NEL_LIT_ID = []
                         NEL_METO_ID = []
                         no_space_after = []
                         end_of_line = []
                         pysdbsegment = []

                     # HIPE 2020 tokens are tab separated
                     splits = line.split(
                         "\t"
                     ) # TOKEN NE-COARSE-LIT NE-COARSE-METO NE-FINE-LIT NE-FINE-METO NE-FINE-COMP NE-NESTED NEL-LIT NEL-METO MISC
                     tokens.append(splits[0])
                     NE_COARSE_LIT_tags.append(splits[1])
                     NE_COARSE_METO_tags.append(splits[2])
                     NE_FINE_LIT_tags.append(splits[3])
                     NE_FINE_METO_tags.append(splits[4])
                     NE_FINE_COMP_tags.append(splits[5])
                     NE_NESTED_tags.append(splits[6])
                     NEL_LIT_ID.append(splits[7])
                     NEL_METO_ID.append(splits[8])
                     misc = splits[-1]
                     is_space = "NoSpaceAfter" in misc
                     is_end_of_line = "EndOfLine" in misc
                     PySBDSegment = "PySBDSegment" in misc
                     no_space_after.append(is_space)
                     end_of_line.append(is_end_of_line)
                     pysdbsegment.append(PySBDSegment)

                     new_sentence = PySBDSegment

             # last example
             yield guid, {
                 "id": str(guid),
                 "tokens": tokens,
                 "NE_COARSE_LIT": NE_COARSE_LIT_tags,
                 "NE_COARSE_METO_tags": NE_COARSE_METO_tags,
                 "NE_FINE_LIT_tags": NE_FINE_LIT_tags,
                 "NE_FINE_METO_tags": NE_FINE_METO_tags,
                 "NE_FINE_COMP_tags": NE_FINE_COMP_tags,
                 "NE_NESTED_tags": NE_NESTED_tags,
                 "NEL_LIT_ID": NEL_LIT_ID,
                 "NEL_METO_ID": NEL_METO_ID,
                 "no_space_after": no_space_after,
                 "end_of_line": end_of_line,
                 "PySBDSegment":pysdbsegment,
                 "date": date,
                 "title": title,
                 "document_id": document_id,
             }
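
For context, a minimal sketch of how a script like this is typically consumed. It assumes the file is saved locally as HIPE2020_sent-split.py and a `datasets` release that still executes script-based loaders (newer releases require trust_remote_code=True or have dropped script support); the detokenization step only relies on the no_space_after flags that _generate_examples reads from the MISC column:

from datasets import load_dataset

# "fr" selects one of the three configs declared in BUILDER_CONFIGS ("en", "de", "fr").
# Note that "en" defines no train split, so only its validation split is generated.
dataset = load_dataset("HIPE2020_sent-split.py", "fr")
example = dataset["train"][0]

# Rebuild the surface text of the segment: no_space_after[i] is True whenever
# the MISC column of token i contained "NoSpaceAfter", i.e. the token is glued
# to its successor (hyphenated OCR tokens, punctuation, etc.).
text = "".join(
    tok if glued else tok + " "
    for tok, glued in zip(example["tokens"], example["no_space_after"])
).rstrip()
print(example["document_id"], text)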