Datasets:

ArXiv:
License:
dfki-nlp commited on
Commit
df547fe
1 Parent(s): fc60adb

Upload multitacred.py

Browse files
Files changed (1) hide show
  1. multitacred.py +451 -0
multitacred.py ADDED
@@ -0,0 +1,451 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """The MultiTACRED Relation Classification dataset in various languages"""
17
+ import itertools
18
+ import json
19
+ import os
20
+
21
+ import datasets
22
+
23
# BibTeX entries for MultiTACRED and the TACRED variants it builds on
# (original TACRED, TACRED-Revisited, Re-TACRED).
_CITATION = """\
@inproceedings{hennig-etal-2023-multitacred,
    title = "MultiTACRED: A Multilingual Version of the TAC Relation Extraction Dataset",
    author = "Hennig, Leonhard and Thomas, Philippe and Möller, Sebastian",
    booktitle = "Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = jul,
    year = "2023",
    address = "Online and Toronto, Canada",
    publisher = "Association for Computational Linguistics",
}

@inproceedings{zhang-etal-2017-position,
    title = "Position-aware Attention and Supervised Data Improve Slot Filling",
    author = "Zhang, Yuhao and
      Zhong, Victor and
      Chen, Danqi and
      Angeli, Gabor and
      Manning, Christopher D.",
    booktitle = "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing",
    month = sep,
    year = "2017",
    address = "Copenhagen, Denmark",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/D17-1004",
    doi = "10.18653/v1/D17-1004",
    pages = "35--45",
}

@inproceedings{alt-etal-2020-tacred,
    title = "{TACRED} Revisited: A Thorough Evaluation of the {TACRED} Relation Extraction Task",
    author = "Alt, Christoph and
      Gabryszak, Aleksandra and
      Hennig, Leonhard",
    booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
    month = jul,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.acl-main.142",
    doi = "10.18653/v1/2020.acl-main.142",
    pages = "1558--1569",
}

@inproceedings{DBLP:conf/aaai/StoicaPP21,
    author = {George Stoica and
              Emmanouil Antonios Platanios and
              Barnab{\'{a}}s P{\'{o}}czos},
    title = {Re-TACRED: Addressing Shortcomings of the {TACRED} Dataset},
    booktitle = {Thirty-Fifth {AAAI} Conference on Artificial Intelligence, {AAAI}
                 2021, Thirty-Third Conference on Innovative Applications of Artificial
                 Intelligence, {IAAI} 2021, The Eleventh Symposium on Educational Advances
                 in Artificial Intelligence, {EAAI} 2021, Virtual Event, February 2-9,
                 2021},
    pages = {13843--13850},
    publisher = {{AAAI} Press},
    year = {2021},
    url = {https://ojs.aaai.org/index.php/AAAI/article/view/17631},
}
"""
82
+
83
# Long-form dataset description shown on the Hub dataset card.
_DESCRIPTION = """\
MultiTACRED is a multilingual version of the large-scale TAC Relation Extraction Dataset (LDC2018T24).
It covers 12 typologically diverse languages from 9 language families, and was created by the Speech & Language
Technology group of DFKI by machine-translating the instances of the original TACRED dataset and automatically
projecting their entity annotations. For details of the original TACRED's data collection and annotation process,
see LDC2018T24. Translations are syntactically validated by checking the correctness of the XML tag markup.
Any translations with an invalid tag structure, e.g. missing or invalid head or tail tag pairs, are
discarded (on average, 2.3% of the instances).

Languages covered are: Arabic, Chinese, Finnish, French, German, Hindi, Hungarian, Japanese, Polish,
Russian, Spanish, Turkish. Intended use is supervised relation classification. Audience - researchers.

Please see our ACL paper for full details.

NOTE: This Datasetreader supports a reduced version of the original TACRED JSON format with the following changes:
- Removed fields: stanford_pos, stanford_ner, stanford_head, stanford_deprel, docid
The motivation for this is that we want to support additional languages, for which these fields were not required
or available. The reader expects the specification of a language-specific configuration specifying the variant
(original, revisited or retacred) and the language (as a two-letter iso code). The default config is 'original-de'.

The DatasetReader changes the offsets of the following fields, to conform with standard Python usage (see
#_generate_examples()):
- subj_end to subj_end + 1 (make end offset exclusive)
- obj_end to obj_end + 1 (make end offset exclusive)

NOTE 2: The MultiTACRED dataset offers an additional 'split', namely the backtranslated test data (translated to target
language and then back to English). To access this split, access dataset['backtranslated_test'].
"""
111
+
112
# Project homepage; the translated data itself is distributed through the LDC.
_HOMEPAGE = "https://github.com/DFKI-NLP/MultiTACRED"

_LICENSE = "LDC"

# LDC catalog entry for the manual download (placeholder catalog id at commit time).
_URL = "https://catalog.ldc.upenn.edu/LDC2023_TODO"

# The HuggingFace datasets library does not host the dataset itself; these URLs
# only point to the publicly available label-patch files fetched in `_split_generators`.
# TACRED-Revisited patches: corrected dev/test examples (lists of full example dicts).
_PATCH_URLs = {
    "dev": "https://raw.githubusercontent.com/DFKI-NLP/tacrev/master/patch/dev_patch.json",
    "test": "https://raw.githubusercontent.com/DFKI-NLP/tacrev/master/patch/test_patch.json",
}
# Re-TACRED patches: id -> relabeled relation mappings for all three splits.
_RETACRED_PATCH_URLs = {
    "train": "https://raw.githubusercontent.com/gstoica27/Re-TACRED/master/Re-TACRED/train_id2label.json",
    "dev": "https://raw.githubusercontent.com/gstoica27/Re-TACRED/master/Re-TACRED/dev_id2label.json",
    "test": "https://raw.githubusercontent.com/gstoica27/Re-TACRED/master/Re-TACRED/test_id2label.json"
}

# Name of the extra split holding test data translated to the target language and back to English.
_BACKTRANSLATION_TEST_SPLIT = "backtranslated_test"

# Label-variant identifiers used to build config names such as "original-de".
_RETACRED = "retacred"

_REVISITED = "revisited"

_ORIGINAL = "original"

_VERSION = datasets.Version("1.1.0")
139
+
140
# Two-letter ISO 639-1 codes of the 12 translation languages.
_LANGS = [
    "ar",
    "de",
    "es",
    "fi",
    "fr",
    "hi",
    "hu",
    "ja",
    "pl",
    "ru",
    "tr",
    "zh",
]
154
+
155
# Relation labels for the 'original' and 'revisited' variants (incl. "no_relation").
_CLASS_LABELS = [
    "no_relation",
    "org:alternate_names",
    "org:city_of_headquarters",
    "org:country_of_headquarters",
    "org:dissolved",
    "org:founded",
    "org:founded_by",
    "org:member_of",
    "org:members",
    "org:number_of_employees/members",
    "org:parents",
    "org:political/religious_affiliation",
    "org:shareholders",
    "org:stateorprovince_of_headquarters",
    "org:subsidiaries",
    "org:top_members/employees",
    "org:website",
    "per:age",
    "per:alternate_names",
    "per:cause_of_death",
    "per:charges",
    "per:children",
    "per:cities_of_residence",
    "per:city_of_birth",
    "per:city_of_death",
    "per:countries_of_residence",
    "per:country_of_birth",
    "per:country_of_death",
    "per:date_of_birth",
    "per:date_of_death",
    "per:employee_of",
    "per:origin",
    "per:other_family",
    "per:parents",
    "per:religion",
    "per:schools_attended",
    "per:siblings",
    "per:spouse",
    "per:stateorprovince_of_birth",
    "per:stateorprovince_of_death",
    "per:stateorprovinces_of_residence",
    "per:title",
]
199
+
200
# Relation labels for the 'retacred' variant; differs from _CLASS_LABELS
# (e.g. "*_of_branch" instead of "*_of_headquarters", adds "per:identity").
_RETACRED_CLASS_LABELS = [
    "no_relation",
    "org:alternate_names",
    "org:city_of_branch",
    "org:country_of_branch",
    "org:dissolved",
    "org:founded",
    "org:founded_by",
    "org:member_of",
    "org:members",
    "org:number_of_employees/members",
    "org:political/religious_affiliation",
    "org:shareholders",
    "org:stateorprovince_of_branch",
    "org:top_members/employees",
    "org:website",
    "per:age",
    "per:cause_of_death",
    "per:charges",
    "per:children",
    "per:cities_of_residence",
    "per:city_of_birth",
    "per:city_of_death",
    "per:countries_of_residence",
    "per:country_of_birth",
    "per:country_of_death",
    "per:date_of_birth",
    "per:date_of_death",
    "per:employee_of",
    "per:identity",
    "per:origin",
    "per:other_family",
    "per:parents",
    "per:religion",
    "per:schools_attended",
    "per:siblings",
    "per:spouse",
    "per:stateorprovince_of_birth",
    "per:stateorprovince_of_death",
    "per:stateorprovinces_of_residence",
    "per:title"
]
242
+
243
# Entity-type labels used for the "subj_type" and "obj_type" features.
_NER_CLASS_LABELS = [
    "LOCATION",
    "ORGANIZATION",
    "PERSON",
    "DATE",
    "MONEY",
    "PERCENT",
    "TIME",
    "CAUSE_OF_DEATH",
    "CITY",
    "COUNTRY",
    "CRIMINAL_CHARGE",
    "EMAIL",
    "HANDLE",
    "IDEOLOGY",
    "NATIONALITY",
    "RELIGION",
    "STATE_OR_PROVINCE",
    "TITLE",
    "URL",
    "NUMBER",
    "ORDINAL",
    "MISC",
    "DURATION",
    "O",
]
269
+
270
# Human-readable summaries of the three label variants, used in config descriptions.
_DESC_TEXTS = {_ORIGINAL: 'The original TACRED.',
               _REVISITED: 'TACRED Revisited (corrected labels for 5k most challenging examples in dev and test split).',
               _RETACRED: 'Relabeled TACRED (corrected labels for all splits and pruned)'}
273
+
274
+
275
def convert_ptb_token(token: str) -> str:
    """Map a Penn-Treebank bracket token (e.g. ``-LRB-``) to its literal bracket.

    Matching is case-insensitive; any token that is not one of the six PTB
    bracket escapes is returned unchanged.
    """
    ptb_brackets = {
        "-lrb-": "(",
        "-rrb-": ")",
        "-lsb-": "[",
        "-rsb-": "]",
        "-lcb-": "{",
        "-rcb-": "}",
    }
    return ptb_brackets.get(token.lower(), token)
285
+
286
+
287
class MultiTacredConfig(datasets.BuilderConfig):
    """BuilderConfig for MultiTacred."""

    def __init__(self, label_variant, language, **kwargs):
        """Create a MultiTacred builder configuration.

        Args:
            label_variant: `string`, source of labels, i.e. ORIGINAL, REVISITED or RETACRED
            language: `string`, 2-letter ISO 639-1 language code
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=_VERSION, **kwargs)
        self.label_variant = label_variant
        self.language = language
301
+
302
+
303
class MultiTacred(datasets.GeneratorBasedBuilder):
    """MultiTACRED is a multilingual version of the large-scale TAC Relation Extraction Dataset (LDC2018T24)."""

    # One configuration per (label variant, language) combination, e.g. "original-de" or "retacred-zh".
    BUILDER_CONFIGS = [
        MultiTacredConfig(
            name=f"{label_variant}-{language}",
            language=language,
            label_variant=label_variant,
            description=f"{_DESC_TEXTS[label_variant]} examples in language '{language}'.",
        )
        for (language, label_variant) in itertools.product(_LANGS, [_ORIGINAL, _REVISITED, _RETACRED])
    ]

    DEFAULT_CONFIG_NAME = "original-de"  # type: ignore

    @property
    def manual_download_instructions(self):
        """User-facing instructions shown when the manually downloaded LDC data is missing."""
        # Fix: the original adjacent string literals concatenated to
        # "...LDC2023_TODOPlease extract..." (missing sentence separator) and
        # contained the typo "load the a language".
        return (
            "To use MultiTACRED you have to download it manually. "
            "It is available via the LDC at https://catalog.ldc.upenn.edu/LDC2023_TODO. "
            "Please extract all files in one folder and load a language with: "
            "`datasets.load_dataset('DFKI-SLT/multitacred', name=variant/lang, data_dir='path/to/folder/folder_name')`."
        )

    def _info(self):
        """Return the DatasetInfo; the relation label set depends on the configured label variant."""
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "token": datasets.Sequence(datasets.Value("string")),
                "subj_start": datasets.Value("int32"),
                "subj_end": datasets.Value("int32"),
                "subj_type": datasets.ClassLabel(names=_NER_CLASS_LABELS),
                "obj_start": datasets.Value("int32"),
                "obj_end": datasets.Value("int32"),
                "obj_type": datasets.ClassLabel(names=_NER_CLASS_LABELS),
                # Re-TACRED uses a different relation inventory than original/revisited TACRED.
                "relation": datasets.ClassLabel(
                    names=_RETACRED_CLASS_LABELS if self.config.label_variant == _RETACRED else _CLASS_LABELS),
            }
        )

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=features,
            # No canonical (input, target) pair, so as_supervised=True is unsupported.
            supervised_keys=None,
            # Homepage of the dataset for documentation.
            homepage=_HOMEPAGE,
            # License for the dataset if available.
            license=_LICENSE,
            # Citation for the dataset.
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Return SplitGenerators for train/validation/test plus the backtranslated test split.

        Label patches (TACRED-Revisited or Re-TACRED) are downloaded from GitHub;
        the translated TACRED data itself must have been obtained manually from the
        LDC and passed via `data_dir`.
        """
        # Only the revisited and retacred variants relabel examples; the original
        # TACRED labels need no patch, so patch_files stays empty.
        patch_files = {}
        if self.config.label_variant == _REVISITED:
            patch_files = dl_manager.download_and_extract(_PATCH_URLs)
        elif self.config.label_variant == _RETACRED:
            patch_files = dl_manager.download_and_extract(_RETACRED_PATCH_URLs)

        data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))

        if not os.path.exists(data_dir):
            raise FileNotFoundError(
                "{} does not exist. Make sure you insert a manual dir via `datasets.load_dataset("
                "'DFKI-SLT/multitacred', name=..., data_dir=...)` that includes the unzipped files from the "
                "MULTITACRED_LDC zip. Manual download instructions: {}".format(
                    data_dir, self.manual_download_instructions
                )
            )

        # Data files live under <data_dir>/<lang>/ and are named per split and language.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, self.config.language, f"train_{self.config.language}.json"),
                    # .get(): train patch only exists for Re-TACRED.
                    "patch_filepath": patch_files.get("train"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, self.config.language, f"test_{self.config.language}.json"),
                    "patch_filepath": patch_files.get("test"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, self.config.language, f"dev_{self.config.language}.json"),
                    "patch_filepath": patch_files.get("dev"),
                },
            ),
            # Extra split: test data translated to the target language and back to English;
            # it reuses the test-split label patch since the example ids are the same.
            datasets.SplitGenerator(
                name=_BACKTRANSLATION_TEST_SPLIT,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, self.config.language, f"test_en_{self.config.language}_bt.json"),
                    "patch_filepath": patch_files.get("test"),
                },
            ),
        ]

    def _generate_examples(self, filepath, patch_filepath):
        """Yield (id, example) tuples from one split file, applying label patches.

        Args:
            filepath: path to the split's JSON file (a list of example dicts).
            patch_filepath: optional path to a patch file; its format depends on
                the label variant (see below).
        """
        # Build an id -> patch mapping in a uniform {field: value} shape.
        patch_examples = {}
        if patch_filepath is not None:
            with open(patch_filepath, encoding="utf-8") as f:
                if self.config.label_variant == _REVISITED:
                    # TACRED-Revisited patches: a JSON list of example dicts keyed by "id".
                    patch_examples = {example["id"]: example for example in json.load(f)}
                elif self.config.label_variant == _RETACRED:
                    # Re-TACRED patches: a JSON object mapping id -> corrected relation label.
                    patch_examples = {_id: {"id": _id, "relation": label} for _id, label in json.load(f).items()}

        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
            for example in data:
                id_ = example["id"]

                if id_ in patch_examples:
                    # Overwrite fields (e.g. "relation") with the patched values.
                    example.update(patch_examples[id_])
                elif self.config.label_variant == _RETACRED:
                    # RE-TACRED was pruned, skip example if its id is not in patch_examples.
                    continue

                yield id_, {
                    "id": example["id"],
                    "token": [convert_ptb_token(token) for token in example["token"]],
                    "subj_start": example["subj_start"],
                    "subj_end": example["subj_end"] + 1,  # make end offset exclusive
                    "subj_type": example["subj_type"],
                    "obj_start": example["obj_start"],
                    "obj_end": example["obj_end"] + 1,  # make end offset exclusive
                    "obj_type": example["obj_type"],
                    "relation": example["relation"],
                }
451
+