Datasets:

Modalities:
Text
Libraries:
Datasets
filevich committed on
Commit
9129ce0
·
1 Parent(s): 5ad2998

Upload fact2020.py

Browse files
Files changed (1) hide show
  1. fact2020.py +104 -0
fact2020.py ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ #
3
+ #
4
+
5
+ # Lint as: python3
6
+ """Overview of FACT at IberLEF 2020: Events Detection and Classification"""
7
+
8
+ import os
9
+
10
+ import datasets
11
+
12
+
13
+ logger = datasets.logging.get_logger(__name__)
14
+
15
+
16
+ _CITATION = """\
17
+ @inproceedings{fact2020,
18
+ title = "Overview of FACT at IberLEF 2020: Events Detection and Classification",
19
+ author = "Rosa, Aiala and Chiruzzo, Luis and Wonsever, Dina and Malcuori, Marisa and Curell, Hortènsia and Castellón, Irene and Vázquez, Gloria and Fernández-Montraveta, Ana and Góngora, Santiago and Alonso, Laura",
20
+ booktitle = "Proceedings of the Seventh Conference on Natural Language Learning at {HLT}-{NAACL} 2003",
21
+ year = "2020",
22
+ url = "https://www.aclweb.org/anthology/W03-0419",
23
+ }
24
+ """
25
+
26
+ _DESCRIPTION = """\
27
+ In this paper we present the second edition of the FACT shared task (Factuality Annotation and Classification
28
+ Task), included in IberLEF2020. The main objective of this task is to advance in the study of the factuality of
29
+ the events mentioned in texts. This year, the FACT task includes a subtask on event identification in addition
30
+ to the factuality classification subtask. We describe the submitted systems as well as the corpus used, which is
31
+ the same used in FACT 2019 but extended by adding annotations for nominal events.
32
+ """
33
+
34
+ _URL = "https://huggingface.co/datasets/filevich/fact2020/raw/main/"
35
+ _URLS = {
36
+ "train": _URL + "train.json",
37
+ "validation": _URL + "validation.json",
38
+ "test": _URL + "test-task1.json",
39
+ }
40
+
41
+ _TRAINING_FILE = "train.txt"
42
+ _DEV_FILE = "valid.txt"
43
+ _TEST_FILE = "test.txt"
44
+
45
+
46
class Fact2020Config(datasets.BuilderConfig):
    """Builder configuration for the FACT 2020 dataset."""

    def __init__(self, **kwargs):
        """Create a configuration for Fact2020.

        Args:
            **kwargs: keyword arguments forwarded unchanged to
                ``datasets.BuilderConfig`` (name, version, description, ...).
        """
        super().__init__(**kwargs)
55
+
56
+
57
class Fact2020(datasets.GeneratorBasedBuilder):
    """FACT 2020 dataset: token sequences labeled with per-token factuality tags."""

    BUILDER_CONFIGS = [
        Fact2020Config(name="fact2020", version=datasets.Version("1.0.0"), description="Fact2020 dataset"),
    ]

    def _info(self):
        """Declare the schema: an id, a token sequence, and one tag per token."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    # NOTE(review): tag glosses presumably F=factual, CF=counterfactual,
                    # U=undefined, O=outside/non-event — confirm against the task paper.
                    "fact_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "F",
                                "CF",
                                "U",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://ceur-ws.org/Vol-2664/fact_overview.pdf",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the three JSON splits and route one filepath to each split.

        Args:
            dl_manager: `datasets.DownloadManager` used to fetch the files.

        Returns:
            list of `datasets.SplitGenerator`, one per train/validation/test.
        """
        # BUG FIX: `_URLS` is a module-level constant, not an attribute of this
        # class — the original `self._URLS` raised AttributeError at load time.
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["validation"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from one JSON split file.

        Args:
            filepath: local path to a JSON file containing a list of records;
                each record's fields are merged into the example, with an "id"
                derived from its position (a record's own "id", if present,
                overrides it — preserving the original merge order).

        Yields:
            tuple of (int key, dict example).
        """
        import json  # local import kept, matching the original script's style

        logger.info("⏳ Generating examples from = %s", filepath)

        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
        # GeneratorBasedBuilder consumes (key, example) pairs; yield lazily
        # instead of materializing the whole split as a list.
        for i, d in enumerate(data):
            yield i, {"id": str(i), **d}
104
+