File size: 8,128 Bytes
a88bfdd
e48b976
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
eb92b66
 
e48b976
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
eb92b66
 
 
e48b976
 
 
 
 
a88bfdd
 
e48b976
eb92b66
 
e48b976
 
 
 
 
 
 
eb92b66
 
 
e48b976
 
 
 
 
 
 
 
 
 
 
 
 
 
eb92b66
 
e48b976
 
 
 
 
 
 
 
eb92b66
 
 
e48b976
 
 
 
 
a88bfdd
 
e48b976
eb92b66
e48b976
 
 
 
 
 
 
04d4c5c
e48b976
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
811fcdd
e48b976
 
 
 
f779d7f
 
eb92b66
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
"""The MuLD benchmark."""

import json
import os
import datasets

_CITATION = """\
@misc{hudson2022muld,
    title{MuLD: The Multitask Long Document Benchmark},
    author={G Thomas Hudson, Noura Al Moubayed}
    year={2022},
    eprint={TODO},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
Some of these datasets are directly based on existing datasets. Please cite these works.
"""

# Short benchmark summary; each config prepends this to its own task
# description when building DatasetInfo.
_DESCRIPTION = """\
MuLD: The Multitask Long Document Benchmark
A set of NLP tasks where each example is over 10,000 tokens long.
"""

# Base URL of the Hugging Face Hub repo hosting the bz2-compressed
# JSON-lines data files; per-split file names are appended below.
_REPO = "https://huggingface.co/datasets/ghomasHudson/muld/resolve/main/data"

# One entry per MuLD task: a human-readable description, an optional
# task-specific BibTeX citation, and the download URL for each available
# split. Fixes: the OpenSubtitles citation is now a raw string so the
# BibTeX accent "{\"o}rg" is preserved verbatim (a plain triple-quoted
# string turned it into '{"o}rg'), and obvious extraction garbles in the
# user-facing descriptions were repaired ("readcan", "abook",
# "charactersmakicab10mw", "individ-ual").
_TASK_CONFIGS = {
    "NarrativeQA": {
        "description": """\
The NarrativeQA Reading Comprehension Challenge Dataset consists of user-submitted questions regarding the plot of movies or novels. Annotators were only given access to a human-written plot summary to encourage general questions which require a full understanding of the narrative. When these are given to a question-answering system along with the full text of the narrative (either a movie script or the novel text), this is a test of reading comprehension. As each sentence the annotator read can summarise the plot of an entire chapter/scene of a book or movie, models evaluated on the full text must model the dependencies between multiple sections of the narrative. For MuLD, we filter any examples shorter than 10,000 words.
""",
        "citation": r"""\
@article{kovcisky2018narrativeqa,
  title={The narrativeqa reading comprehension challenge},
  author={Ko{\v{c}}isk{\'y}, Tom{\'a}{\v{s}} and Schwarz, Jonathan and Blunsom, Phil and Dyer, Chris and Hermann, Karl Moritz and Melis, G{\'a}bor and Grefenstette, Edward},
  journal={Transactions of the Association for Computational Linguistics},
  volume={6},
  pages={317--328},
  year={2018},
  publisher={MIT Press}
}""",
        "urls": {
            datasets.Split.TRAIN: f"{_REPO}/narrativeqa_train.json.bz2",
            datasets.Split.VALIDATION: f"{_REPO}/narrativeqa_validation.json.bz2",
            datasets.Split.TEST: f"{_REPO}/narrativeqa_test.json.bz2",
        },
    },

    "HotpotQA": {
        "description": """\
The HotpotQA dataset consists of questions from crowd workers which require information from multiple Wikipedia articles in order to answer, thus testing the ability for models to perform multi-hop question answering. The data is commonly presented as a list of paragraphs containing relevant information plus a setting where the addition of 'distractor paragraphs' fully test the ability of the model to comprehend which information is relevant to the question asked. To transform this into a long document, we expand each paragraph with its full Wikipedia page as well as adding additional distractor articles
from similar topics (randomly chosen from links on the existing pages) in order to meet the 10,000 token minimum length requirement for this benchmark. These articles are shuffled and concatenated to form the model input.""",
        # No test split is published for this task.
        "urls": {
            datasets.Split.TRAIN: f"{_REPO}/hotpotqa_train.json.bz2",
            datasets.Split.VALIDATION: f"{_REPO}/hotpotqa_validation.json.bz2",
        },
    },

    "Character Archetype Classification": {
        "description": """\
The Character Archetype Classification dataset is based on the methodology of Skowron et al. (2016). For this dataset, each example consists of a movie script along with a named character and the task is to classify whether the character is a Hero/Protagonist or Villain/Antagonist based on understanding their role in the narrative.""",
        "urls": {
            datasets.Split.TRAIN: f"{_REPO}/character_id_train.json.bz2",
            datasets.Split.VALIDATION: f"{_REPO}/character_id_validation.json.bz2",
            datasets.Split.TEST: f"{_REPO}/character_id_test.json.bz2",
        },
    },

    "OpenSubtitles": {
        "description": """\
The Open Subtitles corpus (Lison et al., 2018) consists  of  aligned  subtitles from movies and TV shows from the website opensubtitles.org in 60 languages and can be used for machine translation. Importantly rather than individual lines,the data consists of the subtitles for an entire individual movie or tv show, many of these being very long files and we filter to remove any document with less than 10,000 tokens.""",
        # Raw string so the LaTeX accent \" reaches the BibTeX output intact.
        "citation": r"""\
@inproceedings{Lison_2018OpenSubtitles,
  title={OpenSubtitles2018: Statistical Rescoring of Sentence Alignments in Large, Noisy Parallel Corpora},
  author={Pierre Lison and J{\"o}rg Tiedemann and Milen Kouylekov},
  booktitle={LREC},
  year={2018}
}""",
        "urls": {
            datasets.Split.TRAIN: f"{_REPO}/opensubtitles_train.json.bz2",
            datasets.Split.TEST: f"{_REPO}/opensubtitles_test.json.bz2",
        },
    },

    "AO3 Style Change Detection": {
        "description": """\
Style change detection is the task of identifying the points where the author changes in a document constructed from the work of multiple authors. We use stories contributed to the fanfiction website Archive of Our Own, which contains a large number of works submitted by fans of popular films, tv, game, and book characters.
""",
        "urls": {
            datasets.Split.TRAIN: f"{_REPO}/style_change_train.json.bz2",
            datasets.Split.VALIDATION: f"{_REPO}/style_change_validation.json.bz2",
            datasets.Split.TEST: f"{_REPO}/style_change_test.json.bz2",
        },
    },

    "VLSP": {
        "description": """\
We follow the  process of the Scientific papers (Cohan  et  al.,2018) summarization  dataset, extracting papers from the open-access preprint server Arxiv.org using both the arxiv short abstract and the one included in the document (where available) as the reference summaries. In contrast to Cohan et al.
(2018), rather than removing very long documents, we explicitly include them - removing any document with less than 10,000 tokens.""",
        # Test-only task.
        "urls": {
            datasets.Split.TEST: f"{_REPO}/vlsp_test.json.bz2",
        },
    },
}

class MuldConfig(datasets.BuilderConfig):
    """BuilderConfig for a single MuLD task.

    Args:
        urls: Mapping of ``datasets.Split`` to the download URL of that
            split's bz2-compressed JSON-lines file.
        citation: Task-specific BibTeX citation; empty string when the task
            has none of its own.
        **kwargs: Forwarded to ``datasets.BuilderConfig`` (``name``,
            ``description``, ...).
    """

    def __init__(self, urls, citation, **kwargs):
        # Python 3 zero-argument super() replaces the legacy
        # super(MuldConfig, self) form used elsewhere only in Python 2 code.
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        # Every MuLD task shares one schema: a long text input, a list of
        # reference outputs, and a free-form metadata string.
        self.features = datasets.Features({
            "input": datasets.Value("string"),
            "output": [datasets.Value("string")],
            "metadata": datasets.Value("string"),
        })
        self.urls = urls
        self.citation = citation


class Muld(datasets.GeneratorBasedBuilder):
    """The MuLD benchmark: one builder config per task in _TASK_CONFIGS."""

    BUILDER_CONFIGS = [
        MuldConfig(
            name=task_name,
            description=task["description"],
            urls=task["urls"],
            # Not every task defines its own citation.
            citation=task.get("citation", ""),
        )
        for task_name, task in _TASK_CONFIGS.items()
    ]
    # Examples are very large (>10,000 tokens); keep writer batches small so
    # Arrow does not buffer too much in memory.
    DEFAULT_WRITER_BATCH_SIZE = 1000

    def _info(self):
        """Return DatasetInfo combining benchmark-level and task-level text."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION + self.config.description,
            features=self.config.features,
            homepage="https://github.com/ghomasHudson/muld",
            citation=self.config.citation + "\n" + _CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download each split's file and declare one SplitGenerator per split."""
        dl_dirs = dl_manager.download_and_extract(self.config.urls)
        return [
            datasets.SplitGenerator(
                # str(split) yields the split name via the public API instead
                # of reaching into the private split._name attribute.
                name=str(split),
                gen_kwargs={"data_file": data_file, "split": split},
            )
            for split, data_file in dl_dirs.items()
        ]

    def _generate_examples(self, data_file, split):
        """Yield (index, example) pairs from a JSON-lines data file."""
        # Explicit encoding: the files are UTF-8 JSON; without it, decoding
        # would depend on the platform locale.
        with open(data_file, encoding="utf-8") as f:
            for idx, line in enumerate(f):
                row = json.loads(line)
                # Normalize optional fields to match the declared schema:
                # metadata is always a string, output is always a list.
                if "metadata" not in row:
                    row["metadata"] = ""
                if not isinstance(row["output"], list):
                    row["output"] = [row["output"]]
                yield idx, row