# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Wiki-Convert: Language Modelling with Cardinal Number Annotations"""


import json
import sys
import datasets


logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@inproceedings{thawani-etal-2021-numeracy,
    title = "Numeracy enhances the Literacy of Language Models",
    author = "Thawani, Avijit  and
      Pujara, Jay  and
      Ilievski, Filip",
    booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
    month = nov,
    year = "2021",
    address = "Online and Punta Cana, Dominican Republic",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.emnlp-main.557",
    pages = "6960--6967",
    abstract = "Specialized number representations in NLP have shown improvements on numerical reasoning tasks like arithmetic word problems and masked number prediction. But humans also use numeracy to make better sense of world concepts, e.g., you can seat 5 people in your {`}room{'} but not 500. Does a better grasp of numbers improve a model{'}s understanding of other concepts and words? This paper studies the effect of using six different number encoders on the task of masked word prediction (MWP), as a proxy for evaluating literacy. To support this investigation, we develop Wiki-Convert, a 900,000 sentence dataset annotated with numbers and units, to avoid conflating nominal and ordinal number occurrences. We find a significant improvement in MWP for sentences containing numbers, that exponent embeddings are the best number encoders, yielding over 2 points jump in prediction accuracy over a BERT baseline, and that these enhanced literacy skills also generalize to contexts without annotated numbers. We release all code at https://git.io/JuZXn.",
}
"""

_DESCRIPTION = """\
Wiki-Convert: language modelling with cardinal number annotations. A 900,000-sentence
dataset annotated with numbers and units, built to avoid conflating nominal and
ordinal number occurrences (Thawani et al., 2021).
"""

_URL = "https://huggingface.co/datasets/usc-isi/WikiConvert/resolve/main/"
_URLS = {
    "train": _URL + "train_wiki.json",
    "dev": _URL + "dev_wiki.json",
    "test": _URL + "test_wiki.json",
}


class WikiConvertConfig(datasets.BuilderConfig):
    """BuilderConfig for WikiConvert."""

    def __init__(self, **kwargs):
        """BuilderConfig for WikiConvert.

        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class WikiConvert(datasets.GeneratorBasedBuilder):
    """WikiConvert: Language Modelling with Cardinal Number Annotations.. Version 1.1."""

    BUILDER_CONFIGS = [
        WikiConvertConfig(
            name="plain_text",
            version=datasets.Version("1.0.0", ""),
            description="Plain text",
        ),
    ]

    # Sample record:
    # {"id": 1336448, "UNIQUE_STORY_INDEX": "1336448", "offset": 24, "length": 1,
    #  "magnitude": 0, "number": 2,
    #  "comment": "The floral cup is about 2 mm long and covered with silky white hairs."}
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("int32"),
                    "UNIQUE_STORY_INDEX": datasets.Value("string"),
                    "offset": datasets.Value("int32"),
                    "length": datasets.Value("int32"),
                    "magnitude": datasets.Value("int32"),
                    "comment": datasets.Value("string"),
                    "number": datasets.Value("int64"),
                }
            ),
            # No default supervised_keys (use "offset" and "length" to locate the annotated number within "comment").
            supervised_keys=None,
            homepage="https://github.com/avi-jit/numeracy-literacy/",
            citation=_CITATION,
        )
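
    # Illustrative note (an assumption drawn from the sample record above, not
    # verified against every row): "offset" and "length" appear to be character
    # indices into "comment", so the annotated number's surface form should be
    # recoverable as
    #   row["comment"][row["offset"] : row["offset"] + row["length"]]  # -> "2"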

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download_and_extract(_URLS)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info("generating examples from = %s", filepath)
        key = 0
        with open(filepath, encoding="utf-8") as f:
            # Each split file is a single JSON array of record dicts.
            ds = json.load(f)
            for row in ds:
                yield key, {
                    "id": row["id"],
                    "UNIQUE_STORY_INDEX": row["UNIQUE_STORY_INDEX"],
                    "offset": row["offset"],
                    "length": row["length"],
                    "magnitude": row["magnitude"],
                    "comment": row["comment"],
                    "number": min(sys.maxsize, row["number"]),
                }
                key += 1
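

# A minimal usage sketch (not part of the loader): assumes the dataset is loaded
# through the Hugging Face `datasets` library under the hub name from _URL above.
if __name__ == "__main__":
    wiki = datasets.load_dataset("usc-isi/WikiConvert", "plain_text", split="validation")
    row = wiki[0]
    # Recover the annotated number's surface form via the character offsets.
    mention = row["comment"][row["offset"] : row["offset"] + row["length"]]
    print(row["comment"])
    print("number:", row["number"], "-> surface form:", mention)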