Maxime committed on
Commit
080b541
1 Parent(s): bce13e8

first version of mfaq dataset

Files changed (3)
  1. .gitignore +1 -0
  2. README.md +1 -0
  3. mfaq.py +118 -0
.gitignore ADDED
@@ -0,0 +1 @@
+test.py
README.md ADDED
@@ -0,0 +1 @@
+hello
mfaq.py ADDED
@@ -0,0 +1,118 @@
+# coding=utf-8
+# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import json
+
+import datasets
+
+
+_CITATION = """\
+@InProceedings{mfaq_a_multilingual_dataset,
+    title={MFAQ: a Multilingual FAQ Dataset},
+    author={Maxime {De Bruyn} and Ehsan Lotfi and Jeska Buhmann and Walter Daelemans},
+    year={2021},
+    booktitle={MRQA @ EMNLP 2021}
+}
+"""
+
+
+_DESCRIPTION = """\
+We present the first publicly available multilingual FAQ dataset. We collected around 6M FAQ pairs from the web, in 21 different languages.
+"""
+
+_HOMEPAGE = ""
+
+_LICENSE = ""
+
+
+_LANGUAGES = ["cs", "da", "de", "en", "es", "fi", "fr", "he", "hr", "hu", "id", "it", "nl", "no", "pl", "pt", "ro", "ru", "sv", "tr", "vi"]
+_URLs = {}
+_URLs.update({f"{l}": {"train": f"data/{l}/train.jsonl", "valid": f"data/{l}/valid.jsonl"} for l in _LANGUAGES})
+_URLs.update({f"{l}_flat": {"train": f"data/{l}/train.jsonl", "valid": f"data/{l}/valid.jsonl"} for l in _LANGUAGES})
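+# Every language gets two configs, e.g. "en" and "en_flat"; both read the same
+# JSONL files, but the "_flat" variant yields one example per QA pair
+# (see _generate_examples below).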
+
+
+class MFAQ(datasets.GeneratorBasedBuilder):
+
+    VERSION = datasets.Version("1.0.0")
+    BUILDER_CONFIGS = [datasets.BuilderConfig(name=name, version=datasets.Version("1.1.0")) for name in _URLs]
+
+    def _info(self):
+        features = datasets.Features(
+            {
+                "id": datasets.Value("int64"),
+                "language": datasets.Value("string"),
+                "num_pairs": datasets.Value("int64"),
+                "domain": datasets.Value("string"),
+                "qa_pairs": datasets.features.Sequence(
+                    {
+                        "question": datasets.Value("string"),
+                        "answer": datasets.Value("string"),
+                        "language": datasets.Value("string"),
+                    }
+                ),
+            }
+        )
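+        # Shape illustration (values invented): a row looks like
+        # {"id": 42, "language": "en", "num_pairs": 2, "domain": "example.com",
+        #  "qa_pairs": [{"question": "...", "answer": "...", "language": "en"}, ...]}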
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,  # the same schema is shared by all configurations
+            supervised_keys=None,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        """Returns SplitGenerators."""
+        my_urls = _URLs[self.config.name]
+        data_dir = dl_manager.download_and_extract(my_urls)
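+        # download_and_extract receives a dict of URLs and returns a dict with
+        # the same keys, mapping to local file paths.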
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={"filepath": data_dir["train"], "split": "train"},
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={"filepath": data_dir["valid"], "split": "valid"},
+            ),
+        ]
+
+    def _generate_examples(
+        self, filepath, split  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+    ):
+        """Yields examples as (key, example) tuples."""
+
+        with open(filepath, encoding="utf-8") as f:
+            for _id, row in enumerate(f):
+                data = json.loads(row)
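+                # The "_flat" configs re-emit each FAQ page as one example per
+                # question/answer pair; the plain configs keep the page intact.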
+                if "flat" in self.config.name:
+                    for i, pair in enumerate(data["qa_pairs"]):
+                        yield f"{_id}_{i}", {
+                            "id": data["id"],
+                            "domain": data["domain"],
+                            "language": data["language"],
+                            "num_pairs": 1,
+                            "qa_pairs": [pair],
+                        }
+                else:
+                    yield _id, {
+                        "id": data["id"],
+                        "domain": data["domain"],
+                        "language": data["language"],
+                        "num_pairs": data["num_pairs"],
+                        "qa_pairs": data["qa_pairs"],
+                    }
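
For context, a minimal usage sketch of the script added above. The repo id "maximedb/mfaq" is a placeholder for illustration only; this commit adds just the loading script, and the data/<lang>/*.jsonl files it expects are not part of the diff.

import datasets

# Hypothetical repo id, for illustration only.
dataset = datasets.load_dataset("maximedb/mfaq", "en")

# One example is one FAQ page. Because qa_pairs is a Sequence feature,
# it comes back as a dict of lists rather than a list of dicts.
example = dataset["train"][0]
print(example["domain"], example["num_pairs"])
print(example["qa_pairs"]["question"][0])

# The "en_flat" config yields one example per question/answer pair instead.
flat = datasets.load_dataset("maximedb/mfaq", "en_flat")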