Datasets:
Tasks: Question Answering
Modalities: Text
Formats: parquet
Sub-tasks: extractive-qa
Languages: Arabic
Size: 1K - 10K
License:
Commit • d29ea9a
0 Parent(s):
Update files from the datasets library (from 1.0.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0
- .gitattributes +27 -0
- arcd.py +118 -0
- dataset_infos.json +1 -0
- dummy/plain_text/1.0.0/dummy_data.zip +3 -0
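For orientation, a minimal loading sketch (not part of this commit), assuming the `datasets` library (>= 1.0.0) is installed and this repository is available on the Hub under the builder name "arcd" recorded in dataset_infos.json:

from datasets import load_dataset

# "plain_text" is the single config defined in arcd.py; the split sizes come
# from dataset_infos.json (train: 693 examples, validation: 702 examples).
arcd = load_dataset("arcd", "plain_text")
print(arcd)
print(arcd["train"][0]["question"])  # one crowdsourced Arabic question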
.gitattributes
ADDED
@@ -0,0 +1,27 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zstandard filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
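To see which of the files added in this commit actually fall under these LFS rules, here is a small illustrative sketch (not part of the commit). It uses Python's fnmatch on basenames, which only approximates gitattributes glob matching, and checks just a subset of the patterns above:

from fnmatch import fnmatch

# Subset of the LFS patterns declared above (the saved_model/**/* directory
# pattern is omitted for brevity).
lfs_patterns = ["*.7z", "*.arrow", "*.bin", "*.parquet", "*.zip", "*tfevents*"]
commit_files = [
    ".gitattributes",
    "arcd.py",
    "dataset_infos.json",
    "dummy/plain_text/1.0.0/dummy_data.zip",
]

for path in commit_files:
    basename = path.rsplit("/", 1)[-1]
    tracked = any(fnmatch(basename, pattern) for pattern in lfs_patterns)
    print(path, "-> git-lfs pointer" if tracked else "-> stored directly")

Only dummy_data.zip matches (via *.zip), which is why it appears further down as a git-lfs pointer file rather than raw archive bytes.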
arcd.py
ADDED
@@ -0,0 +1,118 @@
"""ARCD: Arabic Reading Comprehension Dataset."""

from __future__ import absolute_import, division, print_function

import json
import logging
import os

import datasets


_CITATION = """\
@inproceedings{mozannar-etal-2019-neural,
    title = {Neural {A}rabic Question Answering},
    author = {Mozannar, Hussein and Maamary, Elie and El Hajal, Karl and Hajj, Hazem},
    booktitle = {Proceedings of the Fourth Arabic Natural Language Processing Workshop},
    month = {aug},
    year = {2019},
    address = {Florence, Italy},
    publisher = {Association for Computational Linguistics},
    url = {https://www.aclweb.org/anthology/W19-4612},
    doi = {10.18653/v1/W19-4612},
    pages = {108--118},
    abstract = {This paper tackles the problem of open domain factual Arabic question answering (QA) using Wikipedia as our knowledge source. This constrains the answer of any question to be a span of text in Wikipedia. Open domain QA for Arabic entails three challenges: annotated QA datasets in Arabic, large scale efficient information retrieval and machine reading comprehension. To deal with the lack of Arabic QA datasets we present the Arabic Reading Comprehension Dataset (ARCD) composed of 1,395 questions posed by crowdworkers on Wikipedia articles, and a machine translation of the Stanford Question Answering Dataset (Arabic-SQuAD). Our system for open domain question answering in Arabic (SOQAL) is based on two components: (1) a document retriever using a hierarchical TF-IDF approach and (2) a neural reading comprehension model using the pre-trained bi-directional transformer BERT. Our experiments on ARCD indicate the effectiveness of our approach with our BERT-based reader achieving a 61.3 F1 score, and our open domain system SOQAL achieving a 27.6 F1 score.}
}
"""

_DESCRIPTION = """\
 Arabic Reading Comprehension Dataset (ARCD) composed of 1,395 questions\
 posed by crowdworkers on Wikipedia articles.
"""


class ArcdConfig(datasets.BuilderConfig):
    """BuilderConfig for ARCD."""

    def __init__(self, **kwargs):
        """BuilderConfig for ARCD.

        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super(ArcdConfig, self).__init__(**kwargs)


class Arcd(datasets.GeneratorBasedBuilder):
    """ARCD: Arabic Reading Comprehension Dataset."""

    _URL = "https://raw.githubusercontent.com/husseinmozannar/SOQAL/master/data/"
    _DEV_FILE = "arcd-test.json"
    _TRAINING_FILE = "arcd-train.json"

    BUILDER_CONFIGS = [
        ArcdConfig(
            name="plain_text",
            version=datasets.Version("1.0.0", ""),
            description="Plain text",
        )
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "context": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "answers": datasets.features.Sequence(
                        {"text": datasets.Value("string"), "answer_start": datasets.Value("int32")}
                    ),
                }
            ),
            # No default supervised_keys (as we have to pass both question
            # and context as input).
            supervised_keys=None,
            homepage="https://github.com/husseinmozannar/SOQAL/tree/master/data",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        urls_to_download = {
            "train": os.path.join(self._URL, self._TRAINING_FILE),
            "dev": os.path.join(self._URL, self._DEV_FILE),
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logging.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            arcd = json.load(f)
            for article in arcd["data"]:
                title = article.get("title", "").strip()
                for paragraph in article["paragraphs"]:
                    context = paragraph["context"].strip()
                    for qa in paragraph["qas"]:
                        question = qa["question"].strip()
                        id_ = qa["id"]

                        answer_starts = [answer["answer_start"] for answer in qa["answers"]]
                        answers = [answer["text"].strip() for answer in qa["answers"]]

                        # Features currently used are "context", "question", and "answers".
                        # Others are extracted here for the ease of future expansions.
                        yield id_, {
                            "title": title,
                            "context": context,
                            "question": question,
                            "id": id_,
                            "answers": {"answer_start": answer_starts, "text": answers},
                        }
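As a hedged illustration of the input that _generate_examples() walks over, here is a tiny hand-made record in the SQuAD-style layout used by arcd-train.json and arcd-test.json; the nesting and field names mirror the script above, while the Arabic strings are invented:

import json

sample = {
    "data": [
        {
            "title": "مقالة ويكيبيديا",
            "paragraphs": [
                {
                    "context": "نص الفقرة التي تُطرح عليها الأسئلة.",
                    "qas": [
                        {
                            "id": "arcd-0001",
                            "question": "ما موضوع الفقرة؟",
                            "answers": [{"text": "نص الفقرة", "answer_start": 0}],
                        }
                    ],
                }
            ],
        }
    ]
}

# _generate_examples() flattens this nesting into one record per question:
# ("arcd-0001", {"id": "arcd-0001", "title": ..., "context": ..., "question": ...,
#                "answers": {"answer_start": [0], "text": ["نص الفقرة"]}})
print(json.dumps(sample, ensure_ascii=False, indent=2)[:80])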
dataset_infos.json
ADDED
@@ -0,0 +1 @@
{"plain_text": {"description": " Arabic Reading Comprehension Dataset (ARCD) composed of 1,395 questions posed by crowdworkers on Wikipedia articles.\n", "citation": "@inproceedings{mozannar-etal-2019-neural,\n title = \"Neural {A}rabic Question Answering\",\n author = \"Mozannar, Hussein and\n Maamary, Elie and\n El Hajal, Karl and\n Hajj, Hazem\",\n booktitle = \"Proceedings of the Fourth Arabic Natural Language Processing Workshop\",\n month = aug,\n year = \"2019\",\n address = \"Florence, Italy\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W19-4612\",\n doi = \"10.18653/v1/W19-4612\",\n pages = \"108--118\",\n abstract = \"This paper tackles the problem of open domain factual Arabic question answering (QA) using Wikipedia as our knowledge source. This constrains the answer of any question to be a span of text in Wikipedia. Open domain QA for Arabic entails three challenges: annotated QA datasets in Arabic, large scale efficient information retrieval and machine reading comprehension. To deal with the lack of Arabic QA datasets we present the Arabic Reading Comprehension Dataset (ARCD) composed of 1,395 questions posed by crowdworkers on Wikipedia articles, and a machine translation of the Stanford Question Answering Dataset (Arabic-SQuAD). Our system for open domain question answering in Arabic (SOQAL) is based on two components: (1) a document retriever using a hierarchical TF-IDF approach and (2) a neural reading comprehension model using the pre-trained bi-directional transformer BERT. Our experiments on ARCD indicate the effectiveness of our approach with our BERT-based reader achieving a 61.3 F1 score, and our open domain system SOQAL achieving a 27.6 F1 score.\",\n}\n", "homepage": "https://github.com/husseinmozannar/SOQAL/tree/master/data", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "answer_start": {"dtype": "int32", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "supervised_keys": null, "builder_name": "arcd", "config_name": "plain_text", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 811934, "num_examples": 693, "dataset_name": "arcd"}, "validation": {"name": "validation", "num_bytes": 886528, "num_examples": 702, "dataset_name": "arcd"}}, "download_checksums": {"https://raw.githubusercontent.com/husseinmozannar/SOQAL/master/data/arcd-train.json": {"num_bytes": 939840, "checksum": "6a973fda9f0b066e0547a85a3396e7294fa917e24b6efd7ce430769033a6ce15"}, "https://raw.githubusercontent.com/husseinmozannar/SOQAL/master/data/arcd-test.json": {"num_bytes": 1002559, "checksum": "b4ba4fb4227841bbce71e01b3eaecb33e9f17a08cde1ec91e5bc335da2c75135"}}, "download_size": 1942399, "dataset_size": 1698462, "size_in_bytes": 3640861}}
dummy/plain_text/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5eb4495c84833778a1afc780a0dd0aaaee2b01a5da91974f24fbd0e10e0ca5d0
size 3666