Datasets: hfl / cmrc2018
Modalities: Text
Formats: parquet
Sub-tasks: extractive-qa
Languages: Chinese
Libraries: Datasets, pandas
License:
albertvillanova (HF staff) committed
Commit 8a55af5
1 parent: e786dbe

Delete loading script

Files changed (1)
  1. cmrc2018.py (+0, −123)
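With the loading script gone, the Hub serves this dataset from the auto-converted Parquet files listed under Formats above, so load_dataset no longer executes repository code. A minimal sketch of loading it after this commit (assuming the repository id is hfl/cmrc2018):

    from datasets import load_dataset

    # Same splits the deleted script defined: train / validation / test.
    ds = load_dataset("hfl/cmrc2018")
    print(ds["validation"][0]["question"])

The schema is unchanged from the Features block in the deleted script below: id, context, question, and an answers sequence with parallel text and answer_start fields.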
cmrc2018.py DELETED
@@ -1,123 +0,0 @@
-"""TODO(cmrc2018): Add a description here."""
-
-
-import json
-
-import datasets
-from datasets.tasks import QuestionAnsweringExtractive
-
-
-# TODO(cmrc2018): BibTeX citation
-_CITATION = """\
-@inproceedings{cui-emnlp2019-cmrc2018,
-    title = {A Span-Extraction Dataset for {C}hinese Machine Reading Comprehension},
-    author = {Cui, Yiming and
-      Liu, Ting and
-      Che, Wanxiang and
-      Xiao, Li and
-      Chen, Zhipeng and
-      Ma, Wentao and
-      Wang, Shijin and
-      Hu, Guoping},
-    booktitle = {Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)},
-    month = {nov},
-    year = {2019},
-    address = {Hong Kong, China},
-    publisher = {Association for Computational Linguistics},
-    url = {https://www.aclweb.org/anthology/D19-1600},
-    doi = {10.18653/v1/D19-1600},
-    pages = {5886--5891}}
-"""
-
-# TODO(cmrc2018):
-_DESCRIPTION = """\
-A Span-Extraction dataset for Chinese machine reading comprehension to add language
-diversities in this area. The dataset is composed by near 20,000 real questions annotated
-on Wikipedia paragraphs by human experts. We also annotated a challenge set which
-contains the questions that need comprehensive understanding and multi-sentence
-inference throughout the context.
-"""
-_URL = "https://github.com/ymcui/cmrc2018"
-_TRAIN_FILE = "https://worksheets.codalab.org/rest/bundles/0x15022f0c4d3944a599ab27256686b9ac/contents/blob/"
-_DEV_FILE = "https://worksheets.codalab.org/rest/bundles/0x72252619f67b4346a85e122049c3eabd/contents/blob/"
-_TEST_FILE = "https://worksheets.codalab.org/rest/bundles/0x182c2e71fac94fc2a45cc1a3376879f7/contents/blob/"
-
-
-class Cmrc2018(datasets.GeneratorBasedBuilder):
-    """TODO(cmrc2018): Short description of my dataset."""
-
-    # TODO(cmrc2018): Set up version.
-    VERSION = datasets.Version("0.1.0")
-
-    def _info(self):
-        # TODO(cmrc2018): Specifies the datasets.DatasetInfo object
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # datasets.features.FeatureConnectors
-            features=datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    "context": datasets.Value("string"),
-                    "question": datasets.Value("string"),
-                    "answers": datasets.features.Sequence(
-                        {
-                            "text": datasets.Value("string"),
-                            "answer_start": datasets.Value("int32"),
-                        }
-                    ),
-                    # These are the features of your dataset like images, labels ...
-                }
-            ),
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset.
-            supervised_keys=None,
-            # Homepage of the dataset for documentation
-            homepage=_URL,
-            citation=_CITATION,
-            task_templates=[
-                QuestionAnsweringExtractive(
-                    question_column="question", context_column="context", answers_column="answers"
-                )
-            ],
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        # TODO(cmrc2018): Downloads the data and defines the splits
-        # dl_manager is a datasets.download.DownloadManager that can be used to
-        # download and extract URLs
-        urls_to_download = {"train": _TRAIN_FILE, "dev": _DEV_FILE, "test": _TEST_FILE}
-        downloaded_files = dl_manager.download_and_extract(urls_to_download)
-
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
-            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
-            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
-        ]
-
-    def _generate_examples(self, filepath):
-        """Yields examples."""
-        # TODO(cmrc2018): Yields (key, example) tuples from the dataset
-        with open(filepath, encoding="utf-8") as f:
-            data = json.load(f)
-            for example in data["data"]:
-                for paragraph in example["paragraphs"]:
-                    context = paragraph["context"].strip()
-                    for qa in paragraph["qas"]:
-                        question = qa["question"].strip()
-                        id_ = qa["id"]
-
-                        answer_starts = [answer["answer_start"] for answer in qa["answers"]]
-                        answers = [answer["text"].strip() for answer in qa["answers"]]
-
-                        yield id_, {
-                            "context": context,
-                            "question": question,
-                            "id": id_,
-                            "answers": {
-                                "answer_start": answer_starts,
-                                "text": answers,
-                            },
-                        }
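For anyone who still needs the old parsing behaviour outside the datasets library, the traversal in _generate_examples is easy to replicate. A self-contained sketch of the same flattening logic (the nested data → paragraphs → qas layout is taken from the script above; the sample record and its values are invented for illustration):

    # Made-up record in the SQuAD-style layout the deleted script expected.
    raw = {
        "data": [
            {
                "paragraphs": [
                    {
                        "context": "广州是广东省的省会。",
                        "qas": [
                            {
                                "id": "TRIAL_0_QUERY_0",  # invented id
                                "question": "广东省的省会是哪里？",
                                "answers": [{"text": "广州", "answer_start": 0}],
                            }
                        ],
                    }
                ]
            }
        ]
    }

    # Same traversal as _generate_examples: one flat row per question, with
    # answer texts and start offsets kept as parallel lists.
    for example in raw["data"]:
        for paragraph in example["paragraphs"]:
            context = paragraph["context"].strip()
            for qa in paragraph["qas"]:
                row = {
                    "id": qa["id"],
                    "context": context,
                    "question": qa["question"].strip(),
                    "answers": {
                        "answer_start": [a["answer_start"] for a in qa["answers"]],
                        "text": [a["text"].strip() for a in qa["answers"]],
                    },
                }
                print(row)

As in the script, the answers stay as parallel lists so an example can carry multiple reference answers; that is also the shape preserved in the Parquet conversion.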