Datasets: hfl/cmrc2018
Modalities: Text
Formats: parquet
Sub-tasks: extractive-qa
Languages: Chinese
Libraries: Datasets, pandas
Commit 137f2c4 (parent: b3b4199), committed by albertvillanova

Convert dataset to Parquet (#4)

- Convert dataset to Parquet (e786dbed53f517958523ffe25da2545eaa25f29a)
- Delete loading script (8a55af5cff4ea5980563e9637608ac23aa684074)
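
With the splits stored as Parquet, the dataset loads directly from the data files instead of executing a script. A minimal sketch with the datasets library (the repository id hfl/cmrc2018 is inferred from the page header and the card title below):

from datasets import load_dataset

# Loads the Parquet-backed splits declared in the README's configs block;
# no loading script runs.
ds = load_dataset("hfl/cmrc2018")
print(ds)  # DatasetDict with train (10142), validation (3219), test (1002)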

README.md CHANGED
@@ -35,16 +35,25 @@ dataset_info:
       dtype: int32
   splits:
   - name: train
-    num_bytes: 15508110
+    num_bytes: 15508062
     num_examples: 10142
   - name: validation
-    num_bytes: 5183809
+    num_bytes: 5183785
     num_examples: 3219
   - name: test
-    num_bytes: 1606931
+    num_bytes: 1606907
     num_examples: 1002
-  download_size: 11508117
-  dataset_size: 22298850
+  download_size: 4896696
+  dataset_size: 22298754
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+  - split: validation
+    path: data/validation-*
+  - split: test
+    path: data/test-*
 ---
 
 # Dataset Card for "cmrc2018"
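
The added configs block is what lets clients resolve each split to its Parquet shards under data/; it is also consistent with the smaller download_size (4896696 vs. 11508117 bytes), since Parquet is compressed. For direct access, a hedged sketch with pandas over the Hub filesystem (the hf:// scheme requires huggingface_hub to be installed; the shard name matches the files added at the bottom of this commit):

import pandas as pd

# Read one split straight from its Parquet shard on the Hub.
df = pd.read_parquet("hf://datasets/hfl/cmrc2018/data/validation-00000-of-00001.parquet")
print(df.columns.tolist())  # expected: id, context, question, answers
print(len(df))              # expected: 3219, per num_examples above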
cmrc2018.py DELETED
@@ -1,123 +0,0 @@
-"""TODO(cmrc2018): Add a description here."""
-
-
-import json
-
-import datasets
-from datasets.tasks import QuestionAnsweringExtractive
-
-
-# TODO(cmrc2018): BibTeX citation
-_CITATION = """\
-@inproceedings{cui-emnlp2019-cmrc2018,
-    title = {A Span-Extraction Dataset for {C}hinese Machine Reading Comprehension},
-    author = {Cui, Yiming and
-              Liu, Ting and
-              Che, Wanxiang and
-              Xiao, Li and
-              Chen, Zhipeng and
-              Ma, Wentao and
-              Wang, Shijin and
-              Hu, Guoping},
-    booktitle = {Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)},
-    month = {nov},
-    year = {2019},
-    address = {Hong Kong, China},
-    publisher = {Association for Computational Linguistics},
-    url = {https://www.aclweb.org/anthology/D19-1600},
-    doi = {10.18653/v1/D19-1600},
-    pages = {5886--5891}}
-"""
-
-# TODO(cmrc2018):
-_DESCRIPTION = """\
-A Span-Extraction dataset for Chinese machine reading comprehension to add language
-diversities in this area. The dataset is composed by near 20,000 real questions annotated
-on Wikipedia paragraphs by human experts. We also annotated a challenge set which
-contains the questions that need comprehensive understanding and multi-sentence
-inference throughout the context.
-"""
-_URL = "https://github.com/ymcui/cmrc2018"
-_TRAIN_FILE = "https://worksheets.codalab.org/rest/bundles/0x15022f0c4d3944a599ab27256686b9ac/contents/blob/"
-_DEV_FILE = "https://worksheets.codalab.org/rest/bundles/0x72252619f67b4346a85e122049c3eabd/contents/blob/"
-_TEST_FILE = "https://worksheets.codalab.org/rest/bundles/0x182c2e71fac94fc2a45cc1a3376879f7/contents/blob/"
-
-
-class Cmrc2018(datasets.GeneratorBasedBuilder):
-    """TODO(cmrc2018): Short description of my dataset."""
-
-    # TODO(cmrc2018): Set up version.
-    VERSION = datasets.Version("0.1.0")
-
-    def _info(self):
-        # TODO(cmrc2018): Specifies the datasets.DatasetInfo object
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # datasets.features.FeatureConnectors
-            features=datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    "context": datasets.Value("string"),
-                    "question": datasets.Value("string"),
-                    "answers": datasets.features.Sequence(
-                        {
-                            "text": datasets.Value("string"),
-                            "answer_start": datasets.Value("int32"),
-                        }
-                    ),
-                    # These are the features of your dataset like images, labels ...
-                }
-            ),
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset.
-            supervised_keys=None,
-            # Homepage of the dataset for documentation
-            homepage=_URL,
-            citation=_CITATION,
-            task_templates=[
-                QuestionAnsweringExtractive(
-                    question_column="question", context_column="context", answers_column="answers"
-                )
-            ],
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        # TODO(cmrc2018): Downloads the data and defines the splits
-        # dl_manager is a datasets.download.DownloadManager that can be used to
-        # download and extract URLs
-        urls_to_download = {"train": _TRAIN_FILE, "dev": _DEV_FILE, "test": _TEST_FILE}
-        downloaded_files = dl_manager.download_and_extract(urls_to_download)
-
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
-            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
-            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
-        ]
-
-    def _generate_examples(self, filepath):
-        """Yields examples."""
-        # TODO(cmrc2018): Yields (key, example) tuples from the dataset
-        with open(filepath, encoding="utf-8") as f:
-            data = json.load(f)
-            for example in data["data"]:
-                for paragraph in example["paragraphs"]:
-                    context = paragraph["context"].strip()
-                    for qa in paragraph["qas"]:
-                        question = qa["question"].strip()
-                        id_ = qa["id"]
-
-                        answer_starts = [answer["answer_start"] for answer in qa["answers"]]
-                        answers = [answer["text"].strip() for answer in qa["answers"]]
-
-                        yield id_, {
-                            "context": context,
-                            "question": question,
-                            "id": id_,
-                            "answers": {
-                                "answer_start": answer_starts,
-                                "text": answers,
-                            },
-                        }
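
For reference, the first bullet of the commit message can be reproduced by materializing each split with the old script and writing it back out as Parquet. A minimal sketch, assuming Dataset.to_parquet produced the data/<split>-* shard layout declared in the README (the actual conversion command is not recorded in this commit):

from datasets import load_dataset

# Assumption: run the now-deleted loading script one last time, then write
# each split to the shard layout referenced by the README's configs block.
ds = load_dataset("cmrc2018")
for split, data in ds.items():
    data.to_parquet(f"data/{split}-00000-of-00001.parquet")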
data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7cb3f679e735abdeeb37bba176042f5f0b0a62e4c4cf69884cfacf78c855ae16
+size 394683
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2b5ee0ce1caf754c8d164eeba467039d17a8e8b41ad7a791262869d7470826fb
+size 3365898
data/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c3d3116e5845b8d3c44a863c1e2f973817c9f10a52e503b079ac49d4366f78b2
+size 1136115
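
The three ADDED files are Git LFS pointers, not the Parquet bytes themselves; the actual content is addressed by the sha256 oid and size recorded in each pointer. A small sketch that checks a downloaded shard against its pointer (the local path is illustrative):

import hashlib
import os

def matches_pointer(path, oid, size):
    # Compare a file on disk against the oid/size from its LFS pointer.
    if os.path.getsize(path) != size:
        return False
    with open(path, "rb") as f:
        return hashlib.sha256(f.read()).hexdigest() == oid

print(matches_pointer(
    "data/validation-00000-of-00001.parquet",  # illustrative local path
    "c3d3116e5845b8d3c44a863c1e2f973817c9f10a52e503b079ac49d4366f78b2",
    1136115,
))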