wics committed on
Commit
c856936
·
1 Parent(s): b55aae1

Upload NCR.py

Browse files
Files changed (1) hide show
  1. NCR.py +116 -0
NCR.py ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+
17
+ import datasets
18
+ import json
19
+
20
+ _CITATION = """
21
+
22
+ """
23
+
24
+ _DESCRIPTION = """
25
+ """
26
+
27
+ _HOMEPAGE = ""
28
+
29
+ # TODO: Add the licence for the dataset here if you can find it
30
+ _LICENSE = ""
31
+
32
+ _URLS = {
33
+ "train": "https://huggingface.co/datasets/wics/NCR/resolve/main/train_2.json",
34
+ "validation": "https://huggingface.co/datasets/wics/NCR/resolve/main/dev_2.json",
35
+ "test": "https://huggingface.co/datasets/wics/NCR/resolve/main/test_2.json",
36
+ }
37
+
38
+
39
+ class NCR(datasets.GeneratorBasedBuilder):
40
+
41
+
42
+ VERSION = datasets.Version("0.0.1")
43
+
44
+ BUILDER_CONFIGS = [
45
+ datasets.BuilderConfig(
46
+ name="NCR", version=VERSION, description="Chinese dataset."
47
+ ),
48
+ ]
49
+
50
+ def _info(self):
51
+ features = datasets.Features(
52
+ {
53
+ "example_id": datasets.Value("string"),
54
+ "article": datasets.Value("string"),
55
+ "answer": datasets.Value("string"),
56
+ "question": datasets.Value("string"),
57
+ "options": datasets.features.Sequence(datasets.Value("string"))
58
+ }
59
+ )
60
+ return datasets.DatasetInfo(
61
+ description=_DESCRIPTION,
62
+ features=features,
63
+ homepage=_HOMEPAGE,
64
+ license=_LICENSE,
65
+ citation=_CITATION,
66
+ )
67
+
68
+ def _split_generators(self, dl_manager):
69
+ urls = {
70
+ "train": _URLS["train"],
71
+ "test": _URLS["test"],
72
+ "validation": _URLS["validation"],
73
+ }
74
+ data_dir = dl_manager.download_and_extract(urls)
75
+ return [
76
+ datasets.SplitGenerator(
77
+ name=datasets.Split.TRAIN,
78
+ # These kwargs will be passed to _generate_examples
79
+ gen_kwargs={
80
+ "filepath": data_dir["train"],
81
+ "split": "train",
82
+ },
83
+ ),
84
+ datasets.SplitGenerator(
85
+ name=datasets.Split.TEST,
86
+ # These kwargs will be passed to _generate_examples
87
+ gen_kwargs={"filepath": data_dir["test"], "split": "test"},
88
+ ),
89
+ datasets.SplitGenerator(
90
+ name=datasets.Split.VALIDATION,
91
+ # These kwargs will be passed to _generate_examples
92
+ gen_kwargs={
93
+ "filepath": data_dir["validation"],
94
+ "split": "validation",
95
+ },
96
+ ),
97
+ ]
98
+
99
+ # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
100
+ def _generate_examples(self, train_test_or_eval, files):
101
+ """Yields examples."""
102
+ for file_idx, (path, f) in enumerate(files):
103
+ if path.startswith(train_test_or_eval) and path.endswith(".txt"):
104
+ data = json.loads(f.read().decode("utf-8"))
105
+ questions = data["Questions"]
106
+ for i in range(len(questions)):
107
+ question = questions[i]
108
+
109
+ yield f"{file_idx}_{i}", {
110
+ "example_id": data["Id"],
111
+ "article": data["Content"],
112
+ "question": question["Question"],
113
+ "answer": question["Answer"],
114
+ "options": question["Choices"],
115
+ }
116
+