yslim0726 committed on
Commit e9d5152 • 1 Parent(s): c5d56d7

Upload quail.py

Files changed (1)
quail.py  +144  -0
quail.py ADDED
@@ -0,0 +1,144 @@
+ import os
+ import json
+ import datasets
+ from datasets import BuilderConfig, Features, Value, Sequence
+
+
+ _DESCRIPTION = """
+ # 한국어 지시학습 데이터셋
+ - quail 데이터셋을 한국어로 번역한 데이터셋
+ """
+
+ _CITATION = """
+ @inproceedings{KITD,
+ title={언어 번역 모델을 통한 한국어 지시 학습 데이터 세트 구축},
+ author={임영서, 추현창, 김산, 장진예, 정민영, 신사임},
+ booktitle={제 35회 한글 및 한국어 정보처리 학술대회},
+ pages={591--595},
+ year={2023}
+ }
+ @inproceedings{KITD,
+ title={Korean Instruction Tuning Dataset},
+ author={Yeongseo Lim, HyeonChang Chu, San Kim, Jin Yea Jang, Minyoung Jung, Saim Shin},
+ booktitle={Proceedings of the 35th Annual Conference on Human and Cognitive Language Technology},
+ pages={591--595},
+ year={2023}
+ }
+ """
+
+ def _list(data_list):
+     result = list()
+     for data in data_list:
+         result.append(data)
+     return result
+
+ # quail
+ _QUAIL_FEATURES = Features({
+     "data_index_by_user": Value(dtype="int32"),
+     "id": Value(dtype="string"),
+     "context_id": Value(dtype="string"),
+     "question_id": Value(dtype="string"),
+     "domain": Value(dtype="string"),
+     "metadata": {
+         "author": Value(dtype="string"),
+         "title": Value(dtype="string"),
+         "url": Value(dtype="string"),
+     },
+     "context": Value(dtype="string"),
+     "question": Value(dtype="string"),
+     "question_type": Value(dtype="string"),
+     "answers": Sequence(Value(dtype="string")),
+     "correct_answer_id": Value(dtype="int32"),
+ })
+
+ def _parsing_quail(file_path):
+     with open(file_path, mode="r", encoding="utf-8") as f:
+         dataset = json.load(f)
+     for _i, data in enumerate(dataset):
+         _data_index_by_user = data["data_index_by_user"]
+         _id = data["id"]
+         _context_id = data["context_id"]
+         _question_id = data["question_id"]
+         _domain = data["domain"]
+         _metadata = {
+             "author": data["metadata"]["author"],
+             "title": data["metadata"]["title"],
+             "url": data["metadata"]["url"]
+         }
+         _context = data["context"]
+         _question = data["question"]
+         _question_type = data["question_type"]
+         _answers = _list(data["answers"])
+         _correct_answer_id = data["correct_answer_id"]
+
+         yield _i, {
+             "data_index_by_user": _data_index_by_user,
+             "id": _id,
+             "context_id": _context_id,
+             "question_id": _question_id,
+             "domain": _domain,
+             "metadata": _metadata,
+             "context": _context,
+             "question": _question,
+             "question_type": _question_type,
+             "answers": _answers,
+             "correct_answer_id": _correct_answer_id,
+         }
+
+ class QuailConfig(BuilderConfig):
+     def __init__(self, name, feature, reading_fn, parsing_fn, citation, **kwargs):
+         super(QuailConfig, self).__init__(
+             name=name,
+             version=datasets.Version("1.0.0"),
+             **kwargs)
+         self.feature = feature
+         self.reading_fn = reading_fn
+         self.parsing_fn = parsing_fn
+         self.citation = citation
+
+ class QUAIL(datasets.GeneratorBasedBuilder):
+     BUILDER_CONFIGS = [
+         QuailConfig(
+             name="base",
+             data_dir="./quail",
+             feature=_QUAIL_FEATURES,
+             reading_fn=_parsing_quail,
+             parsing_fn=lambda x: x,
+             citation=_CITATION,
+         ),
+     ]
+
+     def _info(self) -> datasets.DatasetInfo:
+         """Returns the dataset metadata."""
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=_QUAIL_FEATURES,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager):
+         """Returns SplitGenerators."""
+         path_kv = {
+             datasets.Split.TRAIN: [
+                 os.path.join(dl_manager.manual_dir, "train.json")
+             ],
+             datasets.Split.VALIDATION: [
+                 os.path.join(dl_manager.manual_dir, "validation.json")
+             ],
+             "challenge": [
+                 os.path.join(dl_manager.manual_dir, "challenge.json")
+             ],
+         }
+         return [
+             datasets.SplitGenerator(name=k, gen_kwargs={"path_list": v})
+             for k, v in path_kv.items()
+         ]
+
+     def _generate_examples(self, path_list):
+         """Yields examples."""
+         for path in path_list:
+             try:
+                 for example in iter(self.config.reading_fn(path)):
+                     yield self.config.parsing_fn(example)
+             except Exception as e:
+                 print(e)
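
For reference, a minimal usage sketch, not part of the commit: it assumes the translated train.json, validation.json and challenge.json files already sit in a local ./quail directory next to this script, and a `datasets` release that still supports script-based loading (recent 2.x versions require trust_remote_code=True). The script reads its input through dl_manager.manual_dir, which is expected to come from the data_dir argument.

from datasets import load_dataset

# Hypothetical layout: quail.py (this script) alongside ./quail containing
# train.json, validation.json and challenge.json.
dataset = load_dataset(
    "quail.py",              # path to the script added in this commit
    data_dir="./quail",      # exposed to the script as dl_manager.manual_dir
    trust_remote_code=True,  # required by recent datasets releases for loading scripts
)

print(dataset)                # DatasetDict with train / validation / challenge splits
sample = dataset["train"][0]
print(sample["question"])
print(sample["answers"], sample["correct_answer_id"])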