Heng666 committed on
Commit
6dc56c2
1 Parent(s): 259f1c7

Upload folder using huggingface_hub

test/CohereForAI-default-test.csv ADDED
The diff for this file is too large to render. See raw diff
 
traditional_chinese_aya_dataset.py ADDED
@@ -0,0 +1,157 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """traditional_chinese_aya_dataset"""
+
+
+ import csv
+ import os
+
+ import datasets
+
+
+ _CITATION = """\
+ @InProceedings{huggingface:dataset,
+ title = {Chinese Aya dataset},
+ author={Heng-Shiou Sheu
+ },
+ year={2024}
+ }
+ """
+
+ _DESCRIPTION = """\
+ A curated dataset derived from CohereForAI's comprehensive Aya collection, with a particular focus on Traditional Chinese data.
+ It aggregates content from CohereForAI/aya_collection, CohereForAI/aya_dataset and CohereForAI/aya_evaluation_suite,
+ filtering out everything that is not Chinese, keeping both Traditional and Simplified Chinese.
+ """
+
+ # TODO: Use a Mac to read the folder contents and update this list
+ _SUBSET_NAMES = [
+     'default'
+ ]
+
+ _HOMEPAGE = "https://huggingface.co/Heng666"
+
+ _LICENSE = "apache-2.0"
+
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+ _URLS = {
+     "aya_collection": "https://huggingface.co/datasets/CohereForAI/aya_collection",
+     "aya_dataset": "https://huggingface.co/datasets/CohereForAI/aya_dataset",
+     "evaluation_suite": "https://huggingface.co/datasets/CohereForAI/aya_evaluation_suite"
+ }
+
+
+ class ChineseAyaDatasetConfig(datasets.BuilderConfig):
+     """BuilderConfig for Chinese Aya."""
+
+     def __init__(self, subset, **kwargs):
+         """
+         Args:
+             subset: the subset to load.
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super().__init__(**kwargs)
+         self.subset = subset
+
+
+ class ChineseAyaDataset(datasets.GeneratorBasedBuilder):
+     """Traditional Chinese subset of the CohereForAI Aya collections."""
+
+     VERSION = datasets.Version("1.0.0")
+
+     BUILDER_CONFIG_CLASS = ChineseAyaDatasetConfig
+
+     BUILDER_CONFIGS = [
+         ChineseAyaDatasetConfig(
+             name=subset,
+             description=_DESCRIPTION,
+             subset=subset
+         )
+         for subset in _SUBSET_NAMES
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features({
+                 "inputs": datasets.Value("string"),
+                 "targets": datasets.Value("string"),
+                 "language": datasets.Value("string"),
+                 "language_code": datasets.Value("string"),
+                 "annotation_type": datasets.Value("string"),
+                 "user_id": datasets.Value("string"),
+             }),
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+             license=_LICENSE
+         )
+
+     def _split_generators(self, dl_manager):
+         subset = self.config.subset
+
+         files = {}
+         files["train"] = os.path.join("train", f"CohereForAI-{subset}-train.csv")
+         files["test"] = os.path.join("test", f"CohereForAI-{subset}-test.csv")
+         files["validation"] = os.path.join("validation", f"CohereForAI-{subset}-validation.csv")
+
+         try:
+             data_dir = dl_manager.download_and_extract(files)
+         except Exception:
+             # Some subsets ship without test/validation splits; fall back to train only.
+             files.pop("test")
+             files.pop("validation")
+             data_dir = dl_manager.download_and_extract(files)
+
+         output = []
+         if "train" in files:
+             train = datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"filepath": data_dir["train"]}
+             )
+             output.append(train)
+
+         if "test" in files:
+             test = datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"filepath": data_dir["test"]}
+             )
+             output.append(test)
+
+         if "validation" in files:
+             validation = datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"filepath": data_dir["validation"]}
+             )
+             output.append(validation)
+
+         return output
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, filepath):
+         """Yields examples."""
+         with open(filepath, encoding="utf-8") as f:
+             reader = csv.reader(f, delimiter=",", quotechar='"')
+             for id_, row in enumerate(reader):
+                 if id_ == 0:
+                     # skip the CSV header row
+                     continue
+                 yield id_, {
+                     "inputs": row[0],
+                     "targets": row[1],
+                     "language": row[2],
+                     "language_code": row[3],
+                     "annotation_type": row[4],
+                     "user_id": row[5],
+                 }
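
Note that each CSV is expected to carry a header row followed by six columns in the order inputs, targets, language, language_code, annotation_type, user_id, matching the positional reads in `_generate_examples`. Once the folders are on the Hub, loading should go through the standard `datasets` API; a minimal sketch, assuming the hypothetical repo id "Heng666/traditional_chinese_aya_dataset" (the final Hub path is not stated in this commit):

from datasets import load_dataset

# Hypothetical repo id -- adjust to wherever this script actually lives.
# Recent versions of `datasets` require trust_remote_code=True to run
# script-based datasets such as this one.
ds = load_dataset("Heng666/traditional_chinese_aya_dataset", "default", trust_remote_code=True)

print(ds["train"][0]["inputs"])   # prompt text
print(ds["train"][0]["targets"])  # reference answer

The "default" argument selects the config built from _SUBSET_NAMES; since that list currently has a single entry, it can also be omitted.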
train/CohereForAI-default-train.csv ADDED
The diff for this file is too large to render. See raw diff