Datasets:

Languages:
Thai
ArXiv:
License:
holylovenia committed on
Commit
ce0541d
1 Parent(s): 28eb997

Upload thai_romanization.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. thai_romanization.py +164 -0
thai_romanization.py ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
The Thai Romanization dataset contains 648,241 Thai words \
that were transliterated into English, making Thai \
pronunciation easier for non-native Thai speakers. \
This is a valuable dataset for Thai language learners \
and researchers working on Thai language processing tasks. \
Each word in the Thai Romanization dataset is paired with \
its English phonetic representation, enabling accurate \
pronunciation guidance. This facilitates the learning and \
practice of Thai pronunciation for individuals who may not \
be familiar with the Thai script. The dataset aids in improving \
the accessibility and usability of Thai language resources, \
supporting applications such as speech recognition, text-to-speech \
synthesis, and machine translation. It enables the development of \
Thai language tools that can benefit Thai learners, tourists, \
and those interested in Thai culture and language.
"""
import os
from pathlib import Path
from typing import Dict, List, Tuple

import datasets
import pandas as pd

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Tasks, Licenses

# There are no citations available for this dataset.
_CITATION = ""

_DATASETNAME = "thai_romanization"

# User-facing description reused in the DatasetInfo; kept in sync with the
# module docstring above.
_DESCRIPTION = """
The Thai Romanization dataset contains 648,241 Thai words \
that were transliterated into English, making Thai \
pronunciation easier for non-native Thai speakers. \
This is a valuable dataset for Thai language learners \
and researchers working on Thai language processing tasks. \
Each word in the Thai Romanization dataset is paired with \
its English phonetic representation, enabling accurate \
pronunciation guidance. This facilitates the learning and \
practice of Thai pronunciation for individuals who may not \
be familiar with the Thai script. The dataset aids in improving \
the accessibility and usability of Thai language resources, \
supporting applications such as speech recognition, text-to-speech \
synthesis, and machine translation. It enables the development of \
Thai language tools that can benefit Thai learners, tourists, \
and those interested in Thai culture and language.
"""

_HOMEPAGE = "https://www.kaggle.com/datasets/wannaphong/thai-romanization/data"

# ISO 639-3 code for Thai.
_LANGUAGES = ["tha"]

_LICENSE = Licenses.CC_BY_SA_3_0.value

# Data is fetched from a public URL, not shipped locally.
_LOCAL = False

# Raw space-separated CSV hosted on GitHub.
_URLS = {_DATASETNAME: "https://raw.githubusercontent.com/wannaphong/thai-romanization/master/dataset/data.csv"}

_SUPPORTED_TASKS = [Tasks.TRANSLITERATION]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"
class ThaiRomanizationDataset(datasets.GeneratorBasedBuilder):
    """Thai Romanization dataloader from Kaggle (Phong et al., 2018).

    Each record pairs a Thai word with its romanized (English phonetic)
    transcription. Two schemas are exposed: the plain ``source`` schema
    (``word`` / ``romanization`` columns) and the SEACrowd text-to-text
    (``t2t``) schema.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
    SEACROWD_SCHEMA_NAME = "t2t"

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_{SEACROWD_SCHEMA_NAME}",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
            subset_id=f"{_DATASETNAME}",
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata with schema-dependent features.

        Raises:
            ValueError: if ``self.config.schema`` is neither ``source`` nor
                the SEACrowd t2t schema. (The original code left ``features``
                unbound in that case, producing an opaque ``NameError``.)
        """
        if self.config.schema == "source":
            features = datasets.Features({"word": datasets.Value("string"), "romanization": datasets.Value("string")})
        elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
            features = schemas.text2text_features
        else:
            raise ValueError(f"Unexpected schema: {self.config.schema}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download the raw CSV and expose a single train split."""
        url = _URLS[_DATASETNAME]
        # download_and_extract returns the local path of the downloaded file;
        # the original wrapped it in a no-op single-argument os.path.join.
        data_path = dl_manager.download_and_extract(url)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_path,
                    "split": "train",
                },
            )
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples.

        Args:
            filepath: local path of the downloaded space-separated CSV.
            split: split name (always ``train``; unused in the body).
        """
        # NOTE(review): default header behavior consumes the first line of
        # the file as column names before we rename them — assumes the raw
        # file has a header row; confirm, otherwise pass header=None so the
        # first word/romanization pair is not silently dropped.
        df = pd.read_csv(filepath, delimiter=" ")
        df.columns = ["word", "romanization"]

        for index, row in df.iterrows():
            if self.config.schema == "source":
                example = row.to_dict()
            elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
                example = {
                    "id": str(index),
                    "text_1": str(row["word"]),
                    "text_2": str(row["romanization"]),
                    "text_1_name": "word",
                    "text_2_name": "romanization",
                }

            yield index, example