Datasets:
Languages:
Sundanese
Tags:
speech-recognition
alvinxrwui
committed on
Commit
•
0e85cd2
1
Parent(s):
f780020
fix: change the download link
Browse files- su_id_asr_6.py +47 -52
su_id_asr_6.py
CHANGED
@@ -27,7 +27,8 @@ _CITATION = """\
|
|
27 |
"""
|
28 |
|
29 |
_DESCRIPTION = """\
|
30 |
-
|
|
|
31 |
"""
|
32 |
|
33 |
_HOMEPAGE = "https://indonlp.github.io/nusa-catalogue/card.html?su_id_asr"
|
@@ -35,7 +36,9 @@ _HOMEPAGE = "https://indonlp.github.io/nusa-catalogue/card.html?su_id_asr"
|
|
35 |
_LICENSE = "Attribution-ShareAlike 4.0 International."
|
36 |
|
37 |
_URLs = {
|
38 |
-
"
|
|
|
|
|
39 |
}
|
40 |
|
41 |
_SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]
|
@@ -43,9 +46,8 @@ _SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]
|
|
43 |
_SOURCE_VERSION = "1.0.0"
|
44 |
_SEACROWD_VERSION = "2024.06.20"
|
45 |
|
46 |
-
GROUP_DATASET_ID = 6
|
47 |
|
48 |
-
class
|
49 |
"""su_id contains ~220K utterances for Sundanese ASR training data."""
|
50 |
|
51 |
BUILDER_CONFIGS = [
|
@@ -90,66 +92,59 @@ class SuIdASR6(datasets.GeneratorBasedBuilder):
|
|
90 |
task_templates=[datasets.AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
|
91 |
)
|
92 |
|
93 |
-
def _split_generators(self, dl_manager: datasets.DownloadManager):
|
94 |
-
test_url = 'https://drive.google.com/file/d/1B3xhZHQ7d8Z0Baqfz_Abefo_i5Voizfi/view?usp=drive_link'
|
95 |
-
train_url = 'https://drive.google.com/file/d/1vW7gfRr36WL7ezXzICJOkt3rYHViSWHr/view?usp=drive_link'
|
96 |
-
val_url = 'https://drive.google.com/file/d/1wUWXmuAkMMZIlo5Is3M1Ndf6NcHa9jnf/view?usp=drive_link'
|
97 |
-
|
98 |
return [
|
99 |
datasets.SplitGenerator(
|
100 |
name=datasets.Split.TRAIN,
|
101 |
-
gen_kwargs={"
|
102 |
),
|
103 |
datasets.SplitGenerator(
|
104 |
name=datasets.Split.VALIDATION,
|
105 |
-
gen_kwargs={"
|
106 |
),
|
107 |
datasets.SplitGenerator(
|
108 |
name=datasets.Split.TEST,
|
109 |
-
gen_kwargs={"
|
110 |
-
)
|
111 |
]
|
112 |
|
113 |
-
def _generate_examples(self, filepath:
|
114 |
|
115 |
if self.config.schema == "source" or self.config.schema == "seacrowd_sptext":
|
116 |
-
|
117 |
-
# Iterate through the dictionary that contains split paths
|
118 |
-
for split, each_filepath in filepath.items():
|
119 |
-
|
120 |
-
tsv_file = os.path.join(each_filepath, "asr_sundanese", "utt_spk_text.tsv")
|
121 |
-
|
122 |
-
with open(tsv_file, "r") as file:
|
123 |
-
tsv_reader = csv.reader(file, delimiter="\t")
|
124 |
-
|
125 |
-
for line in tsv_reader:
|
126 |
-
audio_id, speaker_id, transcription_text = line[0], line[1], line[2]
|
127 |
-
|
128 |
-
wav_path = os.path.join(each_filepath, "asr_sundanese", "data", "{}".format(audio_id[:2]), "{}.flac".format(audio_id))
|
129 |
-
|
130 |
-
if os.path.exists(wav_path):
|
131 |
-
if self.config.schema == "source":
|
132 |
-
ex = {
|
133 |
-
"id": audio_id,
|
134 |
-
"speaker_id": speaker_id,
|
135 |
-
"path": wav_path,
|
136 |
-
"audio": wav_path,
|
137 |
-
"text": transcription_text,
|
138 |
-
}
|
139 |
-
yield audio_id, ex
|
140 |
-
elif self.config.schema == "seacrowd_sptext":
|
141 |
-
ex = {
|
142 |
-
"id": audio_id,
|
143 |
-
"speaker_id": speaker_id,
|
144 |
-
"path": wav_path,
|
145 |
-
"audio": wav_path,
|
146 |
-
"text": transcription_text,
|
147 |
-
"metadata": {
|
148 |
-
"speaker_age": None,
|
149 |
-
"speaker_gender": None,
|
150 |
-
},
|
151 |
-
}
|
152 |
-
yield audio_id, ex
|
153 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
154 |
else:
|
155 |
-
raise ValueError(f"Invalid config: {self.config.name}")
|
|
|
27 |
"""
|
28 |
|
29 |
_DESCRIPTION = """\
|
30 |
+
Sundanese ASR training data set containing ~15K utterances.
|
31 |
+
This dataset was collected by Google in Indonesia.
|
32 |
"""
|
33 |
|
34 |
_HOMEPAGE = "https://indonlp.github.io/nusa-catalogue/card.html?su_id_asr"
|
|
|
36 |
_LICENSE = "Attribution-ShareAlike 4.0 International."
|
37 |
|
38 |
_URLs = {
|
39 |
+
"su_id_asr_6_train": "https://univindonesia-my.sharepoint.com/:u:/g/personal/alvin_xavier_office_ui_ac_id/EYBfoLbhgy9NidrtAA_Sg8YBNkNqdbbu9bKJRrQvWRrzKg?e=UqcdIq&download=1",
|
40 |
+
"su_id_asr_6_val": "https://univindonesia-my.sharepoint.com/:u:/g/personal/alvin_xavier_office_ui_ac_id/EdjN373g7hpGtucnj2m995cBZQ_DSJD8dBAbCW2ggrdtcQ?e=kSXB6H&download=1",
|
41 |
+
"su_id_asr_6_test": "https://univindonesia-my.sharepoint.com/:u:/g/personal/alvin_xavier_office_ui_ac_id/EUCwHQ2zGn5CpcO8bov4074BzsW4m_2Dk4MgU2Ks-TsmsQ?e=64IhTw&download=1",
|
42 |
}
|
43 |
|
44 |
_SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]
|
|
|
46 |
_SOURCE_VERSION = "1.0.0"
|
47 |
_SEACROWD_VERSION = "2024.06.20"
|
48 |
|
|
|
49 |
|
50 |
+
class SuIdASR(datasets.GeneratorBasedBuilder):
|
51 |
"""su_id contains ~220K utterances for Sundanese ASR training data."""
|
52 |
|
53 |
BUILDER_CONFIGS = [
|
|
|
92 |
task_templates=[datasets.AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
|
93 |
)
|
94 |
|
95 |
+
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
    """Download/extract each split archive and return its SplitGenerator.

    Each generator passes the extracted directory to ``_generate_examples``
    via the ``filepath`` kwarg.
    """
    return [
        datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            # Keys must match _URLs exactly: this group's dataset id is
            # "su_id_asr_6" (was "su_id_asr_train", which raised KeyError).
            gen_kwargs={"filepath": dl_manager.download_and_extract(_URLs["su_id_asr_6_train"])},
        ),
        datasets.SplitGenerator(
            name=datasets.Split.VALIDATION,
            gen_kwargs={"filepath": dl_manager.download_and_extract(_URLs["su_id_asr_6_val"])},
        ),
        datasets.SplitGenerator(
            name=datasets.Split.TEST,
            gen_kwargs={"filepath": dl_manager.download_and_extract(_URLs["su_id_asr_6_test"])},
        ),
    ]
|
110 |
|
111 |
+
def _generate_examples(self, filepath: str):
    """Yield ``(audio_id, example)`` pairs from an extracted split directory.

    Reads ``asr_sundanese/utt_spk_text_6.tsv`` (columns: audio_id,
    speaker_id, transcription) under *filepath* and emits one example per
    row whose FLAC file exists on disk. Rows with a missing audio file are
    silently skipped, matching the original behavior.

    Raises:
        ValueError: if ``self.config.schema`` is neither ``"source"`` nor
            ``"seacrowd_sptext"``.
    """
    # Guard clause instead of a trailing else — same error, same message.
    if self.config.schema not in ("source", "seacrowd_sptext"):
        raise ValueError(f"Invalid config: {self.config.name}")

    # Distinct names for the path and the reader (the original rebound
    # ``tsv_file`` from the path string to the csv.reader object).
    tsv_path = os.path.join(filepath, "asr_sundanese", "utt_spk_text_6.tsv")
    with open(tsv_path, "r") as tsv_handle:
        reader = csv.reader(tsv_handle, delimiter="\t")
        for row in reader:
            audio_id, speaker_id, transcription_text = row[0], row[1], row[2]
            # Audio files are sharded into subdirectories by the first two
            # characters of the utterance id.
            wav_path = os.path.join(filepath, "asr_sundanese", "data", "{}".format(audio_id[:2]), "{}.flac".format(audio_id))
            if not os.path.exists(wav_path):
                continue

            # Both schemas share these fields; seacrowd_sptext only adds
            # a metadata dict (age/gender are not distributed with this
            # corpus, so they stay None).
            ex = {
                "id": audio_id,
                "speaker_id": speaker_id,
                "path": wav_path,
                "audio": wav_path,
                "text": transcription_text,
            }
            if self.config.schema == "seacrowd_sptext":
                ex["metadata"] = {
                    "speaker_age": None,
                    "speaker_gender": None,
                }
            yield audio_id, ex
|