Languages: Sundanese
Tags: speech-recognition
alvinxrwui committed
Commit • 93847ff
1 Parent(s): d2f6576

feat: split train, val, test
su_id_asr.py +18 -12

su_id_asr.py CHANGED
@@ -27,8 +27,7 @@ _CITATION = """\
 """
 
 _DESCRIPTION = """\
-
-This dataset was collected by Google in Indonesia.
+Test
 """
 
 _HOMEPAGE = "https://indonlp.github.io/nusa-catalogue/card.html?su_id_asr"
@@ -44,6 +43,7 @@ _SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]
 _SOURCE_VERSION = "1.0.0"
 _SEACROWD_VERSION = "2024.06.20"
 
+GROUP_DATASET_ID = 6
 
 class SuIdASR(datasets.GeneratorBasedBuilder):
     """su_id contains ~220K utterances for Sundanese ASR training data."""
@@ -92,29 +92,35 @@ class SuIdASR(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
         base_path = {}
-
-        for id in ["a", "b", "c", "d", "e", "f"]:
-            base_path[id] = dl_manager.download_and_extract(_URLs["su_id_asr"].format(str(id)))
-
+        base_path[id] = dl_manager.download_and_extract(_URLs["su_id_asr"].format(str(GROUP_DATASET_ID)))
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                gen_kwargs={"filepath": base_path},
+                gen_kwargs={"filepath": f"{base_path}/train"},
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={"filepath": f"{base_path}/validation"},
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={"filepath": f"{base_path}/test"},
             ),
         ]
 
     def _generate_examples(self, filepath: Dict):
 
         if self.config.schema == "source" or self.config.schema == "seacrowd_sptext":
-
-
-
+
+            # Iterate through the dictionary that contains split paths
+            for split, each_filepath in filepath.items():
+
             tsv_file = os.path.join(each_filepath, "asr_sundanese", "utt_spk_text.tsv")
 
             with open(tsv_file, "r") as file:
-
+                tsv_reader = csv.reader(file, delimiter="\t")
 
-                for line in
+                for line in tsv_reader:
                 audio_id, speaker_id, transcription_text = line[0], line[1], line[2]
 
                 wav_path = os.path.join(each_filepath, "asr_sundanese", "data", "{}".format(audio_id[:2]), "{}.flac".format(audio_id))
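The reworked _generate_examples reads utt_spk_text.tsv with a tab-delimited csv.reader and unpacks each row into an utterance id, speaker id, and transcription, from which the audio path is derived. A minimal standalone sketch of that parsing step, assuming a locally extracted archive with the same layout (the base_dir path here is illustrative, not part of the commit):

import csv
import os

# Illustrative archive root; the loader derives this from download_and_extract().
base_dir = "asr_sundanese"
tsv_file = os.path.join(base_dir, "utt_spk_text.tsv")

with open(tsv_file, "r") as file:
    tsv_reader = csv.reader(file, delimiter="\t")
    for line in tsv_reader:
        audio_id, speaker_id, transcription_text = line[0], line[1], line[2]
        # Audio is sharded into subdirectories named after the first two
        # characters of the utterance id, one FLAC file per utterance.
        wav_path = os.path.join(base_dir, "data", audio_id[:2], f"{audio_id}.flac")
        print(audio_id, speaker_id, wav_path, transcription_text)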
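Downstream, the three splits this commit introduces would be consumed in the usual way. A sketch, assuming a hypothetical Hub repo id of alvinxrwui/su_id_asr; note that, as committed, base_path is a dict, so f"{base_path}/train" stringifies the whole mapping, and the split paths may need further adjustment before this loads cleanly:

from datasets import load_dataset

# Hypothetical repo id; substitute the actual dataset path on the Hub.
ds = load_dataset("alvinxrwui/su_id_asr", trust_remote_code=True)

# The updated _split_generators should expose train, validation, and test.
for split_name in ("train", "validation", "test"):
    print(split_name, ds[split_name].num_rows)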