jineeuslab committed (verified)
Commit 81c80df · 1 Parent(s): a46f84e

Update asr_sundanese_2_hub.py

Files changed (1)
  1. asr_sundanese_2_hub.py +49 -49
asr_sundanese_2_hub.py CHANGED
@@ -15,43 +15,40 @@
 
 import csv
 import os
-from pathlib import Path
-from typing import List
+from typing import Dict, List
 
 import datasets
 
 from seacrowd.utils import schemas
 from seacrowd.utils.configs import SEACrowdConfig
-from seacrowd.utils.constants import Tasks
+from seacrowd.utils.constants import (DEFAULT_SEACROWD_VIEW_NAME,
+                                      DEFAULT_SOURCE_VIEW_NAME, Tasks)
 
+_DATASETNAME = "su_id_asr"
+_SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
+_UNIFIED_VIEW_NAME = DEFAULT_SEACROWD_VIEW_NAME
+
+_LANGUAGES = ["sun"]
+_LOCAL = False
 _CITATION = """\
-@inproceedings{kjartansson-etal-sltu2018,
-    title = {{Crowd-Sourced Speech Corpora for Javanese, Sundanese, Sinhala, Nepali, and Bangladeshi Bengali}},
-    author = {Oddur Kjartansson and Supheakmungkol Sarin and Knot Pipatsrisawat and Martin Jansche and Linne Ha},
-    booktitle = {Proc. The 6th Intl. Workshop on Spoken Language Technologies for Under-Resourced Languages (SLTU)},
-    year = {2018},
-    address = {Gurugram, India},
-    month = aug,
-    pages = {52--55},
-    URL = {http://dx.doi.org/10.21437/SLTU.2018-11},
-}
+@inproceedings{sodimana18_sltu,
+  author={Keshan Sodimana and Pasindu {De Silva} and Supheakmungkol Sarin and Oddur Kjartansson and Martin Jansche and Knot Pipatsrisawat and Linne Ha},
+  title={{A Step-by-Step Process for Building TTS Voices Using Open Source Data and Frameworks for Bangla, Javanese, Khmer, Nepali, Sinhala, and Sundanese}},
+  year=2018,
+  booktitle={Proc. 6th Workshop on Spoken Language Technologies for Under-Resourced Languages (SLTU 2018)},
+  pages={66--70},
+  doi={10.21437/SLTU.2018-14}
+}
 """
 
-_DATASETNAME = "su_id_asr"
-
 _DESCRIPTION = """\
-This data set contains transcribed audio data for Sundanese, specifically asr_sundanese_2 dataset from OpenSLR.
-The data set consists of wave files, and a TSV file.
-The file utt_spk_text.tsv contains a FileID, UserID and the transcription of audio in the file.
-The data set has been manually quality checked, but there might still be errors.
-This dataset was collected by Google in collaboration with Reykjavik University and Universitas Gadjah Mada in Indonesia.
+Sundanese ASR training data set containing ~220K utterances.
+This dataset was collected by Google in Indonesia.
 """
 
-_HOMEPAGE = "http://openslr.org/36/"
-_LANGUAGES = ["sun"]
-_LOCAL = False
+_HOMEPAGE = "https://indonlp.github.io/nusa-catalogue/card.html?su_id_asr"
 
-_LICENSE = "Attribution-ShareAlike 4.0 International"
+_LICENSE = "Attribution-ShareAlike 4.0 International."
 
 _URLs = {
     "su_id_asr_train": "https://drive.google.com/uc?export=download&id=10YBMnKSfZQKCuYGXAsTeTfUM5t3rGLs-",
@@ -59,31 +56,26 @@ _URLs = {
     "su_id_asr_test": "https://drive.google.com/uc?export=download&id=1P6mtQJoZ2QV7AC9zbR2nDbW6s6YrJ_XU",
 }
 
-_SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]  # example: [Tasks.TRANSLATION, Tasks.NAMED_ENTITY_RECOGNITION, Tasks.RELATION_EXTRACTION]
+_SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]
 
 _SOURCE_VERSION = "1.0.0"
-
 _SEACROWD_VERSION = "2024.06.20"
 
-
 class SuIdASR(datasets.GeneratorBasedBuilder):
-    """Sundanese ASR training data set."""
-
-    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
-    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+    """su_id contains ~220K utterances for Sundanese ASR training data."""
 
     BUILDER_CONFIGS = [
         SEACrowdConfig(
             name="su_id_asr_source",
-            version=SOURCE_VERSION,
-            description="su_id_asr source schema",
+            version=datasets.Version(_SOURCE_VERSION),
+            description="SU_ID_ASR source schema",
             schema="source",
             subset_id="su_id_asr",
         ),
         SEACrowdConfig(
             name="su_id_asr_seacrowd_sptext",
-            version=SEACROWD_VERSION,
-            description="su_id_asr Nusantara schema",
+            version=datasets.Version(_SEACROWD_VERSION),
+            description="SU_ID_ASR Nusantara schema",
            schema="seacrowd_sptext",
             subset_id="su_id_asr",
         ),
@@ -91,7 +83,7 @@ class SuIdASR(datasets.GeneratorBasedBuilder):
 
     DEFAULT_CONFIG_NAME = "su_id_asr_source"
 
-    def _info(self) -> datasets.DatasetInfo:
+    def _info(self):
         if self.config.schema == "source":
             features = datasets.Features(
                 {
@@ -111,6 +103,7 @@ class SuIdASR(datasets.GeneratorBasedBuilder):
             homepage=_HOMEPAGE,
             license=_LICENSE,
             citation=_CITATION,
+            task_templates=[datasets.AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
         )
 
     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
@@ -121,7 +114,7 @@
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
-                gen_kwargs={"filepath": dl_manager.download_and_extract(_URLs["su_id_asr_val"])},
+                gen_kwargs={"filepath": dl_manager.download_and_extract(_URLs["su_id_asr_dev"])},
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
@@ -129,36 +122,43 @@
             )
         ]
 
-    def _generate_examples(self, filepath: Path):
-        for key, fp in filepath.items():
-            tsv_file = os.path.join(fp, "utt_spk_text.tsv")
-            with open(tsv_file, "r") as f:
-                tsv_file = csv.reader(f, delimiter="\t")
+    def _generate_examples(self, filepath: str):
+
+        if self.config.schema == "source" or self.config.schema == "seacrowd_sptext":
+
+            tsv_file = os.path.join(filepath, "asr_sundanese", "utt_spk_text.tsv")
+
+            with open(tsv_file, "r") as file:
+                tsv_file = csv.reader(file, delimiter="\t")
+
                 for line in tsv_file:
-                    audio_id, sp_id, text = line[0], line[1], line[2]
-                    wav_path = os.path.join(fp, "data", "{}.flac".format(audio_id))
+                    audio_id, speaker_id, transcription_text = line[0], line[1], line[2]
+
+                    wav_path = os.path.join(filepath, "asr_sundanese", "data", "{}".format(audio_id[:2]), "{}.flac".format(audio_id))
 
                     if os.path.exists(wav_path):
                         if self.config.schema == "source":
                             ex = {
                                 "id": audio_id,
-                                "speaker_id": sp_id,
+                                "speaker_id": speaker_id,
                                 "path": wav_path,
                                 "audio": wav_path,
-                                "text": text,
+                                "text": transcription_text,
                             }
                             yield audio_id, ex
                         elif self.config.schema == "seacrowd_sptext":
                             ex = {
                                 "id": audio_id,
-                                "speaker_id": sp_id,
+                                "speaker_id": speaker_id,
                                 "path": wav_path,
                                 "audio": wav_path,
-                                "text": text,
+                                "text": transcription_text,
                                 "metadata": {
                                     "speaker_age": None,
                                     "speaker_gender": None,
                                 },
                             }
                             yield audio_id, ex
-            f.close()
+
+        else:
+            raise ValueError(f"Invalid config: {self.config.name}")
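
For reviewers who want to sanity-check the change, here is a minimal, hypothetical smoke test; it is not part of the commit. It assumes the updated script is saved locally as asr_sundanese_2_hub.py, that the seacrowd package it imports is installed, and that the installed datasets version still supports script-based loading; the config name su_id_asr_source is the DEFAULT_CONFIG_NAME declared above.

# Hypothetical smoke test for the updated builder; not part of this commit.
# Assumes: `datasets` (a version that still supports loading scripts),
# the `seacrowd` package, and asr_sundanese_2_hub.py in the working directory.
import datasets

# "su_id_asr_source" is the DEFAULT_CONFIG_NAME declared in the script;
# trust_remote_code=True is required when loading a custom dataset script.
ds = datasets.load_dataset(
    "asr_sundanese_2_hub.py",
    name="su_id_asr_source",
    split="train",
    trust_remote_code=True,
)

# Source-schema rows carry id, speaker_id, path, audio, and text fields.
print(ds[0]["id"], ds[0]["text"])

Note that the rewritten _generate_examples expects the extracted archive to contain an asr_sundanese/ directory and shards audio by the first two characters of the utterance ID (data/<id[:2]>/<id>.flac), which is what the added audio_id[:2] path component encodes.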