holylovenia committed
Commit b9808d0 (1 parent: 9576e07)

Upload struct_amb_ind.py with huggingface_hub

Files changed (1)
  1. struct_amb_ind.py +174 -0
struct_amb_ind.py ADDED
@@ -0,0 +1,174 @@
import os
from itertools import chain
from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@inproceedings{widiaputri-etal-5641,
    author    = {Widiaputri, Ruhiyah Faradishi and Purwarianti, Ayu and Lestari, Dessi Puji and Azizah, Kurniawati and Tanaya, Dipta and Sakti, Sakriani},
    title     = {Speech Recognition and Meaning Interpretation: Towards Disambiguation of Structurally Ambiguous Spoken Utterances in Indonesian},
    booktitle = {Proceedings of EMNLP 2023},
    year      = {2023}
}
"""

_DATASETNAME = "struct_amb_ind"

_DESCRIPTION = """
This is the first Indonesian speech dataset of structurally ambiguous utterances, each paired with a transcription and two disambiguation texts.
The structurally ambiguous sentences were adapted from Types 4, 5, 6, and 10 of the types of syntactic ambiguity in English catalogued by [Taha et al., 1983].
For each chosen type, 100 structurally ambiguous Indonesian sentences were collected through crowdsourcing.
Each ambiguous sentence has two possible interpretations, so each yields two disambiguation text outputs.
Each disambiguation text is made up of two sentences. All of the sentences have been checked by linguists.
"""

_HOMEPAGE = "https://github.com/ha3ci-lab/struct_amb_ind"

_LICENSE = Licenses.UNKNOWN.value

_LOCAL = True  # get the audio data externally from https://drive.google.com/drive/folders/1QeaptstBgwGYO6THGkZHHViExrogCMUj
_LANGUAGES = ["ind"]
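
# NOTE (an assumption based on the audio_urls built in _split_generators below):
# data_dir is expected to hold the 22 per-speaker archives from the Google Drive
# folder above, named F01.zip-F11.zip and M01.zip-M11.zip, placed flat in one directory.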

_URL_TEMPLATES = {
    "keys": "https://raw.githubusercontent.com/ha3ci-lab/struct_amb_ind/main/keys/train_dev_test_spk_keys/",
    "text": "https://raw.githubusercontent.com/ha3ci-lab/struct_amb_ind/main/text/",
}

_URLS = {
    "split_train": _URL_TEMPLATES["keys"] + "train_spk",
    "split_dev": _URL_TEMPLATES["keys"] + "dev_spk",
    "split_test": _URL_TEMPLATES["keys"] + "test_spk",
    "text_transcript": _URL_TEMPLATES["text"] + "ID_amb_disam_transcript.txt",
}
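
# Each line of ID_amb_disam_transcript.txt is assumed (from the parsing in
# _generate_examples below) to be pipe-separated:
#   <utterance_id>|<ambiguous transcript>|<disambiguation text>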

_SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"


class StructAmbInd(datasets.GeneratorBasedBuilder):
    """
    The first Indonesian speech dataset of structurally ambiguous utterances, each paired with a transcription and two disambiguation texts.
    This dataloader does NOT include the additional training data mentioned at the _HOMEPAGE, as that data is already covered by the "indspeech_news_lvcsr" dataloader.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_sptext",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema="seacrowd_sptext",
            subset_id=f"{_DATASETNAME}",
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "speaker_id": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "amb_transcript": datasets.Value("string"),
                    "disam_text": datasets.Value("string"),
                }
            )

        elif self.config.schema == "seacrowd_sptext":
            features = schemas.speech_text_features
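            # As consumed in _generate_examples below, this shared schema carries
            # at least: id, path, audio, text, speaker_id, and a metadata dict
            # with speaker_age and speaker_gender.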

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        # The data_dir configuration is required ONLY for the audio_urls.
        if self.config.data_dir is None:
            raise ValueError("This is a local dataset. Please pass the data_dir kwarg to load_dataset.")
        else:
            data_dir = self.config.data_dir

        # Extract the local per-speaker audio archives (F01.zip-F11.zip, M01.zip-M11.zip).
        audio_urls = [data_dir + "/" + f"{gender}{_id:02}.zip" for gender in ["F", "M"] for _id in range(1, 12)]
        audio_files_dir = [Path(dl_manager.extract(audio_url)) / audio_url.split("/")[-1][:-4] for audio_url in audio_urls]
        # Download the speaker split keys and the transcript.
        split_train = Path(dl_manager.download(_URLS["split_train"]))
        split_dev = Path(dl_manager.download(_URLS["split_dev"]))
        split_test = Path(dl_manager.download(_URLS["split_test"]))
        text_transcript = Path(dl_manager.download(_URLS["text_transcript"]))

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"split": split_train, "transcript": text_transcript, "audio_files_dir": audio_files_dir},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"split": split_dev, "transcript": text_transcript, "audio_files_dir": audio_files_dir},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"split": split_test, "transcript": text_transcript, "audio_files_dir": audio_files_dir},
            ),
        ]

    def _generate_examples(self, split: Path, transcript: Path, audio_files_dir: List[Path]) -> Tuple[int, Dict]:
        # Keep only the audio folders whose name appears in this split's speaker key file.
        with open(split, "r") as f:
            speaker_ids = [line.rstrip("\n") for line in f]
        speech_folders = [audio_folder for audio_folder in audio_files_dir if audio_folder.name in speaker_ids]
        speech_files = list(chain(*[[str(speech_folder) + "/" + fname for fname in os.listdir(speech_folder)] for speech_folder in speech_folders]))

        with open(transcript, "r") as f:
            sentences = [line.rstrip("\n").split("|") for line in f]
        transcript_dict = {sent[0]: {"amb_transcript": sent[1], "disam_text": sent[2]} for sent in sentences}
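
        # Audio file names are assumed to follow <...>_<speaker_id>_<...>.wav, so
        # the second "_"-separated field is the speaker ID and its first character
        # (F or M) also serves as the gender label in the seacrowd schema below.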
        for key, aud_file in enumerate(speech_files):
            aud_id = aud_file.split("/")[-1][:-4]  # file name without its 4-character extension
            aud_info = aud_id.split("_")
            if self.config.schema == "source":
                row = {
                    "id": aud_id,
                    "speaker_id": aud_info[1],
                    "path": aud_file,
                    "audio": aud_file,
                    "amb_transcript": transcript_dict[aud_id]["amb_transcript"],
                    "disam_text": transcript_dict[aud_id]["disam_text"],
                }
                yield key, row
            elif self.config.schema == "seacrowd_sptext":
                row = {
                    "id": aud_id,
                    "path": aud_file,
                    "audio": aud_file,
                    "text": transcript_dict[aud_id]["amb_transcript"],
                    "speaker_id": aud_info[1],
                    "metadata": {
                        "speaker_age": None,
                        "speaker_gender": aud_info[1][0],
                    },
                }
                yield key, row
            else:
                raise NotImplementedError(f"Schema '{self.config.schema}' is not defined.")
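

# A minimal smoke-test sketch: the data_dir path here is an assumption; it must
# point at a directory containing the speaker zips F01.zip-F11.zip and M01.zip-M11.zip.
if __name__ == "__main__":
    dset = datasets.load_dataset(__file__, name=f"{_DATASETNAME}_source", data_dir="/path/to/audio_zips")
    print(dset["train"][0]["amb_transcript"])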