SumitMdhr committed on
Commit
f6068d2
1 Parent(s): 0b3861b

Upload biulders.py

Files changed (1)
  1. biulders.py +124 -0
biulders.py ADDED
@@ -0,0 +1,124 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Leading and Trailing Silences Removed Large Nepali ASR Dataset"""
+
+ import csv
+ import os
+
+ import datasets
+
+
+ _CITATION = """\
+ @inproceedings{kjartansson-etal-sltu2018,
+     title = {{Crowd-Sourced Speech Corpora for Javanese, Sundanese, Sinhala, Nepali, and Bangladeshi Bengali}},
+     author = {Oddur Kjartansson and Supheakmungkol Sarin and Knot Pipatsrisawat and Martin Jansche and Linne Ha},
+     booktitle = {Proc. The 6th Intl. Workshop on Spoken Language Technologies for Under-Resourced Languages (SLTU)},
+     year = {2018},
+     address = {Gurugram, India},
+     month = aug,
+     pages = {52--55},
+     URL = {http://dx.doi.org/10.21437/SLTU.2018-11}
+ }
+ """
+
+ _DESCRIPTION = """\
+ This dataset contains transcribed audio data for Nepali. It consists of FLAC files and a TSV file; utt_spk_text.tsv holds a file ID, an anonymized user ID, and the transcription of the audio in that file.
+ The dataset has been manually quality checked, but there may still be errors.
+ The audio files are sampled at a rate of 16 kHz, and leading and trailing silences are trimmed using torchaudio's voice activity detection.
+ """
+
+ # Official homepage for the dataset
+ _HOMEPAGE = "https://www.openslr.org/54/"
+
+ # The license for the dataset
+ _LICENSE = "license:cc-by-sa-4.0"
+
+ # The HuggingFace Datasets library doesn't host the dataset; these URLs point to the original files
+ # (an arbitrary nested dict/list of URLs is allowed; see `_split_generators` below).
+ _URL = "https://huggingface.co/datasets/SumitMdhr/SANT-ASR/resolve/main/"
+ _URLS = {
+     "zipfile": _URL + "CLEAN_DATA.zip",
+     "index_file": _URL + "metedata1.csv",
+ }
+
+
+ class OpenslrNepaliAsrCleaned(datasets.GeneratorBasedBuilder):
+     """Leading and Trailing Silences Removed Large Nepali ASR Dataset"""
+
+     VERSION = datasets.Version("1.0.0")
+     # A default configuration is not mandatory; use one only if it makes sense.
+     DEFAULT_CONFIG_NAME = "original"
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "utterance_id": datasets.Value("string"),
+                 "speaker_id": datasets.Value("string"),
+                 "utterance": datasets.Audio(),
+                 "transcription": datasets.Value("string"),
+                 "num_frames": datasets.Value("int32"),
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+             task_templates=[
+                 datasets.tasks.AutomaticSpeechRecognition(
+                     audio_column="utterance", transcription_column="transcription"
+                 )
+             ],
+         )
+
+     def _split_generators(self, dl_manager):
+         index_file = dl_manager.download(_URLS["index_file"])
+         # A single URL is downloaded, so this is one path, not a list.
+         zip_path = dl_manager.download(_URLS["zipfile"])
+         audio_paths = dl_manager.extract(zip_path)
+         # Delete the downloaded archive once extracted to save disk space.
+         if os.path.exists(zip_path):
+             os.remove(zip_path)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "index_file": index_file,
+                     "audio_paths": audio_paths,
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, index_file, audio_paths):
+         with open(index_file, encoding="utf-8") as f:
+             # The index file is tab-separated, despite its .csv extension.
+             reader = csv.DictReader(f, delimiter="\t")
+             for key, row in enumerate(reader):
+                 path = os.path.join(audio_paths, "CLEAN_DATA", row["utterance_id"])
+                 yield key, {
+                     "utterance_id": row["utterance_id"],
+                     "speaker_id": row["speaker_id"],
+                     "utterance": path,
+                     "transcription": row["transcription"],
+                     "num_frames": int(row["num_frames"]),
+                 }
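
The dataset description says leading and trailing silences were trimmed with torchaudio's voice activity detection. A minimal sketch of that kind of preprocessing is below; `torchaudio.functional.vad` trims silence only from the front of a recording, so it is applied twice with a reversal in between. The file paths and the 16 kHz FLAC input are illustrative assumptions, not taken from this commit.

import torch
import torchaudio

def trim_end_silences(in_path, out_path):
    # Load the recording (paths here are hypothetical examples).
    waveform, sample_rate = torchaudio.load(in_path)
    # vad() removes silence from the front only: trim the front,
    # reverse the signal, trim again, then reverse back.
    trimmed = torchaudio.functional.vad(waveform, sample_rate)
    trimmed = torchaudio.functional.vad(torch.flip(trimmed, [-1]), sample_rate)
    trimmed = torch.flip(trimmed, [-1])
    torchaudio.save(out_path, trimmed, sample_rate)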
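
Assuming this script serves as the loading script for the SumitMdhr/SANT-ASR repo (the repo id comes from `_URL` above), the dataset can then be loaded like any other Hub dataset: `_split_generators` defines a single train split, and the `utterance` Audio column decodes to an array plus sampling rate.

from datasets import load_dataset

ds = load_dataset("SumitMdhr/SANT-ASR", split="train")
sample = ds[0]
print(sample["transcription"])
print(sample["utterance"]["sampling_rate"], sample["num_frames"])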