carlosdanielhernandezmena committed
Commit 54173f8
1 Parent(s): a772002

Adding audio files to the repo

corpus/files/metadata_dev.tsv ADDED
The diff for this file is too large to render. See raw diff
 
corpus/files/metadata_test.tsv ADDED
The diff for this file is too large to render. See raw diff
 
corpus/files/metadata_train.tsv ADDED
The diff for this file is too large to render. See raw diff
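
The metadata TSVs are too large to render here, but judging from the loader script samromur_asr.py added below, each row is keyed by audio_id and carries at least the speaker_id, gender, age, duration, and normalized_text columns. A minimal sketch for peeking at one row of a local copy, assuming the header matches those feature names:

# Sketch: inspect the first row of a metadata TSV with the same
# csv.DictReader settings the loader below uses. Column names are
# assumed from the loader's feature list, not confirmed against the file.
import csv

with open("corpus/files/metadata_dev.tsv", newline="", encoding="utf-8") as f:
    reader = csv.DictReader(f, delimiter="\t")
    row = next(reader)
    print(row["audio_id"], row["duration"], row["normalized_text"])
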
 
corpus/files/tars_dev.paths ADDED
@@ -0,0 +1 @@
+ corpus/speech/dev.tar.gz
corpus/files/tars_test.paths ADDED
@@ -0,0 +1 @@
+ corpus/speech/test.tar.gz
corpus/files/tars_train.paths ADDED
@@ -0,0 +1,2 @@
+ corpus/speech/train/train_part_01.tar.gz
+ corpus/speech/train/train_part_02.tar.gz
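
Each .paths file lists the tar archives that make up one split, one repository-relative path per line; the loader script below reads these lists before handing the archives to the download manager. A small sketch of that step, assuming a local checkout:

# Sketch: read a .paths file into a list of archive paths, mirroring what
# _split_generators in samromur_asr.py does for each split.
with open("corpus/files/tars_train.paths") as f:
    train_tars = [line.strip() for line in f if line.strip()]
print(train_tars)  # expected: the two train_part_*.tar.gz paths listed above
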
corpus/speech/dev.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1e8e2e09d1cdf4805569b959c1365e5096d554d18835c1e9d00dc60a2b68fa5b
+ size 745026377
corpus/speech/test.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:453dd9c5eff11a19b42bdb730b06482b08a5d4c32a58ebc46aba376e91843d11
+ size 779045243
corpus/speech/train/train_part_01.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a1442ddb2ed80ce58e706b08a2d317e8ef6c00cbcab47ac3ee6cc12a9361869
+ size 2346032212
corpus/speech/train/train_part_02.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:06e3a5ca276dbecf4fc983368ea14f2e2eca1902f2e51854a4dbdd2b3d16bf37
+ size 3157553445
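
The four tar.gz entries above are Git LFS pointer files (spec v1): they record only the sha256 oid and byte size of each archive, while the audio payloads live in LFS storage. A hedged sketch for checking a downloaded archive against its pointer; the local path is a placeholder:

# Sketch: verify that a locally downloaded dev.tar.gz matches the LFS
# pointer shown above (oid sha256 and size). "dev.tar.gz" is a placeholder
# for wherever the archive actually lives on disk.
import hashlib
import os

EXPECTED_OID = "1e8e2e09d1cdf4805569b959c1365e5096d554d18835c1e9d00dc60a2b68fa5b"
EXPECTED_SIZE = 745026377

path = "dev.tar.gz"
digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

print(digest.hexdigest() == EXPECTED_OID and os.path.getsize(path) == EXPECTED_SIZE)
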
samromur_asr.py ADDED
@@ -0,0 +1,154 @@
+ from collections import defaultdict
+ import os
+ import json
+ import csv
+
+ import datasets
+
+ _NAME="samromur_asr"
+ _VERSION="1.0.0"
+ _AUDIO_EXTENSIONS=".flac"
+
+ _DESCRIPTION = """
+ Samrómur Icelandic Speech 1.0.
+ """
+
+ _CITATION = """
+ @misc{mollbergsamromur2022,
+       title={Samrómur Icelandic Speech 1.0},
+       ldc_catalog_no={LDC2022S05},
+       DOI={https://doi.org/10.35111/thx3-f170},
+       author={Mollberg, David Erik and Jónsson, Ólafur Helgi and Þorsteinsdóttir, Sunneva and Guðmundsdóttir, Jóhanna Vigdís and Steingrimsson, Steinthor and Magnusdottir, Eydis Huld and Fong, Judy Y. and Borsky, Michal and Guðnason, Jón},
+       publisher={Reykjavík University},
+       journal={Linguistic Data Consortium, Philadelphia},
+       year={2022},
+       url={https://catalog.ldc.upenn.edu/LDC2022S05},
+ }
+ """
+
+ _HOMEPAGE = "https://catalog.ldc.upenn.edu/LDC2022S05/"
+
+ _LICENSE = "CC-BY-4.0, See https://creativecommons.org/licenses/by/4.0/"
+
+ _BASE_DATA_DIR = "corpus/"
+ _METADATA_TRAIN = os.path.join(_BASE_DATA_DIR,"files","metadata_train.tsv")
+ _METADATA_TEST = os.path.join(_BASE_DATA_DIR,"files", "metadata_test.tsv")
+ _METADATA_DEV = os.path.join(_BASE_DATA_DIR,"files", "metadata_dev.tsv")
+
+ _TARS_TRAIN = os.path.join(_BASE_DATA_DIR,"files","tars_train.paths")
+ _TARS_TEST = os.path.join(_BASE_DATA_DIR,"files", "tars_test.paths")
+ _TARS_DEV = os.path.join(_BASE_DATA_DIR,"files", "tars_dev.paths")
+
+ class SamromurAsrConfig(datasets.BuilderConfig):
+     """BuilderConfig for The Samrómur Corpus"""
+
+     def __init__(self, name, **kwargs):
+         name=_NAME
+         super().__init__(name=name, **kwargs)
+
+ class SamromurAsr(datasets.GeneratorBasedBuilder):
+     """Samrómur Icelandic Speech 1.0"""
+
+     VERSION = datasets.Version(_VERSION)
+     BUILDER_CONFIGS = [
+         SamromurAsrConfig(
+             name=_NAME,
+             version=datasets.Version(_VERSION),
+         )
+     ]
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "audio_id": datasets.Value("string"),
+                 "audio": datasets.Audio(sampling_rate=16000),
+                 "speaker_id": datasets.Value("string"),
+                 "gender": datasets.Value("string"),
+                 "age": datasets.Value("string"),
+                 "duration": datasets.Value("float32"),
+                 "normalized_text": datasets.Value("string"),
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+
+         metadata_train=dl_manager.download_and_extract(_METADATA_TRAIN)
+         metadata_test=dl_manager.download_and_extract(_METADATA_TEST)
+         metadata_dev=dl_manager.download_and_extract(_METADATA_DEV)
+
+         tars_train=dl_manager.download_and_extract(_TARS_TRAIN)
+         tars_test=dl_manager.download_and_extract(_TARS_TEST)
+         tars_dev=dl_manager.download_and_extract(_TARS_DEV)
+
+         hash_tar_files=defaultdict(dict)
+         with open(tars_train,'r') as f:
+             hash_tar_files['train']=[path.replace('\n','') for path in f]
+
+         with open(tars_test,'r') as f:
+             hash_tar_files['test']=[path.replace('\n','') for path in f]
+
+         with open(tars_dev,'r') as f:
+             hash_tar_files['dev']=[path.replace('\n','') for path in f]
+
+         hash_meta_paths={"train":metadata_train,"test":metadata_test,"dev":metadata_dev}
+         audio_paths = dl_manager.download(hash_tar_files)
+
+         splits=["train","dev","test"]
+         local_extracted_audio_paths = (
+             dl_manager.extract(audio_paths) if not dl_manager.is_streaming else
+             {
+                 split:[None] * len(audio_paths[split]) for split in splits
+             }
+         )
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "audio_archives":[dl_manager.iter_archive(archive) for archive in audio_paths["train"]],
+                     "local_extracted_archives_paths": local_extracted_audio_paths["train"],
+                     "metadata_paths": hash_meta_paths["train"],
+                 }
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["dev"]],
+                     "local_extracted_archives_paths": local_extracted_audio_paths["dev"],
+                     "metadata_paths": hash_meta_paths["dev"],
+                 }
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["test"]],
+                     "local_extracted_archives_paths": local_extracted_audio_paths["test"],
+                     "metadata_paths": hash_meta_paths["test"],
+                 }
+             ),
+         ]
+
+     def _generate_examples(self, audio_archives, local_extracted_archives_paths, metadata_paths):
+
+         features = ["speaker_id","gender","age","duration","normalized_text"]
+
+         with open(metadata_paths) as f:
+             metadata = {x["audio_id"]: x for x in csv.DictReader(f, delimiter="\t")}
+
+         for audio_archive, local_extracted_archive_path in zip(audio_archives, local_extracted_archives_paths):
+             for audio_filename, audio_file in audio_archive:
+                 audio_id = audio_filename.split(os.sep)[-1].split(_AUDIO_EXTENSIONS)[0]
+                 path = os.path.join(local_extracted_archive_path, audio_filename) if local_extracted_archive_path else audio_filename
+
+                 yield audio_id, {
+                     "audio_id": audio_id,
+                     **{feature: metadata[audio_id][feature] for feature in features},
+                     "audio": {"path": path, "bytes": audio_file.read()},
+                 }
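
With the loader script and archives in place, the corpus should be consumable through the datasets library; a usage sketch, where "<namespace>/samromur_asr" is a placeholder for the actual Hub repository id:

# Usage sketch, assuming this repository is hosted on the Hugging Face Hub.
# Recent versions of datasets may additionally require trust_remote_code=True
# for script-based datasets such as this one.
from datasets import load_dataset

samromur = load_dataset("<namespace>/samromur_asr", split="validation", streaming=True)
for example in samromur:
    print(example["audio_id"], example["normalized_text"])
    break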