Elyordev committed
Commit ef5ba35 · verified · 1 parent: 29d69d4

Delete dataset.py

Files changed (1)
  dataset.py +0 -191
dataset.py DELETED
@@ -1,191 +0,0 @@
- # coding=utf-8
- """
- new_dataset_stt_audio dataset.
-
- This dataset consists of audio files stored in tar archives and transcript files in TSV format.
- The dataset structure is as follows:
-
- new_dataset_stt_audio/
- ├── audio/
- │   └── uz/
- │       ├── train/
- │       │   └── train.tar
- │       ├── validation/
- │       │   └── validation.tar
- │       └── test/
- │           └── test.tar
- └── transcript/
-     └── uz/
-         ├── train/
-         │   └── train.tsv
-         ├── validation/
-         │   └── validation.tsv
-         └── test/
-             └── test.tsv
-
- Each transcript TSV file has columns:
- id, path, sentence, duration, age, gender, accents, locale.
-
- The audio field is loaded using a tar URI, allowing streaming from the tar archive.
- """
-
- import csv
- import os
- import tarfile
- from typing import Iterator, Tuple
-
- import datasets
-
- _CITATION = """\
- @misc{yourcitation2023,
-   title={Your Dataset Title},
-   author={Your Name},
-   year={2023},
-   url={https://huggingface.co/datasets/Elyordev/new_dataset_stt_audio}
- }
- """
-
- _DESCRIPTION = """\
- This dataset consists of audio files and corresponding transcripts for speech-to-text tasks.
- The audio files are stored in tar archives under the audio/uz folder for each split (train, validation, test),
- and the transcripts are stored as TSV files under transcript/uz for each split.
- The transcript TSV files have the following columns:
- id, path, sentence, duration, age, gender, accents, locale.
- The audio is loaded using a tar URI to enable streaming.
- """
-
- _HOMEPAGE = "https://huggingface.co/datasets/Elyordev/new_dataset_stt_audio"
- _LICENSE = "MIT"
-
- class NewDatasetSTTAudioConfig(datasets.BuilderConfig):
-     """Builder config for new_dataset_stt_audio."""
-     def __init__(self, language="uz", **kwargs):
-         super(NewDatasetSTTAudioConfig, self).__init__(**kwargs)
-         self.language = language
-
- class NewDatasetSTTAudio(datasets.GeneratorBasedBuilder):
-     """New Dataset STT Audio builder."""
-     VERSION = datasets.Version("1.0.0")
-     BUILDER_CONFIGS = [
-         NewDatasetSTTAudioConfig(
-             name="default",
-             version=VERSION,
-             description="STT dataset with audio tar archives and transcript TSV files for Uzbek language",
-             language="uz",
-         ),
-     ]
-
-     def _info(self):
-         features = datasets.Features({
-             "id": datasets.Value("string"),
-             "path": datasets.Value("string"),
-             "sentence": datasets.Value("string"),
-             "duration": datasets.Value("float"),
-             "age": datasets.Value("string"),
-             "gender": datasets.Value("string"),
-             "accents": datasets.Value("string"),
-             "locale": datasets.Value("string"),
-             "audio": datasets.Audio(sampling_rate=16000),
-         })
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             supervised_keys=None,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """
-         Returns SplitGenerators.
-         Expects the dataset to be provided manually via the repository.
-         The manual_dir should contain the following structure:
-
-         new_dataset_stt_audio/
-             audio/uz/{train, validation, test}/*.tar
-             transcript/uz/{train, validation, test}/*.tsv
-         """
-         manual_dir = dl_manager.manual_dir if dl_manager.manual_dir is not None else ""
-         language = self.config.language
-
-         splits = {
-             "train": {
-                 "transcript": os.path.join(manual_dir, "transcript", language, "train", "train.tsv"),
-                 "audio": os.path.join(manual_dir, "audio", language, "train", "train.tar")
-             },
-             "validation": {
-                 "transcript": os.path.join(manual_dir, "transcript", language, "validation", "validation.tsv"),
-                 "audio": os.path.join(manual_dir, "audio", language, "validation", "validation.tar")
-             },
-             "test": {
-                 "transcript": os.path.join(manual_dir, "transcript", language, "test", "test.tsv"),
-                 "audio": os.path.join(manual_dir, "audio", language, "test", "test.tar")
-             }
-         }
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "transcript_path": splits["train"]["transcript"],
-                     "audio_tar_path": splits["train"]["audio"],
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={
-                     "transcript_path": splits["validation"]["transcript"],
-                     "audio_tar_path": splits["validation"]["audio"],
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={
-                     "transcript_path": splits["test"]["transcript"],
-                     "audio_tar_path": splits["test"]["audio"],
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, transcript_path: str, audio_tar_path: str) -> Iterator[Tuple[str, dict]]:
-         """
-         Yields examples as (key, example) tuples.
-
-         Args:
-             transcript_path (str): Path to the transcript TSV file.
-             audio_tar_path (str): Path to the audio tar archive.
-         """
-         # 1. Read the transcript TSV file into a dictionary mapping file name to metadata.
-         metadata_map = {}
-         with open(transcript_path, encoding="utf-8") as f:
-             reader = csv.DictReader(f, delimiter="\t")
-             for row in reader:
-                 file_name = row["path"].strip()
-                 if not file_name.endswith(".mp3"):
-                     file_name += ".mp3"
-                 metadata_map[file_name] = row
-
-         # 2. Create a base audio URI for streaming from the tar archive.
-         base_audio_uri = f"tar://{audio_tar_path}#"
-
-         # 3. Open the tar archive and iterate through its members.
-         id_ = 0
-         with tarfile.open(audio_tar_path, "r") as tar:
-             for member in tar.getmembers():
-                 file_name = os.path.basename(member.name)
-                 if file_name in metadata_map:
-                     row = metadata_map[file_name]
-                     audio_uri = base_audio_uri + file_name
-                     yield str(id_), {
-                         "id": row["id"],
-                         "path": row["path"],
-                         "sentence": row["sentence"],
-                         "duration": float(row.get("duration") or 0.0),
-                         "age": row.get("age", ""),
-                         "gender": row.get("gender", ""),
-                         "accents": row.get("accents", ""),
-                         "locale": row.get("locale", ""),
-                         "audio": audio_uri,
-                     }
-                     id_ += 1
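
For reference, here is a minimal sketch of how one split of the layout described in the deleted script's docstring could be read directly with the Python standard library, now that the loading script is gone. It mirrors the deleted _generate_examples logic; the local root directory (new_dataset_stt_audio), the chosen split (train), and the final print call are illustrative assumptions, not part of the repository.

import csv
import os
import tarfile

# Assumed local checkout laid out as in the deleted script's docstring:
# audio/uz/<split>/<split>.tar and transcript/uz/<split>/<split>.tsv.
DATA_ROOT = "new_dataset_stt_audio"  # hypothetical local path
LANGUAGE = "uz"
SPLIT = "train"

transcript_path = os.path.join(DATA_ROOT, "transcript", LANGUAGE, SPLIT, f"{SPLIT}.tsv")
audio_tar_path = os.path.join(DATA_ROOT, "audio", LANGUAGE, SPLIT, f"{SPLIT}.tar")

# Map audio file names to their transcript rows, as _generate_examples did.
metadata = {}
with open(transcript_path, encoding="utf-8") as f:
    for row in csv.DictReader(f, delimiter="\t"):
        name = row["path"].strip()
        if not name.endswith(".mp3"):
            name += ".mp3"
        metadata[name] = row

# Walk the tar archive and pair each audio member with its transcript row.
with tarfile.open(audio_tar_path, "r") as tar:
    for member in tar.getmembers():
        name = os.path.basename(member.name)
        row = metadata.get(name)
        if row is None or not member.isfile():
            continue
        audio_bytes = tar.extractfile(member).read()
        print(row["id"], row["sentence"], len(audio_bytes), "bytes")

With the script still in place, the same layout would presumably have been supplied via datasets.load_dataset("Elyordev/new_dataset_stt_audio", data_dir=...), since _split_generators builds its paths from dl_manager.manual_dir.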