import csv
import os
import tarfile
import datasets
from tqdm import tqdm
_DESCRIPTION = """\
This dataset is designed for speech-to-text (STT) tasks. It contains audio files stored as tar archives along with their corresponding transcripts in TSV format. The data is in the Uzbek language.
"""
_CITATION = """\
@misc{dataset_stt2025,
title={Dataset_STT},
author={Your Name},
year={2025}
}
"""
class DatasetSTT(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    def _info(self):
        features = datasets.Features({
            "id": datasets.Value("string"),
            "audio": datasets.Audio(sampling_rate=16000),  # Adjust the sampling_rate here if needed
            "sentence": datasets.Value("string"),
            "duration": datasets.Value("float"),
            "age": datasets.Value("string"),
            "gender": datasets.Value("string"),
            "accents": datasets.Value("string"),
            "locale": datasets.Value("string"),
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/Elyordev/Dataset_STT",
            citation=_CITATION,
        )
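
    # Note: because "audio" is declared as datasets.Audio, the raw bytes yielded
    # by _generate_examples are decoded on access into a dict with "path",
    # "array" and "sampling_rate" keys.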

    def _split_generators(self, dl_manager):
        """
        Define the files required for each split.
        We provide the following splits: TRAIN, TEST and VALIDATION.
        The audio archives and transcript TSV files are taken from the data_files argument.
        """
        data_files = self.config.data_files
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "audio_archive": data_files["train"]["audio"],
                    "transcript_file": data_files["train"]["transcript"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "audio_archive": data_files["test"]["audio"],
                    "transcript_file": data_files["test"]["transcript"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "audio_archive": data_files["validation"]["audio"],
                    "transcript_file": data_files["validation"]["transcript"],
                },
            ),
        ]

    def _generate_examples(self, audio_archive, transcript_file):
        """
        Read the transcript TSV file and, for each record:
        - Open the tar archive and index the audio files it contains.
        - Look up the matching audio file via the "path" column of the transcript.
        - Read the audio file as raw bytes and return it as the "audio" field.
        """
        # Open the tar archive
        with tarfile.open(audio_archive, "r:*") as tar:
            # Index every file inside the archive (file name -> TarInfo)
            tar_index = {os.path.basename(member.name): member for member in tar.getmembers() if member.isfile()}
            # Open the transcript TSV file (UTF-8 encoded)
            with open(transcript_file, "r", encoding="utf-8") as f:
                reader = csv.DictReader(f, delimiter="\t")
                for row in tqdm(reader, desc="Processing transcripts"):
                    file_name = row["path"]  # e.g. "2cd08f62-aa25-4f5e-bb73-40cfc19a215e.mp3"
                    if file_name not in tar_index:
                        print(f"Warning: {file_name} not found in {audio_archive}")
                        continue
                    audio_member = tar.extractfile(tar_index[file_name])
                    if audio_member is None:
                        print(f"Warning: Could not extract {file_name}")
                        continue
                    audio_bytes = audio_member.read()
                    yield row["id"], {
                        "id": row["id"],
                        "audio": {"path": file_name, "bytes": audio_bytes},
                        "sentence": row["sentence"],
                        "duration": float(row["duration"]) if row["duration"] else 0.0,
                        "age": row["age"],
                        "gender": row["gender"],
                        "accents": row["accents"],
                        "locale": row["locale"],
                    }
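
# A minimal local smoke test, not part of the builder itself. This is a sketch
# that assumes the installed `datasets` release forwards the nested data_files
# mapping to the builder unchanged (as _split_generators above expects) and
# supports trust_remote_code; the archive and TSV paths are illustrative
# placeholders, not files in this repository.
if __name__ == "__main__":
    ds = datasets.load_dataset(
        __file__,
        data_files={
            "train": {"audio": "audio/train.tar", "transcript": "transcript/train.tsv"},
            "test": {"audio": "audio/test.tar", "transcript": "transcript/test.tsv"},
            "validation": {"audio": "audio/validation.tar", "transcript": "transcript/validation.tsv"},
        },
        trust_remote_code=True,
    )
    print(ds)
    print(ds["train"][0]["sentence"])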