# File: african_accented_french/african_accented_french.py
# Uploaded by user "gigant" (commit 789df26), ~4.18 kB.
# This script for Hugging Face's datasets library was written by Théo Gigant
import csv
import json
import os
from pathlib import Path
import datasets
# BibTeX citation for the corpus (none provided upstream).
_CITATION = """\
"""
# Human-readable description shown on the dataset page.
_DESCRIPTION = """\
This corpus consists of approximately 22 hours of speech recordings. Transcripts are provided for all the recordings. The corpus can be divided into 3 parts:
1. Yaounde
Collected by a team from the U.S. Military Academy's Center for Technology Enhanced Language Learning (CTELL) in 2003 in Yaoundé, Cameroon. It has recordings from 84 speakers, 48 male and 36 female.
2. CA16
This part was collected by a RDECOM Science Team who participated in the United Nations exercise Central Accord 16 (CA16) in Libreville, Gabon in June 2016. The Science Team included DARPA's Dr. Boyan Onyshkevich and Dr. Aaron Lawson (SRI International), as well as RDECOM scientists. It has recordings from 125 speakers from Cameroon, Chad, Congo and Gabon.
3. Niger
This part was collected from 23 speakers in Niamey, Niger, Oct. 26-30 2015. These speakers were students in a course for officers and sergeants presented by Army trainers assigned to U.S. Army Africa. The data was collected by RDECOM Science & Technology Advisors Major Eddie Strimel and Mr. Bill Bergen.
"""
# OpenSLR resource page for this corpus (SLR57).
_HOMEPAGE = "http://www.openslr.org/57/"
# License string left empty upstream; see the OpenSLR page for terms.
_LICENSE = ""
# Archive download URL, keyed by builder-config name.
_URLS = {
    "fr": "https://www.openslr.org/resources/57/African_Accented_French.tar.gz",
}
class AfricanAccentedFrench(datasets.GeneratorBasedBuilder):
    """Builder for the OpenSLR-57 African Accented French speech corpus.

    Downloads and extracts the archive, then pairs each transcript line
    with the corresponding ``.wav`` file found under the extracted tree,
    yielding ``{"sentence", "audio"}`` examples at 16 kHz.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="fr", version=VERSION, description=""),
    ]

    DEFAULT_CONFIG_NAME = "fr"

    def _info(self):
        """Return dataset metadata: a transcript string plus 16 kHz audio."""
        features = datasets.Features(
            {
                "sentence": datasets.Value("string"),
                "audio": datasets.features.Audio(sampling_rate=16_000),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and declare the train/test splits."""
        urls = _URLS[self.config.name]
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"datapath": data_dir, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"datapath": data_dir, "split": "test"},
            ),
        ]

    def _generate_examples(self, datapath, split):
        """Yield ``(key, {"sentence", "audio"})`` examples for one split.

        Each transcript file maps an utterance path to its text. The path's
        basename (normalized to a ``.wav`` suffix) is matched against the
        wav files found anywhere under the extracted archive.

        Args:
            datapath: Directory where the archive was extracted.
            split: ``"train"`` or ``"test"``; the archive's ``dev`` and
                ``devtest`` transcript folders are folded into ``test``.
        """
        root = os.path.join(datapath, "African_Accented_French")
        split_names = ["train"] if split == "train" else ["test", "devtest", "dev"]
        # Map wav basename -> transcript text.
        files = {}
        for split_name in split_names:
            transcripts_dir = os.path.join(root, "transcripts", split_name)
            for speaker in os.listdir(transcripts_dir):
                speaker_dir = os.path.join(transcripts_dir, speaker)
                for meta in os.listdir(speaker_dir):
                    # Transcripts are French text; be explicit about UTF-8.
                    with open(os.path.join(speaker_dir, meta), encoding="utf-8") as transcript:
                        for line in transcript:
                            parts = line.split(maxsplit=1)
                            # Skip blank/one-token lines (the original raised
                            # IndexError on them) and "answers" metadata rows.
                            if len(parts) < 2 or "answers" in parts[0]:
                                continue
                            filename = parts[0].split("/")[-1]
                            if filename.endswith((".tdf", ".wav")):
                                filename = f"{filename[:-4]}.wav"
                            else:
                                filename = "(unknown).wav"
                            # strip() drops the trailing newline the original
                            # left embedded in every sentence.
                            files[filename] = parts[1].strip()
        key = 0
        for f in Path(root).rglob("*.wav"):
            if f.name in files:
                yield key, {
                    "sentence": files[f.name],
                    "audio": f.absolute().as_posix(),
                }
                key += 1