tedlium-long-form / README.md
sanchit-gandhi's picture
Upload README.md with huggingface_hub
ea3a784
|
raw
history blame
No virus
2.36 kB
metadata
dataset_info:
  features:
    - name: audio
      dtype: audio
    - name: text
      dtype: string
    - name: speaker_id
      dtype: string
  splits:
    - name: validation
      num_bytes: 180166870
      num_examples: 8
    - name: test
      num_bytes: 285107770
      num_examples: 11
  download_size: 284926490
  dataset_size: 465274640

Dataset Card for "tedlium-long-form"

To create the dataset:

import os
import numpy as np
from datasets import load_dataset, DatasetDict, Dataset, Audio
import soundfile as sf
from tqdm import tqdm

# Container that will hold the merged, one-row-per-talk validation/test splits.
merged_dataset = DatasetDict()

# Full TED-LIUM release 3 corpus; talks are assembled per speaker below.
tedlium = load_dataset("LIUM/tedlium", "release3")

# Speakers whose talks make up the long-form validation split.
validation_speaker_ids = [
    "Al_Gore",
    "Barry_Schwartz",
    "Blaise_Agueray_Arcas",
    "Brian_Cox",
    "Craig_Venter",
    "David_Merrill",
    "Elizabeth_Gilbert",
    "Wade_Davis",
]

# One accumulator per speaker: raw audio samples plus the running transcript.
validation_dataset_merged = {}
for _speaker in validation_speaker_ids:
    validation_dataset_merged[_speaker] = {"audio": [], "text": ""}

# Speakers whose talks make up the long-form test split.
test_speaker_ids = [
    "AimeeMullins",
    "BillGates",
    "DanBarber",
    "DanBarber_2010_S103",
    "DanielKahneman",
    "EricMead_2009P_EricMead",
    "GaryFlake",
    "JamesCameron",
    "JaneMcGonigal",
    "MichaelSpecter",
    "RobertGupta",
]

# One accumulator per speaker: raw audio samples plus the running transcript.
test_dataset_merged = {}
for _speaker in test_speaker_ids:
    test_dataset_merged[_speaker] = {"audio": [], "text": ""}

# Merge every speaker's utterances into one long-form audio file + transcript
# per talk, then build the per-split HF datasets.
for split, dataset in zip(["validation", "test"], [validation_dataset_merged, test_dataset_merged]):
    sampling_rate = tedlium[split].features["audio"].sampling_rate

    for sample in tqdm(tedlium[split]):
        speaker_id = sample["speaker_id"]
        if speaker_id in dataset:
            # Keep each utterance as a whole array and concatenate once at the
            # end — extending a Python list sample-by-sample boxes every audio
            # sample into a Python float (orders of magnitude more memory/time).
            dataset[speaker_id]["audio"].append(np.asarray(sample["audio"]["array"]))
            dataset[speaker_id]["text"] += " " + sample["text"]

    audio_paths = []
    os.makedirs(split, exist_ok=True)
    for speaker in dataset:
        path = os.path.join(split, f"{speaker}-merged.wav")
        audio_paths.append(path)
        chunks = dataset[speaker]["audio"]
        # np.concatenate raises on an empty list; write an empty file instead
        # if a speaker matched no utterances.
        merged_audio = np.concatenate(chunks) if chunks else np.zeros(0)
        sf.write(path, merged_audio, samplerate=sampling_rate)

    merged_dataset[split] = Dataset.from_dict({"audio": audio_paths}).cast_column("audio", Audio())
    # remove spaced apostrophes (e.g. it 's -> it's) and the stray leading
    # space left by the `" " + text` accumulation above
    merged_dataset[split] = merged_dataset[split].add_column(
        "text", [dataset[speaker]["text"].strip().replace(" '", "'") for speaker in dataset]
    )
    # add_column expects a list-like column; a dict_keys view is not guaranteed
    # to be accepted, so materialize it
    merged_dataset[split] = merged_dataset[split].add_column("speaker_id", list(dataset.keys()))