Datasets:
LT3
/

Modalities:
Text
Formats:
parquet
Libraries:
Datasets
pandas
License:
TomMoeras's picture
Updated test split with DGT filtered data
0267a7d verified
metadata
language:
  - en
  - uk
license: apache-2.0
dataset_info:
  features:
    - name: english
      dtype: string
    - name: ukrainian
      dtype: string
    - name: id
      dtype: int64
  splits:
    - name: train
      num_bytes: 130905422
      num_examples: 286417
    - name: validation
      num_bytes: 899099
      num_examples: 2000
    - name: test
      num_bytes: 902362
      num_examples: 1898
    - name: mono
      num_bytes: 400416310
      num_examples: 1461320
  download_size: 278301105
  dataset_size: 533123193
configs:
  - config_name: default
    data_files:
      - split: train
        path: data/train-*
      - split: validation
        path: data/validation-*
      - split: test
        path: data/test-*
      - split: mono
        path: data/mono-*

Dataset Downloader

This script downloads a dataset from the Hugging Face Hub and saves each split as plain-text files, in the same format used for the experiments. Example usage:

python download_data.py --repo_name LT3/nfr_bt_nmt_english-ukrainian --base_path data/en-uk

import argparse
from datasets import load_dataset
import os


def save_data(data, file_path):
    """Write each item of ``data`` on its own line to ``file_path`` (UTF-8).

    A single trailing newline is appended after the last item.
    """
    contents = "\n".join(data)
    with open(file_path, "w", encoding="utf-8") as handle:
        handle.write(contents + "\n")


def download_and_save_dataset(repo_name, base_path):
    """Download ``repo_name`` from the Hugging Face Hub and dump its text
    columns to plain ``.txt`` files under ``base_path``.

    Bilingual splits produce one file per language column
    (``{base_path}/{lang}_{split}.txt``); splits whose name contains
    ``mono_<language>`` produce ``{base_path}/{lang}_mono.txt`` instead.
    Prints a dict mapping file keys to the paths that were written.
    """
    # Language code for each recognized text column.
    lang_by_feature = {"english": "en", "french": "fr", "ukrainian": "uk"}

    dataset = load_dataset(repo_name)

    # Make sure the output directory exists before writing anything.
    os.makedirs(base_path, exist_ok=True)

    # Collects {key: written file path} for the final summary print.
    dataset_paths = {}

    for split_name, split_data in dataset.items():
        is_mono = any(
            f"mono_{feature}" in split_name for feature in lang_by_feature
        )
        if is_mono:
            # Monolingual split: the language is encoded in the split name,
            # and the output file is named "<lang>_mono.txt" regardless of
            # the split's exact name. English is checked first, then
            # Ukrainian, with French as the fallback.
            if "english" in split_name:
                feature, code = "english", "en"
            elif "ukrainian" in split_name:
                feature, code = "ukrainian", "uk"
            else:
                feature, code = "french", "fr"
            if feature in split_data.column_names:
                out_path = f"{base_path}/{code}_mono.txt"
                save_data(split_data[feature], out_path)
                dataset_paths[f"{code}_mono"] = out_path
        else:
            # Parallel split: write one file per language column present.
            for feature in ("english", "french", "ukrainian"):
                if feature in split_data.column_names:
                    code = lang_by_feature[feature]
                    out_path = f"{base_path}/{code}_{split_name}.txt"
                    save_data(split_data[feature], out_path)
                    dataset_paths[f"{code}_{split_name}"] = out_path

    print(dataset_paths)


def main():
    """CLI entry point: parse the required flags and run the download."""
    parser = argparse.ArgumentParser(
        description="Download and save datasets from Hugging Face."
    )
    # Both flags are mandatory; argparse exits with an error if either
    # is missing.
    required_flags = (
        (
            "--repo_name",
            "Repository name on Hugging Face (e.g., 'MT-LT3/nfr_bt_nmt_english-french')",
        ),
        (
            "--base_path",
            "Base path where the dataset files will be saved (e.g., '/path/to/data/en-fr')",
        ),
    )
    for flag, help_text in required_flags:
        parser.add_argument(flag, required=True, help=help_text)

    args = parser.parse_args()
    download_and_save_dataset(args.repo_name, args.base_path)


# Run the CLI only when this file is executed as a script, not when imported.
if __name__ == "__main__":
    main()