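# Build a JSONL text dataset by filtering and sampling one or more source corpora.
#
# Modes (selected via string-valued "True"/"False" flags):
#   * --wikipedia True: concatenate every gzipped JSONL file under --dataset_path.
#   * --posterior_tokens False (default): keep documents whose perplexity lies between
#     --min_perplexity and --threshold until roughly --n_tokens words are collected.
#     Sources can be a HF dataset saved on disk, a gzipped JSONL corpus,
#     Unbabel/TowerBlocks-v0.1, or (with --is_parallel True) gzipped parallel text
#     formatted as "<SourceLanguage>: <src>\n<TargetLanguage>: <tgt>".
#   * --posterior_tokens True: skip documents until --n_tokens words have been seen,
#     then keep the next --n_posterior_tokens words.
#
# The selected documents are written to --output as JSON lines.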
import argparse
import gzip
import json
from pathlib import Path
from typing import Iterator, TextIO, Tuple

import datasets
import pandas as pd

# Metadata columns stripped from HF datasets before the filtered split is written out.
TO_REMOVE = [
    "meta",
    "perplexity_score",
    "text_length",
    "url",
    "domain",
    "dup_ratio",
    "pairs",
    "repetitions",
    "included_in_dedup",
    "cluster",
    "id",
]

# Language code -> language name, used to label the two sides of a parallel example.
L_TO_NAME = {
    "en": "English",
    "de": "German",
    "fr": "French",
    "es": "Spanish",
    "it": "Italian",
    "ru": "Russian",
    "zh": "Chinese",
    "ko": "Korean",
    "pt": "Portuguese",
    "nl": "Dutch",
    "pl": "Polish",
    "sv": "Swedish",
}
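
# The readers below stream gzipped corpora lazily: JSONL files yield one parsed JSON
# object per line, plain-text files yield one stripped line at a time, and the
# underlying file handle is closed once the generator is exhausted.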

def gen(items):
    # Trivial generator wrapper: yield each element of an iterable.
    for x in items:
        yield x


def _close_when_exhausted(file: TextIO) -> Iterator[dict]:
    # Yield one parsed JSON object per line, closing the file when the generator is exhausted.
    with file:
        for line in file:
            yield json.loads(line)


def _close_when_exhausted_txt(file: TextIO) -> Iterator[str]:
    # Yield one line at a time without its trailing newline, closing the file when done.
    with file:
        for line in file:
            yield line[:-1]


def open_read_cleaned(filename) -> Iterator[dict]:
    # Stream a gzipped JSONL corpus.
    file: TextIO = gzip.open(filename, "rt")
    return _close_when_exhausted(file)


def open_gzip_txt(filename) -> Iterator[str]:
    # Stream a gzipped plain-text corpus.
    file: TextIO = gzip.open(filename, "rt")
    return _close_when_exhausted_txt(file)

def read_parallel_corpus(dir: str, lp: str) -> Tuple[Iterator[str], Iterator[str]]:
    # Parallel files are named "cometkiwi_data.<pair>.<lang>", with the pair stored in a
    # canonical order, so flip it when the source language is not English
    # (e.g. lp "de-en" maps to files "cometkiwi_data.en-de.de" / "cometkiwi_data.en-de.en").
    src_l, tgt_l = lp.split("-")
    if src_l != "en":
        lp_path = f"{tgt_l}-{src_l}"
    else:
        lp_path = lp
    src_path = Path(dir) / f"cometkiwi_data.{lp_path}.{src_l}"
    tgt_path = Path(dir) / f"cometkiwi_data.{lp_path}.{tgt_l}"
    src_corpus = open_gzip_txt(src_path)
    tgt_corpus = open_gzip_txt(tgt_path)
    return src_corpus, tgt_corpus

def unroll_chat(chat):
    # Concatenate the string-valued turns of a conversation into a single text blob;
    # non-string values are skipped.
    chat_str = ""
    for turn in chat:
        if isinstance(turn["value"], str):
            chat_str += turn["value"]
    return chat_str

parser = argparse.ArgumentParser()
parser.add_argument("--dataset_path", type=str, required=True)
parser.add_argument("--output", type=str, required=True)
# Boolean-like options are passed as the strings "True"/"False" and compared as strings below.
parser.add_argument("--is_hf_dataset", type=str, required=True, default="False")
parser.add_argument("--n_tokens", type=int, required=False, default=None)
parser.add_argument("--threshold", type=int, required=False, default=None)
parser.add_argument("--min_perplexity", type=int, required=False, default=None)
parser.add_argument("--wikipedia", type=str, required=False, default="False")
parser.add_argument("--posterior_tokens", type=str, required=False, default="False")
parser.add_argument("--n_posterior_tokens", type=int, required=False, default=None)
parser.add_argument("--is_parallel", type=str, required=False, default="False")
parser.add_argument("--lp", type=str, required=False)

args = parser.parse_args()
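
# Example invocation (script name, paths, and values are illustrative only):
#   python prepare_corpus.py \
#       --dataset_path /data/corpus.de.jsonl.gz \
#       --output /data/corpus.de.filtered.jsonl \
#       --is_hf_dataset False --is_parallel False --posterior_tokens False \
#       --lp de --n_tokens 1000000000 --threshold 50 --min_perplexity 5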

if args.posterior_tokens == "False":
    if args.wikipedia == "True":
        # Wikipedia mode: concatenate every gzipped JSONL file in the directory.
        print("on wikipedia")
        dataset_paths = list(Path(args.dataset_path).iterdir())
        dfs = []
        for dataset_path in dataset_paths:
            print("on path", dataset_path)
            corpus = open_read_cleaned(dataset_path)

            # Collect this file's documents only (reset per file so earlier files are
            # not duplicated in the concatenated frame).
            data = []
            for doc in corpus:
                data.append({"text": doc["text"]})

            print(dataset_path)

            sub_df = pd.DataFrame(data=data)
            dfs.append(sub_df)

        df = pd.concat(dfs, ignore_index=True)
        dataset = datasets.Dataset.from_pandas(df)
        dataset.to_json(args.output, lines=True)
    else:
        if args.is_hf_dataset == "True":
            if args.dataset_path == "Unbabel/TowerBlocks-v0.1":
                # TowerBlocks: flatten each conversation into a single text field.
                df = datasets.load_dataset(
                    "Unbabel/TowerBlocks-v0.1", split="train"
                ).to_pandas()
                dataset = pd.DataFrame()
                dataset["text"] = df["conversations"].apply(unroll_chat)
                dataset = datasets.Dataset.from_pandas(dataset)
            else:
                # HF dataset on disk: keep documents inside the perplexity window until
                # roughly n_tokens whitespace-separated words have been selected.
                dataset = datasets.load_from_disk(args.dataset_path)
                instances_to_select = []
                n_words = 0
                for idx in range(len(dataset)):
                    perplexity = dataset[idx]["perplexity_score"]
                    if args.min_perplexity < perplexity < args.threshold:
                        instances_to_select.append(idx)
                        n_words += len(dataset[idx]["text"].split(" "))
                        print(f"Selected {n_words} of {args.n_tokens} tokens.")
                        if n_words >= args.n_tokens:
                            break

                dataset = dataset.select(instances_to_select)

                for column in TO_REMOVE:
                    if column in dataset.column_names:
                        dataset = dataset.remove_columns(column)

                print("English")
                print("n words", n_words)
        elif args.is_parallel == "False":
            # Gzipped JSONL corpus: same perplexity window, counting characters for
            # Chinese (no whitespace segmentation) and whitespace tokens otherwise.
            data = []
            corpus = open_read_cleaned(args.dataset_path)

            n_words = 0
            for doc in corpus:
                perplexity = doc["perplexity"]
                if args.min_perplexity < perplexity < args.threshold:
                    if args.lp == "zh":
                        n_words += len(doc["text"])
                    else:
                        n_words += len(doc["text"].split(" "))
                    data.append({"text": doc["text"]})
                    if n_words >= args.n_tokens:
                        break

            print(args.dataset_path)
            print("n words", n_words)

            dataset = datasets.Dataset.from_pandas(pd.DataFrame(data=data))
        elif args.is_parallel == "True":
            # Parallel mode: format each sentence pair as
            # "<SourceLanguage>: <src>\n<TargetLanguage>: <tgt>".
            data = []
            src_data, tgt_data = read_parallel_corpus(
                dir=args.dataset_path, lp=args.lp
            )
            n_sents = 0
            for src, tgt in zip(src_data, tgt_data):
                if n_sents >= args.n_tokens:
                    break
                data.append(
                    {
                        "text": f"{L_TO_NAME[args.lp.split('-')[0]]}: {src}\n{L_TO_NAME[args.lp.split('-')[-1]]}: {tgt}"
                    }
                )
                n_sents += 1
                if n_sents % 1000 == 0:
                    print(f"Selected {n_sents} of {args.n_tokens} sentences.")
            data_len = len(data)

            # Keep a different half of the pairs per direction (first half for xx-en,
            # second half for en-xx), presumably so the two directions drawn from the
            # same corpus do not share sentence pairs.
            if "-en" in args.lp:
                data = data[: int(data_len / 2)]
            else:
                data = data[int(data_len / 2) :]
            dataset = datasets.Dataset.from_pandas(pd.DataFrame(data=data))

        dataset.to_json(args.output, lines=True)
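
# Posterior-token mode: rather than taking documents from the start of the corpus,
# skip documents until --n_tokens words have been seen, then keep documents until a
# further --n_posterior_tokens words have been collected.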
else:
    if args.is_hf_dataset == "True":
        dataset = datasets.load_from_disk(args.dataset_path)
        instances_to_select = []
        n_words = 0
        surpassed = False
        for idx in range(len(dataset)):
            perplexity = dataset[idx]["perplexity_score"]
            if args.min_perplexity < perplexity < args.threshold:
                n_words += len(dataset[idx]["text"].split(" "))
                if n_words >= args.n_tokens:
                    if surpassed:
                        instances_to_select.append(idx)
                        n_posterior_words += len(dataset[idx]["text"].split(" "))
                        if n_posterior_words >= args.n_posterior_tokens:
                            break
                    else:
                        # First document past the n_tokens mark: start counting
                        # posterior words from the next document onwards.
                        n_posterior_words = 0
                        surpassed = True

        dataset = dataset.select(instances_to_select)

        for column in TO_REMOVE:
            if column in dataset.column_names:
                dataset = dataset.remove_columns(column)

        print("English")
        print("n words", n_words)
    else:
        data = []
        corpus = open_read_cleaned(args.dataset_path)

        n_words = 0
        surpassed = False
        for doc in corpus:
            perplexity = doc["perplexity"]
            if args.min_perplexity < perplexity < args.threshold:
                n_words += len(doc["text"].split(" "))

                if n_words >= args.n_tokens:
                    if surpassed:
                        data.append({"text": doc["text"]})
                        n_posterior_words += len(doc["text"].split(" "))
                        if n_posterior_words >= args.n_posterior_tokens:
                            break
                    else:
                        # First document past the n_tokens mark: start counting
                        # posterior words from the next document onwards.
                        n_posterior_words = 0
                        surpassed = True

        print(args.dataset_path)
        print("n words", n_words)

        dataset = datasets.Dataset.from_pandas(pd.DataFrame(data=data))

    dataset.to_json(args.output, lines=True)