Datasets:
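The train and validation splits are deduplicated with the short script below (adapted from the CodeParrot preprocessing script): each example's text field is hashed, the set of unique hashes is collected, and only the first example carrying each hash is kept before the splits are written back to CSV.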
# adapted from: https://github.com/huggingface/transformers/blob/master/examples/research_projects/codeparrot/scripts/preprocessing.py
import datasets


def get_hash(example):
    """Hash the text field so duplicates can be detected."""
    return {"hash": hash(example["text"])}


def check_uniques(example, uniques):
    """Return True if the example's hash is still in the set of unique hashes, removing it so later duplicates return False."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def filter_unique(example, uniques):
    """Keep only the first occurrence of each hash."""
    return check_uniques(example, uniques)

dataset = datasets.load_dataset("csv", data_files={"train": "train.csv", "validation": "valid.csv"})
# TRAIN SPLIT DEDUPLICATION
len_train = len(dataset["train"])
print(f"Size of original dataset train: {len_train}")
dataset["train"] = dataset["train"].map(get_hash, num_proc=64, writer_batch_size=100000)
# Deduplicate hashes
uniques = set(dataset["train"].unique("hash"))
frac = len(uniques) / len(dataset["train"])
print(f"Fraction of duplicates: {1-frac:.2%}")
# Deduplicate data
dataset_train_deduplicated = dataset["train"].filter(filter_unique, fn_kwargs={"uniques": uniques})
print(f"Size of filtered dataset train: {len(dataset_train_deduplicated)}")
# VALIDATION SPLIT DEDUPLICATION
len_val = len(dataset["validation"])
print(f"Size of original dataset valid: {len_val}")
dataset["validation"] = dataset["validation"].map(get_hash, num_proc=64, writer_batch_size=100000)
# Deduplicate hashes
uniques = set(dataset["validation"].unique("hash"))
frac = len(uniques) / len(dataset["validation"])
print(f"Fraction of duplicates: {1-frac:.2%}")
# Deduplicate data
dataset_valid_deduplicated = dataset["validation"].filter(filter_unique, fn_kwargs={"uniques": uniques})
print(f"Size of filtered dataset valid: {len(dataset_valid_deduplicated)}")
# SAVE DEDUPLICATED DATASET
dataset_train_deduplicated = dataset_train_deduplicated.remove_columns(["hash"])
dataset_valid_deduplicated = dataset_valid_deduplicated.remove_columns(["hash"])
dataset_train_deduplicated.to_csv("train.csv", num_proc=64, index=False)
dataset_valid_deduplicated.to_csv("valid.csv", num_proc=64, index=False)
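
As a quick sanity check, the same hash-and-filter logic can be exercised on a tiny in-memory dataset. The sketch below is illustrative only: it reuses get_hash and filter_unique from the script above, and the toy examples are made up.

# Minimal sanity check: a three-row split containing one duplicate text value.
# Assumes get_hash and filter_unique (defined above) are in scope.
toy = datasets.Dataset.from_dict({"text": ["a", "b", "a"]})
toy = toy.map(get_hash)
toy_uniques = set(toy.unique("hash"))
print(f"Fraction of duplicates: {1 - len(toy_uniques) / len(toy):.2%}")  # 33.33%
toy_deduplicated = toy.filter(filter_unique, fn_kwargs={"uniques": toy_uniques})
assert toy_deduplicated.num_rows == 2  # the second "a" was dropped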