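# Re-split the tweet_nerd subset of SuperTweetEval chronologically: pool the
# original train/validation/test splits, cut them at the date where the
# cumulative count passes half the data, and export the new splits (plus
# mixed-period variants) as JSON Lines files.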
import json
import os
from random import shuffle, seed

import pandas as pd
from datasets import load_dataset
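
# Download the three original splits and flatten each into a list of row dicts.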
test = load_dataset("cardiffnlp/super_tweeteval", "tweet_nerd", split="test").shuffle(seed=42)
test = list(test.to_pandas().T.to_dict().values())
train = load_dataset("cardiffnlp/super_tweeteval", "tweet_nerd", split="train").shuffle(seed=42)
train = list(train.to_pandas().T.to_dict().values())
validation = load_dataset("cardiffnlp/super_tweeteval", "tweet_nerd", split="validation").shuffle(seed=42)
validation = list(validation.to_pandas().T.to_dict().values())
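
# Pool every split into one DataFrame and sort it chronologically.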
full = train + test + validation
df = pd.DataFrame(full)
df["date_dt"] = pd.to_datetime(df.date)
df = df.sort_values(by="date_dt")
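
# Walk through the per-date counts until the cumulative total passes half of
# the data; the next date becomes the train/test boundary.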
dist_date = df.groupby("date_dt").size()
total_n = len(df)
n = 0
while True:
    n += 1
    if dist_date.iloc[:n].sum() > total_n / 2:
        break
split_date = dist_date.index[n]
print(split_date)
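
# Temporal split: everything up to the boundary date is train, the rest test.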
train = df[df["date_dt"] <= split_date].copy()
test = df[df["date_dt"] > split_date].copy()
print(train.date_dt.min(), train.date_dt.max())
print(test.date_dt.min(), test.date_dt.max())
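
# Drop the helper datetime column and convert back to lists of row dicts.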
train.pop("date_dt")
test.pop("date_dt")
train = list(train.T.to_dict().values())
test = list(test.T.to_dict().values())
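
# Shuffle deterministically and carve 20% of the training data out as a
# validation set.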
seed(42)
shuffle(train)
shuffle(test)
valid = train[:int(len(train)*0.2)]
train = train[len(valid):]
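
# Split the shuffled test set into four equally sized chunks.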
n_test = int(len(test)/4)
n_train = len(train)
n_validation = len(valid)
test_1 = test[:n_test]
test_2 = test[n_test:n_test*2]
test_3 = test[n_test*2:n_test*3]
test_4 = test[n_test*3:]
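
# Write the baseline temporal splits to disk as JSON Lines.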
os.makedirs("data/tweet_nerd", exist_ok=True)
with open("data/tweet_nerd/test.jsonl", "w") as f:
f.write("\n".join([json.dumps(i) for i in test]))
with open("data/tweet_nerd/test_1.jsonl", "w") as f:
f.write("\n".join([json.dumps(i) for i in test_1]))
with open("data/tweet_nerd/test_2.jsonl", "w") as f:
f.write("\n".join([json.dumps(i) for i in test_2]))
with open("data/tweet_nerd/test_3.jsonl", "w") as f:
f.write("\n".join([json.dumps(i) for i in test_3]))
with open("data/tweet_nerd/test_4.jsonl", "w") as f:
f.write("\n".join([json.dumps(i) for i in test_4]))
with open("data/tweet_nerd/train.jsonl", "w") as f:
f.write("\n".join([json.dumps(i) for i in train]))
with open("data/tweet_nerd/validation.jsonl", "w") as f:
f.write("\n".join([json.dumps(i) for i in valid]))


def sampler(dataset_test, r_seed):
    # Mix periods: take half of the new train/validation budget from the
    # pooled test chunks and fill the rest from the temporal train/valid
    # sets. Note: this reads and reshuffles the module-level lists.
    seed(r_seed)
    shuffle(dataset_test)
    shuffle(train)
    shuffle(valid)
    test_tr = dataset_test[:int(n_train / 2)]
    test_vl = dataset_test[int(n_train / 2):int(n_train / 2) + int(n_validation / 2)]
    new_train = test_tr + train[:n_train - len(test_tr)]
    new_validation = test_vl + valid[:n_validation - len(test_vl)]
    return new_train, new_validation
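
# Build four leave-one-quarter-out configurations: for each held-out test
# chunk, the remaining three chunks feed the mixed train/validation sets,
# repeated with three random seeds.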
id2test = {n: t for n, t in enumerate([test_1, test_2, test_3, test_4])}
for n, _test in enumerate([
        test_4 + test_2 + test_3,
        test_1 + test_4 + test_3,
        test_1 + test_2 + test_4,
        test_1 + test_2 + test_3]):
    for s in range(3):
        os.makedirs(f"data/tweet_nerd_new_test{n}_seed{s}", exist_ok=True)
        _train, _valid = sampler(_test, s)
        with open(f"data/tweet_nerd_new_test{n}_seed{s}/train.jsonl", "w") as f:
            f.write("\n".join([json.dumps(i) for i in _train]))
        with open(f"data/tweet_nerd_new_test{n}_seed{s}/validation.jsonl", "w") as f:
            f.write("\n".join([json.dumps(i) for i in _valid]))
        with open(f"data/tweet_nerd_new_test{n}_seed{s}/test.jsonl", "w") as f:
            f.write("\n".join([json.dumps(i) for i in id2test[n]]))