Tasks: Text Classification
Modalities: Text
Formats: parquet
Sub-tasks: natural-language-inference
Languages: Catalan
Size: 10K - 100K
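The card above describes a parquet-formatted Catalan natural-language-inference corpus with between 10K and 100K examples. A minimal loading sketch with the Hugging Face datasets library follows; the repo id is a placeholder, since the dataset name is not shown on this page.

# Sketch only: load the parquet splits of the dataset described by the card above.
# "namespace/dataset-name" is a placeholder; substitute the actual repo id.
from datasets import load_dataset

dataset = load_dataset("namespace/dataset-name")

print(dataset)              # available splits and their sizes
print(dataset["train"][0])  # one example: premise, hypothesis, label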
Delete splitter_with_ids.py

splitter_with_ids.py DELETED (+0 -42)
@@ -1,42 +0,0 @@
-import json
-import pandas as pd
-from sklearn.model_selection import train_test_split
-
-# both files downloaded from https://zenodo.org/record/4621378
-path_to_teca1 = 'dataset_te1.json'
-path_to_teca2 = 'dataset_te_vilaweb.json'
-
-teca1 = pd.read_json(path_to_teca1)  # Shape: (14997, 4)
-teca2 = pd.read_json(path_to_teca2)  # Shape: (6166, 4)
-
-teca1['id'] = 'te1_' + teca1['id'].astype(str)
-teca2['id'] = 'vila_' + teca2['id'].astype(str)
-
-teca = pd.concat([teca1, teca2])  # Shape: (21163, 4)
-#teca.drop(['id'], axis=1, inplace=True)  # now columns are: ['premise', 'hypothesis', 'label']
-teca = teca.sample(frac=1).reset_index(drop=True)  # shuffle rows
-
-print('### VALUE COUNTS TECA ###')
-print(teca['label'].value_counts())
-
-# stratified split with hardcoded percentages: 80% train, 10% dev, 10% test
-train, dev_test = train_test_split(teca, test_size=0.2, random_state=42, stratify=teca['label'])
-dev, test = train_test_split(dev_test, test_size=0.5, random_state=42, stratify=dev_test['label'])
-
-print('### VALUE COUNTS TRAIN ###')
-print(train['label'].value_counts())
-print('### VALUE COUNTS DEV ###')
-print(dev['label'].value_counts())
-print('### VALUE COUNTS TEST ###')
-print(test['label'].value_counts())
-print('train shape:', train.shape[0], ', dev shape:', dev.shape[0], ', test shape:', test.shape[0])
-
-print(train.head())
-
-sets = {'train': train, 'dev': dev, 'test': test, 'full': teca}
-
-for key in sets:
-    set_dict = sets[key].to_dict('records')
-    json_content = {"version": '1.0.1', "data": set_dict}
-    with open(key+'.json', 'w') as f:
-        json.dump(json_content, f)
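The deleted script wrote each split as a JSON object with a "version" field and a "data" list of records; the second train_test_split takes half of the held-out 20%, so the final proportions are 80% train, 10% dev, 10% test. A minimal sketch of reading one of those files back and checking that the stratification held, assuming train.json is in the working directory:

# Sketch only: reload a split written by splitter_with_ids.py and inspect it.
import json

import pandas as pd

with open('train.json') as f:
    content = json.load(f)  # {"version": "1.0.1", "data": [...]}

train = pd.DataFrame(content['data'])
print(content['version'])
print(train['label'].value_counts(normalize=True))  # label proportions in this split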