Tasks: Text Classification
Sub-tasks: natural-language-inference
Modalities: Text
Formats: parquet
Languages: Catalan
Size: 10K - 100K
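For reference, a minimal sketch of loading the published dataset with the `datasets` library. The repository id `projecte-aina/teca` is an assumption (it is not stated on this card), so substitute the actual id if it differs.

# Minimal sketch, assuming the dataset is hosted on the Hugging Face Hub
# under a repository id such as "projecte-aina/teca" (hypothetical here).
from datasets import load_dataset

dataset = load_dataset("projecte-aina/teca")  # hypothetical repo id
print(dataset)              # DatasetDict with the available splits
print(dataset["train"][0])  # one example: premise, hypothesis, label

The script below is the one used to build the train/dev/test splits from the original Zenodo files.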
import json
import pandas as pd
from sklearn.model_selection import train_test_split

# both files downloaded from https://zenodo.org/record/4621378
path_to_teca1 = 'dataset_te1.json'
path_to_teca2 = 'dataset_te_vilaweb.json'

# load data into pandas dataframes
teca1 = pd.read_json(path_to_teca1)  # Shape: (14997, 4)
teca2 = pd.read_json(path_to_teca2)  # Shape: (6166, 4)
teca = pd.concat([teca1, teca2])     # Shape: (21163, 4)

# remove "id" column; remaining columns are: ['premise', 'hypothesis', 'label']
teca.drop(['id'], axis=1, inplace=True)

# shuffle rows
teca = teca.sample(frac=1).reset_index(drop=True)

# stratified split with hardcoded percentages: 80% train, 10% dev, 10% test
train, dev_test = train_test_split(teca, test_size=0.2, random_state=42, stratify=teca['label'])
dev, test = train_test_split(dev_test, test_size=0.5, random_state=42, stratify=dev_test['label'])

# report some stats
print('### VALUE COUNTS TECA ###')
print(teca['label'].value_counts())
print('### VALUE COUNTS TRAIN ###')
print(train['label'].value_counts())
print('### VALUE COUNTS DEV ###')
print(dev['label'].value_counts())
print('### VALUE COUNTS TEST ###')
print(test['label'].value_counts())
print('train shape:', train.shape[0], ', dev shape:', dev.shape[0], ', test shape:', test.shape[0])

# save train/dev/test sets as json files
sets = {'train': train, 'dev': dev, 'test': test}
for key in sets:
    set_dict = sets[key].to_dict('records')
    json_content = {"version": '1.0.1', "data": set_dict}
    with open(key + '.json', 'w') as f:
        json.dump(json_content, f)
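As a sanity check, here is a minimal sketch of reading one of the generated split files back into a dataframe. It relies only on the {"version", "data"} layout written by the script above.

# Minimal sketch: read a generated split back and rebuild a DataFrame.
import json
import pandas as pd

with open('train.json') as f:
    content = json.load(f)

print(content['version'])                  # '1.0.1'
train_df = pd.DataFrame(content['data'])   # columns: premise, hypothesis, label
print(train_df['label'].value_counts())    # should match the stats printed above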