"""Convert the Turkish Constitutional Court decisions to Hugging Face JSONL splits.

Dataset url: https://github.com/koc-lab/law-turk/tree/main/data/constitutional/deep
Paper url: https://www.sciencedirect.com/science/article/abs/pii/S0306457321001692
"""
import math
import pickle
import random

import numpy as np
import pandas as pd

def prepare_data():
    """Load the pickled corpus, keep the two outcome labels, and split 70/15/15."""
    label_dict = {" İhlal": 0, " İhlal Olmadığı": 1}

    ### Load data
    with open("old_dataset/constitutional_tokenized.law", "rb") as pickle_file:
        tokenized = pickle.load(pickle_file)
    with open("old_dataset/constitutional_labels.law", "rb") as pickle_file:
        labels = pickle.load(pickle_file)

    # Lowercase every token and rebuild each decision as a single
    # space-separated string (each document keeps a trailing space).
    tokenized_lower = [" ".join(word.lower() for word in text) + " " for text in tokenized]

    ### Split the data
    train_ratio = 0.70
    val_ratio = 0.15

    # Keep only the decisions whose label is one of the two outcomes of interest.
    list_indices = [i for i, lbl in enumerate(labels) if lbl in label_dict]

    # Shuffle with a fixed seed so the splits are reproducible.
    random.Random(13).shuffle(list_indices)

    new_length = len(list_indices)
    train_idx = math.floor(new_length * train_ratio)
    val_idx = math.floor(new_length * (train_ratio + val_ratio))
    train_indices = list_indices[:train_idx]
    val_indices = list_indices[train_idx:val_idx]
    test_indices = list_indices[val_idx:]

    # Gather the texts for each split.
    train_list = [tokenized_lower[ind] for ind in train_indices]
    val_list = [tokenized_lower[ind] for ind in val_indices]
    test_list = [tokenized_lower[ind] for ind in test_indices]

    # Map the Turkish outcome labels to English.
    def to_english(ind):
        return "Violation" if labels[ind] == " İhlal" else "No violation"

    train_labels = [to_english(ind) for ind in train_indices]
    val_labels = [to_english(ind) for ind in val_indices]
    test_labels = [to_english(ind) for ind in test_indices]

    # Stack texts and labels into two-column arrays and wrap them as DataFrames.
    train_split = np.stack((train_list, train_labels), axis=1)
    val_split = np.stack((val_list, val_labels), axis=1)
    test_split = np.stack((test_list, test_labels), axis=1)

    columns = ["Text", "Label"]
    return (
        pd.DataFrame(train_split, columns=columns),
        pd.DataFrame(val_split, columns=columns),
        pd.DataFrame(test_split, columns=columns),
    )


if __name__ == "__main__":
    train_split, val_split, test_split = prepare_data()
    train_split.to_json("train.jsonl", lines=True, orient="records", force_ascii=False)
    val_split.to_json("validation.jsonl", lines=True, orient="records", force_ascii=False)
    test_split.to_json("test.jsonl", lines=True, orient="records", force_ascii=False)
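
# A minimal sketch of loading the emitted splits back, assuming the Hugging Face
# `datasets` library is installed (it is not used by this script itself):
#
#   from datasets import load_dataset
#
#   ds = load_dataset(
#       "json",
#       data_files={"train": "train.jsonl",
#                   "validation": "validation.jsonl",
#                   "test": "test.jsonl"},
#   )
#   print(ds["train"][0])  # e.g. {"Text": "...", "Label": "Violation"}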