import os
import numpy as np
import pandas as pd
import pickle
import math
import random

"""
Dataset url: https://github.com/koc-lab/law-turk/tree/main/data/constitutional/deep
Paper url: https://www.sciencedirect.com/science/article/abs/pii/S0306457321001692
"""


def prepare_data():
    label_dict = {' İhlal': 0, ' İhlal Olmadığı': 1}

    ### Load data
    with open('old_dataset/constitutional_tokenized.law', 'rb') as pickle_file:
        tokenized = pickle.load(pickle_file)
    with open('old_dataset/constitutional_labels.law', 'rb') as pickle_file:
        labels = pickle.load(pickle_file)

    # Lowercase and join the tokenized documents back into plain-text strings
    tokenized_lower = []
    for text in tokenized:
        temp_text = ""
        for word in text:
            temp_text += word.lower() + " "
        tokenized_lower.append(temp_text)

    ### Split the data
    train_ratio = 0.70
    val_ratio = 0.15

    # Keep only documents whose label is one of the two known classes
    list_indices = [i for i, lbl in enumerate(labels) if lbl in label_dict]
    random.Random(13).shuffle(list_indices)

    new_length = len(list_indices)
    train_idx = math.floor(new_length * train_ratio)
    val_idx = math.floor(new_length * (train_ratio + val_ratio))

    train_indices = list_indices[:train_idx]
    val_indices = list_indices[train_idx:val_idx]
    test_indices = list_indices[val_idx:]

    train_list = [tokenized_lower[ind] for ind in train_indices]
    val_list = [tokenized_lower[ind] for ind in val_indices]
    test_list = [tokenized_lower[ind] for ind in test_indices]

    # Map the raw Turkish labels to English label names
    # (' İhlal' -> "Violation", ' İhlal Olmadığı' -> "No violation")
    def to_label_name(raw_label):
        return "Violation" if raw_label == ' İhlal' else "No violation"

    train_labels = [to_label_name(labels[ind]) for ind in train_indices]
    val_labels = [to_label_name(labels[ind]) for ind in val_indices]
    test_labels = [to_label_name(labels[ind]) for ind in test_indices]

    train_split = np.concatenate((np.expand_dims(train_list, axis=1), np.expand_dims(train_labels, axis=1)), axis=1)
    val_split = np.concatenate((np.expand_dims(val_list, axis=1), np.expand_dims(val_labels, axis=1)), axis=1)
    test_split = np.concatenate((np.expand_dims(test_list, axis=1), np.expand_dims(test_labels, axis=1)), axis=1)

    return (pd.DataFrame(train_split, columns=["Text", "Label"]),
            pd.DataFrame(val_split, columns=["Text", "Label"]),
            pd.DataFrame(test_split, columns=["Text", "Label"]))


train_split, val_split, test_split = prepare_data()

train_split.to_json(os.path.join("train.jsonl"), lines=True, orient="records", force_ascii=False)
val_split.to_json(os.path.join("validation.jsonl"), lines=True, orient="records", force_ascii=False)
test_split.to_json(os.path.join("test.jsonl"), lines=True, orient="records", force_ascii=False)
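# Optional sanity check (assumes the JSONL files above were just written to the
# current working directory): reload each split and print its size and label counts.
for split_name in ("train", "validation", "test"):
    reloaded = pd.read_json(f"{split_name}.jsonl", lines=True)
    print(split_name, len(reloaded), reloaded["Label"].value_counts().to_dict())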
'''
pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_columns', None)


def perform_original_preprocessing():
    # Original preprocessing from: https://github.com/lagefreitas/predicting-brazilian-court-decisions/blob/main/predicting-brazilian-court-decisions.py#L81
    # Loading the labeled decisions
    data = pd.read_csv("dataset.csv", sep='<=>', header=0)
    print('data.shape=' + str(data.shape) + ' full data set')
    # Removing NA values
    data = data.dropna(subset=[data.columns[9]])  # decision_description
    data = data.dropna(subset=[data.columns[11]])  # decision_label
    print('data.shape=' + str(data.shape) + ' dropna')
    # Removing duplicated samples
    data = data.drop_duplicates(subset=[data.columns[1]])  # process_number
    print('data.shape=' + str(data.shape) + ' removed duplicated samples by process_number')
    data = data.drop_duplicates(subset=[data.columns[9]])  # decision_description
    print('data.shape=' + str(data.shape) + ' removed duplicated samples by decision_description')
    # Removing irrelevant decision labels and decisions that were not properly labeled
    data = data.query('decision_label != "conflito-competencia"')
    print('data.shape=' + str(data.shape) + ' removed decisions labeled as conflito-competencia')
    data = data.query('decision_label != "prejudicada"')
    print('data.shape=' + str(data.shape) + ' removed decisions labeled as prejudicada')
    data = data.query('decision_label != "not-cognized"')
    print('data.shape=' + str(data.shape) + ' removed decisions labeled as not-cognized')
    data_no = data.query('decision_label == "no"')
    print('data_no.shape=' + str(data_no.shape))
    data_yes = data.query('decision_label == "yes"')
    print('data_yes.shape=' + str(data_yes.shape))
    data_partial = data.query('decision_label == "partial"')
    print('data_partial.shape=' + str(data_partial.shape))
    # Merging decisions whose labels are yes, no, and partial to build the final data set
    data_merged = data_no.merge(data_yes, how='outer')
    data = data_merged.merge(data_partial, how='outer')
    print('data.shape=' + str(data.shape) + ' merged decisions whose labels are yes, no, and partial')
    # Removing rows whose decision_description or decision_label is -1 or -2
    indexNames = data[(data['decision_description'] == str(-1)) | (data['decision_description'] == str(-2)) |
                      (data['decision_label'] == str(-1)) | (data['decision_label'] == str(-2))].index
    data.drop(indexNames, inplace=True)
    print('data.shape=' + str(data.shape) + ' removed -1 and -2 decision descriptions and labels')
    data.to_csv("dataset_processed_original.csv", index=False)


def perform_additional_processing():
    df = pd.read_csv("dataset_processed_original.csv")
    # remove stray " characters sometimes occurring at the beginning and at the end of a line
    df.ementa_filepath = df.ementa_filepath.str.replace('^"', '')
    df.decision_unanimity = df.decision_unanimity.str.replace('"$', '')
    # remove process_type and judgment_date, since they are the same everywhere (-)
    # decisions only contains 'None', nan and '-2'
    # ementa_filepath refers to the name of the file in the filesystem that we created when we scraped the data from the Court; it is temporary data and can be removed
    # decision_description = ementa_text - decision_text - decision_unanimity_text
    df = df.drop(['process_type', 'judgment_date', 'decisions', 'ementa_filepath'], axis=1)
    # some rows are somehow not read correctly; with this, we can filter them out
    df = df[df.decision_text.str.len() > 1]
    # rename "-2" to a more descriptive name ==> -2 means that it could not be determined
    df.decision_unanimity = df.decision_unanimity.replace('-2', 'not_determined')
    # rename cols for more clarity
    df = df.rename(columns={"decision_unanimity": "unanimity_label"})
    df = df.rename(columns={"decision_unanimity_text": "unanimity_text"})
    df = df.rename(columns={"decision_text": "judgment_text"})
    df = df.rename(columns={"decision_label": "judgment_label"})
    df.to_csv("dataset_processed_additional.csv", index=False)
    return df


perform_original_preprocessing()
df = perform_additional_processing()

# perform random split: 80% train (3234), 10% validation (404), 10% test (405)
train, validation, test = np.split(df.sample(frac=1, random_state=42), [int(.8 * len(df)), int(.9 * len(df))])


def save_splits_to_jsonl(config_name):
    # save to jsonl files for huggingface
    if config_name:
        os.makedirs(config_name, exist_ok=True)
    train.to_json(os.path.join(config_name, "train.jsonl"), lines=True, orient="records", force_ascii=False)
    validation.to_json(os.path.join(config_name, "validation.jsonl"), lines=True, orient="records", force_ascii=False)
    test.to_json(os.path.join(config_name, "test.jsonl"), lines=True, orient="records", force_ascii=False)


def print_split_table_single_label(train, validation, test, label_name):
    train_counts = train[label_name].value_counts().to_frame().rename(columns={label_name: "train"})
    validation_counts = validation[label_name].value_counts().to_frame().rename(columns={label_name: "validation"})
    test_counts = test[label_name].value_counts().to_frame().rename(columns={label_name: "test"})

    table = train_counts.join(validation_counts)
    table = table.join(test_counts)
    table[label_name] = table.index
    total_row = {label_name: "total",
                 "train": len(train.index),
                 "validation": len(validation.index),
                 "test": len(test.index)}
    table = table.append(total_row, ignore_index=True)
    table = table[[label_name, "train", "validation", "test"]]  # reorder columns
    print(table.to_markdown(index=False))


save_splits_to_jsonl("")
print_split_table_single_label(train, validation, test, "judgment_label")
print_split_table_single_label(train, validation, test, "unanimity_label")

# create a second config by filtering out rows with unanimity_label == not_determined, while keeping the same splits
# train = train[train.unanimity_label != "not_determined"]
# validation = validation[validation.unanimity_label != "not_determined"]
# test = test[test.unanimity_label != "not_determined"]
# it is a very small dataset and very imbalanced (only very few non-unanimous labels)
# save_splits_to_jsonl("unanimity")
'''
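# Downstream usage sketch (assuming the HuggingFace `datasets` library is installed):
# the JSONL splits written above can be loaded directly as a DatasetDict.
# from datasets import load_dataset
# dataset = load_dataset(
#     "json",
#     data_files={"train": "train.jsonl", "validation": "validation.jsonl", "test": "test.jsonl"},
# )
# print(dataset)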