import math
import pickle
import random

import pandas as pd

"""
Dataset url: https://github.com/koc-lab/law-turk/tree/main/data/constitutional/deep
Paper url: https://www.sciencedirect.com/science/article/abs/pii/S0306457321001692
"""

def prepare_data():
    # The raw labels carry a leading space: " İhlal" means "Violation" and
    # " İhlal Olmadığı" means "No violation". Any other label is discarded.
    label_dict = {' İhlal': 0, ' İhlal Olmadığı': 1}

    ### Load the pickled token lists and labels
    with open('old_dataset/constitutional_tokenized.law', 'rb') as pickle_file:
        tokenized = pickle.load(pickle_file)
    with open('old_dataset/constitutional_labels.law', 'rb') as pickle_file:
        labels = pickle.load(pickle_file)

    # Lowercase each document and re-join its tokens into a single string.
    tokenized_lower = [" ".join(word.lower() for word in text) for text in tokenized]

    ### Split the data (70% train / 15% validation / 15% test)

    train_ratio = 0.70
    val_ratio = 0.15

    # Keep only the examples whose label is one of the two known classes.
    list_indices = [i for i, lbl in enumerate(labels) if lbl in label_dict]

    # Shuffle with a fixed seed so the splits are reproducible.
    random.Random(13).shuffle(list_indices)

    new_length = len(list_indices)

    train_idx = math.floor(new_length * train_ratio)
    val_idx = math.floor(new_length * (train_ratio + val_ratio))

    train_indices = list_indices[:train_idx]
    val_indices = list_indices[train_idx:val_idx]
    test_indices = list_indices[val_idx:]
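    # Example of the arithmetic above: with 1,000 kept decisions,
    # train_idx = floor(700.0) = 700 and val_idx = floor(850.0) = 850,
    # giving 700 train, 150 validation, and 150 test examples.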

    train_list = [tokenized_lower[ind] for ind in train_indices]
    val_list = [tokenized_lower[ind] for ind in val_indices]
    test_list = [tokenized_lower[ind] for ind in test_indices]

    # Map the Turkish labels onto English class names. The indices were
    # filtered above, so only the two known labels can appear here.
    label_names = {" İhlal": "Violation", " İhlal Olmadığı": "No violation"}

    train_labels = [label_names[labels[ind]] for ind in train_indices]
    val_labels = [label_names[labels[ind]] for ind in val_indices]
    test_labels = [label_names[labels[ind]] for ind in test_indices]
    
    return (
        pd.DataFrame({"Text": train_list, "Label": train_labels}),
        pd.DataFrame({"Text": val_list, "Label": val_labels}),
        pd.DataFrame({"Text": test_list, "Label": test_labels}),
    )

train_split, val_split, test_split = prepare_data()
train_split.to_json("train.jsonl", lines=True, orient="records", force_ascii=False)
val_split.to_json("validation.jsonl", lines=True, orient="records", force_ascii=False)
test_split.to_json("test.jsonl", lines=True, orient="records", force_ascii=False)
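
# Optional sanity check, not part of the original pipeline: reload one of the
# generated files and inspect its size and class balance.
reloaded = pd.read_json("train.jsonl", lines=True)
print(reloaded.shape)
print(reloaded["Label"].value_counts())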