Commit 9de7a3b
Parent(s): 7aa68c7

Update parquet files

Changed files:
- KocLab-Bilkent--turkish-constitutional-court/json-test.parquet +3 -0
- KocLab-Bilkent--turkish-constitutional-court/json-train.parquet +3 -0
- KocLab-Bilkent--turkish-constitutional-court/json-validation.parquet +3 -0
- convert_to_hf_dataset.py +0 -221
- test.jsonl +0 -0
- train.jsonl +0 -0
- validation.jsonl +0 -0
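The three parquet files below replace the jsonl splits. A minimal sketch of loading the converted dataset (the dataset id KocLab-Bilkent/turkish-constitutional-court is inferred from the directory name above and is an assumption, as is having the datasets library installed):

from datasets import load_dataset

# Loads all three splits; the Hub resolves the Git LFS pointers shown
# below and serves the actual parquet payloads.
# NOTE: the repo id is inferred from this commit's paths, not confirmed.
dataset = load_dataset("KocLab-Bilkent/turkish-constitutional-court")
print(dataset["train"][0])  # expected columns per the deleted script: "Text", "Label"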
KocLab-Bilkent--turkish-constitutional-court/json-test.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d833cea6946aeced67cb0bbd11d6d95dc590e241b334a48edba1be9cdf0d875a
+size 754393
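Each of these ADDED hunks is a Git LFS pointer file, not the parquet payload itself: a spec version line, the SHA-256 of the real content, and its size in bytes. A minimal sketch of reading such a pointer (parse_lfs_pointer is a hypothetical helper, not part of this repo):

def parse_lfs_pointer(text: str) -> dict:
    # Each pointer line is "key value"; split on the first space only,
    # since the value (e.g. the spec URL) may itself contain no further keys.
    return dict(line.split(" ", 1) for line in text.strip().splitlines())

# e.g. {'version': 'https://git-lfs.github.com/spec/v1',
#       'oid': 'sha256:d833cea...', 'size': '754393'}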
KocLab-Bilkent--turkish-constitutional-court/json-train.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7d93dd1ae78336c88ec52d2c96466f367983e9aa365d378a8d23f50435032483
+size 3453672
KocLab-Bilkent--turkish-constitutional-court/json-validation.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b52be18a2509896834dcad153e868f823612dfb6b0408dda3d4940b5c4d3cfe7
+size 661610
convert_to_hf_dataset.py DELETED
@@ -1,221 +0,0 @@
import os

import numpy as np
import pandas as pd

import pickle
import math
import random

"""
Dataset url: https://github.com/koc-lab/law-turk/tree/main/data/constitutional/deep
Paper url: https://www.sciencedirect.com/science/article/abs/pii/S0306457321001692
"""


def prepare_data():
    label_dict = {' İhlal': 0, ' İhlal Olmadığı': 1}

    ### Load data
    with open('old_dataset/constitutional_tokenized.law', 'rb') as pickle_file:
        tokenized = pickle.load(pickle_file)
    with open('old_dataset/constitutional_labels.law', 'rb') as pickle_file:
        labels = pickle.load(pickle_file)

    # Lowercase the pre-tokenized texts and re-join them into plain strings.
    tokenized_lower = []
    for text in tokenized:
        temp_text = ""
        for word in text:
            temp_text += word.lower() + " "
        tokenized_lower.append(temp_text)

    ### Split the data (70% train, 15% validation, 15% test)
    train_ratio = 0.70
    val_ratio = 0.15

    # Keep only the samples carrying one of the two known labels.
    list_indices = []
    for i, lbl in enumerate(labels):
        if lbl in label_dict:
            list_indices.append(i)

    # Shuffle with a fixed seed so the split is reproducible.
    random.Random(13).shuffle(list_indices)

    new_length = len(list_indices)

    train_idx = math.floor(new_length * train_ratio)
    val_idx = math.floor(new_length * (train_ratio + val_ratio))

    train_indices = list_indices[0:train_idx]
    val_indices = list_indices[train_idx:val_idx]
    test_indices = list_indices[val_idx:]

    train_list = []
    val_list = []
    test_list = []

    for ind in train_indices:
        train_list.append(tokenized_lower[ind])
    for ind in val_indices:
        val_list.append(tokenized_lower[ind])
    for ind in test_indices:
        test_list.append(tokenized_lower[ind])

    train_labels = []
    val_labels = []
    test_labels = []

    # Map the Turkish label strings to English label names.
    for ind in train_indices:
        if labels[ind] == " İhlal":
            train_labels.append("Violation")
        else:
            train_labels.append("No violation")

    for ind in val_indices:
        if labels[ind] == " İhlal":
            val_labels.append("Violation")
        else:
            val_labels.append("No violation")

    for ind in test_indices:
        if labels[ind] == " İhlal":
            test_labels.append("Violation")
        else:
            test_labels.append("No violation")

    train_split = np.concatenate((np.expand_dims(train_list, axis=1), np.expand_dims(train_labels, axis=1)), axis=1)
    val_split = np.concatenate((np.expand_dims(val_list, axis=1), np.expand_dims(val_labels, axis=1)), axis=1)
    test_split = np.concatenate((np.expand_dims(test_list, axis=1), np.expand_dims(test_labels, axis=1)), axis=1)

    return (pd.DataFrame(train_split, columns=["Text", "Label"]),
            pd.DataFrame(val_split, columns=["Text", "Label"]),
            pd.DataFrame(test_split, columns=["Text", "Label"]))


train_split, val_split, test_split = prepare_data()
train_split.to_json("train.jsonl", lines=True, orient="records", force_ascii=False)
val_split.to_json("validation.jsonl", lines=True, orient="records", force_ascii=False)
test_split.to_json("test.jsonl", lines=True, orient="records", force_ascii=False)

'''
# Leftover, commented-out preprocessing code for a different dataset
# (Brazilian court decisions); kept verbatim below.
pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_columns', None)


def perform_original_preprocessing():
    # Original Preprocessing from: https://github.com/lagefreitas/predicting-brazilian-court-decisions/blob/main/predicting-brazilian-court-decisions.py#L81
    # Loading the labeled decisions
    data = pd.read_csv("dataset.csv", sep='<=>', header=0)
    print('data.shape=' + str(data.shape) + ' full data set')
    # Removing NA values
    data = data.dropna(subset=[data.columns[9]])  # decision_description
    data = data.dropna(subset=[data.columns[11]])  # decision_label
    print('data.shape=' + str(data.shape) + ' dropna')
    # Removing duplicated samples
    data = data.drop_duplicates(subset=[data.columns[1]])  # process_number
    print('data.shape=' + str(data.shape) + ' removed duplicated samples by process_number')
    data = data.drop_duplicates(subset=[data.columns[9]])  # decision_description
    print('data.shape=' + str(data.shape) + ' removed duplicated samples by decision_description')
    # Removing not relevant decision labels and decisions not properly labeled
    data = data.query('decision_label != "conflito-competencia"')
    print('data.shape=' + str(data.shape) + ' removed decisions labeled as conflito-competencia')
    data = data.query('decision_label != "prejudicada"')
    print('data.shape=' + str(data.shape) + ' removed decisions labeled as prejudicada')
    data = data.query('decision_label != "not-cognized"')
    print('data.shape=' + str(data.shape) + ' removed decisions labeled as not-cognized')
    data_no = data.query('decision_label == "no"')
    print('data_no.shape=' + str(data_no.shape))
    data_yes = data.query('decision_label == "yes"')
    print('data_yes.shape=' + str(data_yes.shape))
    data_partial = data.query('decision_label == "partial"')
    print('data_partial.shape=' + str(data_partial.shape))
    # Merging decisions whose labels are yes, no, and partial to build the final data set
    data_merged = data_no.merge(data_yes, how='outer')
    data = data_merged.merge(data_partial, how='outer')
    print('data.shape=' + str(data.shape) + ' merged decisions whose labels are yes, no, and partial')
    # Removing decision_description and decision_labels whose values are -1 and -2
    indexNames = data[(data['decision_description'] == str(-1)) | (data['decision_description'] == str(-2)) | (
            data['decision_label'] == str(-1)) | (data['decision_label'] == str(-2))].index
    data.drop(indexNames, inplace=True)
    print('data.shape=' + str(data.shape) + ' removed -1 and -2 decision descriptions and labels')

    data.to_csv("dataset_processed_original.csv", index=False)


def perform_additional_processing():
    df = pd.read_csv("dataset_processed_original.csv")

    # remove strange " characters sometimes occurring at the beginning and at the end of a line
    df.ementa_filepath = df.ementa_filepath.str.replace('^"', '')
    df.decision_unanimity = df.decision_unanimity.str.replace('"$', '')

    # removing process_type and judgment_date, since they are the same everywhere (-)
    # decisions only contains 'None', nan and '-2'
    # ementa_filepath refers to the name of the file in the filesystem that we created when we scraped the data from the Court. It is temporary data and can be removed
    # decision_description = ementa_text - decision_text - decision_unanimity_text
    df = df.drop(['process_type', 'judgment_date', 'decisions', 'ementa_filepath'], axis=1)

    # some rows are somehow not read correctly. With this, we can filter them
    df = df[df.decision_text.str.len() > 1]

    # rename "-2" to a more descriptive name ==> -2 means they were not able to determine it
    df.decision_unanimity = df.decision_unanimity.replace('-2', 'not_determined')

    # rename cols for more clarity
    df = df.rename(columns={"decision_unanimity": "unanimity_label"})
    df = df.rename(columns={"decision_unanimity_text": "unanimity_text"})
    df = df.rename(columns={"decision_text": "judgment_text"})
    df = df.rename(columns={"decision_label": "judgment_label"})

    df.to_csv("dataset_processed_additional.csv", index=False)

    return df


perform_original_preprocessing()
df = perform_additional_processing()

# perform random split 80% train (3234), 10% validation (404), 10% test (405)
train, validation, test = np.split(df.sample(frac=1, random_state=42), [int(.8 * len(df)), int(.9 * len(df))])


def save_splits_to_jsonl(config_name):
    # save to jsonl files for huggingface
    if config_name: os.makedirs(config_name, exist_ok=True)
    train.to_json(os.path.join(config_name, "train.jsonl"), lines=True, orient="records", force_ascii=False)
    validation.to_json(os.path.join(config_name, "validation.jsonl"), lines=True, orient="records", force_ascii=False)
    test.to_json(os.path.join(config_name, "test.jsonl"), lines=True, orient="records", force_ascii=False)


def print_split_table_single_label(train, validation, test, label_name):
    train_counts = train[label_name].value_counts().to_frame().rename(columns={label_name: "train"})
    validation_counts = validation[label_name].value_counts().to_frame().rename(columns={label_name: "validation"})
    test_counts = test[label_name].value_counts().to_frame().rename(columns={label_name: "test"})

    table = train_counts.join(validation_counts)
    table = table.join(test_counts)
    table[label_name] = table.index
    total_row = {label_name: "total",
                 "train": len(train.index),
                 "validation": len(validation.index),
                 "test": len(test.index)}
    table = table.append(total_row, ignore_index=True)
    table = table[[label_name, "train", "validation", "test"]]  # reorder columns
    print(table.to_markdown(index=False))


save_splits_to_jsonl("")

print_split_table_single_label(train, validation, test, "judgment_label")
print_split_table_single_label(train, validation, test, "unanimity_label")

# create second config by filtering out rows with unanimity label == not_determined, while keeping the same splits
# train = train[train.unanimity_label != "not_determined"]
# validation = validation[validation.unanimity_label != "not_determined"]
# test = test[test.unanimity_label != "not_determined"]


# it is a very small dataset and very imbalanced (only very few not-unanimity labels)
# save_splits_to_jsonl("unanimity")
'''
test.jsonl DELETED
The diff for this file is too large to render. See raw diff.

train.jsonl DELETED
The diff for this file is too large to render. See raw diff.

validation.jsonl DELETED
The diff for this file is too large to render. See raw diff.
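Taken together, the commit swaps the three jsonl splits for parquet equivalents. A minimal sketch of such a conversion (the exact tooling behind the Hub's parquet conversion may differ; pandas with a parquet engine such as pyarrow is assumed):

import pandas as pd

for split in ("train", "validation", "test"):
    # Read the jsonl split produced by convert_to_hf_dataset.py and
    # rewrite it as parquet under the converted-dataset directory.
    df = pd.read_json(f"{split}.jsonl", lines=True)
    df.to_parquet(f"KocLab-Bilkent--turkish-constitutional-court/json-{split}.parquet", index=False)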