ceyhunemreozturk committed
Commit 814d3d1 • 1 Parent(s): 7aa68c7
Upload convert_to_hf_dataset.py

convert_to_hf_dataset.py CHANGED (+1 -123)
@@ -96,126 +96,4 @@ def prepare_data():
 train_split, val_split, test_split = prepare_data()
 train_split.to_json(os.path.join("train.jsonl"), lines=True, orient="records", force_ascii=False)
 val_split.to_json(os.path.join("validation.jsonl"), lines=True, orient="records", force_ascii=False)
-test_split.to_json(os.path.join("test.jsonl"), lines=True, orient="records", force_ascii=False)
-
-'''
-pd.set_option('display.max_colwidth', None)
-pd.set_option('display.max_columns', None)
-
-
-def perform_original_preprocessing():
-    # Original preprocessing from: https://github.com/lagefreitas/predicting-brazilian-court-decisions/blob/main/predicting-brazilian-court-decisions.py#L81
-    # Loading the labeled decisions
-    data = pd.read_csv("dataset.csv", sep='<=>', header=0)
-    print('data.shape=' + str(data.shape) + ' full data set')
-    # Removing NA values
-    data = data.dropna(subset=[data.columns[9]])  # decision_description
-    data = data.dropna(subset=[data.columns[11]])  # decision_label
-    print('data.shape=' + str(data.shape) + ' dropna')
-    # Removing duplicated samples
-    data = data.drop_duplicates(subset=[data.columns[1]])  # process_number
-    print('data.shape=' + str(data.shape) + ' removed duplicated samples by process_number')
-    data = data.drop_duplicates(subset=[data.columns[9]])  # decision_description
-    print('data.shape=' + str(data.shape) + ' removed duplicated samples by decision_description')
-    # Removing irrelevant decision labels and decisions not properly labeled
-    data = data.query('decision_label != "conflito-competencia"')
-    print('data.shape=' + str(data.shape) + ' removed decisions labeled as conflito-competencia')
-    data = data.query('decision_label != "prejudicada"')
-    print('data.shape=' + str(data.shape) + ' removed decisions labeled as prejudicada')
-    data = data.query('decision_label != "not-cognized"')
-    print('data.shape=' + str(data.shape) + ' removed decisions labeled as not-cognized')
-    data_no = data.query('decision_label == "no"')
-    print('data_no.shape=' + str(data_no.shape))
-    data_yes = data.query('decision_label == "yes"')
-    print('data_yes.shape=' + str(data_yes.shape))
-    data_partial = data.query('decision_label == "partial"')
-    print('data_partial.shape=' + str(data_partial.shape))
-    # Merging decisions whose labels are yes, no, and partial to build the final data set
-    data_merged = data_no.merge(data_yes, how='outer')
-    data = data_merged.merge(data_partial, how='outer')
-    print('data.shape=' + str(data.shape) + ' merged decisions whose labels are yes, no, and partial')
-    # Removing rows whose decision_description or decision_label is -1 or -2
-    indexNames = data[(data['decision_description'] == str(-1)) | (data['decision_description'] == str(-2)) | (
-            data['decision_label'] == str(-1)) | (data['decision_label'] == str(-2))].index
-    data.drop(indexNames, inplace=True)
-    print('data.shape=' + str(data.shape) + ' removed -1 and -2 decision descriptions and labels')
-
-    data.to_csv("dataset_processed_original.csv", index=False)
-
-
-def perform_additional_processing():
-    df = pd.read_csv("dataset_processed_original.csv")
-
-    # remove stray " characters sometimes occurring at the beginning and end of a line
-    df.ementa_filepath = df.ementa_filepath.str.replace('^"', '')
-    df.decision_unanimity = df.decision_unanimity.str.replace('"$', '')
-
-    # removing process_type and judgment_date, since they are the same everywhere (-)
-    # decisions only contains 'None', nan and '-2'
-    # ementa_filepath refers to the name of the file we created when we scraped the data from the Court. It is temporary data and can be removed
-    # decision_description = ementa_text - decision_text - decision_unanimity_text
-    df = df.drop(['process_type', 'judgment_date', 'decisions', 'ementa_filepath'], axis=1)
-
-    # some rows are somehow not read correctly. With this, we can filter them out
-    df = df[df.decision_text.str.len() > 1]
-
-    # rename "-2" to a more descriptive name ==> -2 means it could not be determined
-    df.decision_unanimity = df.decision_unanimity.replace('-2', 'not_determined')
-
-    # rename cols for more clarity
-    df = df.rename(columns={"decision_unanimity": "unanimity_label"})
-    df = df.rename(columns={"decision_unanimity_text": "unanimity_text"})
-    df = df.rename(columns={"decision_text": "judgment_text"})
-    df = df.rename(columns={"decision_label": "judgment_label"})
-
-    df.to_csv("dataset_processed_additional.csv", index=False)
-
-    return df
-
-
-perform_original_preprocessing()
-df = perform_additional_processing()
-
-# perform a random split: 80% train (3234), 10% validation (404), 10% test (405)
-train, validation, test = np.split(df.sample(frac=1, random_state=42), [int(.8 * len(df)), int(.9 * len(df))])
-
-
-def save_splits_to_jsonl(config_name):
-    # save to jsonl files for huggingface
-    if config_name: os.makedirs(config_name, exist_ok=True)
-    train.to_json(os.path.join(config_name, "train.jsonl"), lines=True, orient="records", force_ascii=False)
-    validation.to_json(os.path.join(config_name, "validation.jsonl"), lines=True, orient="records", force_ascii=False)
-    test.to_json(os.path.join(config_name, "test.jsonl"), lines=True, orient="records", force_ascii=False)
-
-
-def print_split_table_single_label(train, validation, test, label_name):
-    train_counts = train[label_name].value_counts().to_frame().rename(columns={label_name: "train"})
-    validation_counts = validation[label_name].value_counts().to_frame().rename(columns={label_name: "validation"})
-    test_counts = test[label_name].value_counts().to_frame().rename(columns={label_name: "test"})
-
-    table = train_counts.join(validation_counts)
-    table = table.join(test_counts)
-    table[label_name] = table.index
-    total_row = {label_name: "total",
-                 "train": len(train.index),
-                 "validation": len(validation.index),
-                 "test": len(test.index)}
-    table = table.append(total_row, ignore_index=True)
-    table = table[[label_name, "train", "validation", "test"]]  # reorder columns
-    print(table.to_markdown(index=False))
-
-
-save_splits_to_jsonl("")
-
-print_split_table_single_label(train, validation, test, "judgment_label")
-print_split_table_single_label(train, validation, test, "unanimity_label")
-
-# create a second config by filtering out rows with unanimity_label == not_determined, while keeping the same splits
-# train = train[train.unanimity_label != "not_determined"]
-# validation = validation[validation.unanimity_label != "not_determined"]
-# test = test[test.unanimity_label != "not_determined"]
-
-# it is a very small dataset and very imbalanced (only very few non-unanimity labels)
-# save_splits_to_jsonl("unanimity")
-'''
+test_split.to_json(os.path.join("test.jsonl"), lines=True, orient="records", force_ascii=False)
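
For reference, the three jsonl files written by the remaining code path can be loaded back as a Hugging Face dataset. A minimal sketch, assuming the files sit in the working directory and the `datasets` library is installed; this loader call is illustrative and not part of the commit:

# Illustrative check: read the emitted splits back with the generic json loader.
from datasets import load_dataset

dataset = load_dataset(
    "json",
    data_files={
        "train": "train.jsonl",
        "validation": "validation.jsonl",
        "test": "test.jsonl",
    },
)
print(dataset)  # DatasetDict with train / validation / test splits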