brabant-xvii / todataset.py

# Builds the full corpus dataset: includes sentences (vonnis) in addition to pardons.
import os
import os.path as path
import unittest
import pandas as pd
from tokenizers import Regex, NormalizedString
from tokenizers.normalizers import Sequence, NFKC, Replace, BertNormalizer, Strip
from datasets import Dataset

class CustomNormalizer:
    def __init__(self):
        self.normalization = Sequence([
            NFKC(),
            # The annotations are so inconsistent about [] and () that it seems
            # better to strip them all and treat both kinds the same way.
            Replace("[", ""),
            Replace("(", ""),
            Replace("]", ""),
            Replace(")", ""),
            Replace("\r\n", ""),
            Replace(Regex(r"\s{2,}"), " "),
            BertNormalizer(clean_text=True, handle_chinese_chars=True, strip_accents=True, lowercase=True),
            Strip()
        ])

    def normalize(self, text: NormalizedString):
        return self.normalization.normalize(text)

    def normalize_str(self, sequence: str):
        return self.normalization.normalize_str(sequence)
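
# A minimal usage sketch of the normalizer (the unit tests below exercise it more thoroughly):
#
#   norm = CustomNormalizer()
#   norm.normalize_str("Coucou  [Xavier]\r\n")  # -> "coucou xavier"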

class TestNormalizer(unittest.TestCase):
    def test_it_normalizes_in_place(self):
        norm = CustomNormalizer()
        actual = NormalizedString("coucou[[[xavier")
        norm.normalize(actual)
        expected = "coucouxavier"
        self.assertEqual(actual.normalized, expected)

    def test_it_removes_opening_bracket(self):
        norm = CustomNormalizer()
        actual = norm.normalize_str("coucou[[[xavier")
        expected = "coucouxavier"
        self.assertEqual(actual, expected)

    def test_it_removes_closing_bracket(self):
        norm = CustomNormalizer()
        actual = norm.normalize_str("coucou]]]xavier")
        expected = "coucouxavier"
        self.assertEqual(actual, expected)

    def test_it_removes_both_brackets(self):
        norm = CustomNormalizer()
        actual = norm.normalize_str("coucou[[[-]]]xavier")
        expected = "coucou-xavier"
        self.assertEqual(actual, expected)

    def test_it_lowercases_text(self):
        norm = CustomNormalizer()
        actual = norm.normalize_str("CouCou Xavier")
        expected = "coucou xavier"
        self.assertEqual(actual, expected)

    def test_it_replaces_long_sequences_of_whitespaces(self):
        norm = CustomNormalizer()
        actual = norm.normalize_str("coucou  xavier")
        expected = "coucou xavier"
        self.assertEqual(actual, expected)
        #
        actual = norm.normalize_str("coucou   xavier")
        expected = "coucou xavier"
        self.assertEqual(actual, expected)
        #
        actual = norm.normalize_str("coucou \txavier")
        expected = "coucou xavier"
        self.assertEqual(actual, expected)

    def test_it_normalizes_funky_chars(self):
        norm = CustomNormalizer()
        # NFKC maps compatibility characters (here a non-breaking space) to their plain form
        actual = norm.normalize_str("coucou\u00a0xavier")
        expected = "coucou xavier"
        self.assertEqual(actual, expected)
        #
        # BertNormalizer strips the accents
        actual = norm.normalize_str("coucou xàvier")
        expected = "coucou xavier"
        self.assertEqual(actual, expected)
        #
        actual = norm.normalize_str("coucou \txavier")
        expected = "coucou xavier"
        self.assertEqual(actual, expected)
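
# The loader below assumes raw transcription files shaped roughly as follows (inferred from
# the parsing logic in file_to_pandas: blank lines are skipped, and a line containing only
# "###" marks a page break):
#
#   first line of page one
#   second line of page one
#   ###
#   first line of page two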

def file_to_pandas(project, fname):
    norm = CustomNormalizer()
    file_id = path.basename(fname)
    page_id = 0
    line_id = 0
    df_dict = {
        "project": [],
        "file_id": [],
        "page_id": [],
        "line_id": [],
        "text"   : []
    }
    #
    with open(fname, encoding="utf8") as f:
        for line in f:
            line = NormalizedString(line)
            norm.normalize(line)
            line = line.normalized
            #
            if not line:  # an empty string is falsy: skip blank lines
                pass
            elif line == "###":  # page break marker
                page_id += 1
            else:
                line_id += 1
                df_dict["project"].append(project)
                df_dict["file_id"].append(file_id)
                df_dict["page_id"].append(page_id)
                df_dict["line_id"].append(line_id)
                df_dict["text"]   .append(line)
    #
    return pd.DataFrame.from_dict(df_dict)
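
# Usage sketch (hypothetical file name): file_to_pandas("pardons", "pardon_001.txt") yields
# one row per non-empty normalized line, with line_id counting across page breaks, e.g.:
#
#   project  file_id         page_id  line_id  text
#   pardons  pardon_001.txt  0        1        first line of page one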

def text(df):
    return "\n".join(df["text"].to_list())

def documents_text(df: pd.DataFrame):
    '''
    This function returns a dataframe equivalent to the original 'df'. However, the returned
    dataframe drops the page and line information so as to recover the complete text of each
    document in the dataset.

    # Params
    - df is a pandas dataframe
    '''
    groups = df.groupby(["project", "file_id"])
    data = [(project, file_id, text(group.sort_values(["page_id", "line_id"])))
            for (project, file_id), group in groups]
    return pd.DataFrame.from_records(data, columns=["project", "file_id", "text"])

def pages_text(df: pd.DataFrame):
    '''
    This function returns a dataframe equivalent to the original 'df'. However, the returned
    dataframe drops the line information so as to recover the complete text of each page of
    processed data.

    # Params
    - df is a pandas dataframe
    '''
    groups = df.groupby(["project", "file_id", "page_id"])
    data = [(project, file_id, page_id, text(group.sort_values("line_id")))
            for (project, file_id, page_id), group in groups]
    return pd.DataFrame.from_records(data, columns=["project", "file_id", "page_id", "text"])
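
# Usage sketch (df as produced by file_to_pandas): pages_text(df) collapses the per-line rows
# into one row per (project, file_id, page_id) whose "text" is the page's lines joined with
# newlines; documents_text(df) does the same at the whole-file level:
#
#   pages = pages_text(df)      # columns: project, file_id, page_id, text
#   docs  = documents_text(df)  # columns: project, file_id, text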

def prepare(df: pd.DataFrame, config_name: str):
    # An 80/20 split, then the train part is split 80/20 again: the effective
    # proportions are 64% train, 16% validation, 20% test.
    ds = Dataset.from_pandas(df)
    splits = ds.train_test_split(train_size=0.8, shuffle=True)
    train = splits["train"]
    test = splits["test"]
    splits = train.train_test_split(train_size=0.8)
    train = splits["train"]
    valid = splits["test"]
    os.makedirs("data/{config}".format(config=config_name), exist_ok=True)  # to_csv does not create the folder
    train.to_csv("data/{config}/train.csv".format(config=config_name))
    test .to_csv("data/{config}/test.csv".format(config=config_name))
    valid.to_csv("data/{config}/valid.csv".format(config=config_name))
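
# Note: prepare(df, "line_by_line") thus writes data/line_by_line/{train,test,valid}.csv;
# the same layout is produced for the other configs used below.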

if __name__ == '__main__':
    pth = r'C:\Users\xavier.gillard\Documents\ARKEY\data'
    prj = "pardons"
    df1 = pd.concat([file_to_pandas(prj, path.join(pth, f)) for f in os.listdir(path=pth)], axis=0, ignore_index=True)

    pth = r'C:\Users\xavier.gillard\Documents\REPO\vonnis'
    prj = "sentences"
    df2 = pd.concat([file_to_pandas(prj, path.join(pth, f)) for f in os.listdir(path=pth)], axis=0, ignore_index=True)

    df = pd.concat([df1, df2], axis=0, ignore_index=True)
    df.to_csv("data/full_corpus.csv", index=False)
    #df = pd.read_csv("data/full_corpus.csv")

    lines = df[["project", "file_id", "page_id", "line_id", "text"]]
    pages = pages_text(df)[["project", "file_id", "page_id", "text"]]
    docs = documents_text(df)[["project", "file_id", "text"]]

    prepare(lines, "line_by_line")
    prepare(pages, "page_by_page")
    prepare(docs, "doc_by_doc")