import os
import os.path as path
import unittest
import pandas as pd
from tokenizers import Regex, NormalizedString
from tokenizers.normalizers import Sequence, NFKC, Replace, BertNormalizer, Strip
from datasets import Dataset
class CustomNormalizer:
    '''
    Custom normalization pipeline for a HuggingFace tokenizer (it can be wrapped
    with tokenizers.normalizers.Normalizer.custom). It NFKC-folds the text, strips
    the inconsistently used bracket annotations, collapses whitespace runs, and
    applies the standard BERT cleanup (accent stripping and lowercasing).
    '''
    def __init__(self):
self.normalization = Sequence([
NFKC(),
Replace("[", ""), # les annotations sont tellement inconsistantes pour les [] et le () que
Replace("(", ""), # que je pense qu'il vaut mieux tout virer et ls considérer tous pareil
Replace("]", ""),
Replace(")", ""),
Replace("\r\n", ""),
Replace(Regex(r"\s{2,}"), " "),
BertNormalizer(clean_text=True, handle_chinese_chars=True, strip_accents=True, lowercase=True),
Strip()
])
def normalize(self, text: NormalizedString):
return self.normalization.normalize(text)
def normalize_str(self, sequence: str):
return self.normalization.normalize_str(sequence)
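# A minimal usage sketch (an assumption about how this class is meant to be consumed:
# the 'tokenizers' library wraps duck-typed normalizers via Normalizer.custom):
#
#   from tokenizers.normalizers import Normalizer
#   custom = Normalizer.custom(CustomNormalizer())
#   custom.normalize_str("Coucou  [Xavïer]")  # -> "coucou xavier"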
class TestNormalizer(unittest.TestCase):
def test_it_normalizes_in_place(self):
norm = CustomNormalizer()
actual = NormalizedString("coucou[[[xavier")
norm.normalize(actual)
expected = "coucouxavier"
self.assertEqual(actual.normalized, expected)
def test_it_removes_opening_bracket(self):
norm = CustomNormalizer()
actual = norm.normalize_str("coucou[[[xavier")
expected = "coucouxavier"
self.assertEqual(actual, expected)
def test_it_removes_closing_bracket(self):
norm = CustomNormalizer()
actual = norm.normalize_str("coucou]]]xavier")
expected = "coucouxavier"
self.assertEqual(actual, expected)
def test_it_removes_both_brackets(self):
norm = CustomNormalizer()
actual = norm.normalize_str("coucou[[[-]]]xavier")
expected = "coucou-xavier"
self.assertEqual(actual, expected)
    def test_it_lowercases_text(self):
norm = CustomNormalizer()
actual = norm.normalize_str("CouCou Xavier")
expected = "coucou xavier"
self.assertEqual(actual, expected)
    def test_it_replaces_long_sequences_of_whitespaces(self):
        norm = CustomNormalizer()
        actual = norm.normalize_str("coucou  xavier")
        expected = "coucou xavier"
        self.assertEqual(actual, expected)
        #
        actual = norm.normalize_str("coucou    xavier")
        expected = "coucou xavier"
        self.assertEqual(actual, expected)
        #
        actual = norm.normalize_str("coucou \txavier")
        expected = "coucou xavier"
        self.assertEqual(actual, expected)
    def test_it_normalizes_funky_chars(self):
        # representative inputs: NFKC folds compatibility characters (e.g. fullwidth
        # letters, non-breaking spaces) and the BERT normalizer strips accents and
        # lowercases
        norm = CustomNormalizer()
        actual = norm.normalize_str("coucou Xavïer")
        expected = "coucou xavier"
        self.assertEqual(actual, expected)
        #
        actual = norm.normalize_str("ｃｏｕｃｏｕ xavier")
        expected = "coucou xavier"
        self.assertEqual(actual, expected)
        #
        actual = norm.normalize_str("coucou\u00a0\txavier")
        expected = "coucou xavier"
        self.assertEqual(actual, expected)
def file_to_pandas(project, fname):
    '''
    Reads the raw annotation file 'fname' and returns a dataframe with one row per
    normalized, non-empty line. Lines consisting of '###' are treated as page breaks.
    '''
    norm = CustomNormalizer()
    file_id = path.basename(fname)
    page_id = 0
    line_id = 0
    df_dict = {
        "project": [],
        "file_id": [],
        "page_id": [],
        "line_id": [],
        "text": []
    }
#
    with open(fname, encoding="utf8") as f:
        for line in f:
            line = NormalizedString(line)
            norm.normalize(line)
            line = line.normalized
            #
            if not line:  # the empty string is falsy: skip blank lines
                pass
            elif line == "###":  # page-break marker in the source files
                page_id += 1
            else:
                line_id += 1
                df_dict["project"].append(project)
                df_dict["file_id"].append(file_id)
                df_dict["page_id"].append(page_id)
                df_dict["line_id"].append(line_id)
                df_dict["text"].append(line)
    #
    return pd.DataFrame.from_dict(df_dict)
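# Illustration of the expected input format (hypothetical file name; '###' lines mark
# page breaks, blank lines are skipped, line_id keeps counting across pages):
#
#   df = file_to_pandas("pardons", "deed_042.txt")
#   # project  file_id       page_id  line_id  text
#   # pardons  deed_042.txt  0        1        "first normalized line"
#   # pardons  deed_042.txt  1        2        "first line after the first '###'"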
def text(df):
    '''Joins the 'text' column of 'df' into one newline-separated string.'''
    return "\n".join(df["text"].to_list())
def documents_text(df: pd.DataFrame):
    '''
    Returns a dataframe equivalent to the original 'df', except that the page and line
    information is dropped so as to recover the complete text of each document in the
    dataset.
    # Params
    - df is a pandas dataframe
    '''
    groups = df.groupby(["project", "file_id"])
    data = [
        (project, file_id, text(groups.get_group((project, file_id)).sort_values(["page_id", "line_id"])))
        for (project, file_id), _v in groups
    ]
    return pd.DataFrame.from_records(data, columns=["project", "file_id", "text"])
def pages_text(df: pd.DataFrame):
    '''
    Returns a dataframe equivalent to the original 'df', except that the line
    information is dropped so as to recover the complete text of each page of
    processed data.
    # Params
    - df is a pandas dataframe
    '''
    groups = df.groupby(["project", "file_id", "page_id"])
    data = [
        (project, file_id, page_id, text(groups.get_group((project, file_id, page_id)).sort_values("line_id")))
        for (project, file_id, page_id), _v in groups
    ]
    return pd.DataFrame.from_records(data, columns=["project", "file_id", "page_id", "text"])
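# Both aggregations follow the same pattern: group, sort the rows back into reading
# order, then join them with text(). For a dataframe 'df' built by file_to_pandas:
#
#   pages_text(df)      # one row per (project, file_id, page_id) with the page text
#   documents_text(df)  # one row per (project, file_id) with the document text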
def prepare(df: pd.DataFrame, config_name: str):
    '''Splits 'df' into train (64%), validation (16%) and test (20%) sets, saved as csv.'''
    ds = Dataset.from_pandas(df)
    splits = ds.train_test_split(train_size=0.8, shuffle=True)
    train = splits["train"]
    test = splits["test"]
    splits = train.train_test_split(train_size=0.8)
    train = splits["train"]
    valid = splits["test"]
    # to_csv does not create missing directories, so make sure the target folder exists
    os.makedirs(path.join("data", config_name), exist_ok=True)
    train.to_csv("data/{config}/train.csv".format(config=config_name))
    test.to_csv("data/{config}/test.csv".format(config=config_name))
    valid.to_csv("data/{config}/valid.csv".format(config=config_name))
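# Resulting on-disk layout, e.g. for prepare(lines, "line_by_line"):
#
#   data/line_by_line/train.csv  (64% of the rows)
#   data/line_by_line/valid.csv  (16%)
#   data/line_by_line/test.csv   (20%)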
if __name__ == '__main__':
    # ingest the raw text files of both projects into one line-level dataframe
    pth = r'C:\Users\xavier.gillard\Documents\ARKEY\data'
    prj = "pardons"
    df1 = pd.concat([file_to_pandas(prj, path.join(pth, f)) for f in os.listdir(path=pth)], axis=0, ignore_index=True)
pth = r'C:\Users\xavier.gillard\Documents\REPO\vonnis'
prj = "sentences"
df2 = pd.concat([file_to_pandas(prj, path.join(pth, f)) for f in os.listdir(path=pth)], axis=0, ignore_index=True)
df = pd.concat([df1, df2], axis=0, ignore_index=True)
df.to_csv("data/full_corpus.csv", index=False)
#df = pd.read_csv("data/full_corpus.csv")
    # derive the three granularities of the corpus: individual lines, pages, documents
    lines = df[["project", "file_id", "page_id", "line_id", "text"]]
    pages = pages_text(df)[["project", "file_id", "page_id", "text"]]
    docs = documents_text(df)[["project", "file_id", "text"]]
prepare(lines, "line_by_line")
prepare(pages, "page_by_page")
prepare(docs, "doc_by_doc") |