xaviergillard committed
Commit acddb18
Parent(s): a4cff4d

initial commit

Files changed:
- README.md (+26 -0)
- data/doc_by_doc/test.csv (+0 -0)
- data/doc_by_doc/train.csv (+0 -0)
- data/doc_by_doc/valid.csv (+0 -0)
- data/full_corpus.csv (+0 -0)
- data/line_by_line/test.csv (+0 -0)
- data/line_by_line/train.csv (+0 -0)
- data/line_by_line/valid.csv (+0 -0)
- data/page_by_page/test.csv (+0 -0)
- data/page_by_page/train.csv (+0 -0)
- data/page_by_page/valid.csv (+0 -0)
- todataset.py (+179 -0)
README.md CHANGED
@@ -3,6 +3,32 @@ license: bsd
 language:
 - nl
 - fr
+configs:
+- config_name: line_by_line
+  data_files:
+  - split: train
+    path: "data/line_by_line/train.csv"
+  - split: test
+    path: "data/line_by_line/test.csv"
+  - split: valid
+    path: "data/line_by_line/valid.csv"
+  default: true
+- config_name: page_by_page
+  data_files:
+  - split: train
+    path: "data/page_by_page/train.csv"
+  - split: test
+    path: "data/page_by_page/test.csv"
+  - split: valid
+    path: "data/page_by_page/valid.csv"
+- config_name: doc_by_doc
+  data_files:
+  - split: train
+    path: "data/doc_by_doc/train.csv"
+  - split: test
+    path: "data/doc_by_doc/test.csv"
+  - split: valid
+    path: "data/doc_by_doc/valid.csv"
 ---
 # Brabant XVII
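Once merged, each of these configs can be loaded by name with the datasets library. A minimal sketch; note that the repository id below is an assumption inferred from the committer and dataset name, not something this commit confirms:

from datasets import load_dataset

# NOTE: the repository id is assumed, not confirmed by this commit.
# "line_by_line" is the default config; "page_by_page" and "doc_by_doc" load by name.
ds = load_dataset("xaviergillard/brabant-xvii", "page_by_page")
print(ds["train"][0]["text"])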
data/doc_by_doc/test.csv ADDED
The diff for this file is too large to render. See raw diff

data/doc_by_doc/train.csv ADDED
The diff for this file is too large to render. See raw diff

data/doc_by_doc/valid.csv ADDED
The diff for this file is too large to render. See raw diff

data/full_corpus.csv ADDED
The diff for this file is too large to render. See raw diff

data/line_by_line/test.csv ADDED
The diff for this file is too large to render. See raw diff

data/line_by_line/train.csv ADDED
The diff for this file is too large to render. See raw diff

data/line_by_line/valid.csv ADDED
The diff for this file is too large to render. See raw diff

data/page_by_page/test.csv ADDED
The diff for this file is too large to render. See raw diff

data/page_by_page/train.csv ADDED
The diff for this file is too large to render. See raw diff

data/page_by_page/valid.csv ADDED
The diff for this file is too large to render. See raw diff
todataset.py ADDED
@@ -0,0 +1,179 @@
import os
import os.path as path
import unittest
import pandas as pd

from tokenizers import Regex, NormalizedString
from tokenizers.normalizers import Sequence, NFKC, Replace, BertNormalizer, Strip

from datasets import Dataset

class CustomNormalizer:
    def __init__(self):
        self.normalization = Sequence([
            NFKC(),
            Replace("[", ""),  # the [] and () annotations are so inconsistent
            Replace("(", ""),  # that I think it is better to strip them all and treat them alike
            Replace("]", ""),
            Replace(")", ""),
            Replace("\r\n", ""),
            Replace(Regex(r"\s{2,}"), " "),
            BertNormalizer(clean_text=True, handle_chinese_chars=True, strip_accents=True, lowercase=True),
            Strip()
        ])

    def normalize(self, text: NormalizedString):
        return self.normalization.normalize(text)

    def normalize_str(self, sequence: str):
        return self.normalization.normalize_str(sequence)
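
# Example added for illustration (not part of the original file): the net effect
# of the pipeline above is bracket removal, whitespace collapsing, accent
# stripping and lowercasing, e.g.:
#   CustomNormalizer().normalize_str("  Coucou   [Xavier]\r\n")  ->  "coucou xavier"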
class TestNormalizer(unittest.TestCase):
    def test_it_normalizes_in_place(self):
        norm = CustomNormalizer()
        actual = NormalizedString("coucou[[[xavier")
        norm.normalize(actual)
        expected = "coucouxavier"
        self.assertEqual(actual.normalized, expected)

    def test_it_removes_opening_bracket(self):
        norm = CustomNormalizer()
        actual = norm.normalize_str("coucou[[[xavier")
        expected = "coucouxavier"
        self.assertEqual(actual, expected)

    def test_it_removes_closing_bracket(self):
        norm = CustomNormalizer()
        actual = norm.normalize_str("coucou]]]xavier")
        expected = "coucouxavier"
        self.assertEqual(actual, expected)

    def test_it_removes_both_brackets(self):
        norm = CustomNormalizer()
        actual = norm.normalize_str("coucou[[[-]]]xavier")
        expected = "coucou-xavier"
        self.assertEqual(actual, expected)

    def test_it_lowercases_text(self):
        norm = CustomNormalizer()
        actual = norm.normalize_str("CouCou Xavier")
        expected = "coucou xavier"
        self.assertEqual(actual, expected)

    def test_it_replaces_long_sequences_of_whitespaces(self):
        norm = CustomNormalizer()
        actual = norm.normalize_str("coucou  xavier")
        expected = "coucou xavier"
        self.assertEqual(actual, expected)
        #
        actual = norm.normalize_str("coucou   xavier")
        expected = "coucou xavier"
        self.assertEqual(actual, expected)
        #
        actual = norm.normalize_str("coucou \txavier")
        expected = "coucou xavier"
        self.assertEqual(actual, expected)

    def test_it_normalizes_funky_chars(self):
        # NOTE: the original non-ASCII inputs were lost in this page's rendering;
        # the unicode spaces below are plausible stand-ins, not the original characters.
        norm = CustomNormalizer()
        actual = norm.normalize_str("coucou\u00a0xavier")
        expected = "coucou xavier"
        self.assertEqual(actual, expected)
        #
        actual = norm.normalize_str("coucou\u2003xavier")
        expected = "coucou xavier"
        self.assertEqual(actual, expected)
        #
        actual = norm.normalize_str("coucou\u00a0\txavier")
        expected = "coucou xavier"
        self.assertEqual(actual, expected)
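
# The test suite above can be run from the repository root with, for instance:
#   python -m unittest todataset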
def file_to_pandas(project, fname):
    norm = CustomNormalizer()
    file_id = path.basename(fname)
    page_id = 0
    line_id = 0
    df_dict = {
        "project": [],
        "file_id": [],
        "page_id": [],
        "line_id": [],
        "text"   : []
    }
    #
    with open(fname, encoding="utf8") as f:
        for line in f.readlines():
            line = NormalizedString(line)
            norm.normalize(line)
            line = line.normalized
            #
            if not line:  # an empty string is falsy
                pass
            elif line == "###":
                page_id += 1
            else:
                line_id += 1
                df_dict["project"].append(project)
                df_dict["file_id"].append(file_id)
                df_dict["page_id"].append(page_id)
                df_dict["line_id"].append(line_id)
                df_dict["text"]   .append(line)
    #
    return pd.DataFrame.from_dict(df_dict)
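
# Input format assumed by file_to_pandas (illustrative, not from the original file):
# one transcribed line per physical line, with a line containing only "###"
# marking a page break, e.g.:
#
#   first line of page one
#   second line of page one
#   ###
#   first line of page two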
def text(df):
    return "\n".join(df["text"].to_list())

def documents_text(df: pd.DataFrame):
    '''
    This function returns a dataframe equivalent to the original 'df'. However, the returned
    dataframe loses the page and line information so as to retrieve the complete text of each
    document pertaining to the dataset.

    # Params
    - df is a pandas dataframe
    '''
    groups = df.groupby(["project", "file_id"])
    data = [(project, file_id, text(groups.get_group((project, file_id)).sort_values(["page_id", "line_id"]))) for (project, file_id), _v in groups]
    return pd.DataFrame.from_records(data, columns=["project", "file_id", "text"])

def pages_text(df: pd.DataFrame):
    '''
    This function returns a dataframe equivalent to the original 'df'. However, the returned
    dataframe loses the line information so as to retrieve the complete text of each page of
    the processed data.

    # Params
    - df is a pandas dataframe
    '''
    groups = df.groupby(["project", "file_id", "page_id"])
    data = [(project, file_id, page_id, text(groups.get_group((project, file_id, page_id)).sort_values("line_id"))) for (project, file_id, page_id), _v in groups]
    return pd.DataFrame.from_records(data, columns=["project", "file_id", "page_id", "text"])
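
# Added for clarity: starting from the line-level frame built by file_to_pandas,
# pages_text keeps one row per (project, file_id, page_id) and documents_text one
# row per (project, file_id), with the constituent lines re-joined by newlines in
# reading order.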
def prepare(df: pd.DataFrame, config_name: str):
    # net effect of the two successive 80/20 splits: 64% train, 16% valid, 20% test
    ds = Dataset.from_pandas(df)
    splits = ds.train_test_split(train_size=0.8, shuffle=True)
    train = splits["train"]
    test  = splits["test"]
    splits = train.train_test_split(train_size=0.8)
    train = splits["train"]
    valid = splits["test"]
    train.to_csv("data/{config}/train.csv".format(config=config_name))
    test .to_csv("data/{config}/test.csv".format(config=config_name))
    valid.to_csv("data/{config}/valid.csv".format(config=config_name))
if __name__ == '__main__':
    # pth = r'C:\Users\xavier.gillard\Documents\ARKEY\data'
    # prj = "pardons"
    # df  = pd.concat([file_to_pandas(prj, path.join(pth, f)) for f in os.listdir(path=pth)], axis=0, ignore_index=True)
    # df.to_csv("data/full_corpus.csv", index=False)

    df    = pd.read_csv("data/full_corpus.csv")
    lines = df[["text"]]
    docs  = documents_text(df)[["text"]]
    pages = pages_text(df)[["text"]]

    prepare(lines, "line_by_line")
    prepare(pages, "page_by_page")
    prepare(docs, "doc_by_doc")
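As a quick sanity check after running the script, the generated splits can be read back with pandas; a short sketch using the paths written by prepare above:

import pandas as pd

for config in ("line_by_line", "page_by_page", "doc_by_doc"):
    for split in ("train", "test", "valid"):
        df = pd.read_csv("data/{config}/{split}.csv".format(config=config, split=split))
        print(config, split, len(df))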