KaraKaraWitch committed on
Commit
232f4f2
1 Parent(s): 6bf7ef0

Upload scripts/gutenberg_process.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. scripts/gutenberg_process.py +212 -0
scripts/gutenberg_process.py ADDED
@@ -0,0 +1,212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import concurrent.futures as fut
2
+ import pathlib
3
+ import ftfy
4
+ import orjson
5
+ import re
6
+ import psutil
7
+ from bs4 import BeautifulSoup, Comment
8
+ import markdownify
9
+ from text_toolkit import normalize_puncts, clean_newlines, merge_consecutive_lines
10
+
11
+
12
class md_nolinks(markdownify.MarkdownConverter):
    """Markdown converter that emits no URLs.

    Anchors are collapsed to their visible text and <img> elements are
    dropped entirely, so the produced markdown carries no links or images.
    """

    def convert_a(self, _, text, __):
        # chomp() splits off the surrounding whitespace; keep only the
        # anchor's inner text, discarding the href.
        _, _, text = markdownify.chomp(text)
        return text if text else ""

    def convert_img(self, el, text, convert_as_inline):
        # Images are discarded outright.
        return ""


# Shared converter instance used when flattening each book's HTML.
md = md_nolinks()
24
+
25
+
26
def blackbox(soup: pathlib.Path, filename: pathlib.Path):
    """Extract the body text of one Project Gutenberg HTML book.

    Decodes the file (falling back to ftfy's encoding guess when it is not
    valid UTF-8), strips Gutenberg boilerplate/transcriber notes/page
    numbers, converts the remaining HTML to link-free markdown, normalizes
    punctuation and newlines, and writes the result to ``filename``.

    Args:
        soup: path-like pointing at the raw ``.htm`` file.
        filename: path the cleaned plain-text output is written to.

    Returns:
        The cleaned text (also persisted to ``filename``).

    Raises:
        Exception: when the document structure defeats every extraction
            strategy (detached <pre>, missing <hr> rule, missing <body>).
    """
    # Spread this worker across the available cores. The previous version
    # hard-coded 64 CPUs, which raises ValueError on smaller machines.
    p = psutil.Process()
    p.cpu_affinity(list(range(min(64, psutil.cpu_count() or 1))))
    path = pathlib.Path(soup)
    data_bytes = path.read_bytes()
    try:
        utf8 = data_bytes.decode("utf-8")
        utf8, _explain = ftfy.fix_and_explain(utf8)
    except UnicodeDecodeError:
        # Not valid UTF-8: let ftfy guess the encoding before fixing.
        magic_data, _magic_explain = ftfy.guess_bytes(data_bytes)
        utf8, _explain = ftfy.fix_and_explain(magic_data)
    soup = BeautifulSoup(utf8, "lxml")
    pre = soup.find("pre")
    # Remove page-number spans and HTML comments before extraction.
    for span in soup.find_all("span", attrs={"class": "pagenum"}):
        span.decompose()
    # find_all(string=...) replaces the deprecated findAll(text=...).
    for comment in soup.find_all(string=lambda text: isinstance(text, Comment)):
        comment.extract()
    composed_content = []
    if pre:
        # Gutenberg books typically open with a <pre> licence header.
        txt: str = pre.get_text()
        if "gutenberg e" in txt.lower():
            if not pre.parent:
                raise Exception("header <pre> is detached from the document")
            for child in pre.next_siblings:
                if isinstance(child, str):
                    # Bare NavigableStrings have name == None and would be
                    # skipped below anyway; append and move on.
                    composed_content.append(child.lstrip())
                    continue
                if child.name == "pre":
                    # A second <pre> marks the licence footer; stop here.
                    break
                if child.name == "table":
                    continue
                if child.name is None:
                    continue
                child_filt = child.get_text().lower()
                if (
                    child.name in ["div", "p"]
                    and "transcriber" in child_filt
                    and "note" in child_filt
                ):
                    continue
                if "pagenum" in child.get("class", ""):
                    continue
                composed_content.append(child)
        else:
            # No header sentinel in the <pre>: look for an <hr> rule that
            # delimits the start of the body.
            # NOTE(review): look_ahead is never decremented, so every
            # sibling is scanned rather than only the first 5 — preserved
            # as-is to avoid changing which books parse; confirm intent.
            look_ahead = 5
            rule = None
            for sibling in pre.next_siblings:
                if look_ahead > 0:
                    if sibling.name == "hr":
                        rule = sibling
                        break
            if not rule:
                raise Exception("no <hr> rule found after the <pre> header")
            for t_block in rule.next_siblings:
                if isinstance(t_block, str):
                    composed_content.append(t_block.lstrip(" "))
                    continue
                if not t_block.get_text().strip():
                    continue
                if t_block.name == "hr" and "full" in t_block.get("class", ""):
                    break
                gt = t_block.get_text().lower()
                if t_block.name == "pre" and "gutenberg" in gt:
                    break
                elif t_block.name == "p" and "end of" in gt and "gutenberg" in gt:
                    break
                else:
                    composed_content.append(t_block)
    else:
        print("no pre", filename)
        # No <pre> header at all: walk the <body> children and carve out
        # the text between the "start of"/"end of" Gutenberg markers.
        # mode: 0 = before start marker, 0.5 = marker seen, 1 = in body,
        # 2 = past end marker.
        body = soup.find("body")
        if not body:
            raise Exception("Invisible man?")
        pre_ctx = []
        ctx = []
        post_ctx = []
        mode = 0
        for children in body:
            if isinstance(children, str):
                continue
            if isinstance(children, type(None)):
                continue
            gt = children.get_text()
            if children.name == "table":
                children.decompose()
            if (
                (children.name == "div" or children.name == "p")
                and "start of" in gt.lower()
                and "gutenberg" in gt.lower()
            ):
                mode = 0.5
                pre_ctx.append(children)
            if (
                (children.name == "div" or children.name == "p")
                and "end of" in gt.lower()
                and "gutenberg" in gt.lower()
                and mode == 1
            ):
                mode = 2
                post_ctx.append(children)
            if mode == 0:
                pre_ctx.append(children)
            elif mode == 0.5:
                mode = 1
                pre_ctx.append(children)
            elif mode == 1:
                ctx.append(children)
            elif mode == 2:
                post_ctx.append(children)
        # Keep only real tags from the in-body context.
        composed_content = [tag for tag in ctx if tag.name is not None]
    # Final filter: drop nameless nodes and tables of contents.
    pure = []
    for content in composed_content:
        if isinstance(content, str):
            pure.append(content)
            continue
        if not content.name:
            continue
        if "toc" in content.get("class", "") and content.name in ["ul", "ol"]:
            continue
        pure.append(content)
    pp = normalize_puncts(
        "\n".join(
            md.convert_soup(soup_content)
            if not isinstance(soup_content, str)
            else soup_content
            for soup_content in pure
        )
    )
    pp = clean_newlines(pp)
    pp = merge_consecutive_lines(pp)
    pp = clean_newlines(pp)
    pathlib.Path(filename).write_text(pp, encoding="utf-8")

    return pp
183
+
184
+
185
# Ensure the output directory exists before any worker tries to write into it.
_out_dir = pathlib.Path("gutenberg_processed")
_out_dir.mkdir(exist_ok=True)
186
+
187
+
188
if __name__ == "__main__":
    # Fan the per-book parsing out over worker processes. final.json is a
    # list of (path, file_type) pairs; only "html" entries are processed.
    with fut.ProcessPoolExecutor(max_workers=64) as pool:
        futures = []
        for gutenbook in orjson.loads(pathlib.Path("final.json").read_bytes()):
            path, f_type = gutenbook
            if f_type == "html":
                # print("processing", path)
                path = pathlib.Path(path)
                list_html = list(path.glob("*.htm"))
                if not list_html:
                    # Previously this fell straight through to list_html[0]
                    # and crashed with IndexError on dirs without *.htm.
                    print("no .htm file in", path)
                    continue
                if len(list_html) > 1:
                    print(list_html)
                list_html = list_html[0]

                futures.append(
                    pool.submit(
                        blackbox,
                        list_html,
                        str(
                            pathlib.Path("gutenberg_processed")
                            / list_html.with_suffix(".txt").name
                        ),
                    )
                )
        print("Waiting for the future to arrive...")
        done, _ = fut.wait(futures)
        # fut.wait alone swallows worker exceptions; report them so failed
        # books are visible instead of silently missing from the output.
        for finished in done:
            exc = finished.exception()
            if exc is not None:
                print("worker failed:", exc)