|
|
|
|
|
""" |
|
Wikipedia Text Extractor: |
|
|
|
This script runs in two main stages: transforming the raw HTML source code into plain text, then testing and filtering the lines to retain.
|
|
|
Required: |
|
pip install beautifulsoup4 langid |
|
|
|
Step 1: /html_wiki/ -> /txt_wiki/ |
|
- Removal of undesired tags and classes. |
|
- Extraction of text content from the HTML code. |
|
|
|
Step 2: /txt_wiki/ -> /txt_wiki_lines/ |
|
- Sentence-by-sentence testing to decide which elements to exclude, based on criteria such as special-character counts, language detection, etc.
- Language testing is performed on each line and also on every element within parentheses.
|
|
|
The two stages are deliberately kept separate. Numerous log files are generated to aid in debugging, testing, and refining the exclusion parameters.
|
The goal here is a fruitful harvest of quality text juice, not hitting the lottery jackpot.
|
|
|
Author : Guillaume Eckendoerffer |
|
Date : 22-09-23 |
|
Repository : https://github.com/Eckendoerffer/TorchTrainerFlow/ |
|
https://huggingface.co/datasets/eckendoerffer/wikipedia_fr |
|
""" |
|
|
|
import os, re |
|
from bs4 import BeautifulSoup |
|
from langid.langid import LanguageIdentifier, model |
|
identifier = LanguageIdentifier.from_modelstring(model, norm_probs=True) |
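# identifier.classify(text) returns a (language_code, probability) tuple; only the code is
# compared against ALLOWED_LANGUAGE below.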
|
|
|
MAX_LENGTH = 330           # max number of words accumulated into a single output line
MAX_SENTENCE_LENGTH = 350  # word-count cutoff beyond which an assembled line is logged as too long
|
|
|
ALLOWED_LANGUAGE = ['fr']  # langid language codes to keep
|
|
|
# "Math" heuristics: too many standalone numbers, '+' or '=' signs -> excluded_math.txt
MAX_NUM_COUNT = 26
MAX_PLUS_SIGN_COUNT = 5
MAX_EQUALS_SIGN_COUNT = 5

# Character-count heuristics: too many quotes, brackets, commas or '$' -> excluded_chars.txt
MAX_DOUBLE_QUOTE_COUNT = 18
MAX_PARENTHESIS_COUNT = 14
MAX_BRACKET_COUNT = 12
MAX_COMMA_COUNT = 40
MAX_DOLLAR_COUNT = 5
|
|
|
LONG_WORD_CHARS = 29    # a word longer than this many characters counts as a "long word"
MAXIMUM_LONG_WORDS = 3  # max number of long hyphenated words tolerated per assembled line
|
|
|
# Wikipedia maintenance banners, metadata blocks, galleries and infoboxes, dropped before extraction
CLASSES_TO_REMOVE = ['bandeau-container', 'bandeau-section', 'metadata', 'bandeau-niveau-information', 'gallery', 'infobox_v3']
|
|
|
# Navigation, lists, tables and headings are removed wholesale
TAG_TO_REMOVE = ['nav', 'menu', 'ul', 'ol', 'table', 'h1', 'h2', 'h3', 'h4', 'h5']
|
|
|
PATH = os.path.dirname(os.path.abspath(__file__)).replace('\\', '/') |
|
HTML_PATH = PATH + '/sources/html_wiki/' |
|
TARGET_LONG = PATH + "/excluded_long.txt" |
|
TARGET_LANG = PATH + "/excluded_lang.txt" |
|
TARGET_MATH = PATH + "/excluded_math.txt" |
|
TARGET_CHARS = PATH + "/excluded_chars.txt" |
|
TARGET_PARENTH = PATH + "/excluded_parentheses.txt" |
|
TARGET_LATEX = PATH + "/excluded_latex.txt" |
|
FILES = [f for f in os.listdir(HTML_PATH) if os.path.isfile(os.path.join(HTML_PATH, f))] |
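# Create the output folders if they do not exist yet (assumes the docstring layout, i.e.
# sources/txt_wiki/ and sources/txt_wiki_lines/ next to sources/html_wiki/).
os.makedirs(PATH + '/sources/txt_wiki/', exist_ok=True)
os.makedirs(PATH + '/sources/txt_wiki_lines/', exist_ok=True)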
|
|
|
# Reset the exclusion log files
for target in (TARGET_LONG, TARGET_LANG, TARGET_MATH, TARGET_CHARS, TARGET_PARENTH, TARGET_LATEX):
    with open(target, 'w', encoding="utf8"):
        pass
|
|
|
def extract_wikipedia_text(html_path, txt_path): |
|
""" Step 1: Extraction of text content from the HTML code """ |
|
with open(html_path, "r", encoding="utf-8") as f: |
|
wiki_content = f.read() |
|
soup = BeautifulSoup(wiki_content, 'html.parser') |
|
|
|
|
|
for tag in soup.find_all(TAG_TO_REMOVE): |
|
tag.decompose() |
|
|
|
|
|
for class_name in CLASSES_TO_REMOVE: |
|
for div in soup.find_all("div", class_=class_name): |
|
div.decompose() |
|
|
|
|
|
text = soup.get_text() |
|
    # Normalize apostrophe variants
    text = text.replace(chr(8217), "'")
    text = text.replace("`", "'")
    text = text.replace("‘", "'")
    # Strip reference markers like [12], leftover {...} template content and remaining [...] content
    text = re.sub(r'\[\d+\]', ' ', text)
    text = re.sub(r'\{[^\}]*\}', ' ', text)
    text = re.sub(r'\[[^\]]*\]', ' ', text)
|
|
|
with open(txt_path, "w", encoding="utf-8") as f: |
|
f.write(text) |
|
return len(text) |
|
|
|
def split_into_sentences(text): |
|
    # Split on sentence-ending punctuation (optionally followed by a closing « » quote) or on " -- ",
    # then glue each delimiter back onto the chunk that precedes it.
    sentences = re.split(r'([.;!?]\s*\u00BB|[.;!?\u00BB]\s*(?!\u00BB)|\s*--\s*)', text)
    sentences = ["".join(i) for i in zip(sentences[0::2], sentences[1::2])]
|
return sentences |
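# Note: each returned chunk keeps its terminating delimiter (sentence punctuation, a closing « » quote
# or a " -- " separator); any trailing text after the final delimiter is dropped.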
|
|
|
def text_standardize(text): |
|
    # Normalize dashes and ellipses
    text = text.replace('—', '-')
    text = text.replace('–', '-')
    text = text.replace('―', '-')
    text = text.replace('…', '...')
    # Pad runs of punctuation/special characters with spaces so they count as separate tokens
    text = re.sub(r'''(~+|!+|"+|;+|\?+|\++|,+|\)+|\(+|\\+|\/+|\*+|\[+|\]+|}+|{+|\|+|_+)''', r' \1 ', text)
    # Whitespace normalization
    text = re.sub(r'\s*\n\s*', ' \n ', text)
    text = re.sub(r'[^\S\n]+', ' ', text)
    text = re.sub(r"\s{2,}", " ", text)
|
return text.strip() |
|
|
|
def has_repeated_uniq_char(text): |
|
    # Matches 5+ single alphanumeric characters each followed by a space (e.g. "a b c d e f"),
    # typical of tables of values or spaced-out lettering.
    pattern = r'([a-zA-Z0-9] ){5,}'
|
return bool(re.search(pattern, text)) |
|
|
|
def count_long_words(text):
    """ Counts words longer than LONG_WORD_CHARS that contain a hyphen. """
    nb_long = 0
    for word in text.split():
        if len(word) > LONG_WORD_CHARS and '-' in word:
            nb_long += 1
    return nb_long
|
|
|
def remove_latex(text):
    """ Blanks out fragments dominated by delimiters and spaces (typical of leftover LaTeX/<math> markup). """
    sp_chars = sum(text.count(c) for c in '(){}_/ ')
    if sp_chars > (len(text) / 2) and len(text) > 4:
        text = ''
    return text
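# Sentences blanked here are logged to excluded_latex.txt by the caller before being discarded.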
|
|
|
def extract_parentheses(text):
    """ Returns (start, end) index pairs for matching parentheses found in text. """
    stack = []
    results = []
|
for i, char in enumerate(text): |
|
if char == '(': |
|
stack.append(i) |
|
elif char == ')' and stack: |
|
start = stack.pop() |
|
results.append((start, i)) |
|
return results |
|
|
|
def is_date_or_year_range(content): |
|
return bool(re.match(r'^\d{4}(-\d{4})?$', content.strip())) |
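# Parenthesised dates and year ranges (e.g. "(1914-1918)") are exempt from the language check
# below, since language identification on such short numeric strings is unreliable.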
|
|
|
def remove_language_in_parentheses(line, target_file_parentheses):
    """ Removes parenthesised segments whose detected language is not in ALLOWED_LANGUAGE and logs them. """
    for start, end in reversed(extract_parentheses(line)):
|
match = line[start+1:end] |
|
if is_date_or_year_range(match): |
|
continue |
|
lang = identifier.classify(match) |
|
if lang[0] not in ALLOWED_LANGUAGE: |
|
line = line[:start] + line[end+1:] |
|
target_file_parentheses.write(f'({match})' + "\n") |
|
return line |
|
|
|
def test_exclude(line_add, target_file_math, target_file_chars, target_file_lang, target_file_long, target_file, target_file_parentheses):
    """ Routes an assembled line either to the cleaned output file or to one of the exclusion logs. """
    nb_words_line = len(line_add.split())
|
    if count_long_words(line_add.strip()) > MAXIMUM_LONG_WORDS \
        or len(re.findall(r'\d+ ', line_add.strip())) > MAX_NUM_COUNT \
        or line_add.count('=') > MAX_EQUALS_SIGN_COUNT or line_add.count('+') > MAX_PLUS_SIGN_COUNT:
|
target_file_math.write(f"{line_add.strip()} \n") |
|
elif line_add.count('"') > MAX_DOUBLE_QUOTE_COUNT or line_add.count('(') > MAX_PARENTHESIS_COUNT \ |
|
or line_add.count('[') > MAX_BRACKET_COUNT \ |
|
or line_add.count(',') > MAX_COMMA_COUNT \ |
|
or line_add.count('$') > MAX_DOLLAR_COUNT: |
|
target_file_chars.write(f"{line_add.strip()} \n") |
|
else: |
|
lang = identifier.classify(line_add) |
|
if lang[0] not in ALLOWED_LANGUAGE: |
|
target_file_lang.write(f"[{lang[0]}] {line_add.strip()} \n") |
|
else: |
|
if len(line_add.split()) > MAX_SENTENCE_LENGTH or has_repeated_uniq_char(line_add.strip()): |
|
target_file_long.write(f"[{nb_words_line}] {line_add.strip()} \n") |
|
else: |
|
                line_add = re.sub(r"\s{2,}", " ", remove_language_in_parentheses(line_add.strip(), target_file_parentheses))
|
target_file.write(f"{line_add} \n") |
|
|
|
def test_line(full_path, full_target_path): |
|
""" Step 2: Exclusion based on criteria """ |
|
nb_words_line = 0 |
|
line_add = "" |
|
|
|
    with open(full_path, "r", encoding="utf8", errors="ignore") as source_file, \
        open(full_target_path, "w", encoding="utf8") as target_file, \
|
open(TARGET_LONG, "a", encoding="utf8") as target_file_long, \ |
|
open(TARGET_LANG, "a", encoding="utf8") as target_file_lang, \ |
|
open(TARGET_MATH, "a", encoding="utf8") as target_file_math, \ |
|
open(TARGET_CHARS, "a", encoding="utf8") as target_file_chars, \ |
|
open(TARGET_PARENTH, "a", encoding="utf8") as target_file_parentheses, \ |
|
open(TARGET_LATEX, "a", encoding="utf8") as target_file_latex: |
|
|
|
for line in source_file: |
|
            line = '' if 'Articles détaillés :' in line else line.strip()
|
line = line.replace("Un article de Wikipédia, l'encyclopédie libre.", "") |
|
            # Drop lines carrying URLs or Wikipedia boilerplate
            if any(token in line for token in ('Ce document provient de', 'https://', 'wikipedia.', 'index.', 'php?', 'title=')):
                line = ''
|
|
|
line = re.sub(r',\s,', ' ', line) |
|
line = re.sub(r'\.\s\.', '.', line) |
|
line = re.sub(r',\s\.', '.', line) |
|
|
|
sentences = split_into_sentences(re.sub(r"\s{2,}", " ", line)) |
|
for sentence in sentences: |
|
                if remove_latex(text_standardize(sentence)) == '':
                    target_file_latex.write(f"{sentence.strip()} \n")
                    sentence = ''
|
words = len(sentence.split()) |
|
                # Accumulate sentences until the assembled line reaches MAX_LENGTH words
                if len(line_add.split()) + words < MAX_LENGTH:
|
nb_words_line += words |
|
line_add += f" {text_standardize(sentence)}" |
|
else: |
|
test_exclude(line_add, target_file_math, target_file_chars, target_file_lang, target_file_long, target_file, target_file_parentheses) |
|
nb_words_line = len(sentence.split()) |
|
line_add = f" {text_standardize(sentence)}" |
|
|
|
        # Flush whatever is left in the accumulation buffer
        if nb_words_line:
|
test_exclude(line_add, target_file_math, target_file_chars, target_file_lang, target_file_long, target_file, target_file_parentheses) |
|
|
|
|
|
# Process each HTML page: Step 1 (HTML -> raw text), then Step 2 (raw text -> filtered lines)
for i, file in enumerate(FILES):
|
html_path = HTML_PATH + file |
|
txt_path = html_path.replace('html_wiki', 'txt_wiki') |
|
txt_len = extract_wikipedia_text(html_path, txt_path) |
|
txt_lines_path = html_path.replace('html_wiki', 'txt_wiki_lines') |
|
test_line(txt_path, txt_lines_path) |
|
print(f"({i+1}/{len(FILES)}) {file} {txt_len}") |
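# Output layout: one raw text file per article in sources/txt_wiki/ (Step 1), the filtered and
# re-assembled lines in sources/txt_wiki_lines/ (Step 2), and the excluded_* logs next to this script.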
|
|