|
|
|
|
|
import bz2 |
|
import json |
|
import xml.etree.ElementTree as etree  # cElementTree was removed in Python 3.9
|
import re |
|
import sqlite3 |
|
from urllib.parse import quote |
|
import mwparserfromhell |
|
import requests |
|
import os |
|
import coremltools as ct |
|
from transformers import AutoTokenizer |
|
import numpy as np |
|
from usearch.index import Index, Matches |
|
import multiprocessing |
|
from multiprocessing import Pool |
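
# Pipeline overview (see the functions below):
#   1. download_wiki_dump      - fetch the multistream articles dump for a language/date
#   2. process_wiki_dump       - stream pages out of the bz2 XML and clean the wikitext
#   3. save_wiki_dump          - store cleaned sections in sqlite, then embed, index, and run a test query
#   4. index_batches_parallel  - embed sections with a quantized msmarco-distilbert-base-tas-b Core ML model
#                                and build a usearch index over the vectors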
|
|
|
def download_wiki_dump(lang, date): |
|
"""Download the Wikipedia dump file.""" |
|
local_filename = f"{lang}wiki-{date}-dump.xml.bz2" |
|
if os.path.isfile(local_filename): |
|
print(f"Using existing file: {local_filename}") |
|
return local_filename |
|
|
|
base_url = f"https://dumps.wikimedia.org/{lang}wiki/{date}/" |
|
dump_status_url = base_url + "dumpstatus.json" |
|
    response = requests.get(dump_status_url)
    response.raise_for_status()
|
dump_info = json.loads(response.text) |
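    # Pick the combined multistream dump; for a small wiki such as "simple" the
    # articlesmultistreamdump job lists a single .xml.bz2 file, and the loop below
    # simply takes the first match.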
|
|
|
for filename, info in dump_info["jobs"]["articlesmultistreamdump"]["files"].items(): |
|
if filename.endswith(".xml.bz2"): |
|
file_url = base_url + filename |
|
break |
|
else: |
|
raise ValueError("Couldn't find the multistream dump file") |
|
|
|
with requests.get(file_url, stream=True) as r: |
|
r.raise_for_status() |
|
with open(local_filename, 'wb') as f: |
|
for chunk in r.iter_content(chunk_size=8192): |
|
f.write(chunk) |
|
|
|
return local_filename |
|
|
|
def parse_and_clean_wikicode(raw_content, parser, lang): |
|
"""Strips formatting and unwanted sections from raw page content.""" |
|
wikicode = parser.parse(raw_content) |
|
|
|
|
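    # MediaWiki behaviour switches ("magic words") such as __NOTOC__ or __TOC__ are stripped from the text.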
|
re_rm_magic = re.compile("__[A-Z]*__", flags=re.UNICODE) |
|
|
|
|
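    # Wikilinks that only embed media (File:/Image:/Media:) are removed entirely.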
|
media_prefixes = "|".join(["File", "Image", "Media"]) |
|
re_rm_wikilink = re.compile(f"^(?:{media_prefixes}):", flags=re.IGNORECASE | re.UNICODE) |
|
|
|
def rm_wikilink(obj): |
|
return bool(re_rm_wikilink.match(str(obj.title))) |
|
|
|
|
|
def rm_tag(obj): |
|
return str(obj.tag) in {"ref", "table"} |
|
|
|
|
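    # Category links are kept, but the "Category:" prefix is stripped from the link text.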
|
cat_prefixes = "|".join(["Category"]) |
|
re_clean_wikilink = re.compile(f"^(?:{cat_prefixes}):", flags=re.IGNORECASE | re.UNICODE) |
|
|
|
def is_category(obj): |
|
return bool(re_clean_wikilink.match(str(obj.title))) |
|
|
|
def clean_wikilink(obj): |
|
text = obj.__strip__() |
|
text = re.sub(re_clean_wikilink, "", text) |
|
obj.text = text |
|
|
|
def try_replace_obj(obj): |
|
try: |
|
clean_wikilink(obj) |
|
except ValueError: |
|
|
|
pass |
|
|
|
def try_remove_obj(obj, section): |
|
try: |
|
section.remove(obj) |
|
except ValueError: |
|
|
|
pass |
|
|
|
sections = [] |
|
|
|
for section in wikicode.get_sections(flat=True, include_lead=True, include_headings=True): |
|
        headings = section.filter_headings()
        heading = headings[0].title.strip() if headings else "Lead"
|
for obj in section.ifilter_wikilinks(recursive=True): |
|
if rm_wikilink(obj): |
|
try_remove_obj(obj, section) |
|
elif is_category(obj): |
|
try_replace_obj(obj) |
|
for obj in section.ifilter_tags(matches=rm_tag, recursive=True): |
|
try_remove_obj(obj, section) |
|
|
|
section_text = re.sub(re_rm_magic, "", section.strip_code().strip()) |
|
if section_text.startswith(f"{heading}\n"): |
|
section_text = section_text[len(heading):].strip() |
|
|
|
sections.append((heading, section_text)) |
|
return sections |
|
|
|
def construct_url(title, lang): |
|
return f"https://{lang}.wikipedia.org/wiki/{quote(title)}" |
|
|
|
def process_wiki_dump(filepath, lang): |
|
"""Process the Wikipedia dump and yield cleaned articles.""" |
|
total_pages = 0 |
|
main_namespace_pages = 0 |
|
articles_with_content = 0 |
|
redirects = 0 |
|
total_sections = 0 |
|
try: |
|
with bz2.BZ2File(filepath) as f: |
|
context = etree.iterparse(f, events=("end",)) |
|
for _, elem in context: |
|
if not elem.tag.endswith("page"): |
|
continue |
|
|
|
total_pages += 1 |
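
                # elem.tag is namespaced, e.g. "{<mediawiki export namespace>}page"; keep the
                # "{...}" prefix so child elements can be looked up under the same namespace.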
|
|
|
namespace = elem.tag[:-4] |
|
title = elem.find(f"./{namespace}title").text |
|
ns = elem.find(f"./{namespace}ns").text |
|
id_ = elem.find(f"./{namespace}id").text |
|
|
|
if ns != "0": |
|
elem.clear() |
|
continue |
|
|
|
main_namespace_pages += 1 |
|
|
|
red_ = elem.find(f"./{namespace}redirect") |
|
if red_ is not None: |
|
redirects += 1 |
|
elem.clear() |
|
continue |
|
|
|
raw_content = elem.find(f"./{namespace}revision/{namespace}text").text |
|
elem.clear() |
|
if raw_content is None or raw_content.strip() == "": |
|
continue |
|
|
|
articles_with_content += 1 |
|
|
|
                try:
                    sections = parse_and_clean_wikicode(raw_content, parser=mwparserfromhell, lang=lang)
                except mwparserfromhell.parser.ParserError as e:
                    # Skip just this article instead of aborting the whole dump.
                    print(f"💥 mwparserfromhell ParserError on '{title}': {e}")
                    continue

                if not sections:
                    continue

                # Keep only sections with real content: drop near-empty bodies, the bare
                # "References" section, and sections whose body is just the heading again.
                sections = [
                    (heading, text.strip())
                    for heading, text in sections
                    if len(text.strip()) > 1 and heading != "References" and heading != text.strip()
                ]
|
|
|
total_sections += len(sections) |
|
|
|
url = construct_url(title, lang) |
|
|
|
yield { |
|
"article_id": id_, |
|
"url": url, |
|
"title": title, |
|
"sections": sections |
|
} |
|
|
|
elem.clear() |
|
|
|
if articles_with_content % 1000 == 0: |
|
print(f"Processed article {articles_with_content}: {title} {id_}") |
|
finally: |
|
print(f"Total pages processed: {total_pages}") |
|
print(f"Pages in main namespace: {main_namespace_pages}") |
|
print(f"Redirects skipped: {redirects}") |
|
print(f"Articles with content: {articles_with_content}") |
|
|
|
def tokenize(text): |
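    """Tokenize `text` to a fixed 512-token window (padded and truncated), returning
    numpy input ids and attention mask for the Core ML embedding model."""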
|
return tokenizer( |
|
text, |
|
add_special_tokens=True, |
|
max_length=512, |
|
padding='max_length', |
|
truncation=True, |
|
return_attention_mask=True, |
|
return_tensors='np' |
|
) |
|
|
|
def tokenize_for_chunking(text): |
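    """Tokenize without padding or truncation and return the flat array of token ids;
    used only to count tokens when deciding chunk boundaries."""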
|
res = tokenizer( |
|
text, |
|
add_special_tokens=True, |
|
padding='do_not_pad', |
|
truncation=False, |
|
return_tensors='np' |
|
) |
|
return res['input_ids'].flatten() |
|
|
|
def embed(text): |
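    """Run the Core ML model on `text` and return its embedding vector
    (the first row of the model's 'embeddings' output)."""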
|
result = tokenize(text) |
|
token_ids = result['input_ids'].astype(np.float32) |
|
mask = result['attention_mask'].astype(np.float32) |
|
|
|
|
|
predictions = model.predict({"input_ids": token_ids, "attention_mask": mask}) |
|
return predictions['embeddings'][0] |
|
|
|
def build_section_header(section_name, article_title): |
|
return f"Page: {article_title}\nSection: {section_name}\n\n" |
|
|
|
def lazypack_chunk4(text, seps=["\n\n", ".\n", "?\n", "\n", ". ", "? ", " "], max_length=512, overlap=64, tokenize_fn=None): |
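    """Split `text` on progressively finer separators and greedily repack the pieces into
    chunks of at most `max_length - overlap` units (tokens when `tokenize_fn` is given,
    characters otherwise). Each chunk after the first is then prefixed with up to
    `overlap` units taken, word by word, from the end of the previous chunk.

    Hypothetical example (illustrative, not actual output):
        lazypack_chunk4(long_section_text, max_length=448, tokenize_fn=tokenize_for_chunking)
        -> ["first ~384 tokens ...", "tail of chunk 1 ... next ~384 tokens ...", ...]
    """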
|
def count_units(chunk): |
|
return len(tokenize_fn(chunk)) if tokenize_fn else len(chunk) |
|
|
|
chunks = [text] |
|
if count_units(text) <= max_length: |
|
return chunks |
|
|
|
for sep in seps: |
|
new_chunks = [] |
|
for chunk in chunks: |
|
split_chunks = [sub_chunk.strip() for sub_chunk in chunk.split(sep)] |
|
packed_chunk = "" |
|
for split_chunk in split_chunks: |
|
if packed_chunk and count_units(packed_chunk + sep + split_chunk) <= (max_length - overlap): |
|
packed_chunk += sep + split_chunk |
|
else: |
|
if packed_chunk: |
|
new_chunks.append(packed_chunk) |
|
packed_chunk = split_chunk |
|
if packed_chunk: |
|
new_chunks.append(packed_chunk) |
|
|
|
        # Re-attach the (non-whitespace) separator to every chunk except the last so
        # sentence-ending punctuation survives the split.
        chunks = [x + sep.strip() if sep.strip() and i < len(new_chunks) - 1 else x
                  for i, x in enumerate(new_chunks)]
|
if all(count_units(chunk) <= (max_length - overlap) for chunk in chunks): |
|
break |
|
|
|
final_chunks = [] |
|
previous_end = "" |
|
for i in range(len(chunks)): |
|
current_chunk = chunks[i] |
|
if previous_end: |
|
|
|
half_overlap_length = min(overlap, int(count_units(previous_end) / 2), int(count_units(current_chunk) / 2)) |
|
|
|
words = previous_end.split() |
|
overlap_text = "" |
|
overlap_count = 0 |
|
for word in reversed(words): |
|
overlap_count += count_units(word + " ") |
|
if overlap_count > half_overlap_length: |
|
break |
|
overlap_text = word + " " + overlap_text |
|
|
|
|
|
combined_chunk = (overlap_text + current_chunk).strip() |
|
if count_units(combined_chunk) > max_length: |
|
final_chunks.append(current_chunk) |
|
else: |
|
final_chunks.append(combined_chunk) |
|
else: |
|
final_chunks.append(current_chunk) |
|
previous_end = current_chunk |
|
|
|
return final_chunks |
|
|
|
def initialize_worker(): |
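    """Pool initializer: load the Core ML model and tokenizer once per worker process
    and expose them as the globals used by embed() and tokenize()."""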
|
global model, tokenizer |
|
model = ct.models.CompiledMLModel('./msmarco_distilbert_base_tas_b_512_single_quantized.mlmodelc') |
|
tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/msmarco-distilbert-base-tas-b") |
|
|
|
def batch_embed(batch, db_name): |
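    """Embed every chunk of every section in `batch` (a list of article_sections row ids).

    Returns (keys, vectors) with one key per chunk; the key is the section id, so a section
    split into several chunks appears multiple times (the index is built with multi=True
    to allow duplicate keys)."""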
|
conn = sqlite3.connect(db_name) |
|
placeholders = ', '.join(['?'] * len(batch)) |
|
rows = conn.execute(f"SELECT * FROM article_sections WHERE id IN ({placeholders})", batch).fetchall() |
|
conn.close() |
|
|
|
print(f"🚜 starting embedding batch from {rows[0][0]} to {rows[-1][0]}...") |
|
|
|
keys = [] |
|
vectors = [] |
|
for row in rows: |
|
(section_id, article_id, url, title, seq_id, section, text) = row |
|
chunk_header = build_section_header(section, title) |
|
chunk_header_offset = len(tokenize_for_chunking(chunk_header)) |
|
max_chunk_length = 512 - chunk_header_offset |
|
chunks = lazypack_chunk4(text, max_length=max_chunk_length, tokenize_fn=tokenize_for_chunking) |
|
for chunk in chunks: |
|
chunk_embedding = embed(chunk_header + chunk) |
|
keys.append(section_id) |
|
vectors.append(chunk_embedding) |
|
print(f"🚜 finished embedding batch from {rows[0][0]} to {rows[-1][0]}.") |
|
return keys, np.array(vectors) |
|
|
|
def index_batches_parallel(db_name, index_name, precision, batch_size=2000, num_workers=3): |
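    """Embed all sections in parallel worker processes, then build a usearch index over
    the vectors and save it to `index_name`. `precision` selects the index dtype
    ('f16' or 'f32'); anything else falls back to 'f32'."""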
|
conn = sqlite3.connect(db_name) |
|
total_sections = conn.execute("SELECT COUNT(1) FROM article_sections").fetchone()[0] |
|
conn.close() |
|
|
|
print(f"🔮 Beginning embedding {total_sections} sections...") |
|
|
|
    # article_sections ids are 1-based (AUTOINCREMENT), so batch over 1..total_sections inclusive.
    batches = [(list(range(i, min(i + batch_size, total_sections + 1))), db_name) for i in range(1, total_sections + 1, batch_size)]
|
|
|
with Pool(num_workers, initializer=initialize_worker) as pool: |
|
results = pool.starmap(batch_embed, batches) |
|
|
|
batch_count = len(results) |
|
print(f"🔮 Beginning indexing {total_sections} in {batch_count} batches...") |
|
allowed_precision = ['f16', 'f32'] |
|
index_precision = precision if precision in allowed_precision else 'f32' |
|
index = Index(ndim=768, connectivity=16, metric="ip", multi=True, dtype=index_precision) |
|
for keys, vectors in results: |
|
index.add(keys=keys, vectors=vectors) |
|
|
|
index.save(index_name) |
|
index_size = len(index) |
|
print(f"🔮 Indexing complete with {index_size} records; saved to disk at `{index_name}`.") |
|
|
|
|
|
def save_wiki_dump(lang, date): |
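    """Build (or reuse) the sqlite database of article sections for `lang`/`date`,
    embed and index every section, then run a quick smoke-test query against the saved index."""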
|
db_name = f"wikipedia_{lang}_{date}.db" |
|
db_exists = os.path.exists(db_name) |
|
if db_exists: |
|
print(f"📦 Found existing database `{db_name}`. Skipping download & clean.") |
|
else: |
|
print(f"☁️ Loading Wikipedia articles in {lang} from {date}...") |
|
dump_filepath = download_wiki_dump(lang, date) |
|
print(f"🔍 Downloaded dump file. Cleaning... {dump_filepath}") |
|
articles = process_wiki_dump(dump_filepath, lang) |
|
|
|
    if not db_exists:
        print(f"📦 Importing articles into sqlite `{db_name}`, then embedding and indexing...")
        conn = sqlite3.connect(db_name)
        cursor = conn.cursor()

        cursor.execute("""
            CREATE TABLE IF NOT EXISTS article_sections (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                article_id INTEGER,
                url TEXT,
                title TEXT,
                sequence_id INTEGER,
                section_name TEXT,
                text TEXT
            );
        """)

        batch_size = 1000
        batch = []
        total_articles = 0
        total_sections = 0

        for article in articles:
            article_id = article["article_id"]
            url = article["url"]
            title = article["title"]
            for seq_id, (section, text) in enumerate(article["sections"]):
                batch.append((article_id, url, title, seq_id, section, text))
                total_sections += 1
            total_articles += 1

            if len(batch) >= batch_size:
                print(f"⚙️ Processing articles up to {total_articles}...")
                cursor.executemany("""
                    INSERT OR REPLACE INTO article_sections (article_id, url, title, sequence_id, section_name, text)
                    VALUES (?, ?, ?, ?, ?, ?);
                """, batch)
                conn.commit()

                print(f"⚙️ Processed {total_articles} articles with {total_sections} sections...")
                batch = []

        if batch:
            print(f"⚙️ Processing articles up to {total_articles}...")
            cursor.executemany("""
                INSERT OR REPLACE INTO article_sections (article_id, url, title, sequence_id, section_name, text)
                VALUES (?, ?, ?, ?, ?, ?);
            """, batch)
            conn.commit()

        if total_articles == 0:
            print("💩 No articles found!")
            exit(1)

        conn.execute("VACUUM;")
        count = conn.execute("SELECT COUNT(1) FROM article_sections;").fetchone()[0]
        conn.close()

        print(f"🌎 Initial processing complete! {total_articles} articles, {count} sections in database.")
|
|
|
|
|
index_precision = "f16" |
|
index_path = f"./{lang}wiki-{date}.{index_precision}.index" |
|
index_batches_parallel(db_name, index_path, index_precision) |
|
|
|
index = Index.restore(index_path, view=True) |
|
conn = sqlite3.connect(db_name) |
|
cursor = conn.cursor() |
|
|
|
query = "What is the capital of AUS?" |
|
print(f"🔎 testing search... '{query}'") |
|
qembed = embed(query) |
|
res: Matches = index.search(qembed, 5) |
|
print(f" - Results:") |
|
for result in res: |
|
        (title, section, text) = cursor.execute("SELECT title, section_name, text FROM article_sections WHERE id = ?;", (int(result.key),)).fetchone()
|
snippet = text[:280].replace("\n", " ") |
|
print(f" - Key: {result.key} | Distance: {result.distance} | Excerpt from '{title}', '{section}': {snippet}") |
|
|
|
conn.close() |
|
|
|
if __name__ == "__main__": |
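    # The main process needs its own model/tokenizer for the smoke-test query in
    # save_wiki_dump(); each Pool worker loads separate copies via initialize_worker().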
|
model = ct.models.CompiledMLModel('./msmarco_distilbert_base_tas_b_512_single_quantized.mlmodelc') |
|
tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/msmarco-distilbert-base-tas-b") |
|
lang = "simple" |
|
date = "20240720" |
|
save_wiki_dump(lang, date) |
|
|