"""Script to generate splits for benchmarking text embedding clustering.

Data and preprocessing based on the 10kGNAD dataset (https://github.com/tblock/10kGNAD)."""
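
# Usage sketch (the file names are illustrative; the only requirement is that
# the first command-line argument points at the 10kGNAD SQLite database):
#
#     python generate_splits.py corpus.sqlite3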

import random
import re
import sqlite3
import sys

import jsonlines
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm

# Seed the RNG so the generated splits are reproducible.
random.seed(42)

DATA_PATH = sys.argv[1]

# Titles only by default; set INCLUDE_BODY to True to also extract body text.
INCLUDE_BODY = False

ARTICLE_QUERY = (
    f"SELECT Path, Title{', Body' if INCLUDE_BODY else ''} FROM Articles"
    " WHERE Path LIKE 'Newsroom/%' AND Path NOT LIKE 'Newsroom/User%'"
    " ORDER BY Path"
)
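
# Each of the NUM_SPLITS splits samples a random fraction of the corpus,
# between 10% and 100% of all articles (see get_split below).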
NUM_SPLITS = 10
SPLIT_RANGE = np.array([0.1, 1.0])


def get_split(frame, split_range=SPLIT_RANGE):
    """Sample a random subset of `frame` and return it as a dict of lists."""
    samples = random.randint(*(split_range * len(frame)).astype(int))
    return frame.sample(samples).to_dict("list")
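

# One split is a plain dict of parallel lists, e.g. (illustrative values):
#   {"sentences": ["Erster Artikeltitel", ...], "labels": ["Web", ...]}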


def write_sets(name, sets):
    """Write the splits to `name` in JSON Lines format, one object per line."""
    with jsonlines.open(name, "w") as f_out:
        f_out.write_all(sets)


conn = sqlite3.connect(DATA_PATH)
cursor = conn.cursor()

samples = []
for row in tqdm(cursor.execute(ARTICLE_QUERY).fetchall(), unit_scale=True):
    path, title = row[0], row[1]

    text = title

    if INCLUDE_BODY:
        body = row[-1]
        soup = BeautifulSoup(body, "html.parser")
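
        # Bodies are raw HTML snippets from derstandard.at: take the
        # sub-headline (itemprop="description") and the copytext paragraphs.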
        description_obj = soup.find("h2", {"itemprop": "description"})
        if description_obj is not None:
            text += (
                " " + description_obj.text.replace("\n", " ").replace("\t", " ").strip()
            )

        text_container = soup.find("div", {"class": "copytext"})
        if text_container is not None:
            for p in text_container.find_all("p"):
                text += " " + (
                    p.text.replace("\n", " ")
                    .replace("\t", " ")
                    .replace('"', "")
                    .replace("'", "")
                    + " "
                )
        text = text.strip()

        # Strip a trailing author/agency attribution, e.g. ". (APA, 12.3.2015)".
        for author in re.findall(r"\.\ \(.+,.+2[0-9]+\)", text[-50:]):
            text = text.replace(author, ".")
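
    # The second segment of Path is the topic category, e.g.
    # "Newsroom/Web/..." -> "Web"; it becomes the ground-truth cluster label.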
    label = path.split("/")[1]
    samples.append([text, label])

conn.close()

samples = pd.DataFrame(samples, columns=["sentences", "labels"])

sets = []
for _ in range(NUM_SPLITS):
    sets.append(get_split(samples))

write_sets("test.jsonl", sets)
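
# test.jsonl now contains NUM_SPLITS lines, one JSON object per split, each
# holding parallel "sentences" and "labels" lists.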