|
import os |
|
from collections import defaultdict |
|
from typing import List |
|
|
|
import datasets |
|
from datasets import Sequence, Value, load_dataset |
|
|
|
from .process import process_text, get_structured_data |
|
from typing import List |
|
from math import ceil |
|
from .configs import SUB_DATASETS |
|
|
|
def processing(data, name):
    """Apply config-specific post-processing to a batch of examples.

    ``data`` is a batch dict holding a ``'text'`` list.  For the
    ``"processed"`` config every text is cleaned in place; for
    ``"structured"`` the texts are cleaned and a parsed
    ``'structured_text'`` column is added as well.  Any other config
    name leaves the batch untouched.  The dict is mutated and returned.
    """
    if name not in ("processed", "structured"):
        return data
    cleaned = [process_text(raw) for raw in data['text']]
    data['text'] = cleaned
    if name == "structured":
        data['structured_text'] = [
            get_structured_data(txt, default_value={"item": [], "content": []})
            for txt in cleaned
        ]
    return data
|
|
|
|
|
def sliding(texts: List[str], window_size: int = 5, stride: int = 3) -> List[List[str]]:
    """Split *texts* into overlapping windows of ``window_size`` items.

    Consecutive windows start ``stride`` items apart; the final window may
    be shorter than ``window_size`` when the input does not divide evenly.

    Args:
        texts: sequence of items to window over.
        window_size: number of items per window.
        stride: offset between the starts of consecutive windows.

    Returns:
        A list of windows (each a list slice of ``texts``); ``[]`` for
        empty input.
    """
    if not texts:
        return []
    # max(..., 1) fixes a bug in the original formula: for inputs with
    # len(texts) <= window_size - stride the computed count was 0 and the
    # input was silently dropped; a non-empty input always yields >= 1 window.
    n_iter = max(ceil((len(texts) - window_size) / stride) + 1, 1)
    return [texts[i * stride:i * stride + window_size] for i in range(n_iter)]
|
|
|
class NamuWiki(datasets.GeneratorBasedBuilder):
    """Dataset builder for a NamuWiki dump.

    Exposes one config per entry in ``SUB_DATASETS`` (``raw``,
    ``processed``, ``structured``, and ``char-*``/``word-*`` windowed
    variants, judging by the branches below).
    """

    # One BuilderConfig per sub-dataset variant, declared in .configs.
    BUILDER_CONFIGS = SUB_DATASETS

    def _info(self):
        """Return dataset metadata taken from the selected config."""
        return datasets.DatasetInfo(
            description="",
            features=self.config.features,
            homepage=self.config.url,
            citation=self.config.citation + "\n" + "",
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download the selected config's data and declare a single TRAIN split.

        NOTE(review): a config name matching none of the branches falls
        through and returns None — presumably every name in SUB_DATASETS is
        covered by one of the branches; verify against .configs.
        """
        if self.config.name == "processed":
            data_file = dl_manager.download(self.config.data_url)
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "data_file": data_file,
                        "split": "train"
                    }
                ),
            ]

        elif self.config.name.startswith(("char", "word")):
            # Config names look like "char-128" / "word-64": unit prefix
            # plus an integer length after the dash.
            _, length = self.config.name.split("-")
            length = int(length)
            data_file = dl_manager.download(self.config.data_url)
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "data_file": data_file,
                        "split": "train",
                        "length": length
                    }
                ),
            ]

        elif self.config.name == "raw":
            # The raw dump is an archive containing one large JSON file.
            data_file = dl_manager.download_and_extract(self.config.data_url)
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "data_file": os.path.join(data_file, "namuwiki_20210301.json"),
                        "split": "train"
                    }
                ),
            ]

    def _generate_examples(self, data_file, split, length=None):
        """Generate NamuWiki examples by streaming the JSON dump with ijson.

        NOTE(review): ``length`` is accepted (passed by the char/word
        configs) but never used in this method — confirm whether windowing
        is meant to happen here or downstream.
        """
        # HACK: installs ijson at runtime so the streaming parser is
        # available inside the datasets worker; consider declaring it as a
        # proper dependency instead of shelling out to pip.
        os.system("pip install ijson")
        import ijson
        # Fields collected from each record; the "item" component comes from
        # ijson's path notation for array elements and is stripped below.
        _TARGET = {"title", "text", "contributors.item"}
        n, output = 0, defaultdict(list)
        with open(data_file) as f:
            for key, dtype, value in ijson.parse(f):
                key = key.replace("item.", "")
                # A "namespace" key marks the start of the next record, so the
                # fields accumulated for the previous record are flushed here.
                # NOTE(review): the final record is only emitted if another
                # "namespace" key follows it in the stream — confirm against
                # the dump's key layout that no trailing record is dropped.
                if key == "namespace" and len(output):
                    # Scalar fields keep their single value; "contributors"
                    # stays a list.
                    output = {k: (v[0] if k != "contributors" else v) for k, v in output.items()}
                    yield n, processing(output, self.config.name)
                    output = defaultdict(list)
                    n += 1
                elif key in _TARGET:
                    output[key.replace(".item", "")].append(value)
|
|