eddie14 committed
Commit 94a1be9
1 Parent(s): cf6b1bc
Files changed (3)
  1. configs.py +89 -0
  2. namu.py +94 -0
  3. process.py +76 -0
configs.py ADDED
@@ -0,0 +1,89 @@
+ import datasets
+ from datasets import Value, Sequence
+
+ PROCESS_FILES = [
+     "https://huggingface.co/datasets/korean-corpus/namu/resolve/main/processed/namuwiki210301-processed-000001.parquet",
+     "https://huggingface.co/datasets/korean-corpus/namu/resolve/main/processed/namuwiki210301-processed-000002.parquet",
+     "https://huggingface.co/datasets/korean-corpus/namu/resolve/main/processed/namuwiki210301-processed-000003.parquet",
+     "https://huggingface.co/datasets/korean-corpus/namu/resolve/main/processed/namuwiki210301-processed-000004.parquet",
+     "https://huggingface.co/datasets/korean-corpus/namu/resolve/main/processed/namuwiki210301-processed-000005.parquet",
+     "https://huggingface.co/datasets/korean-corpus/namu/resolve/main/processed/namuwiki210301-processed-000006.parquet",
+     "https://huggingface.co/datasets/korean-corpus/namu/resolve/main/processed/namuwiki210301-processed-000007.parquet",
+     "https://huggingface.co/datasets/korean-corpus/namu/resolve/main/processed/namuwiki210301-processed-000008.parquet",
+     "https://huggingface.co/datasets/korean-corpus/namu/resolve/main/processed/namuwiki210301-processed-000009.parquet",
+     "https://huggingface.co/datasets/korean-corpus/namu/resolve/main/processed/namuwiki210301-processed-000010.parquet",
+ ]
+
+
+ class NamuWikiConfig(datasets.BuilderConfig):
+     """BuilderConfig for NamuWiki."""
+
+     def __init__(self, features, data_url, citation, url, **kwargs):
+         """BuilderConfig for NamuWiki.
+
+         Args:
+             features: `datasets.Features` describing the columns of this config.
+             data_url: URL or list of URLs of the source data files.
+             citation: citation for the dataset.
+             url: homepage of the dataset.
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super().__init__(version=datasets.Version("1.0.2"), **kwargs)
+         self.features = features
+         self.data_url = data_url
+         self.citation = citation
+         self.url = url
+
+
+ # "raw" streams the original dump; "processed" points at pre-cleaned parquet shards;
+ # the char-N / word-N configs carry a chunk length that namu.py parses from the name.
+ SUB_DATASETS = [
+     NamuWikiConfig(
+         name="raw",
+         features=datasets.Features(
+             {
+                 "title": Value("string"),
+                 "text": Value("string"),
+                 "contributors": Sequence(Value("string")),
+             }
+         ),
+         data_url="https://www.dropbox.com/s/03b49jdrx7xi712/namuwiki210301.7z?dl=1",
+         citation="",
+         url="",
+     ),
+     NamuWikiConfig(
+         name="processed",
+         features=datasets.Features(
+             {
+                 "id": Value("string"),
+                 "title": Value("string"),
+                 "text": Value("string"),
+             }
+         ),
+         data_url=PROCESS_FILES,
+         citation="",
+         url="",
+     ),
+ ] + [
+     NamuWikiConfig(
+         name=f"char-{length}",
+         features=datasets.Features(
+             {
+                 "id": Value("string"),
+                 "title": Value("string"),
+                 "text": Value("string"),
+             }
+         ),
+         data_url=PROCESS_FILES,
+         citation="",
+         url="",
+     ) for length in [32, 64, 128, 256, 512]
+ ] + [
+     NamuWikiConfig(
+         name=f"word-{length}",
+         features=datasets.Features(
+             {
+                 "id": Value("string"),
+                 "title": Value("string"),
+                 "text": Value("string"),
+             }
+         ),
+         data_url=PROCESS_FILES,
+         citation="",
+         url="",
+     ) for length in [32, 64, 128, 256, 512]
+ ]
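For reference, a minimal sketch of how these configs would be selected at load time. This assumes the script is published under the korean-corpus/namu repository (as the parquet URLs above suggest), and note that recent versions of datasets may additionally require trust_remote_code=True for script-based datasets:

from datasets import load_dataset

# Config names defined above: "raw", "processed", "char-32" ... "char-512", "word-32" ... "word-512".
ds = load_dataset("korean-corpus/namu", "processed", split="train")
print(ds[0]["title"], ds[0]["text"][:80])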
namu.py ADDED
@@ -0,0 +1,94 @@
+ import os
+ from collections import defaultdict
+ from math import ceil
+ from typing import List
+
+ import datasets
+
+ from .configs import SUB_DATASETS
+ from .process import process_text, get_structured_data
+
+
+ def processing(data, name):
+     # Applied to a single example in _generate_examples, so "text" is one string.
+     if name == "processed":
+         data["text"] = process_text(data["text"])
+     elif name == "structured":
+         data["text"] = process_text(data["text"])
+         data["structured_text"] = get_structured_data(
+             data["text"], default_value={"item": [], "content": []}
+         )
+     return data
+
+
+ def sliding(texts: List[str], window_size: int = 5, stride: int = 3) -> List[List[str]]:
+     """Split `texts` into overlapping windows of `window_size` items, advancing by `stride`."""
+     n_iter = ceil((len(texts) - window_size) / stride) + 1
+     return [texts[i * stride:i * stride + window_size] for i in range(n_iter)]
+
+
+ class NamuWiki(datasets.GeneratorBasedBuilder):
+     BUILDER_CONFIGS = SUB_DATASETS
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description="",
+             features=self.config.features,
+             homepage=self.config.url,
+             citation=self.config.citation,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         if self.config.name == "processed":
+             data_file = dl_manager.download(self.config.data_url)
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={"data_file": data_file, "split": "train"},
+                 ),
+             ]
+         elif self.config.name.startswith(("char", "word")):
+             # Config names look like "char-128" / "word-64"; parse the chunk length.
+             _, length = self.config.name.split("-")
+             data_file = dl_manager.download(self.config.data_url)
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={"data_file": data_file, "split": "train", "length": int(length)},
+                 ),
+             ]
+         elif self.config.name == "raw":
+             data_file = dl_manager.download_and_extract(self.config.data_url)
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={
+                         "data_file": os.path.join(data_file, "namuwiki_20210301.json"),
+                         "split": "train",
+                     },
+                 ),
+             ]
+
+     def _generate_examples(self, data_file, split, length=None):
+         """Generate NamuWiki examples by streaming the JSON dump with ijson."""
+         os.system("pip install ijson")  # install on the fly so the import below succeeds
+         import ijson
+
+         _TARGET = {"title", "text", "contributors.item"}
+         n, output = 0, defaultdict(list)
+         with open(data_file) as f:
+             # ijson yields (prefix, event, value) triples without loading the whole file.
+             for key, event, value in ijson.parse(f):
+                 key = key.replace("item.", "")
+                 if key == "namespace" and len(output):
+                     # A document's fields are complete once its "namespace" key appears:
+                     # collapse single-value lists to scalars (contributors stays a list).
+                     output = {k: (v[0] if k != "contributors" else v) for k, v in output.items()}
+                     yield n, processing(output, self.config.name)
+                     output = defaultdict(list)
+                     n += 1
+                 elif key in _TARGET:
+                     output[key.replace(".item", "")].append(value)
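The `sliding` helper above is not called anywhere in this commit; a quick sketch of what it returns, given the definition above:

texts = ["a", "b", "c", "d", "e", "f", "g"]
print(sliding(texts, window_size=5, stride=3))
# [['a', 'b', 'c', 'd', 'e'], ['d', 'e', 'f', 'g']]  -- windows of 5 items, advancing by 3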
process.py ADDED
@@ -0,0 +1,76 @@
+ import re
+ from collections import defaultdict
+
+
+ def remove_double_linked_text(text):
+     # Replace [[target|label]] wiki links with just the label.
+     PATTERN = r"\[\[[^\[\]\|]+\|([^\]]+)\]\]"
+     result = re.search(PATTERN, text)
+     while result is not None:
+         s, e = result.span()
+         text = text[:s] + result.group(1) + text[e:]
+         result = re.search(PATTERN, text)
+     return text
+
+
+ def remove_linked_text(text):
+     # Replace [[target]] wiki links with the target text.
+     PATTERN = r"\[\[([^\[\]]+)\]\]"
+     result = re.search(PATTERN, text)
+     while result is not None:
+         s, e = result.span()
+         text = text[:s] + result.group(1) + text[e:]
+         result = re.search(PATTERN, text)
+     return text
+
+
+ def remove_attribute_in_table(text):
+     # Strip {{{attr content}}} markup, keeping the content after the first space.
+     PATTERN = r"{{{[^}]+ ([^\}]+)}}}"
+     result = re.search(PATTERN, text)
+     while result is not None:
+         s, e = result.span()
+         text = text[:s] + result.group(1) + text[e:]
+         result = re.search(PATTERN, text)
+
+     # Drop table styling attributes.
+     text = re.sub(r"<bgcolor=#[^>]+>", "", text)
+     text = re.sub(r"<-[0-9]>", "", text)
+     text = re.sub(r"\|\|<table[^\n]+\n", "", text)
+     text = re.sub(r"<tablewidth=[^>]+>", "", text)
+     text = re.sub(r"<width=[^>]+>", "", text)
+     text = re.sub(r"(?<=코멘트-)\|\|(?=\n)", "", text)
+
+     return text
+
+
+ def replace_link(text):
+     # Replace embedded YouTube macros with a placeholder token.
+     text = re.sub(r"\[youtube\([^\]]+\)\]", "[YOUTUBE LINK]", text)
+     return text
+
+
+ def process_text(text: str):
+     text = text.strip()
+     text = re.sub(r"\[\[파일:[^\]]+\]\]", "", text)  # drop file/image embeds
+     text = remove_double_linked_text(text)
+     text = remove_linked_text(text)
+     text = re.sub("'''", "", text)  # bold markup
+     text = replace_link(text)
+     text = remove_attribute_in_table(text)
+     return text
+
+
+ def get_structured_data(text: str, pattern=r"\n== ([^=\n]+) ==\n", default_value=None) -> dict:
+     """Split a page into parallel "item" (section title) and "content" lists;
+     text before the first "== ... ==" header is stored under the item "meta"."""
+     outputs = defaultdict(list)
+     matched = re.search(pattern, text)
+     is_first = True
+     while matched is not None:
+         b, s = matched.span()
+         if is_first:
+             outputs["item"].append("meta")
+             outputs["content"].append(text[:b])
+             is_first = False
+         outputs["item"].append(matched.group(1))
+         text = text[s:]
+         matched = re.search(pattern, text)
+         e = matched.start() if matched is not None else None
+         outputs["content"].append(text[:e].strip())
+     if not outputs and default_value is not None:
+         return default_value
+     return dict(outputs)
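A minimal illustration of the cleaning and sectioning pipeline above, run on a made-up snippet of NamuWiki-style markup (the sample string is hypothetical):

sample = "[[파일:logo.png]]'''나무위키'''는 [[위키|wiki]] 문서입니다.\n== 개요 ==\n본문 내용."
print(process_text(sample))
# 나무위키는 wiki 문서입니다.
# == 개요 ==
# 본문 내용.
print(get_structured_data(process_text(sample)))
# {'item': ['meta', '개요'], 'content': ['나무위키는 wiki 문서입니다.', '본문 내용.']}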