#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Thai Literature Corpora (TLC): Corpora of machine-ingestible Thai classical literature texts."""
import json
import datasets
_CITATION = """\
@misc{sawatphol2019tlc,
author={Sawatphol, Jitkapat},
title={Thai Literature Corpora},
year={2019},
howpublished={\\url{https://attapol.github.io/tlc.html}}
}
"""
_HOMEPAGE = "https://attapol.github.io/tlc.html"
_DESCRIPTION = """\
Thai Literature Corpora (TLC): Corpora of machine-ingestible Thai classical literature texts.
Release: 6/25/19
It consists of two datasets:
## TLC set
Texts from the [Vajirayana Digital Library](https://vajirayana.org/), stored by chapters and stanzas (non-tokenized).
tlc v.2.0 (6/17/19): a total of 34 documents, 292,270 lines, 31,790,734 characters
tlc v.1.0 (6/11/19): a total of 25 documents, 113,981 lines, 28,775,761 characters
## TNHC set
Texts from the Thai National Historical Corpus, stored by lines (manually tokenized).
tnhc v.1.0 (6/25/19): a total of 47 documents, 756,478 lines, 13,361,142 characters
"""
_URLs = {
"tlcv1.0": "https://github.com/jitkapat/thailitcorpus/releases/download/v.1.0/tlc_v.1.0.tar.gz",
"tlcv2.0": "https://github.com/jitkapat/thailitcorpus/releases/download/v.2.0/tlc_v.2.0.tar.gz",
"tnhcv1.0": "https://github.com/jitkapat/thailitcorpus/releases/download/v.1.0/tnhc_v.1.0.tar.gz",
}
_FILENAMES = {
"tlcv1.0": "นิราศอิเหนา.json",
"tlcv2.0": "นิราศอิเหนา.json",
"tnhcv1.0": "กาพย์เห่เรือ.json",
}
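
# Note: each release archive bundles many JSON documents, but this loader only
# reads the single file named above for each config (see the early `break` in
# Tlc._generate_examples below).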
class TlcConfig(datasets.BuilderConfig):
"""BuilderConfig for Tlc."""
def __init__(self, **kwargs):
"""BuilderConfig for Tlc.
Args:
**kwargs: keyword arguments forwarded to super.
"""
        super().__init__(**kwargs)
class Tlc(datasets.GeneratorBasedBuilder):
"""Thai Literature Corpora (TLC): Corpora of machine-ingestible Thai classical literature texts."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="tlcv1.0", version=datasets.Version("1.0.0"), description="Thai Literature Corpora"
),
datasets.BuilderConfig(
name="tlcv2.0", version=datasets.Version("2.0.0"), description="Thai Literature Corpora"
),
datasets.BuilderConfig(
name="tnhcv1.0",
version=datasets.Version("1.0.0"),
description="Thai Literature Corpora: Thai National Historical Corpus",
),
]
DEFAULT_CONFIG_NAME = "tlcv2.0"
def _info(self):
if self.config.name.startswith("tlc"):
features = datasets.Features(
{
"ch_num": datasets.Value("string"),
"title": datasets.Value("string"),
"text": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
}
)
else:
features = datasets.Features(
{
"text": datasets.Sequence((datasets.Value("string"))),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage=_HOMEPAGE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
archive = dl_manager.download(_URLs[self.config.name])
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"files": dl_manager.iter_archive(archive), "filepath": _FILENAMES[self.config.name]},
)
]
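
    # Sketch of the record layouts assumed below, inferred from the features
    # declared in _info() (the JSON files themselves are not validated here):
    #   TLC configs: each record is a dict such as
    #       {"ch_num": "1", "title": "...", "text": [["line", ...], ...]}
    #     where "text" holds stanzas, each a list of non-tokenized lines.
    #   TNHC config: each record is a list of manually tokenized strings,
    #     wrapped as {"text": [...]} when yielded.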
def _generate_examples(self, files, filepath):
_id = 0
for path, f in files:
if path == filepath:
data = json.loads(f.read().decode("utf-8"))
for d in data:
if self.config.name.startswith("tlc"):
yield _id, d
else:
yield _id, {"text": d}
_id += 1
break