File size: 4,736 Bytes
7d938bd
 
 
7d159a4
7d938bd
7d159a4
7d938bd
7d159a4
 
7d938bd
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7d159a4
7d938bd
7d159a4
 
 
 
 
 
7d938bd
 
 
7d159a4
7d938bd
 
 
 
 
 
 
7d159a4
7d938bd
 
 
 
 
 
 
7d159a4
 
7d938bd
 
 
 
 
 
 
 
 
7d159a4
 
 
 
7d938bd
 
 
 
 
7d159a4
7d938bd
 
 
 
7d159a4
 
 
 
 
7d938bd
7d159a4
7d938bd
 
 
 
 
 
 
 
 
 
 
 
 
7d159a4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7d938bd
7d159a4
 
 
7d938bd
7d159a4
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
"""Public domain texts from Project Ben-Yehuda- a set of books extracted from the Project BenYehuda library"""

import csv
csv.field_size_limit(1000000000)

from pathlib import Path

import datasets
import logging

_CITATION = """\
@article{,
  author = {},
  title = {Public domain texts from Project Ben-Yehuda},
  journal = {},
  url = {https://github.com/projectbenyehuda/public_domain_dump},
  year = {2020},
}
"""

_DESCRIPTION = """\
This repository contains a dump of thousands of public domain works in Hebrew, from Project Ben-Yehuda, in plaintext UTF-8 files, with and without diacritics (nikkud). The metadata (pseudocatalogue.csv) file is a list of titles, authors, genres, and file paths, to help you process the dump.
All these works are in the public domain, so you are free to make any use of them, and do not need to ask for permission.
There are 10078 files, 3181136 lines
"""

logger = logging.getLogger(__name__)

URLS = dict(
    html="https://github.com/projectbenyehuda/public_domain_dump/releases/download/2022-12/html.zip",
    catalogue="https://github.com/projectbenyehuda/public_domain_dump/releases/download/2022-12/pseudocatalogue.csv",
    txt="https://github.com/projectbenyehuda/public_domain_dump/releases/download/2022-12/txt.zip",
    txt_stripped="https://github.com/projectbenyehuda/public_domain_dump/releases/download/2022-12/txt_stripped.zip",
)


class HebrewProjectbenyehuda(datasets.GeneratorBasedBuilder):
    """Dataset builder for the Project Ben-Yehuda public-domain dump.

    Downloads the 2022-12 release archives (plain text, diacritics-stripped
    text, and HTML) together with the pseudocatalogue CSV, then yields one
    example per catalogue row with all three renderings of the work attached.
    """

    VERSION = datasets.Version("0.2.0")

    def _info(self):
        """Return static dataset metadata: features, homepage, and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("int32"),
                    "path": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "authors": datasets.Value("string"),
                    "translators": datasets.Value("string"),
                    "original_language": datasets.Value("string"),
                    "genre": datasets.Value("string"),
                    "source_edition": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "txt_stripped": datasets.Value("string"),
                    "html": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/projectbenyehuda/public_domain_dump",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract every archive in URLS; expose one TRAIN split."""
        paths = {}
        for key, url in URLS.items():
            logger.info("Downloading %s", url)
            paths[key] = dl_manager.download_and_extract(url)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepaths": paths,
                },
            )
        ]

    def _generate_examples(self, filepaths):
        """Yield (id, example) pairs, one per row of the pseudocatalogue CSV.

        Each example joins the catalogue metadata with the three file
        renderings (txt, txt_stripped, html); a rendering is None when the
        corresponding file is missing from the dump.
        """
        catalogue_path = filepaths["catalogue"]
        html_path = Path(filepaths["html"]) / "html"
        txt_path = Path(filepaths["txt"]) / "txt"
        txt_stripped_path = Path(filepaths["txt_stripped"]) / "txt_stripped"

        # newline="" is required by the csv module so embedded newlines inside
        # quoted fields are handled correctly.
        with open(catalogue_path, encoding="utf-8", newline="") as f:
            metadata_dict = csv.DictReader(
                f,
                fieldnames=[
                    "_id",
                    "path",
                    "title",
                    "authors",
                    "translators",
                    "original_language",
                    "genre",
                    "source_edition",
                ],
            )
            for data in metadata_dict:
                # The CSV ships with a header row; because we pass explicit
                # fieldnames, DictReader returns it as a data row — skip it.
                if data["path"] == "path":
                    continue

                # Strip surrounding slashes once so the joins below stay
                # relative to the extracted archive roots.
                rel = data["path"].strip("/")
                yield data["_id"], {
                    "id": data["_id"],
                    "title": data["title"],
                    "path": data["path"],
                    "authors": data["authors"],
                    "translators": data["translators"],
                    "original_language": data["original_language"],
                    "genre": data["genre"],
                    "source_edition": data["source_edition"],
                    "text": self.read_file(txt_path / f"{rel}.txt"),
                    "txt_stripped": self.read_file(txt_stripped_path / f"{rel}.txt"),
                    "html": self.read_file(html_path / f"{rel}.html"),
                }

    def read_file(self, filepath):
        """Return the UTF-8 contents of *filepath*, or None if it is absent.

        EAFP: attempt the read and treat a missing file as None, avoiding the
        exists()/open() race of a check-then-read.
        """
        try:
            return Path(filepath).read_text(encoding="utf-8")
        except FileNotFoundError:
            return None