"""Public domain texts from Project Ben-Yehuda- a set of books extracted from the Project BenYehuda library"""
import csv
csv.field_size_limit(1000000000)
from pathlib import Path
import datasets
import logging
_CITATION = """\
@article{,
author = {},
title = {Public domain texts from Project Ben-Yehuda},
journal = {},
url = {https://github.com/projectbenyehuda/public_domain_dump},
year = {2020},
}
"""
_DESCRIPTION = """\
This repository contains a dump of thousands of public domain works in Hebrew, from Project Ben-Yehuda, in plaintext UTF-8 files, with and without diacritics (nikkud). The metadata (pseudocatalogue.csv) file is a list of titles, authors, genres, and file paths, to help you process the dump.
All these works are in the public domain, so you are free to make any use of them, and do not need to ask for permission.
There are 10078 files, 3181136 lines
"""
logger = logging.getLogger(__name__)
# Assets from the pinned 2022-12 release of the public domain dump.
URLS = dict(
    html="https://github.com/projectbenyehuda/public_domain_dump/releases/download/2022-12/html.zip",
    catalogue="https://github.com/projectbenyehuda/public_domain_dump/releases/download/2022-12/pseudocatalogue.csv",
    txt="https://github.com/projectbenyehuda/public_domain_dump/releases/download/2022-12/txt.zip",
    txt_stripped="https://github.com/projectbenyehuda/public_domain_dump/releases/download/2022-12/txt_stripped.zip",
)


class HebrewProjectbenyehuda(datasets.GeneratorBasedBuilder):
    """Builder for the Project Ben-Yehuda public domain dump: plain text,
    diacritics-stripped text, and HTML for each catalogued work."""

    VERSION = datasets.Version("0.2.0")
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("int32"),
                    "path": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "authors": datasets.Value("string"),
                    "translators": datasets.Value("string"),
                    "original_language": datasets.Value("string"),
                    "genre": datasets.Value("string"),
                    "source_edition": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "txt_stripped": datasets.Value("string"),
                    "html": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/projectbenyehuda/public_domain_dump",
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        # Download and extract every asset; the whole dump is exposed as a
        # single TRAIN split.
        paths = {}
        for key, url in URLS.items():
            logger.info("Downloading %s", url)
            paths[key] = dl_manager.download_and_extract(url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepaths": paths,
                },
            )
        ]
    def _generate_examples(self, filepaths):
        catalogue_path = filepaths["catalogue"]
        # Each archive extracts to a directory containing an inner html/,
        # txt/ or txt_stripped/ folder, respectively.
        html_path = Path(filepaths["html"]) / "html"
        txt_path = Path(filepaths["txt"]) / "txt"
        txt_stripped_path = Path(filepaths["txt_stripped"]) / "txt_stripped"
        with open(catalogue_path, encoding="utf-8") as f:
            metadata_dict = csv.DictReader(
                f,
                fieldnames=[
                    "_id",
                    "path",
                    "title",
                    "authors",
                    "translators",
                    "original_language",
                    "genre",
                    "source_edition",
                ],
            )
            for data in metadata_dict:
                # With explicit fieldnames, DictReader also yields the header
                # row as data; skip it.
                if data["path"] == "path":
                    continue
                yield data["_id"], {
                    "id": int(data["_id"]),
                    "path": data["path"],
                    "title": data["title"],
                    "authors": data["authors"],
                    "translators": data["translators"],
                    "original_language": data["original_language"],
                    "genre": data["genre"],
                    "source_edition": data["source_edition"],
                    # read_file returns None for files missing from an archive.
                    "text": self.read_file(txt_path / f"{data['path'].strip('/')}.txt"),
                    "txt_stripped": self.read_file(
                        txt_stripped_path / f"{data['path'].strip('/')}.txt"
                    ),
                    "html": self.read_file(html_path / f"{data['path'].strip('/')}.html"),
                }
    @staticmethod
    def read_file(filepath):
        """Return the contents of `filepath`, or None if it does not exist."""
        filepath = Path(filepath)
        if filepath.exists():
            return filepath.read_text(encoding="utf-8")
        return None
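

if __name__ == "__main__":
    # Minimal smoke test (a sketch, not part of the loading script itself):
    # build the TRAIN split directly from this file and print one record.
    # Assumes network access to the GitHub release assets in URLS and a
    # `datasets` version that still supports script-based builders.
    ds = datasets.load_dataset(__file__, split="train")
    print(ds[0]["id"], ds[0]["title"])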