# Hugging Face "datasets" loading script for a Russian anglicisms dataset.
import json
from typing import List
import datasets
_DESCRIPTION = "Russian dataset for detection and substraction of anglicisms."
_URLS = {
"train": "data/train.jsonl",
"test": "data/test.jsonl"
}
_LICENSE = "apache-2.0"
class GazetaDataset(datasets.GeneratorBasedBuilder):
    """Builder for a Russian anglicism detection/paraphrase dataset.

    Each example carries four string fields -- ``word``, ``form``,
    ``sentence``, ``paraphrase`` -- read one JSON object per line from
    the split's ``.jsonl`` file.  ``supervised_keys`` maps ``sentence``
    (input) to ``paraphrase`` (target).

    NOTE(review): the class name says "Gazeta" while ``_DESCRIPTION``
    describes an anglicisms dataset -- likely copied from a template.
    Renaming would break external references, so it is kept as-is.
    """

    VERSION = datasets.Version("0.2.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="default", version=VERSION, description=""),
    ]

    DEFAULT_CONFIG_NAME = "default"

    def _info(self):
        """Return the dataset metadata (schema, description, license)."""
        features = datasets.Features(
            {
                # presumably: "word" is the anglicism lemma and "form" its
                # surface form in "sentence" -- TODO confirm with the data.
                "word": datasets.Value("string"),
                "form": datasets.Value("string"),
                "sentence": datasets.Value("string"),
                "paraphrase": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=("sentence", "paraphrase"),
            # Fix: _LICENSE was defined at module level but never used,
            # so the license metadata was silently dropped.
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download/resolve the split files and declare train/test splits."""
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(id, example)`` pairs, one JSON object per input line."""
        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                yield id_, json.loads(row)