import collections
import io
import json
from dataclasses import dataclass

import zstandard

import datasets

logger = datasets.logging.get_logger(__name__)

@dataclass
class Identification:
    """Language-identification result: a language label and its probability."""

    label: str
    prob: float

_DESCRIPTION = """
"""

_URL = ""

_LICENSE = """
"""

_CITATION = """
"""

_BASE_DATA_PAT_FORMAT_STR = ""
_BASE_CHECKSUM_FILE_NAME = "checksum.sha256"

class Oscar2301JAConfig(datasets.BuilderConfig):
    def __init__(self, **kwargs):
        """BuilderConfig for the filtered Japanese subset of OSCAR 23.01.

        Unlike the generic OSCAR config, the language is fixed to Japanese
        ("ja"), so no `language` argument is accepted.

        Args:
            **kwargs: Keyword arguments forwarded to super.
        """
        description = "Filtered Japanese (ja) subset of the OSCAR 23.01 dataset."
        super(Oscar2301JAConfig, self).__init__(
            name="oscar_2023_ja_filtered", description=description, **kwargs
        )
        # `_split_generators` reads `self.config.base_data_path`; set it from
        # the module-level constant (left blank in this file).
        self.base_data_path = _BASE_DATA_PAT_FORMAT_STR

class Oscar2301(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        Oscar2301JAConfig(
            version=datasets.Version("2023.1.0"),
        )
    ]
    BUILDER_CONFIG_CLASS = Oscar2301JAConfig

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        # The checksum file lists one "<sha256>  <filename>" pair per data shard.
        checksum_path = self.config.base_data_path + _BASE_CHECKSUM_FILE_NAME
        checksum_file = dl_manager.download(checksum_path)
        with open(checksum_file, encoding="utf-8") as f:
            # Skip blank lines; take the filename column of each entry.
            data_filenames = [line.split()[1] for line in f if line.strip()]
        data_urls = [
            self.config.base_data_path + data_filename
            for data_filename in data_filenames
        ]
        # Only the zstd-compressed JSON Lines shards contain documents.
        doc_files = dl_manager.download(
            [url for url in data_urls if url.endswith(".jsonl.zst")]
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"doc_files": doc_files}
            ),
        ]

    def _generate_examples(self, doc_files):
        """Yields examples in raw text form by iterating over all the data files."""
        id_ = 0
        for doc_path in doc_files:
            logger.info("generating examples from = %s", doc_path)
            with open(doc_path, "rb") as fh:
                # Stream-decompress the zstd shard and decode it as UTF-8 text,
                # one JSON document per line.
                dctx = zstandard.ZstdDecompressor()
                stream_reader = dctx.stream_reader(fh)
                buffered_reader = io.BufferedReader(stream_reader)
                text_stream = io.TextIOWrapper(buffered_reader, encoding="utf-8")
                for line in text_stream:
                    doc = json.loads(line)
                    yield id_, {"id": id_, "text": doc["text"]}
                    id_ += 1
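

# Minimal usage sketch, assuming this script is saved locally as
# "oscar_2301_ja.py" (hypothetical filename) and that
# _BASE_DATA_PAT_FORMAT_STR points at a reachable host serving the checksum
# file and the .jsonl.zst shards:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("oscar_2301_ja.py", split="train", streaming=True)
#     for example in ds.take(3):
#         print(example["id"], example["text"][:80])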