|
import collections |
|
import io |
|
import zstandard |
|
import json |
|
|
|
from dataclasses import dataclass |
|
|
|
import datasets |
|
|
|
|
|
# Module-level logger routed through the `datasets` library's logging facilities.
logger = datasets.logging.get_logger(__name__)
|
|
|
|
|
@dataclass
class Identification:
    """Language-identification record for an OSCAR document.

    NOTE(review): declared here but not referenced anywhere in this script;
    presumably mirrors the OSCAR metadata schema -- confirm before removing.
    """

    label: str  # language label (presumably an OSCAR language code)
    prob: float  # classifier confidence score -- assumed to be in [0, 1], TODO confirm
|
|
|
|
|
# Dataset-card description; intentionally left blank in this script.
_DESCRIPTION = """

"""

# Dataset homepage URL; intentionally left blank.
_URL = ""

# License text; intentionally left blank.
_LICENSE = """

"""

# BibTeX citation; intentionally left blank.
_CITATION = """

"""

# Format string for building data-file paths; unused placeholder in this script.
_BASE_DATA_PAT_FORMAT_STR = ""

# Manifest file listing `<sha256> <filename>` pairs, one per data shard.
_BASE_CHECKSUM_FILE_NAME = "checksum.sha256"
|
|
|
class Oscar2301JAConfig(datasets.BuilderConfig):
    """BuilderConfig for the filtered Japanese OSCAR 23.01 dataset."""

    def __init__(self, **kwargs):
        """Create the single config used by this builder.

        Args:
            **kwargs: Keyword arguments forwarded to ``datasets.BuilderConfig``
                (e.g. ``base_data_path``, which ``_split_generators`` reads later).
        """
        # Plain literal: the original used an f-string with no placeholders (F541);
        # the resulting value is byte-identical.
        description = "filter OSCAR 2023 dataset ja"
        # Zero-argument super() -- standard Python 3 form of the original call.
        super().__init__(
            name="oscar_2023_ja_filtered", description=description, **kwargs
        )
|
|
|
|
|
class Oscar2301(datasets.GeneratorBasedBuilder):
    """Builder for the filtered Japanese subset of OSCAR 23.01.

    Data shards are ``.jsonl.zst`` files whose names are listed in a
    sha256 manifest found under ``config.base_data_path``.
    """

    BUILDER_CONFIGS = [
        Oscar2301JAConfig(
            version=datasets.Version("2023.1.0"),
        )
    ]
    BUILDER_CONFIG_CLASS = Oscar2301JAConfig

    def _info(self):
        """Return dataset metadata: examples have an int64 ``id`` and a string ``text``."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download the checksum manifest, then every ``.jsonl.zst`` shard it lists.

        Each manifest line is expected to look like ``<sha256> <filename>``;
        the filename (second whitespace-separated token) is appended to
        ``config.base_data_path`` to form the shard URL.
        """
        checksum_path = self.config.base_data_path + _BASE_CHECKSUM_FILE_NAME
        checksum_file = dl_manager.download(checksum_path)
        with open(checksum_file, encoding="utf-8") as f:
            # Fix: the original guard `if line` was always true when iterating
            # a file (each line keeps its newline), so a blank manifest line
            # crashed on `split()[1]`. Skip whitespace-only lines instead.
            data_filenames = [line.split()[1] for line in f if line.strip()]
        data_urls = [
            self.config.base_data_path + data_filename
            for data_filename in data_filenames
        ]
        # Only the zstd-compressed JSONL shards are data files; ignore anything
        # else the manifest might list.
        doc_files = dl_manager.download(
            [url for url in data_urls if url.endswith(".jsonl.zst")]
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"doc_files": doc_files}
            ),
        ]

    def _generate_examples(self, doc_files):
        """Yield ``(key, example)`` pairs by streaming each zstd-compressed JSONL shard.

        Keys are a single counter running across all shards, so every example
        gets a unique, monotonically increasing id.
        """
        id_ = 0
        for doc_path in doc_files:
            logger.info("generating examples from = %s", doc_path)

            with open(doc_path, "rb") as fh:
                # Decompress lazily: raw bytes -> zstd stream -> buffered
                # reader -> UTF-8 text lines, without materializing the shard.
                dctx = zstandard.ZstdDecompressor()
                stream_reader = dctx.stream_reader(fh)
                buffered_reader = io.BufferedReader(stream_reader)
                text_stream = io.TextIOWrapper(buffered_reader, encoding="utf-8")
                for line in text_stream:
                    doc = json.loads(line)
                    yield id_, {"id": id_, "text": doc["text"]}
                    id_ += 1