"""Ottoman Literary Dataset from late 19th century up to early 20th century.""" |
|
|
|
|
|
import json |
|
import warnings |
|
from typing import List |
|
|
|
import datasets |
|
from transformers import PreTrainedTokenizerBase |
|
|
|
logger = datasets.logging.get_logger(__name__) |
|
|
|
|
|
_DESCRIPTION = """\ |
|
First-level categorization of Ottoman articles.
|
""" |
|
|
|
_URLS = { |
|
"train": "train.json", |
|
"val": "val.json", |
|
"test": "test.json", |
|
} |
|
|
|
_CLASS_NAMES = ["literary_text", "cultural_discourse", "other"] |
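
# Sketch of the expected record format in each split file, inferred from
# `_generate_examples` below; the field values are illustrative, not real data.
# "label" is 1-indexed into `_CLASS_NAMES` (e.g. "1" -> literary_text) and may be
# stored as an int or a numeric string.
_EXAMPLE_RECORD = {
    "id": 1,
    "title": "Example title",
    "article": "Full article text ...",
    "label": "1",
}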
|
|
|
|
|
class NonwestlitFirstLevelConfig(datasets.BuilderConfig): |
|
"""BuilderConfig for Dataset.""" |
|
|
|
def __init__( |
|
self, tokenizer: PreTrainedTokenizerBase = None, max_sequence_length: int = None, **kwargs |
|
): |
|
"""BuilderConfig for Dataset. |
|
|
|
Args: |
|
**kwargs: keyword arguments forwarded to super. |
|
""" |
|
super(NonwestlitFirstLevelConfig, self).__init__(**kwargs) |
|
self.tokenizer = tokenizer |
|
self.max_sequence_length = max_sequence_length |
|
|
|
@property |
|
def features(self): |
|
return { |
|
"labels": datasets.ClassLabel(names=_CLASS_NAMES), |
|
"input_ids": datasets.Value("string"), |
|
"title": datasets.Value("string"), |
|
"iid": datasets.Value("uint32"), |
|
"chunk_id": datasets.Value("uint32"), |
|
} |
|
|
|
|
|
class NonwestlitFirstLevelDataset(datasets.GeneratorBasedBuilder): |
|
"""Nonwestlit Ottoman Classification Dataset""" |
|
|
|
BUILDER_CONFIGS = [ |
|
NonwestlitFirstLevelConfig( |
|
name="seq_cls", |
|
version=datasets.Version("1.0.0", ""), |
|
description=_DESCRIPTION, |
|
) |
|
] |
|
BUILDER_CONFIG_CLASS = NonwestlitFirstLevelConfig |
|
    # Running counters used to auto-assign instance and chunk ids (both 1-indexed).
    __current_id = 1
    __current_chunk_id = 1
|
|
|
@property |
|
def __next_id(self): |
|
cid = self.__current_id |
|
self.__current_id += 1 |
|
return cid |
|
|
|
@property |
|
def __next_chunk_id(self): |
|
cid = self.__current_chunk_id |
|
self.__current_chunk_id += 1 |
|
return cid |
|
|
|
def __reset_chunk_id(self): |
|
self.__current_chunk_id = 1 |
|
|
|
def _info(self): |
|
        if self.config.tokenizer is None:
            raise RuntimeError(
                "A 'tokenizer' must be provided so that articles can be chunked into "
                "model-sized sequences."
            )
        if "llama" in self.config.tokenizer.name_or_path:
            warnings.warn(
                "Passing 'max_sequence_length' is recommended for the Llama-2 model "
                "family; otherwise chunking may misbehave because the tokenizer's "
                "`model_max_length` attribute is set to a very large sentinel value."
            )
|
return datasets.DatasetInfo( |
|
description=_DESCRIPTION, |
|
features=datasets.Features(self.config.features), |
|
) |
|
|
|
def _split_generators(self, dl_manager): |
|
data_dir = dl_manager.download_and_extract(_URLS) |
|
|
|
return [ |
|
datasets.SplitGenerator( |
|
name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir["train"]} |
|
), |
|
datasets.SplitGenerator( |
|
name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_dir["val"]} |
|
), |
|
datasets.SplitGenerator( |
|
name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir["test"]} |
|
), |
|
] |
|
|
|
    def prepare_articles(self, article: str) -> List[str]:
        """Split an article into text chunks that fit the model's maximum sequence length.

        The tokenizer is called with `return_overflowing_tokens=True`, so a long article
        is tokenized into several non-overlapping windows of at most `max_sequence_length`
        tokens; each window is decoded back to text and returned as a separate chunk.
        """
        tokenizer = self.config.tokenizer
        model_inputs = tokenizer(
            article,
            truncation=True,
            padding=True,
            max_length=self.config.max_sequence_length,
            return_overflowing_tokens=True,
        )
        return tokenizer.batch_decode(model_inputs["input_ids"], skip_special_tokens=True)
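
    # Illustrative sketch (not executed): with max_sequence_length=512, a roughly
    # 1,300-token article would come back as three text chunks, e.g.
    #   chunks = self.prepare_articles(long_article)   # hypothetical call
    #   len(chunks)  # -> 3
    # The exact number of chunks depends on the tokenizer and `max_sequence_length`.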
|
|
|
def _generate_examples(self, filepath): |
|
"""This function returns the examples in the raw (text) form.""" |
|
logger.info("generating examples from = %s", filepath) |
|
with open(filepath, encoding="utf-8") as f: |
|
dataset = json.load(f) |
|
|
|
chunk_id = 0 |
|
for instance in dataset: |
|
iid = instance.get("id", self.__next_id) |
|
label = instance.get("label") |
|
article = self.prepare_articles(instance["article"]) |
|
self.__reset_chunk_id() |
|
for chunk in article: |
|
chunk_inputs = { |
|
"iid": iid, |
|
"chunk_id": self.__next_chunk_id, |
|
"title": instance["title"], |
|
"input_ids": chunk, |
|
"labels": int(label) - 1, |
|
} |
|
yield chunk_id, chunk_inputs |
|
chunk_id += 1 |
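

if __name__ == "__main__":
    # Minimal usage sketch, not part of the dataset builder itself. The tokenizer
    # checkpoint below is an assumption; any tokenizer matching the target model can
    # be substituted. Assumes train.json / val.json / test.json sit next to this script.
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-multilingual-cased")
    dataset = datasets.load_dataset(
        __file__,
        name="seq_cls",
        tokenizer=tokenizer,
        max_sequence_length=512,
        trust_remote_code=True,
    )
    print(dataset)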
|
|