devrim committed on
Commit
f85e38a
1 Parent(s): 48b5e35

Upload folder using huggingface_hub

Files changed (5)
  1. .gitattributes +1 -0
  2. ottoman_first_level.py +161 -0
  3. test.json +0 -0
  4. train.json +3 -0
  5. val.json +0 -0
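
The commit message above says the folder was pushed with huggingface_hub; a minimal sketch of how such a commit is typically produced (the local path and repo id below are assumptions, not taken from this commit):

from huggingface_hub import HfApi

api = HfApi()
# Uploads the dataset script and JSON splits in a single commit; files matched by
# the LFS rules in .gitattributes (e.g. train.json) are stored via Git LFS on the Hub.
api.upload_folder(
    folder_path=".",                         # local folder containing the files listed above
    repo_id="username/ottoman_first_level",  # hypothetical dataset repo id
    repo_type="dataset",
    commit_message="Upload folder using huggingface_hub",
)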
.gitattributes CHANGED
@@ -53,3 +53,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
+ train.json filter=lfs diff=lfs merge=lfs -text
ottoman_first_level.py ADDED
@@ -0,0 +1,161 @@
+ # coding=utf-8
+ # Copyright 2023 Nonwestlit codebase authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """Ottoman Literary Dataset from the late 19th century up to the early 20th century."""
+
+
+ import json
+ import warnings
+ from typing import List
+
+ import datasets
+ from transformers import PreTrainedTokenizerBase
+
+ logger = datasets.logging.get_logger(__name__)
+
+
+ _DESCRIPTION = """\
+ First-level categorization of Ottoman articles.
+ """
+
+ _URLS = {
+     "train": "train.json",
+     "val": "val.json",
+     "test": "test.json",
+ }
+
+ _CLASS_NAMES = ["literary_text", "cultural_discourse", "other"]
+
+
+ class NonwestlitFirstLevelConfig(datasets.BuilderConfig):
+     """BuilderConfig for Dataset."""
+
+     def __init__(
+         self, tokenizer: PreTrainedTokenizerBase = None, max_sequence_length: int = None, **kwargs
+     ):
+         """BuilderConfig for Dataset.
+
+         Args:
+             tokenizer: tokenizer used to split long articles into model-sized chunks.
+             max_sequence_length: maximum number of tokens per chunk.
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(NonwestlitFirstLevelConfig, self).__init__(**kwargs)
+         self.tokenizer = tokenizer
+         self.max_sequence_length = max_sequence_length
+
+     @property
+     def features(self):
+         return {
+             "labels": datasets.ClassLabel(names=_CLASS_NAMES),
+             "input_ids": datasets.Value("string"),
+             "title": datasets.Value("string"),
+             "iid": datasets.Value("uint32"),
+             "chunk_id": datasets.Value("uint32"),
+         }
+
+
+ class NonwestlitFirstLevelDataset(datasets.GeneratorBasedBuilder):
+     """Nonwestlit Ottoman Classification Dataset"""
+
+     BUILDER_CONFIGS = [
+         NonwestlitFirstLevelConfig(
+             name="seq_cls",
+             version=datasets.Version("1.0.0", ""),
+             description=_DESCRIPTION,
+         )
+     ]
+     BUILDER_CONFIG_CLASS = NonwestlitFirstLevelConfig
+     __current_id = 1
+     __current_chunk_id = 1
+
+     @property
+     def __next_id(self):
+         cid = self.__current_id
+         self.__current_id += 1
+         return cid
+
+     @property
+     def __next_chunk_id(self):
+         cid = self.__current_chunk_id
+         self.__current_chunk_id += 1
+         return cid
+
+     def __reset_chunk_id(self):
+         self.__current_chunk_id = 1
+
+     def _info(self):
+         if self.config.tokenizer is None:
+             raise RuntimeError(
+                 "For HF Datasets and for chunking to be carried out, 'tokenizer' must be given."
+             )
+         if "llama" in self.config.tokenizer.name_or_path:
+             warnings.warn(
+                 "It is suggested to pass the 'max_sequence_length' argument for the Llama-2 model "
+                 "family. Data processing may otherwise fail, as the tokenizer's `model_max_length` "
+                 "attribute can be set to a very large sentinel value."
+             )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(self.config.features),
+         )
+
+     def _split_generators(self, dl_manager):
+         data_dir = dl_manager.download_and_extract(_URLS)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir["train"]}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_dir["val"]}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir["test"]}
+             ),
+         ]
+
+     def prepare_articles(self, article: str) -> List[str]:
+         """Split an article into overflowing token chunks and decode them back to text."""
+         tokenizer = self.config.tokenizer
+         model_inputs = tokenizer(
+             article,
+             truncation=True,
+             padding=True,
+             max_length=self.config.max_sequence_length,
+             return_overflowing_tokens=True,
+         )
+         return tokenizer.batch_decode(model_inputs["input_ids"], skip_special_tokens=True)
+
+     def _generate_examples(self, filepath):
+         """This function returns the examples in the raw (text) form."""
+         logger.info("generating examples from = %s", filepath)
+         with open(filepath, encoding="utf-8") as f:
+             dataset = json.load(f)
+
+         chunk_id = 0
+         for instance in dataset:
+             iid = instance.get("id", self.__next_id)
+             label = instance.get("label")
+             article = self.prepare_articles(instance["article"])
+             self.__reset_chunk_id()
+             for chunk in article:
+                 chunk_inputs = {
+                     "iid": iid,
+                     "chunk_id": self.__next_chunk_id,
+                     "title": instance["title"],
+                     "input_ids": chunk,
+                     "labels": int(label) - 1,
+                 }
+                 yield chunk_id, chunk_inputs
+                 chunk_id += 1
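
Loading this script requires passing a tokenizer, since _info raises a RuntimeError without one; a minimal usage sketch (the tokenizer checkpoint and local path are assumptions):

from datasets import load_dataset
from transformers import AutoTokenizer

# Extra keyword arguments to load_dataset are forwarded to NonwestlitFirstLevelConfig.
tokenizer = AutoTokenizer.from_pretrained("bert-base-multilingual-cased")  # assumed checkpoint
dataset = load_dataset(
    "ottoman_first_level.py",   # or the Hub dataset id hosting this script
    name="seq_cls",
    tokenizer=tokenizer,
    max_sequence_length=512,
)
print(dataset["train"][0])  # keys: labels, input_ids (chunk text), title, iid, chunk_id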
test.json ADDED
The diff for this file is too large to render. See raw diff
 
train.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d3dea32a569695632827b777b8d182b0e135ac3a0d8be09f6af3609fc3fb4739
+ size 11715427
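
train.json is stored as a Git LFS pointer, so its contents are not rendered here; from _generate_examples, each split is expected to be a JSON array of records shaped roughly like this (values are illustrative, not taken from the data):

# Illustrative record inferred from the loader; field values are made up.
example_record = {
    "id": 1,                      # optional; an auto-incrementing id is used when missing
    "title": "Example article title",
    "article": "Full article text that the tokenizer splits into chunks...",
    "label": "1",                 # 1-indexed; the loader maps it to _CLASS_NAMES[int(label) - 1]
}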
val.json ADDED
The diff for this file is too large to render. See raw diff