Datasets:

Modalities:
Text
Languages:
English
Size:
< 1K
Libraries:
Datasets
License:
shamikbose89 committed on
Commit
e844be7
1 Parent(s): 62cf4f9

Upload clmet_3_1.py

Browse files

Dataloader for clmet_3_1 dataset

Files changed (1) hide show
  1. clmet_3_1.py +292 -0
clmet_3_1.py ADDED
@@ -0,0 +1,292 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """The Corpus of Late Modern English Texts, version 3.1 (CLMET3.1) has been created by Hendrik De Smet,
15
+ Susanne Flach, Hans-Jürgen Diller and Jukka Tyrkkö, as an offshoot of a bigger project developing a database
16
+ of text descriptors (Diller, De Smet & Tyrkkö 2011). CLMET3.1 is a principled collection of public domain
17
+ texts drawn from various online archiving projects. """
18
+
19
+ import os
20
+ import xml.etree.ElementTree as ET
21
+ import datasets
22
+ from bs4 import BeautifulSoup
23
+
24
+
25
# BibTeX citation for the CLMET 3.1 corpus paper.
_CITATION = """@article{de2015corpus,
title={Corpus of Late Modern English texts (version 3.1)},
author={De Smet, Hendrik and Flach, Susanne and Tyrkk{\"o}, Jukka and Diller, Hans-J{\"u}rgen},
year={2015}
}
"""

# Human-readable description surfaced through datasets.DatasetInfo.
_DESCRIPTION = """The Corpus of Late Modern English Texts, version 3.1 (CLMET3.1) has been created by Hendrik De Smet,
Susanne Flach, Hans-Jürgen Diller and Jukka Tyrkkö, as an offshoot of a bigger project developing a database of text
descriptors (Diller, De Smet & Tyrkkö 2011). CLMET3.1 is a principled collection of public domain texts drawn from
various online archiving projects. This dataset can be used for part-of-speech tagging, NER and text classification
"""

_HOMEPAGE = "http://fedora.clarin-d.uni-saarland.de/clmet/clmet.html"

_LICENSE = "Creative Commons Attribution Non Commercial Share Alike 4.0 International"

_DATASETNAME = "clmet"

# Download location of the zipped corpus archive.
_URLS = {
    _DATASETNAME: "http://fedora.clarin-d.uni-saarland.de/clmet/clmet3_1.zip",
}

# Tag inventory for the "pos" configuration. The first 36 entries follow the
# Penn Treebank tag set; the remainder (XX0, CURR, PUN, LQUO, RQUO, BRL, BRR,
# LS) are corpus-specific additions — presumably punctuation/currency/bracket
# markers, TODO confirm against the CLMET documentation.
_POS_LIST = [
    "CC",
    "CD",
    "DT",
    "EX",
    "FW",
    "IN",
    "JJ",
    "JJR",
    "JJS",
    "MD",
    "NN",
    "NNS",
    "NP",
    "NPS",
    "PDT",
    "POS",
    "PP",
    "PP$",
    "RB",
    "RBR",
    "RBS",
    "RP",
    "SENT",
    "SYM",
    "TO",
    "UH",
    "VB",
    "VBD",
    "VBG",
    "VBN",
    "VBZ",
    "VBP",
    "WDT",
    "WP",
    "WP$",
    "WRB",
    "XX0",
    "CURR",
    "PUN",
    "LQUO",
    "RQUO",
    "BRL",
    "BRR",
    "LS",
]
# Maps a POS tag string to its integer index in _POS_LIST (used as the label id).
_POS_LOOKUP = {tag: idx for idx, tag in enumerate(_POS_LIST)}
# Tag inventory for the "class" configuration: simplified Oxford wordclass tags.
_CLASS_LIST = [
    "ADJ",
    "ADV",
    "ART",
    "CONJ",
    "INTJ",
    "PREP",
    "PRON",
    "PUNC",
    "SUBST",
    "SYM",
    "UNC",
    "VERB",
    "QUOT"
]
# Maps a wordclass tag string to its integer index in _CLASS_LIST.
_CLASS_LOOKUP = {tag: idx for idx, tag in enumerate(_CLASS_LIST)}
logger = datasets.utils.logging.get_logger(__name__)
112
+
113
+
114
class CLMET_3_1(datasets.GeneratorBasedBuilder):
    """Builder for the Corpus of Late Modern English Texts, version 3.1.

    Three configurations are exposed:

    * ``plain`` - each document as a single text string plus its metadata.
    * ``class`` - tokenized text annotated with simplified Oxford wordclass
      tags (integer indices into ``_CLASS_LIST``; unknown tags become ``-1``).
    * ``pos`` - tokenized text annotated with Penn Treebank POS tags
      (integer indices into ``_POS_LIST``; unknown tags become ``-1``).
    """

    VERSION = datasets.Version("3.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="plain",
            version=VERSION,
            description="This format contains text as single string and the classifications",
        ),
        datasets.BuilderConfig(
            name="class",
            version=VERSION,
            description="This format contains the text as a list of tokens, annotated according to the simplified Oxford wordclass tags",
        ),
        datasets.BuilderConfig(
            name="pos",
            version=VERSION,
            description="This format contains the text as a list of tokens, annotated according to the Penn Treebank POS tags",
        ),
    ]

    DEFAULT_CONFIG_NAME = "plain"

    def _info(self):
        """Return the DatasetInfo whose feature schema depends on the config.

        All configurations share the same metadata fields; only the shape of
        ``text`` (string vs. token sequence) and the presence of ``pos_tags``
        differ. Key order matches the original schema exactly.
        """
        # Metadata fields common to every configuration, in schema order.
        metadata = {
            "genre": datasets.Value("string"),
            "subgenre": datasets.Value("string"),
            "year": datasets.Value("string"),
            "quarter_cent": datasets.Value("string"),
            "decade": datasets.Value("string"),
            "title": datasets.Value("string"),
            "author": datasets.Value("string"),
            "notes": datasets.Value("string"),
            "comments": datasets.Value("string"),
            "period": datasets.Value("string"),
            "id": datasets.Value("string"),
        }
        if self.config.name == "plain":
            features = datasets.Features(
                {"text": datasets.Value("string"), **metadata}
            )
        else:
            # "class" and "pos" share one schema: a token sequence plus
            # integer tag indices (the key is "pos_tags" for both configs).
            # logger.warn is a removed alias of logger.warning.
            if self.config.name == "class":
                logger.warning(f"CLASS tags are as follows: {_CLASS_LIST}")
            else:
                logger.warning(f"POS tags are as follows: {_POS_LIST}")
            features = datasets.Features(
                {
                    "text": datasets.Sequence(datasets.Value("string")),
                    "pos_tags": datasets.Sequence(datasets.Value("int32")),
                    **metadata,
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the corpus and declare the single TRAIN split."""
        urls = _URLS[_DATASETNAME]
        data_dir = dl_manager.download_and_extract(urls)
        # The archive unpacks to clmet/corpus/txt/<config-name>/<files>.
        data_dir = os.path.join(data_dir, "clmet", "corpus", "txt")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "data_dir": data_dir,
                    "split": "train",
                },
            ),
        ]

    def parse_pos_text(self, content_parts, pos_type):
        """Tokenize annotated paragraphs into parallel token/tag-index lists.

        Each whitespace-separated item is expected to look like ``token_TAG``.
        Unknown tags map to ``-1`` and set ``unknown_tag``; items that do not
        split into exactly two parts are skipped and set ``malformed_token``.

        Args:
            content_parts: iterable of elements exposing ``.text`` (the
                ``<p>`` tags of a document).
            pos_type: ``"pos"`` to resolve tags against ``_POS_LOOKUP``,
                anything else resolves against ``_CLASS_LOOKUP``.

        Returns:
            Tuple ``(tokens, pos_tags, unknown_tag, malformed_token)``.
        """
        tokens = []
        pos_tags = []
        unknown_tag = False
        malformed_token = False
        # Hoist the lookup choice out of the per-token loop.
        lookup = _POS_LOOKUP if pos_type == "pos" else _CLASS_LOOKUP
        for content_part in content_parts:
            for text_part in content_part.text.strip().split():
                try:
                    token, pos_tag = text_part.split("_")
                except ValueError:
                    # Zero or multiple "_" separators: flag and skip the item.
                    # (The former bare `except Exception` could only ever be
                    # triggered by this unpack; the remaining ops are pure str.)
                    malformed_token = True
                    continue
                pos_tag = pos_tag.replace("\n", "").strip().upper()
                pos_tag_idx = lookup.get(pos_tag, -1)
                if pos_tag_idx == -1:
                    unknown_tag = True
                tokens.append(token)
                pos_tags.append(pos_tag_idx)
        return tokens, pos_tags, unknown_tag, malformed_token

    def parse_file(self, file, pos_type):
        """Parse one CLMET document file into an ``(id, example)`` pair.

        Args:
            file: path to the document (XML-ish markup, parsed leniently
                with BeautifulSoup's html.parser).
            pos_type: the active config name; ``"pos"``/``"class"`` produce
                token+tag sequences, anything else a single text string.

        Returns:
            Tuple of (document id, example dict matching the config schema).
        """
        with open(file, "r", encoding="utf-8") as fp:
            # BeautifulSoup consumes the handle eagerly, so the file can be
            # closed before the metadata is read from the tree.
            soup = BeautifulSoup(fp, features="html.parser")
        doc_id = soup.id.text  # renamed local: `id` shadowed the builtin
        data_point = {
            "id": doc_id,
            "period": soup.period.text,
            "genre": soup.genre.text,
            "subgenre": soup.subgenre.text,
            "decade": soup.decade.text,
            "quarter_cent": soup.quartcent.text,
            "title": soup.title.text,
            # notes/comments may be empty strings; normalize falsy to "".
            "notes": soup.notes.text or "",
            "comments": soup.comments.text or "",
            "author": soup.author.text,
            "year": soup.year.text,
        }
        content_parts = soup.find("text").find_all("p")

        if pos_type in ("pos", "class"):
            tokens, tag_ids, unknown_tag, malformed = self.parse_pos_text(
                content_parts, pos_type
            )
            if unknown_tag:
                logger.warning(f'Unknown tag in sample {doc_id}')
            if malformed:
                logger.warning(f'Malformed token in sample {doc_id}')
            data_point["text"] = tokens
            data_point["pos_tags"] = tag_ids
        else:
            data_point["text"] = " ".join(part.text for part in content_parts)
        return (doc_id, data_point)

    def _generate_examples(self, data_dir, split):
        """Yield ``(id, example)`` pairs for every file of the active config.

        Files are visited in sorted name order so the example order is
        deterministic across filesystems (os.listdir order is arbitrary).
        """
        final_data_dir = os.path.join(data_dir, self.config.name)
        for file_name in sorted(os.listdir(final_data_dir)):
            doc_id, data = self.parse_file(
                os.path.join(final_data_dir, file_name), self.config.name
            )
            yield doc_id, data