davanstrien committed
Commit d357e48
1 Parent(s): 3f10bcb

Update hipe2020.py

Files changed (1)
  1. hipe2020.py +192 -0
hipe2020.py CHANGED
@@ -16,7 +16,9 @@
 # Lint as: python3
 """TODO"""
 
+from datetime import datetime
 import datasets
+import re
 
 
 _CITATION = """\
@@ -93,6 +95,7 @@ class HIPE2020(datasets.GeneratorBasedBuilder):
                         datasets.features.ClassLabel(
                             names=[
                                 "O",
+                                "B-comp",
                                 "B-loc",
                                 "B-org",
                                 "B-pers",
@@ -114,17 +117,157 @@ class HIPE2020(datasets.GeneratorBasedBuilder):
                                 "B-org",
                                 "B-pers",
                                 "B-prod",
+                                "I-loc",
+                                "I-org",
+                                "I-pers",
+                            ]
+                        )
+                    ),
+                    "NE_FINE_LIT_tags": datasets.Sequence(
+                        datasets.features.ClassLabel(
+                            names=[
+                                "O",
+                                "B-comp.name",
+                                "B-loc",
+                                "B-loc.add.elec",
+                                "B-loc.add.phys",
+                                "B-loc.adm.nat",
+                                "B-loc.adm.reg",
+                                "B-loc.adm.sup",
+                                "B-loc.adm.town",
+                                "B-loc.fac",
+                                "B-loc.oro",
+                                "B-loc.phys.astro",
+                                "B-loc.phys.geo",
+                                "B-loc.phys.hydro",
+                                "B-loc.unk",
+                                "B-org",
+                                "B-org.adm",
+                                "B-org.ent",
+                                "B-org.ent.pressagency",
+                                "B-pers",
+                                "B-pers.coll",
+                                "B-pers.ind",
+                                "B-pers.ind.articleauthor",
+                                "B-prod",
+                                "B-prod.doctr",
+                                "B-prod.media",
                                 "B-time",
+                                "B-time.date.abs",
                                 "I-loc",
+                                "I-loc.add.elec",
+                                "I-loc.add.phys",
+                                "I-loc.adm.nat",
+                                "I-loc.adm.reg",
+                                "I-loc.adm.sup",
+                                "I-loc.adm.town",
+                                "I-loc.fac",
+                                "I-loc.oro",
+                                "I-loc.phys.geo",
+                                "I-loc.phys.hydro",
+                                "I-loc.unk",
                                 "I-org",
+                                "I-org.adm",
+                                "I-org.ent",
+                                "I-org.ent.pressagency",
                                 "I-pers",
+                                "I-pers.coll",
+                                "I-pers.ind",
+                                "I-pers.ind.articleauthor",
                                 "I-prod",
+                                "I-prod.doctr",
+                                "I-prod.media",
                                 "I-time",
+                                "I-time.date.abs",
+                            ]
+                        )
+                    ),
+                    "NE_FINE_METO_tags": datasets.Sequence(
+                        datasets.features.ClassLabel(
+                            names=[
+                                "O",
+                                "B-loc",
+                                "B-loc.adm.reg",
+                                "B-loc.adm.town",
+                                "B-loc.fac",
+                                "B-loc.oro",
+                                "B-org",
+                                "B-org.adm",
+                                "B-org.ent",
+                                "B-pers.coll",
+                                "B-pers.ind",
+                                "B-prod.media",
+                                "I-loc",
+                                "I-loc.adm.reg",
+                                "I-loc.fac",
+                                "I-loc.oro",
+                                "I-org",
+                                "I-org.adm",
+                                "I-org.ent",
+                                "I-pers",
+                                "I-pers.ind",
+                            ]
+                        )
+                    ),
+                    "NE_FINE_COMP_tags": datasets.Sequence(
+                        datasets.features.ClassLabel(
+                            names=[
+                                "O",
+                                "B-comp.demonym",
+                                "B-comp.function",
+                                "B-comp.name",
+                                "B-comp.qualifier",
+                                "B-comp.title",
+                                "I-comp.demonym",
+                                "I-comp.function",
+                                "I-comp.name",
+                                "I-comp.qualifier",
+                                "I-comp.title",
+                            ]
+                        )
+                    ),
+                    "NE_NESTED_tags": datasets.Sequence(
+                        datasets.features.ClassLabel(
+                            names=[
+                                "O",
+                                "B-loc",
+                                "B-loc.adm.nat",
+                                "B-loc.adm.reg",
+                                "B-loc.adm.sup",
+                                "B-loc.adm.town",
+                                "B-loc.fac",
+                                "B-loc.oro",
+                                "B-loc.phys.geo",
+                                "B-loc.phys.hydro",
+                                "B-org",
+                                "B-org.adm",
+                                "B-org.ent",
+                                "B-pers.coll",
+                                "B-pers.ind",
+                                "B-prod.media",
+                                "B-time.date.abs",
+                                "I-loc",
+                                "I-loc.adm.nat",
+                                "I-loc.adm.reg",
+                                "I-loc.adm.town",
+                                "I-loc.fac",
+                                "I-loc.oro",
+                                "I-loc.phys.geo",
+                                "I-loc.phys.hydro",
+                                "I-org",
+                                "I-org.adm",
+                                "I-org.ent",
+                                "I-pers.ind",
                             ]
                         )
                     ),
+                    "NEL_LIT_ID": datasets.Sequence(datasets.Value("string")),
+                    "NEL_METO_ID": datasets.Sequence(datasets.Value("string")),
                     "no_space_after": datasets.Sequence(datasets.Value("bool")),
                     "end_of_line": datasets.Sequence(datasets.Value("bool")),
+                    "date": datasets.Value("timestamp[s]"),
+                    "title": datasets.Value("string"),
+                    "document_id": datasets.Value("string"),
                 }
             ),
             supervised_keys=None,
@@ -164,11 +307,20 @@ class HIPE2020(datasets.GeneratorBasedBuilder):
         ]
 
     def _generate_examples(self, filepath):
+        date_re = re.compile(r"# date = (\d{4}-\d{2}-\d{02})")
+        title_re = re.compile(r"newspaper = (\w{3})")
+        document_id_re = re.compile(r"document_id = (.*)")
         with open(filepath, encoding="utf-8") as f:
             guid = 0
             tokens = []
             NE_COARSE_LIT_tags = []
             NE_COARSE_METO_tags = []
+            NE_FINE_LIT_tags = []
+            NE_FINE_METO_tags = []
+            NE_FINE_COMP_tags = []
+            NE_NESTED_tags = []
+            NEL_LIT_ID = []
+            NEL_METO_ID = []
             no_space_after = []
             end_of_line = []
             for line in f:
@@ -177,19 +329,44 @@ class HIPE2020(datasets.GeneratorBasedBuilder):
                 ):
                     continue
                 if line.startswith("#") or line == "\n":
+                    date_match = re.search(date_re, line)
+                    if date_match:
+                        date = date_match.group(1)
+                        date = datetime.strptime(date, "%Y-%m-%d")
+                    title_match = re.search(title_re, line)
+                    if title_match:
+                        title = title_match.group(1)
+                    document_id_match = re.search(document_id_re, line)
+                    if document_id_match:
+                        document_id = document_id_match.group(1)
                     if tokens:
                         yield guid, {
                             "id": str(guid),
                             "tokens": tokens,
                             "NE_COARSE_LIT": NE_COARSE_LIT_tags,
                             "NE_COARSE_METO_tags": NE_COARSE_METO_tags,
+                            "NE_FINE_LIT_tags": NE_FINE_LIT_tags,
+                            "NE_FINE_METO_tags": NE_FINE_METO_tags,
+                            "NE_FINE_COMP_tags": NE_FINE_COMP_tags,
+                            "NE_NESTED_tags": NE_NESTED_tags,
+                            "NEL_LIT_ID": NEL_LIT_ID,
+                            "NEL_METO_ID": NEL_METO_ID,
                             "no_space_after": no_space_after,
                             "end_of_line": end_of_line,
+                            "date": date,
+                            "title": title,
+                            "document_id": document_id,
                         }
                         guid += 1
                         tokens = []
                         NE_COARSE_LIT_tags = []
                         NE_COARSE_METO_tags = []
+                        NE_FINE_LIT_tags = []
+                        NE_FINE_METO_tags = []
+                        NE_FINE_COMP_tags = []
+                        NE_NESTED_tags = []
+                        NEL_LIT_ID = []
+                        NEL_METO_ID = []
                         no_space_after = []
                         end_of_line = []
                 else:
@@ -200,6 +377,12 @@ class HIPE2020(datasets.GeneratorBasedBuilder):
                     tokens.append(splits[0])
                     NE_COARSE_LIT_tags.append(splits[1])
                     NE_COARSE_METO_tags.append(splits[2])
+                    NE_FINE_LIT_tags.append(splits[3])
+                    NE_FINE_METO_tags.append(splits[4])
+                    NE_FINE_COMP_tags.append(splits[5])
+                    NE_NESTED_tags.append(splits[6])
+                    NEL_LIT_ID.append(splits[7])
+                    NEL_METO_ID.append(splits[8])
                     misc = splits[-1]
                     is_space = "NoSpaceAfter" in misc
                     is_end_of_line = "EndOfLine" in misc
@@ -212,6 +395,15 @@ class HIPE2020(datasets.GeneratorBasedBuilder):
                 "tokens": tokens,
                 "NE_COARSE_LIT": NE_COARSE_LIT_tags,
                 "NE_COARSE_METO_tags": NE_COARSE_METO_tags,
+                "NE_FINE_LIT_tags": NE_FINE_LIT_tags,
+                "NE_FINE_METO_tags": NE_FINE_METO_tags,
+                "NE_FINE_COMP_tags": NE_FINE_COMP_tags,
+                "NE_NESTED_tags": NE_NESTED_tags,
+                "NEL_LIT_ID": NEL_LIT_ID,
+                "NEL_METO_ID": NEL_METO_ID,
                 "no_space_after": no_space_after,
                 "end_of_line": end_of_line,
+                "date": date,
+                "title": title,
+                "document_id": document_id,
             }