Matej Klemen committed on
Commit
b8e1a39
1 Parent(s): 633031a

Add first version of G-KOMET 1.0

Files changed (2)
  1. dataset_infos.json +1 -0
  2. gkomet.py +287 -0
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"default": {"description": "G-KOMET 1.0 (a corpus of metaphorical expressions in spoken Slovene language) is a corpus of speech transcriptions and \nconversations that covers 50,000 lexical units. The corpus contains samples from the Gos corpus of spoken Slovene \nand includes a balanced set of transcriptions of informative, educational, entertaining, private, and public discourse.\n\nThe annotation scheme was based on the MIPVU metaphor identification process. \nThis protocol was modified and adapted to the specifics of the Slovene language and the specifics of the spoken \nlanguage. Corpus was annotated for the following relations to metaphor: indirect metaphor, direct metaphor, borderline \ncases and metaphor signals. In addition, the corpus introduces a new \u2018frame\u2019 tag, which gives information about the \nconcept to which it refers. \n", "citation": "@InProceedings{antloga2022gkomet,\ntitle = {Korpusni pristopi za identifikacijo metafore in metonimije: primer metonimije v korpusu gKOMET},\nauthor={Antloga, \u000b{S}pela},\nbooktitle={Proceedings of the Conference on Language Technologies and Digital Humanities (Student papers)},\nyear={2022},\npages={271-277}\n}\n", "homepage": "http://hdl.handle.net/11356/1490", "license": "Creative Commons - Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)", "features": {"document_name": {"dtype": "string", "id": null, "_type": "Value"}, "idx": {"dtype": "uint32", "id": null, "_type": "Value"}, "idx_paragraph": {"dtype": "uint32", "id": null, "_type": "Value"}, "idx_sentence": {"dtype": "uint32", "id": null, "_type": "Value"}, "sentence_words": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "met_type": [{"type": {"dtype": "string", "id": null, "_type": "Value"}, "word_indices": {"feature": {"dtype": "uint32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}], "met_frame": [{"type": {"dtype": "string", "id": null, "_type": "Value"}, "word_indices": {"feature": {"dtype": "uint32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}]}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "g_komet", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 733608, "num_examples": 5695, "dataset_name": "g_komet"}}, "download_checksums": {"https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1490/G-Komet.zip": {"num_bytes": 1005891, "checksum": "29fffdaf085b889926eedcd9673f2129513b4578f41ba9bf7c632fe50cd45a8f"}}, "download_size": 1005891, "post_processing_size": null, "dataset_size": 733608, "size_in_bytes": 1739499}}
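For reference, the "features" block above defines one example per sentence: the tokenized sentence in sentence_words, with metaphor and frame annotations given as lists of {type, word_indices} spans. A minimal usage sketch follows, assuming the loading script from this commit is saved locally as gkomet.py; the dataset and example variable names are illustrative, not part of the commit:

from datasets import load_dataset

# Assumption: run from a checkout of this repository, so the script path resolves locally.
dataset = load_dataset("gkomet.py", split="train")

print(dataset.features)            # schema declared in dataset_infos.json / GKomet._info()
example = dataset[0]
print(example["sentence_words"])   # tokens of one sentence from the Gos transcriptions
print(example["met_type"])         # [{"type": ..., "word_indices": [...]}, ...]
print(example["met_frame"])        # frame annotations in the same span format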
gkomet.py ADDED
@@ -0,0 +1,287 @@
+ """Metaphor corpus G-KOMET 1.0"""
+ import logging
+ import os
+ import re
+ import xml.etree.ElementTree as ET
+ from typing import List, Tuple
+
+ import datasets
+
+ _CITATION = """\
+ @InProceedings{antloga2022gkomet,
+ title = {Korpusni pristopi za identifikacijo metafore in metonimije: primer metonimije v korpusu gKOMET},
+ author={Antloga, \v{S}pela},
+ booktitle={Proceedings of the Conference on Language Technologies and Digital Humanities (Student papers)},
+ year={2022},
+ pages={271-277}
+ }
+ """
+
+
+ _DESCRIPTION = """\
+ G-KOMET 1.0 (a corpus of metaphorical expressions in spoken Slovene language) is a corpus of speech transcriptions and
+ conversations that covers 50,000 lexical units. The corpus contains samples from the Gos corpus of spoken Slovene
+ and includes a balanced set of transcriptions of informative, educational, entertaining, private, and public discourse.
+
+ The annotation scheme was based on the MIPVU metaphor identification process.
+ This protocol was modified and adapted to the specifics of the Slovene language and the specifics of the spoken
+ language. Corpus was annotated for the following relations to metaphor: indirect metaphor, direct metaphor, borderline
+ cases and metaphor signals. In addition, the corpus introduces a new ‘frame’ tag, which gives information about the
+ concept to which it refers.
+ """
+
+ _HOMEPAGE = "http://hdl.handle.net/11356/1490"
+
+ _LICENSE = "Creative Commons - Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)"
+
+ _URLS = {
+     "gkomet": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1490/G-Komet.zip"
+ }
+
+
+ XML_NAMESPACE = "{http://www.w3.org/XML/1998/namespace}"
+ EL_LEAF, EL_TYPE, EL_FRAME = range(3)
+
+
+ def namespace(element):
+     # https://stackoverflow.com/a/12946675
+     m = re.match(r'\{.*\}', element.tag)
+     return m.group(0) if m else ''
+
+
+ def word_info(sent_el):
+     def _resolve_recursively(element) -> List:
+         """ Knowingly ignored tags: name (anonymized, without IDs), gap, vocal, pause, del,
+         linkGrp (handled separately in linkgroup_info()) """
+         # Leaf node: word or punctuation character
+         if element.tag.endswith(("w", "pc")):
+             id_curr = element.attrib[f"{XML_NAMESPACE}id"]
+             return [(id_curr, element.text)]
+
+         # Annotated word or word group - not interested in the annotations in this function
+         elif element.tag.endswith("seg"):
+             parsed_data = []
+             for child in element:
+                 if child.tag.endswith(("c", "vocal", "pause")):  # empty space betw. words or "special" word
+                     continue
+
+                 res = _resolve_recursively(child)
+                 if isinstance(res, list):
+                     parsed_data.extend(res)
+                 else:
+                     parsed_data.append(res)
+
+             return parsed_data
+
+     id_words, words = [], []
+     for child_el in sent_el:
+         curr_annotations = _resolve_recursively(child_el)
+         if curr_annotations is not None:  # None = unrecognized ("unimportant") element
+             for ann in curr_annotations:
+                 id_words.append(ann[0])
+                 words.append(ann[1])
+
+     return id_words, words
+
+
+ def seg_info(sent_el):
+     def _resolve_recursively(element) -> Tuple:
+         """ Returns (type[, subtype], deeper_elements, latest_element)"""
+         # Leaf node: word or punctuation character
+         if element.tag.endswith(("w", "pc")):
+             id_curr = element.attrib[f"{XML_NAMESPACE}id"]
+             return EL_LEAF, [], [id_curr]
+
+         # Annotated word or word group
+         elif element.tag.endswith("seg"):
+             subtype = element.attrib["subtype"]
+             if element.attrib["type"] == "frame":
+                 ann_type = EL_FRAME
+             elif element.attrib["type"] == "metaphor":
+                 ann_type = EL_TYPE
+             elif element.attrib["type"] == "idiom":
+                 ann_type = EL_TYPE
+             else:
+                 raise ValueError(f"Unrecognized seg type: {element.attrib['type']}")
+
+             deeper_elements = []
+             latest_element = []
+             for child in element:
+                 if child.tag.endswith(("c", "vocal", "pause")):  # empty space betw. words or "special" word
+                     continue
+
+                 res = _resolve_recursively(child)
+                 if res[0] == EL_LEAF:
+                     latest_element.extend(res[2])
+                 else:
+                     deeper_elements.append(res)
+                     latest_element.extend(res[3])
+
+             return ann_type, subtype, deeper_elements, latest_element
+
+     annotations = []
+     for child_el in sent_el:
+         if not child_el.tag.endswith("seg"):
+             continue
+
+         ann_type, subtype, deeper_elements, latest_element = _resolve_recursively(child_el)
+         annotations.extend(list(map(lambda _tup: (_tup[0], _tup[1], _tup[3]), deeper_elements)))
+         annotations.append((ann_type, subtype, latest_element))
+
+     return annotations
+
+
+ def linkgroup_info(sent_el):
+     annotations = []
+     for child_el in sent_el:
+         if not child_el.tag.endswith("linkGrp"):
+             continue
+
+         for curr_link in child_el:
+             ann_type = EL_TYPE
+             if child_el.attrib["type"] not in {"metonymy", "frame", "metaphor", "idiom"}:
+                 logging.warning(f"Uncovered linkGrp element type, skipping: {child_el.attrib['type']}")
+                 continue
+
+             if child_el.attrib["type"] == "metonymy":
+                 subtype = curr_link.attrib["ana"]
+             elif child_el.attrib["type"] in {"frame", "metaphor"}:
+                 ann_type = EL_TYPE if child_el.attrib["type"] == "metaphor" else EL_FRAME
+                 subtype = curr_link.attrib["ana"].split(":")[-1]
+             else:
+                 subtype = "idiom"
+
+             tokens_involved = list(map(lambda _tok_id: _tok_id[1:] if _tok_id.startswith("#") else _tok_id,
+                                        curr_link.attrib["target"].split(" ")))
+             annotations.append((ann_type, subtype, tokens_involved))
+
+     return annotations
+
+
+ class GKomet(datasets.GeneratorBasedBuilder):
+     """G-KOMET 1.0 is a corpus of metaphorical expressions in spoken Slovene language. """
+
+     VERSION = datasets.Version("1.0.0")
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "document_name": datasets.Value("string"),
+                 "idx": datasets.Value("uint32"),  # index inside current document
+                 "idx_paragraph": datasets.Value("uint32"),
+                 "idx_sentence": datasets.Value("uint32"),  # index inside current paragraph
+                 "sentence_words": datasets.Sequence(datasets.Value("string")),
+                 "met_type": [{
+                     "type": datasets.Value("string"),
+                     "word_indices": datasets.Sequence(datasets.Value("uint32"))
+                 }],
+                 "met_frame": [{
+                     "type": datasets.Value("string"),
+                     "word_indices": datasets.Sequence(datasets.Value("uint32"))
+                 }]
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         data_dir = dl_manager.download_and_extract(_URLS["gkomet"])
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"data_dir": os.path.join(data_dir, "G-Komet")},
+             )
+         ]
+
+     def _generate_examples(self, data_dir):
+         data_files = []
+         for fname in os.listdir(data_dir):
+             curr_path = os.path.join(data_dir, fname)
+             if os.path.isfile(curr_path) and fname.endswith(".xml") and fname != "G-Komet.xml":  # G-Komet.xml = meta-file
+                 data_files.append(fname)
+         data_files = sorted(data_files)
+
+         idx_example = 0
+         for fname in data_files:
+             fpath = os.path.join(data_dir, fname)
+             curr_doc = ET.parse(fpath)
+             root = curr_doc.getroot()
+             NAMESPACE = namespace(root)
+
+             idx_sent_glob = 0
+             for idx_par, curr_par in enumerate(root.iterfind(f".//{NAMESPACE}p")):
+                 id2position = {}  # {<idx_sent> -> {<id_word>: <position> foreach word} foreach sent}
+                 all_words = []
+
+                 # Pass#1: extract word information
+                 for idx_sent, curr_sent in enumerate(curr_par.iterfind(f"{NAMESPACE}s")):
+                     id_words, words = word_info(curr_sent)
+
+                     id2position[idx_sent] = dict(zip(id_words, range(len(words))))
+                     all_words.append(words)
+
+                 all_types, all_frames = [], []
+
+                 # Pass#2: extract annotations from <seg>ments
+                 for idx_sent, curr_sent in enumerate(curr_par.iterfind(f"{NAMESPACE}s")):
+                     annotated_segs = seg_info(curr_sent)
+                     all_types.append([])
+                     all_frames.append([])
+
+                     for curr_ann in annotated_segs:
+                         ann_type, ann_subtype, words_involved = curr_ann
+                         if ann_type == EL_TYPE:
+                             all_types[idx_sent].append({
+                                 "type": ann_subtype,
+                                 "word_indices": [id2position[idx_sent][_id_word] for _id_word in words_involved
+                                                  if _id_word in id2position[idx_sent]]
+                             })
+                         elif ann_type == EL_FRAME:
+                             all_frames[idx_sent].append({
+                                 "type": ann_subtype,
+                                 "word_indices": [id2position[idx_sent][_id_word] for _id_word in words_involved
+                                                  if _id_word in id2position[idx_sent]]
+                             })
+
+                 # Pass#3: extract annotations from <linkGrp>s
+                 for idx_sent, curr_sent in enumerate(curr_par.iterfind(f"{NAMESPACE}s")):
+                     annotated_linkgroups = linkgroup_info(curr_sent)
+
+                     for curr_ann in annotated_linkgroups:
+                         ann_type, ann_subtype, words_involved = curr_ann
+
+                         if ann_type == EL_TYPE:
+                             all_types[idx_sent].append({
+                                 "type": ann_subtype,
+                                 "word_indices": [id2position[idx_sent][_id_word] for _id_word in words_involved
+                                                  if _id_word in id2position[idx_sent]]
+                             })
+                         elif ann_type == EL_FRAME:
+                             all_frames[idx_sent].append({
+                                 "type": ann_subtype,
+                                 "word_indices": [id2position[idx_sent][_id_word] for _id_word in words_involved
+                                                  if _id_word in id2position[idx_sent]]
+                             })
+
+                 idx_sent = 0
+                 for curr_words, curr_types, curr_frames in zip(all_words, all_types, all_frames):
+                     if len(curr_words) == 0:
+                         continue
+
+                     yield idx_example, {
+                         "document_name": fname,
+                         "idx": idx_sent_glob,
+                         "idx_paragraph": idx_par,
+                         "idx_sentence": idx_sent,
+                         "sentence_words": curr_words,
+                         "met_type": curr_types,
+                         "met_frame": curr_frames
+                     }
+                     idx_example += 1
+                     idx_sent += 1
+                     idx_sent_glob += 1
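The yielded records keep annotations as index spans: word_indices in met_type and met_frame point into sentence_words. A small sketch of how the annotated surface forms could be reconstructed from one such record; the helper name and the example/dataset variables are illustrative assumptions, not part of this commit:

def annotated_spans(example):
    # Map each metaphor annotation's word indices back to the sentence tokens.
    words = example["sentence_words"]
    for ann in example["met_type"]:
        yield ann["type"], " ".join(words[i] for i in ann["word_indices"])

# e.g., assuming `dataset` was loaded as in the sketch after dataset_infos.json:
# for met_type, span in annotated_spans(dataset[0]):
#     print(met_type, "->", span)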