Tasks: Token Classification
Languages: Slovenian
Size: 1K - 10K
Tags: metaphor-classification, metonymy-classification, metaphor-frame-classification, multiword-expression-detection
License: cc-by-nc-sa-4.0
"""Metaphor corpus G-KOMET 1.0""" | |
import logging | |
import os | |
import re | |
import xml.etree.ElementTree as ET | |
from typing import List, Tuple | |
import datasets | |
_CITATION = """\ | |
@InProceedings{antloga2022gkomet, | |
title = {Korpusni pristopi za identifikacijo metafore in metonimije: primer metonimije v korpusu gKOMET}, | |
author={Antloga, \v{S}pela}, | |
booktitle={Proceedings of the Conference on Language Technologies and Digital Humanities (Student papers)}, | |
year={2022}, | |
pages={271-277} | |
} | |
""" | |
_DESCRIPTION = """\
G-KOMET 1.0 (a corpus of metaphorical expressions in spoken Slovene) is a corpus of speech transcriptions and
conversations covering 50,000 lexical units. The corpus contains samples from the Gos corpus of spoken Slovene
and includes a balanced set of transcriptions of informative, educational, entertaining, private, and public discourse.
The annotation scheme was based on the MIPVU metaphor identification process, modified and adapted to the specifics
of the Slovene language and of spoken language. The corpus is annotated for the following relations to metaphor:
indirect metaphor, direct metaphor, borderline cases, and metaphor signals. In addition, the corpus introduces a new
‘frame’ tag, which indicates the concept to which a metaphorical expression refers.
"""
_HOMEPAGE = "http://hdl.handle.net/11356/1490"

_LICENSE = "Creative Commons - Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)"

_URLS = {
    "gkomet": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1490/G-Komet.zip"
}

XML_NAMESPACE = "{http://www.w3.org/XML/1998/namespace}"
EL_LEAF, EL_TYPE, EL_FRAME = range(3)


def namespace(element):
    # https://stackoverflow.com/a/12946675
    m = re.match(r'\{.*\}', element.tag)
    return m.group(0) if m else ''
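
# Illustration (hypothetical tag): for an element whose tag is
# "{http://www.tei-c.org/ns/1.0}TEI", namespace() returns
# "{http://www.tei-c.org/ns/1.0}"; for an un-namespaced tag it returns ''.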
def word_info(sent_el):
    def _resolve_recursively(element) -> List:
        """ Knowingly ignored tags: name (anonymized, without IDs), gap, vocal, pause, del,
        linkGrp (handled separately in linkgroup_info()) """
        # Leaf node: word or punctuation character
        if element.tag.endswith(("w", "pc")):
            id_curr = element.attrib[f"{XML_NAMESPACE}id"]
            return [(id_curr, element.text)]

        # Annotated word or word group - not interested in the annotations in this function
        elif element.tag.endswith("seg"):
            parsed_data = []
            for child in element:
                if child.tag.endswith(("c", "vocal", "pause")):  # empty space betw. words or "special" word
                    continue

                res = _resolve_recursively(child)
                if isinstance(res, list):
                    parsed_data.extend(res)
                else:
                    parsed_data.append(res)

            return parsed_data

    id_words, words = [], []
    for child_el in sent_el:
        curr_annotations = _resolve_recursively(child_el)
        if curr_annotations is not None:  # None = unrecognized ("unimportant") element
            for ann in curr_annotations:
                id_words.append(ann[0])
                words.append(ann[1])

    return id_words, words
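
# Illustration (hypothetical xml:ids): for a sentence element containing
#   <w xml:id="d1.s1.w1">vse</w> <w xml:id="d1.s1.w2">je</w> <pc xml:id="d1.s1.pc1">.</pc>
# word_info() returns the two parallel lists
#   (["d1.s1.w1", "d1.s1.w2", "d1.s1.pc1"], ["vse", "je", "."]).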
def seg_info(sent_el):
    def _resolve_recursively(element) -> Tuple:
        """ Returns (type, deeper_elements, latest_element) for leaf elements, or
        (type, subtype, deeper_elements, latest_element) for <seg> elements. """
        # Leaf node: word or punctuation character
        if element.tag.endswith(("w", "pc")):
            id_curr = element.attrib[f"{XML_NAMESPACE}id"]
            return EL_LEAF, [], [id_curr]

        # Annotated word or word group
        elif element.tag.endswith("seg"):
            subtype = element.attrib["subtype"]
            if element.attrib["type"] == "frame":
                ann_type = EL_FRAME
            elif element.attrib["type"] in {"metaphor", "idiom"}:
                ann_type = EL_TYPE
            else:
                raise ValueError(f"Unrecognized seg type: {element.attrib['type']}")

            deeper_elements = []
            latest_element = []
            for child in element:
                if child.tag.endswith(("c", "vocal", "pause")):  # empty space betw. words or "special" word
                    continue

                res = _resolve_recursively(child)
                if res[0] == EL_LEAF:
                    latest_element.extend(res[2])
                else:
                    deeper_elements.extend(res[2])
                    deeper_elements.append((res[0], res[1], res[3]))
                    latest_element.extend(res[3])

            return ann_type, subtype, deeper_elements, latest_element

    annotations = []
    for child_el in sent_el:
        if not child_el.tag.endswith("seg"):
            continue

        ann_type, subtype, deeper_elements, latest_element = _resolve_recursively(child_el)
        annotations.extend(deeper_elements)
        annotations.append((ann_type, subtype, latest_element))

    return annotations
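
# Illustration (hypothetical annotation): a segment such as
#   <seg type="metaphor" subtype="MRWi"><w xml:id="d1.s1.w3">toku</w></seg>
# yields (EL_TYPE, "MRWi", ["d1.s1.w3"]); nested <seg> elements are flattened so
# that each annotation (inner and outer) keeps its own list of token IDs.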
def linkgroup_info(sent_el):
    annotations = []
    for child_el in sent_el:
        if not child_el.tag.endswith("linkGrp"):
            continue

        group_type = child_el.attrib["type"]
        if group_type not in {"metonymy", "frame", "metaphor", "idiom"}:
            logging.warning(f"Uncovered linkGrp element type, skipping: {group_type}")
            continue

        for curr_link in child_el:
            ann_type = EL_TYPE
            if group_type == "metonymy":
                subtype = curr_link.attrib["ana"]
            elif group_type in {"frame", "metaphor"}:
                ann_type = EL_TYPE if group_type == "metaphor" else EL_FRAME
                subtype = curr_link.attrib["ana"].split(":")[-1]
            else:
                subtype = "idiom"

            tokens_involved = [_tok_id[1:] if _tok_id.startswith("#") else _tok_id
                               for _tok_id in curr_link.attrib["target"].split(" ")]
            annotations.append((ann_type, subtype, tokens_involved))

    return annotations
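
# Illustration (hypothetical link): a group such as
#   <linkGrp type="metonymy">
#     <link ana="#met:institution-for-people" target="#d1.s1.w5 #d1.s1.w6"/>
#   </linkGrp>
# yields (EL_TYPE, "#met:institution-for-people", ["d1.s1.w5", "d1.s1.w6"]);
# the leading "#" is stripped from targets so the IDs match those in word_info().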
class GKomet(datasets.GeneratorBasedBuilder):
    """G-KOMET 1.0 is a corpus of metaphorical expressions in spoken Slovene."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        features = datasets.Features(
            {
                "document_name": datasets.Value("string"),
                "idx": datasets.Value("uint32"),  # index inside current document
                "idx_paragraph": datasets.Value("uint32"),
                "idx_sentence": datasets.Value("uint32"),  # index inside current paragraph
                "sentence_words": datasets.Sequence(datasets.Value("string")),
                "met_type": [{
                    "type": datasets.Value("string"),
                    "word_indices": datasets.Sequence(datasets.Value("uint32"))
                }],
                "met_frame": [{
                    "type": datasets.Value("string"),
                    "word_indices": datasets.Sequence(datasets.Value("uint32"))
                }]
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
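
    # Illustration (hypothetical values): a single yielded example has the shape
    #   {"document_name": "gkomet001.xml", "idx": 0, "idx_paragraph": 0, "idx_sentence": 0,
    #    "sentence_words": ["vse", "je", "v", "redu"],
    #    "met_type": [{"type": "MRWi", "word_indices": [3]}],
    #    "met_frame": []}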
    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(_URLS["gkomet"])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_dir": os.path.join(data_dir, "G-Komet")},
            )
        ]

    def _generate_examples(self, data_dir):
        data_files = []
        for fname in os.listdir(data_dir):
            curr_path = os.path.join(data_dir, fname)
            if os.path.isfile(curr_path) and fname.endswith(".xml") and fname != "G-Komet.xml":  # G-Komet.xml = meta-file
                data_files.append(fname)
        data_files = sorted(data_files)

        idx_example = 0
        for fname in data_files:
            fpath = os.path.join(data_dir, fname)
            curr_doc = ET.parse(fpath)
            root = curr_doc.getroot()
            NAMESPACE = namespace(root)

            idx_sent_glob = 0
            for idx_par, curr_par in enumerate(root.iterfind(f".//{NAMESPACE}p")):
                id2position = {}  # {<idx_sent> -> {<id_word>: <position> foreach word} foreach sent}
                all_words = []

                # Pass #1: extract word information
                for idx_sent, curr_sent in enumerate(curr_par.iterfind(f"{NAMESPACE}s")):
                    id_words, words = word_info(curr_sent)
                    id2position[idx_sent] = dict(zip(id_words, range(len(words))))
                    all_words.append(words)

                all_types, all_frames = [], []
                # Pass #2: extract annotations from <seg>ments
                for idx_sent, curr_sent in enumerate(curr_par.iterfind(f"{NAMESPACE}s")):
                    annotated_segs = seg_info(curr_sent)
                    all_types.append([])
                    all_frames.append([])

                    for curr_ann in annotated_segs:
                        ann_type, ann_subtype, words_involved = curr_ann
                        if ann_type == EL_TYPE:
                            all_types[idx_sent].append({
                                "type": ann_subtype,
                                "word_indices": [id2position[idx_sent][_id_word] for _id_word in words_involved
                                                 if _id_word in id2position[idx_sent]]
                            })
                        elif ann_type == EL_FRAME:
                            all_frames[idx_sent].append({
                                "type": ann_subtype,
                                "word_indices": [id2position[idx_sent][_id_word] for _id_word in words_involved
                                                 if _id_word in id2position[idx_sent]]
                            })

                # Pass #3: extract annotations from <linkGrp>s
                for idx_sent, curr_sent in enumerate(curr_par.iterfind(f"{NAMESPACE}s")):
                    annotated_linkgroups = linkgroup_info(curr_sent)

                    for curr_ann in annotated_linkgroups:
                        ann_type, ann_subtype, words_involved = curr_ann
                        if ann_type == EL_TYPE:
                            all_types[idx_sent].append({
                                "type": ann_subtype,
                                "word_indices": [id2position[idx_sent][_id_word] for _id_word in words_involved
                                                 if _id_word in id2position[idx_sent]]
                            })
                        elif ann_type == EL_FRAME:
                            all_frames[idx_sent].append({
                                "type": ann_subtype,
                                "word_indices": [id2position[idx_sent][_id_word] for _id_word in words_involved
                                                 if _id_word in id2position[idx_sent]]
                            })

                idx_sent = 0
                for curr_words, curr_types, curr_frames in zip(all_words, all_types, all_frames):
                    if len(curr_words) == 0:
                        continue

                    yield idx_example, {
                        "document_name": fname,
                        "idx": idx_sent_glob,
                        "idx_paragraph": idx_par,
                        "idx_sentence": idx_sent,
                        "sentence_words": curr_words,
                        "met_type": curr_types,
                        "met_frame": curr_frames
                    }
                    idx_example += 1
                    idx_sent += 1
                    idx_sent_glob += 1
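
# Minimal usage sketch, assuming this script is saved locally as "gkomet.py"
# (the dataset's Hub ID is not shown here, so the local script is loaded;
# recent versions of `datasets` may additionally require trust_remote_code=True):
#
#   from datasets import load_dataset
#   gkomet = load_dataset("gkomet.py", split="train")
#   ex = gkomet[0]
#   print(ex["sentence_words"])
#   print(ex["met_type"])   # e.g. [{"type": ..., "word_indices": [...]}]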