import glob
from dataclasses import dataclass, field
from pathlib import Path
from typing import Dict, List, Optional

import datasets

|
def remove_prefix(a: str, prefix: str) -> str:
    """Backport of `str.removeprefix` (Python 3.9+), e.g. remove_prefix("Claim 0 4", "Claim ") == "0 4"."""
    if a.startswith(prefix):
        a = a[len(prefix) :]
    return a

|
def parse_brat_file(
    txt_file: Path,
    annotation_file_suffixes: Optional[List[str]] = None,
    parse_notes: bool = False,
) -> Dict:
""" |
|
Parse a brat file into the schema defined below. |
|
`txt_file` should be the path to the brat '.txt' file you want to parse, e.g. 'data/1234.txt' |
|
Assumes that the annotations are contained in one or more of the corresponding '.a1', '.a2' or '.ann' files, |
|
e.g. 'data/1234.ann' or 'data/1234.a1' and 'data/1234.a2'. |
|
Will include annotator notes, when `parse_notes == True`. |
|
brat_features = datasets.Features( |
|
{ |
|
"id": datasets.Value("string"), |
|
"document_id": datasets.Value("string"), |
|
"text": datasets.Value("string"), |
|
"text_bound_annotations": [ # T line in brat, e.g. type or event trigger |
|
{ |
|
"offsets": datasets.Sequence([datasets.Value("int32")]), |
|
"text": datasets.Sequence(datasets.Value("string")), |
|
"type": datasets.Value("string"), |
|
"id": datasets.Value("string"), |
|
} |
|
], |
|
"events": [ # E line in brat |
|
{ |
|
"trigger": datasets.Value( |
|
"string" |
|
), # refers to the text_bound_annotation of the trigger, |
|
"id": datasets.Value("string"), |
|
"type": datasets.Value("string"), |
|
"arguments": datasets.Sequence( |
|
{ |
|
"role": datasets.Value("string"), |
|
"ref_id": datasets.Value("string"), |
|
} |
|
), |
|
} |
|
], |
|
"relations": [ # R line in brat |
|
{ |
|
"id": datasets.Value("string"), |
|
"head": { |
|
"ref_id": datasets.Value("string"), |
|
"role": datasets.Value("string"), |
|
}, |
|
"tail": { |
|
"ref_id": datasets.Value("string"), |
|
"role": datasets.Value("string"), |
|
}, |
|
"type": datasets.Value("string"), |
|
} |
|
], |
|
"equivalences": [ # Equiv line in brat |
|
{ |
|
"id": datasets.Value("string"), |
|
"ref_ids": datasets.Sequence(datasets.Value("string")), |
|
} |
|
], |
|
"attributes": [ # M or A lines in brat |
|
{ |
|
"id": datasets.Value("string"), |
|
"type": datasets.Value("string"), |
|
"ref_id": datasets.Value("string"), |
|
"value": datasets.Value("string"), |
|
} |
|
], |
|
"normalizations": [ # N lines in brat |
|
{ |
|
"id": datasets.Value("string"), |
|
"type": datasets.Value("string"), |
|
"ref_id": datasets.Value("string"), |
|
"resource_name": datasets.Value( |
|
"string" |
|
), # Name of the resource, e.g. "Wikipedia" |
|
"cuid": datasets.Value( |
|
"string" |
|
), # ID in the resource, e.g. 534366 |
|
"text": datasets.Value( |
|
"string" |
|
), # Human readable description/name of the entity, e.g. "Barack Obama" |
|
} |
|
], |
|
### OPTIONAL: Only included when `parse_notes == True` |
|
"notes": [ # # lines in brat |
|
{ |
|
"id": datasets.Value("string"), |
|
"type": datasets.Value("string"), |
|
"ref_id": datasets.Value("string"), |
|
"text": datasets.Value("string"), |
|
} |
|
], |
|
}, |
|
) |
|
""" |
|
|
|
    example = {}
    example["document_id"] = txt_file.with_suffix("").name
    with txt_file.open() as f:
        example["text"] = f.read()
|
    # If no specific suffixes are given, fall back to the standard brat annotation files.
    if annotation_file_suffixes is None:
        annotation_file_suffixes = [".a1", ".a2", ".ann"]

    if len(annotation_file_suffixes) == 0:
        raise ValueError("At least one suffix for the to-be-read annotation files has to be given!")

    ann_lines = []
    for suffix in annotation_file_suffixes:
        annotation_file = txt_file.with_suffix(suffix)
        if annotation_file.exists():
            with annotation_file.open() as f:
                ann_lines.extend(f.readlines())
|
example["text_bound_annotations"] = [] |
|
example["events"] = [] |
|
example["relations"] = [] |
|
example["equivalences"] = [] |
|
example["attributes"] = [] |
|
example["normalizations"] = [] |
|
|
|
if parse_notes: |
|
example["notes"] = [] |
|
|
|
    for line in ann_lines:
        line = line.strip()
        if not line:
            continue

        if line.startswith("T"):  # text-bound annotation: entity mention or event trigger
            ann = {}
            fields = line.split("\t")

            ann["id"] = fields[0]
            ann["type"] = fields[1].split()[0]
            ann["offsets"] = []
            span_str = remove_prefix(fields[1], (ann["type"] + " "))
            text = fields[2]
            # Discontinuous annotations list several spans separated by semicolons,
            # e.g. "10 15;20 25".
            for span in span_str.split(";"):
                start, end = span.split()
                ann["offsets"].append([int(start), int(end)])

            # Split the referenced text into one chunk per span; brat joins the
            # chunks of a discontinuous annotation with single spaces.
            ann["text"] = []
            if len(ann["offsets"]) > 1:
                i = 0
                for start, end in ann["offsets"]:
                    chunk_len = end - start
                    ann["text"].append(text[i : chunk_len + i])
                    i += chunk_len
                    while i < len(text) and text[i] == " ":
                        i += 1
            else:
                ann["text"] = [text]

            example["text_bound_annotations"].append(ann)
|
elif line.startswith("E"): |
|
ann = {} |
|
fields = line.split("\t") |
|
|
|
ann["id"] = fields[0] |
|
|
|
ann["type"], ann["trigger"] = fields[1].split()[0].split(":") |
|
|
|
ann["arguments"] = [] |
|
for role_ref_id in fields[1].split()[1:]: |
|
argument = { |
|
"role": (role_ref_id.split(":"))[0], |
|
"ref_id": (role_ref_id.split(":"))[1], |
|
} |
|
ann["arguments"].append(argument) |
|
|
|
example["events"].append(ann) |
|
|
|
elif line.startswith("R"): |
|
ann = {} |
|
fields = line.split("\t") |
|
|
|
ann["id"] = fields[0] |
|
ann["type"] = fields[1].split()[0] |
|
|
|
ann["head"] = { |
|
"role": fields[1].split()[1].split(":")[0], |
|
"ref_id": fields[1].split()[1].split(":")[1], |
|
} |
|
ann["tail"] = { |
|
"role": fields[1].split()[2].split(":")[0], |
|
"ref_id": fields[1].split()[2].split(":")[1], |
|
} |
|
|
|
example["relations"].append(ann) |
|
|
|
|
|
|
|
|
|
|
|
elif line.startswith("*"): |
|
ann = {} |
|
fields = line.split("\t") |
|
|
|
ann["id"] = fields[0] |
|
ann["ref_ids"] = fields[1].split()[1:] |
|
|
|
example["equivalences"].append(ann) |
|
|
|
elif line.startswith("A") or line.startswith("M"): |
|
ann = {} |
|
fields = line.split("\t") |
|
|
|
ann["id"] = fields[0] |
|
|
|
info = fields[1].split() |
|
ann["type"] = info[0] |
|
ann["ref_id"] = info[1] |
|
|
|
if len(info) > 2: |
|
ann["value"] = info[2] |
|
else: |
|
ann["value"] = "" |
|
|
|
example["attributes"].append(ann) |
|
|
|
elif line.startswith("N"): |
|
ann = {} |
|
fields = line.split("\t") |
|
|
|
ann["id"] = fields[0] |
|
ann["text"] = fields[2] |
|
|
|
info = fields[1].split() |
|
|
|
ann["type"] = info[0] |
|
ann["ref_id"] = info[1] |
|
ann["resource_name"] = info[2].split(":")[0] |
|
ann["cuid"] = info[2].split(":")[1] |
|
example["normalizations"].append(ann) |
|
|
|
        elif parse_notes and line.startswith("#"):  # annotator note
            ann = {}
            fields = line.split("\t")

            ann["id"] = fields[0]
            ann["text"] = fields[2] if len(fields) == 3 else None

            info = fields[1].split()
            ann["type"] = info[0]
            ann["ref_id"] = info[1]

            example["notes"].append(ann)

    return example
|
|
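# A minimal illustration of `parse_brat_file` (hypothetical file contents, for
# illustration only): given 'data/1234.txt' and a 'data/1234.ann' containing the line
#     T1\tbackground_claim 10 24\tsome claim text
# the returned dict would contain, among the other keys,
#     {"document_id": "1234",
#      "text_bound_annotations": [{"id": "T1", "type": "background_claim",
#                                  "offsets": [[10, 24]], "text": ["some claim text"]}]}
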
_CITATION = """\
@inproceedings{lauscher2018b,
title = {An argument-annotated corpus of scientific publications},
booktitle = {Proceedings of the 5th Workshop on Argument Mining},
publisher = {Association for Computational Linguistics},
author = {Lauscher, Anne and Glava\v{s}, Goran and Ponzetto, Simone Paolo},
address = {Brussels, Belgium},
year = {2018},
pages = {40--46}
}
"""
|
_DESCRIPTION = """\
The SciArg dataset is an extension of the Dr. Inventor corpus (Fisas et al., 2015, 2016) with an annotation layer containing
fine-grained argumentative components and relations. It is the first argument-annotated corpus of scientific
publications (in English), which allows for joint analyses of argumentation and other rhetorical dimensions of
scientific writing.
"""

_URL = "http://data.dws.informatik.uni-mannheim.de/sci-arg/compiled_corpus.zip"
_HOMEPAGE = "https://github.com/anlausch/ArguminSci"
|
|
@dataclass
class SciArgConfig(datasets.BuilderConfig):
    """BuilderConfig for SciArg."""

    data_url: str = _URL
    # Maps subdirectories of the extracted archive to dataset splits.
    subdirectory_mapping: Dict[str, datasets.Split] = field(
        default_factory=lambda: {"compiled_corpus": datasets.Split.TRAIN}
    )
    # Document IDs to exclude from generation.
    filename_blacklist: List[str] = field(default_factory=list)
|
|
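# A config with non-default values can be constructed as usual; the values below
# are illustrative only:
#     SciArgConfig(
#         name="custom",
#         version="1.0.0",
#         filename_blacklist=["A01"],  # documents to skip, matched by `document_id`
#     )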
|
class SciArg(datasets.GeneratorBasedBuilder):
    """SciArg: an argument-annotated corpus of scientific publications."""

    BUILDER_CONFIG_CLASS = SciArgConfig

    BUILDER_CONFIGS = [
        SciArgConfig(
            name="full",
            version="1.0.0",
        ),
    ]

    DEFAULT_CONFIG_NAME = "full"
|
    def _info(self) -> datasets.DatasetInfo:
        features = datasets.Features(
            {
                "document_id": datasets.Value("string"),
                "text": datasets.Value("string"),
                "text_bound_annotations": [
                    {
                        "offsets": datasets.Sequence([datasets.Value("int32")]),
                        # One text chunk per (possibly discontinuous) span.
                        "text": datasets.Sequence(datasets.Value("string")),
                        "type": datasets.Value("string"),
                        "id": datasets.Value("string"),
                    }
                ],
                "relations": [
                    {
                        "id": datasets.Value("string"),
                        "head": {
                            "ref_id": datasets.Value("string"),
                            "role": datasets.Value("string"),
                        },
                        "tail": {
                            "ref_id": datasets.Value("string"),
                            "role": datasets.Value("string"),
                        },
                        "type": datasets.Value("string"),
                    }
                ],
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )
|
    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        # Wrap in Path so that a user-provided `data_dir` (a str) also supports `/`.
        data_dir = Path(self.config.data_dir or dl_manager.download_and_extract(self.config.data_url))

        return [
            datasets.SplitGenerator(name=split, gen_kwargs={"filepath": data_dir / subdir})
            for subdir, split in self.config.subdirectory_mapping.items()
        ]
|
    def _generate_examples(self, filepath):
        # Sort for a deterministic example order across file systems.
        for txt_file in sorted(glob.glob(str(filepath / "*.txt"))):
            brat_parsed = parse_brat_file(Path(txt_file))
            if brat_parsed["document_id"] in self.config.filename_blacklist:
                continue
            # Keep only the fields that are declared in `self.info.features`.
            relevant_subset = {f_name: brat_parsed[f_name] for f_name in self.info.features}
            yield brat_parsed["document_id"], relevant_subset
|
|
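# Minimal usage sketch, assuming this script is saved as "sciarg.py" (recent
# versions of `datasets` may additionally require `trust_remote_code=True`):
if __name__ == "__main__":
    dataset = datasets.load_dataset("sciarg.py", name="full")
    print(dataset["train"][0]["document_id"])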