# disrpt/disrpt.py: Hugging Face `datasets` loading script for the DISRPT shared task corpora
import datasets
import csv
import requests
import pandas as pd
import inspect
import copy
from .process_underscores import run
key_to_entry = requests.get(
    'https://www.dropbox.com/scl/fi/85pnc7n6e4puoureavtzo/filtered_disrpt.json?rlkey=6cbgbe9vn2549eths7ah8gm7u&dl=1'
).json()
citation = "\n".join(key_to_entry.values())
datasets_and_citations = {
    "deu.rst.pcc": "stede-neumann-2014-potsdam",
    "eng.dep.covdtb": "nishida-matsumoto-2022-domain",
    "eng.dep.scidtb": "yang-li-2018-scidtb",
    "eng.rst.gum": "Zeldes2017",
    "eng.rst.rstdt": "carlson-etal-2001-building",
    "eng.sdrt.stac": "asher-etal-2016-discourse",
    "eus.rst.ert": "IruskietaAranzabeIlarrazaEtAl2013",
    "fas.rst.prstc": "shahmohammadi2021persian",
    "fra.sdrt.annodis": "afantenos-etal-2012-empirical",
    "nld.rst.nldt": "redeker-etal-2012-multi",
    "por.rst.cstn": "CardosoMazieroRosarioCastroJorgeEtAl2011",
    "rus.rst.rrt": "toldova-etal-2017-rhetorical",
    "spa.rst.rststb": "da-cunha-etal-2011-development",
    "spa.rst.sctb": "cao-etal-2018-rst",
    "zho.dep.scidtb": "yi-etal-2021-unifying,cheng-li-2019-zero",
    "zho.rst.gcdt": "peng_gcdt_2022,peng_chinese_2022",
    "zho.rst.sctb": "cao-etal-2018-rst",
    "eng.pdtb.pdtb": "prasad-etal-2014-reflections",
    "eng.pdtb.tedm": "zeyrek-etal-2018-multilingual,zeyrek2019ted",
    "ita.pdtb.luna": "tonelli-etal-2010-annotation,RiccardiStepanovChowdhury2016",
    "por.pdtb.crpc": "CRPC-DB-Portuguese,genereux-etal-2012-introducing",
    "por.pdtb.tedm": "zeyrek-etal-2018-multilingual,zeyrek2019ted",
    "tha.pdtb.tdtb": "",
    "tur.pdtb.tdb": "zeyrek-webber-2008-discourse,zeyrek-kurfali-2017-tdb",
    "tur.pdtb.tedm": "zeyrek-etal-2018-multilingual,zeyrek2019ted",
    "zho.pdtb.cdtb": "Zhou2014"
}
class Config(datasets.BuilderConfig):
    citation = citation
files = [
    "deu.rst.pcc",
    "eng.dep.covdtb",
    "eng.dep.scidtb",
    "eng.pdtb.gum",
    "eng.pdtb.pdtb",
    "eng.pdtb.tedm",
    "eng.rst.gentle",
    "eng.rst.gum",
    "eng.rst.rstdt",
    "eng.sdrt.stac",
    "eus.rst.ert",
    "fas.rst.prstc",
    "fra.sdrt.annodis",
    "ita.pdtb.luna",
    "nld.rst.nldt",
    "por.pdtb.crpc",
    "por.pdtb.tedm",
    "por.rst.cstn",
    "rus.rst.rrt",
    "spa.rst.rststb",
    "spa.rst.sctb",
    "tha.pdtb.tdtb",
    "tur.pdtb.tdb",
    "tur.pdtb.tedm",
    "zho.dep.scidtb",
    "zho.pdtb.cdtb",
    "zho.rst.gcdt",
    "zho.rst.sctb"
]
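
# Note: the corpus identifiers above follow the DISRPT naming scheme
# <language>.<framework>.<corpus>. Each corpus is exposed as two builder
# configurations below, "<name>.rels" (relation-level data) and
# "<name>.conllu" (token-level data with segmentation labels).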
def fix_mwe(sentence):
    # Record the surface form of multi-word token ranges (ids like "3-4") in a
    # `parent_mwe` column, then drop the range rows so the per-token lists stay aligned.
    mwe = {}
    sentence['parent_mwe'] = []
    for i, x in enumerate(sentence['id']):
        if '-' in x:
            for a in x.split('-'):
                mwe[a] = sentence['form'][i]
        sentence['parent_mwe'] += [mwe.get(x, '')]
    for i, x in enumerate(sentence['id']):
        if "-" in x:
            for k, v in sentence.items():
                del v[i]
    return sentence
def parse_conll_stream(file_stream):
    # Stream sentences from a CoNLL-U file as dicts of parallel per-token lists.
    names = ['id', 'form', 'lemma', 'upos', 'xpos', 'feats', 'head', 'deprel', 'deps', 'misc', 'doc_id']
    sentence = {name: [] for name in names}
    mwe_id = []
    doc_id = ''  # guard in case a token line precedes the first doc_id comment
    for line in file_stream:
        line = line.strip()
        if line.startswith("#"):
            if "doc_id" in line:
                doc_id = line.split('=')[-1].strip()
            continue
        if not line:
            if sentence['id']:
                yield sentence
            sentence = {name: [] for name in names}
            continue
        token_data = line.split('\t') + [doc_id]
        for name, value in zip(names, token_data):
            if name == 'id' and not value.isnumeric():
                mwe_id = value.split('-')
            else:
                sentence[name].append(value)
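
# Illustrative sketch (example data only, not used by the loader): each item
# yielded by parse_conll_stream is a dict of parallel per-token lists keyed by
# the CoNLL-U column names plus doc_id, roughly of the form
#   {'id': ['1', '2'], 'form': ['Hello', 'world'], 'upos': ['INTJ', 'NOUN'], ...,
#    'misc': ['BeginSeg=Yes', '_'], 'doc_id': ['some_doc', 'some_doc']}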
def get_kwarg_names(func):
    return [k for k, v in inspect.signature(func).parameters.items() if v.default != v.empty]
_URLs = {
    f'{task}-{split}.{type}': f"https://raw.githubusercontent.com/disrpt/sharedtask2023/main/data/{task}/{task}_{split}.{type}"
    for task in files for split in 'train dev test'.split() for type in ['rels', 'conllu']
}
#_URLs = {k:v for k,v in _URLs.items() if requests.get(v).status_code!=404}
conllu_features = ['id', 'form', 'lemma', 'upos', 'xpos', 'feats', 'head', 'deprel', 'deps', 'misc', 'seg', 'doc_id']
feature_type = {
    "seg": datasets.features.Sequence(datasets.features.ClassLabel(names=["O", "B-Segment"])),
    'id': datasets.Value("string"),
    'doc_id': datasets.Value("string")
}
conllu_features = datasets.Features({
    x: feature_type.get(x, datasets.Sequence(datasets.Value("string")))
    for x in conllu_features
})
def map_seg(x):
    return [("B-Segment" if "beginseg=yes" in a.lower() else "O") for a in x]

def remove_type(x):
    return x.replace(".rels", "").replace(".conllu", "")
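
# Illustrative sketch (example values only, not part of the loader):
#   map_seg(["BeginSeg=Yes", "_", "BeginSeg=Yes|SpaceAfter=No"])
#   -> ["B-Segment", "O", "B-Segment"]
#   remove_type("eng.rst.gum.rels") -> "eng.rst.gum"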
class Dataset(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        Config(
            name=f"{n}.{type}",
            data_dir=f"{n}.{type}",
        ) for n in files for type in ["rels", "conllu"]
    ]

    def __init__(self, *args, **kwargs):
        self.BUILDER_CONFIG_CLASS.__post_init__ = lambda x: x
        base_kwargs_names = get_kwarg_names(super().__init__)
        gen_kwargs = {}
        self.files = {}
        self.preprocessed_underscores = dict()
        # Stash any keyword arguments the base builder does not accept so they can
        # be used later by example generation, and pass the rest on to super().
        for k, v in copy.deepcopy(kwargs).items():
            if k not in base_kwargs_names:
                gen_kwargs[k] = v
                del kwargs[k]
        self.gen_kwargs = gen_kwargs
        return super().__init__(*args, **kwargs)
    def _split_generators(self, dl_manager: datasets.DownloadManager):
        cfg_name = self.config.name.rsplit('.', 1)[0]
        data_dir = remove_type(self.config.data_dir)
        type = self.config.name.split('.')[-1]
        urls = {k: v for (k, v) in _URLs.items() if cfg_name in k and requests.get(v).status_code != 404}
        data_file = dl_manager.download(urls)
        self.files = {**self.files, **data_file}
        splits_dict = {datasets.Split.TRAIN: 'train', datasets.Split.VALIDATION: 'dev', datasets.Split.TEST: 'test'}
        split_generators = [
            datasets.SplitGenerator(name=split, gen_kwargs={"filepath": data_file[f"{data_dir}-{key}.{type}"]})
            for split, key in splits_dict.items()
            if f"{data_dir}-{key}.{type}" in data_file
        ]
        return split_generators
    def _info(self):
        return datasets.DatasetInfo(
            citation=key_to_entry.get(datasets_and_citations.get(remove_type(self.config.name)), None),
            features=(None if ".rels" in self.config.name else conllu_features)
        )
    def _generate_examples(self, filepath):
        print(filepath)
        corpus = self.config.name.split('.')[2]
        run_args = {
            'corpus': corpus,
            'rel_files': [v for k, v in self.files.items() if '.rels' in k],
            'dep_files': [v for k, v in self.files.items() if '.conllu' in k],
            **{k: v for k, v in self.gen_kwargs.items() if 'path' in k}
        }
        print('run_args', run_args)
        # Corpora whose text is distributed as underscores for licensing reasons are
        # restored once per corpus via process_underscores.run before generation.
        if corpus in ['rstdt', 'pdtb', 'cdtb', 'gum', 'tdb'] and not self.preprocessed_underscores.get(corpus, False) and self.gen_kwargs.get('process_underscore', True):
            run(**run_args)
            self.preprocessed_underscores[corpus] = True
        with open(filepath, encoding="utf-8") as f:
            if "conllu" in self.config.name:
                stream = parse_conll_stream(f)
                for i, row in enumerate(stream):
                    row['seg'] = map_seg(row['misc'])
                    row['doc_id'] = row['doc_id'][0]
                    yield i, row
                return  # the .rels handling below does not apply to conllu configs
            reader = csv.DictReader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
            for id_, row in enumerate(reader):
                if id_ == 0:
                    continue
                yield id_, row
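
# Example usage (a minimal sketch; assumes this script is published on the
# Hugging Face Hub as "disrpt/disrpt"; recent `datasets` releases also require
# trust_remote_code=True for script-based datasets):
#
#   import datasets
#   rels = datasets.load_dataset("disrpt/disrpt", "eng.rst.gum.rels")
#   conllu = datasets.load_dataset("disrpt/disrpt", "eng.rst.gum.conllu")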