Datasets:

Modalities:
Text
Formats:
parquet
Languages:
English
Libraries:
Datasets
pandas
License:
disrpt / disrpt.py
sileod's picture
Update disrpt.py
fde9896
raw
history blame
6.78 kB
import datasets
import csv
import requests
import pandas as pd
import inspect
import copy
import ftfy
from .process_underscores import run
# BibTeX bibliography entries keyed by citation key, fetched from Dropbox.
# NOTE(review): this performs a blocking network request at import time —
# the module cannot even be imported offline; consider lazy loading.
key_to_entry = requests.get('https://www.dropbox.com/scl/fi/85pnc7n6e4puoureavtzo/filtered_disrpt.json?rlkey=6cbgbe9vn2549eths7ah8gm7u&dl=1').json()
# Concatenation of every known bibliography entry; used as the class-level
# default citation on Config below.
citation = "\n".join(key_to_entry.values())
# Maps each DISRPT corpus id ({lang}.{framework}.{corpus}) to the comma-separated
# citation key(s) used to look up its BibTeX entry in `key_to_entry` (see _info).
# An empty string means no citation is available for that corpus.
datasets_and_citations = {
    "deu.rst.pcc": "stede-neumann-2014-potsdam",
    "eng.dep.covdtb": "nishida-matsumoto-2022-domain",
    "eng.dep.scidtb": "yang-li-2018-scidtb",
    "eng.rst.gum": "Zeldes2017",
    "eng.rst.rstdt": "carlson-etal-2001-building",
    "eng.sdrt.stac": "asher-etal-2016-discourse",
    "eus.rst.ert": "IruskietaAranzabeIlarrazaEtAl2013",
    "fas.rst.prstc": "shahmohammadi2021persian",
    "fra.sdrt.annodis": "afantenos-etal-2012-empirical",
    "nld.rst.nldt": "redeker-etal-2012-multi",
    "por.rst.cstn": "CardosoMazieroRosarioCastroJorgeEtAl2011",
    "rus.rst.rrt": "toldova-etal-2017-rhetorical",
    "spa.rst.rststb": "da-cunha-etal-2011-development",
    "spa.rst.sctb": "cao-etal-2018-rst",
    "zho.dep.scidtb": "yi-etal-2021-unifying,cheng-li-2019-zero",
    "zho.rst.gcdt": "peng_gcdt_2022,peng_chinese_2022",
    "zho.rst.sctb": "cao-etal-2018-rst",
    "eng.pdtb.pdtb": "prasad-etal-2014-reflections",
    "eng.pdtb.tedm": "zeyrek-etal-2018-multilingual,zeyrek2019ted",
    "ita.pdtb.luna": "tonelli-etal-2010-annotation,RiccardiStepanovChowdhury2016",
    "por.pdtb.crpc": "CRPC-DB-Portuguese,genereux-etal-2012-introducing",
    "por.pdtb.tedm": "zeyrek-etal-2018-multilingual,zeyrek2019ted",
    "tha.pdtb.tdtb": "",
    "tur.pdtb.tdb": "zeyrek-webber-2008-discourse,zeyrek-kurfali-2017-tdb",
    "tur.pdtb.tedm": "zeyrek-etal-2018-multilingual,zeyrek2019ted",
    "zho.pdtb.cdtb": "Zhou2014"
}
class Config(datasets.BuilderConfig):
    """BuilderConfig carrying the aggregate citation string for all DISRPT corpora."""
    # Class-level attribute: the concatenation of every bibliography entry
    # built at module import time.
    citation = citation
# All corpus identifiers from the DISRPT 2023 shared task; one pair of
# builder configs (".rels" and ".conllu") is created per entry below.
files = [
    "eng.dep.covdtb",
    "eng.dep.scidtb",
    "eng.pdtb.pdtb",
    "eng.pdtb.tedm",
    "eng.rst.gum",
    "eng.rst.rstdt",
    "eng.sdrt.stac",
    "deu.rst.pcc",
    "eus.rst.ert",
    "fas.rst.prstc",
    "fra.sdrt.annodis",
    "ita.pdtb.luna",
    "nld.rst.nldt",
    "por.pdtb.crpc",
    "por.pdtb.tedm",
    "por.rst.cstn",
    "rus.rst.rrt",
    "spa.rst.rststb",
    "spa.rst.sctb",
    "tha.pdtb.tdtb",
    "tur.pdtb.tdb",
    "tur.pdtb.tedm",
    "zho.dep.scidtb",
    "zho.pdtb.cdtb",
    "zho.rst.gcdt",
    "zho.rst.sctb",
]
def parse_conll_stream(file_stream):
    """Lazily parse a CoNLL-U text stream into per-sentence column dicts.

    Args:
        file_stream: iterable of lines (e.g. an open text file).

    Yields:
        dict mapping each CoNLL-U column name ('id', 'form', ..., 'misc')
        to the list of token values for one sentence. Comment lines
        (starting with '#') are skipped; a blank line ends a sentence.
    """
    names = ['id', 'form', 'lemma', 'upos', 'xpos', 'feats', 'head', 'deprel', 'deps', 'misc']
    sentence = {name: [] for name in names}
    for line in file_stream:
        line = line.strip()
        if line.startswith("#"):
            continue
        if not line:
            # Blank line: sentence boundary. Only yield if tokens were collected.
            if sentence['id']:
                yield sentence
            sentence = {name: [] for name in names}
            continue
        token_data = line.split('\t')
        for name, value in zip(names, token_data):
            sentence[name].append(value)
    # BUG FIX: flush the final sentence when the stream does not end with a
    # blank line — previously the last sentence was silently dropped.
    if sentence['id']:
        yield sentence
def get_kwarg_names(func):
    """Return the names of *func*'s parameters that declare a default value."""
    params = inspect.signature(func).parameters
    names = []
    for name, param in params.items():
        if param.default != param.empty:
            names.append(name)
    return names
# Candidate raw-GitHub URLs for every (corpus, split, file type) combination,
# keyed as f"{task}-{split}.{type}". Some combinations do not exist upstream
# (e.g. corpora without a train split); those are filtered out with a 404
# probe inside Dataset._split_generators rather than here.
_URLs = {f'{task}-{split}.{type}':f"https://raw.githubusercontent.com/disrpt/sharedtask2023/main/data/{task}/{task}_{split}.{type}" \
    for task in files for split in 'train dev test'.split() for type in ['rels','conllu']}
#_URLs = {k:v for k,v in _URLs.items() if requests.get(v).status_code!=404}
# CoNLL-U column names plus the derived "seg" (segmentation tag) column.
conllu_features = ['id', 'form', 'lemma', 'upos', 'xpos', 'feats', 'head', 'deprel', 'deps', 'misc', 'seg']
# Per-column feature overrides; every other column defaults to Sequence(string).
# NOTE(review): 'id' is typed as a scalar string while parse_conll_stream
# produces a list of ids per sentence — confirm the datasets cast behavior.
feature_type = {"seg":datasets.features.Sequence(
        datasets.features.ClassLabel(names=["O","B-Segment"])),
    'id':datasets.Value("string")}
conllu_features = datasets.Features({x:feature_type.get(x,datasets.Sequence(datasets.Value("string")))
    for x in conllu_features})
def map_seg(x):
    """Map MISC column values to segmentation tags.

    A value containing 'beginseg=yes' (case-insensitive) becomes "B-Segment";
    everything else becomes "O".
    """
    tags = []
    for misc_value in x:
        if "beginseg=yes" in misc_value.lower():
            tags.append("B-Segment")
        else:
            tags.append("O")
    return tags
def remove_type(x):
    """Strip the file-type suffix (".rels" / ".conllu") from a config name."""
    for suffix in (".rels", ".conllu"):
        x = x.replace(suffix, "")
    return x
class Dataset(datasets.GeneratorBasedBuilder):
    """DISRPT 2023 shared-task loader: one config per corpus and file type.

    Each corpus in `files` gets a ".rels" (discourse relations, TSV) and a
    ".conllu" (segmentation, CoNLL-U) config.
    """
    BUILDER_CONFIGS = [
        Config(
            name=f"{n}.{type}",
            data_dir=f"{n}.{type}",
        ) for n in files for type in ["rels", "conllu"]
    ]

    def __init__(self, *args, **kwargs):
        # BuilderConfig's __post_init__ is neutralized so Config's extra
        # class attribute (citation) does not trip dataclass validation.
        self.BUILDER_CONFIG_CLASS.__post_init__ = lambda x: x
        base_kwargs_names = get_kwarg_names(super().__init__)
        gen_kwargs = {}
        self.files = {}
        # Split caller kwargs: anything the base __init__ does not accept is
        # stashed in self.gen_kwargs instead of being forwarded.
        for k, v in copy.deepcopy(kwargs).items():
            if k not in base_kwargs_names:
                gen_kwargs[k] = v
                del kwargs[k]
        self.gen_kwargs = gen_kwargs
        return super().__init__(*args, **kwargs)

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        cfg_name = self.config.name.rsplit('.', 1)[0]
        data_dir = remove_type(self.config.data_dir)
        print("datadir:", data_dir)
        type = self.config.name.split('.')[-1]
        # Keep only URLs for this corpus that actually exist upstream
        # (some corpora, e.g. eng.dep.covdtb, ship no train split).
        urls = {k: v for (k, v) in _URLs.items() if cfg_name in k and requests.get(v).status_code != 404}
        data_file = dl_manager.download(urls)
        self.files = {**self.files, **data_file}
        # BUG FIX: keys in `data_file` carry the file-type suffix
        # (f"{task}-{split}.{type}", see _URLs), so the bare "…-train" key
        # could never match and the TRAIN split was always dropped. Build the
        # key the same way the dev/test keys are built below.
        train_key = data_dir + '-train.' + type
        print("datafile:", data_file, self.config.data_dir)
        if train_key in data_file:
            train = [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_file[train_key]})]
        else:
            train = []
        return train + [
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_file[data_dir + '-dev.' + type]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_file[data_dir + '-test.' + type]}),
        ]

    def _info(self):
        return datasets.DatasetInfo(
            # Look up the BibTeX entry for this corpus; None when unknown.
            citation=key_to_entry.get(datasets_and_citations.get(remove_type(self.config.name)), None),
            # .rels schema is inferred from the TSV; .conllu uses explicit features.
            features=(None if ".rels" in self.config.name else conllu_features)
        )

    def _generate_examples(self, filepath):
        """Yield (key, example) pairs from a downloaded .conllu or .rels file."""
        print(filepath)
        run_args = {
            'rel_files': [v for k, v in self.files.items() if 'rels' in k],
            'dep_files': [v for k, v in self.files.items() if 'conllu' in k]
        }
        print('run_args', run_args)
        # Restore underscore-masked text (licensing) in the downloaded files
        # before reading them.
        run('gum', **run_args)
        with open(filepath, encoding="utf-8") as f:
            if "conllu" in self.config.name:
                stream = parse_conll_stream(f)
                for i, row in enumerate(stream):
                    row['seg'] = map_seg(row['misc'])
                    yield i, row
                # Do not fall through to the .rels reader on an exhausted stream.
                return
            reader = csv.DictReader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
            # NOTE(review): DictReader already consumed the header line as
            # fieldnames, so skipping id_ == 0 drops the first data row —
            # preserved as-is; confirm this is intentional.
            for id_, row in enumerate(reader):
                if id_ == 0:
                    continue
                yield id_, row