Modalities: Text
Formats: parquet
Languages: English
Libraries: Datasets, pandas
License:
sileod committed
Commit e0b06e3
1 Parent(s): bb10b06

Update disrpt.py

Files changed (1):
  disrpt.py +91 -36
disrpt.py CHANGED
@@ -1,10 +1,44 @@
 import datasets
 import csv
 import requests
+import pandas as pd
+import inspect
+import copy
+import ftfy
 
 
-citation='''
-'''
+key_to_entry = requests.get('https://www.dropbox.com/scl/fi/85pnc7n6e4puoureavtzo/filtered_disrpt.json?rlkey=6cbgbe9vn2549eths7ah8gm7u&dl=1').json()
+citation="\n".join(key_to_entry.values())
+
+datasets_and_citations = {
+    "deu.rst.pcc": "stede-neumann-2014-potsdam",
+    "eng.dep.covdtb": "nishida-matsumoto-2022-domain",
+    "eng.dep.scidtb": "yang-li-2018-scidtb",
+    "eng.rst.gum": "Zeldes2017",
+    "eng.rst.rstdt": "carlson-etal-2001-building",
+    "eng.sdrt.stac": "asher-etal-2016-discourse",
+    "eus.rst.ert": "IruskietaAranzabeIlarrazaEtAl2013",
+    "fas.rst.prstc": "shahmohammadi2021persian",
+    "fra.sdrt.annodis": "afantenos-etal-2012-empirical",
+    "nld.rst.nldt": "redeker-etal-2012-multi",
+    "por.rst.cstn": "CardosoMazieroRosarioCastroJorgeEtAl2011",
+    "rus.rst.rrt": "toldova-etal-2017-rhetorical",
+    "spa.rst.rststb": "da-cunha-etal-2011-development",
+    "spa.rst.sctb": "cao-etal-2018-rst",
+    "zho.dep.scidtb": "yi-etal-2021-unifying,cheng-li-2019-zero",
+    "zho.rst.gcdt": "peng_gcdt_2022,peng_chinese_2022",
+    "zho.rst.sctb": "cao-etal-2018-rst",
+    "eng.pdtb.pdtb": "prasad-etal-2014-reflections",
+    "eng.pdtb.tedm": "zeyrek-etal-2018-multilingual,zeyrek2019ted",
+    "ita.pdtb.luna": "tonelli-etal-2010-annotation,RiccardiStepanovChowdhury2016",
+    "por.pdtb.crpc": "CRPC-DB-Portuguese,genereux-etal-2012-introducing",
+    "por.pdtb.tedm": "zeyrek-etal-2018-multilingual,zeyrek2019ted",
+    "tha.pdtb.tdtb": "",
+    "tur.pdtb.tdb": "zeyrek-webber-2008-discourse,zeyrek-kurfali-2017-tdb",
+    "tur.pdtb.tedm": "zeyrek-etal-2018-multilingual,zeyrek2019ted",
+    "zho.pdtb.cdtb": "Zhou2014"
+}
+
 
 class Config(datasets.BuilderConfig):
     citation=citation
@@ -38,59 +72,80 @@ files = [
     "zho.rst.sctb",
 ]
 
-files_2021 = [
-    "deu.rst.pcc",
-    "eng.pdtb.pdtb",
-    "eng.rst.gum",
-    "eng.rst.rstdt",
-    "eng.sdrt.stac",
-    "eus.rst.ert",
-    "fas.rst.prstc",
-    "fra.sdrt.annodis",
-    "nld.rst.nldt",
-    "por.rst.cstn",
-    "rus.rst.rrt",
-    "spa.rst.rststb",
-    "spa.rst.sctb",
-    "tur.pdtb.tdb",
-    "zho.pdtb.cdtb",
-    "zho.rst.sctb",
-]
-
-_URLs = {f'{task}-{split}':f"https://raw.githubusercontent.com/disrpt/sharedtask2023/main/data/{task}/{task}_{split}.rels" \
-    for task in files for split in 'train dev test'.split()}
-_URLs = {k:v for k,v in _URLs.items() if requests.get(v).status_code!=404}
+def parse_conll_stream(file_stream):
+    # stream sentences out of a CoNLL-U file as dicts of column lists
+    names = ['id', 'form', 'lemma', 'upos', 'xpos', 'feats', 'head', 'deprel', 'deps', 'misc']
+    sentence = {name: [] for name in names}
+
+    for line in file_stream:
+        line = line.strip()
+        if line.startswith("#"):
+            continue
+        if not line:
+            if sentence['id']:
+                yield sentence
+                sentence = {name: [] for name in names}
+            continue
+        token_data = line.split('\t')
+        for name, value in zip(names, token_data):
+            sentence[name].append(value)
 
+def get_kwarg_names(func):
+    return [k for k, v in inspect.signature(func).parameters.items() if v.default != v.empty]
+
+_URLs = {f'{task}-{split}.{type}':f"https://raw.githubusercontent.com/disrpt/sharedtask2023/main/data/{task}/{task}_{split}.{type}" \
+    for task in files for split in 'train dev test'.split() for type in ['rels','conllu']}
+#_URLs = {k:v for k,v in _URLs.items() if requests.get(v).status_code!=404}
 
 class Dataset(datasets.GeneratorBasedBuilder):
 
     BUILDER_CONFIGS = [
         Config(
-            name=n,
-            data_dir=n
-        ) for n in files
+            name=f"{n}.{type}",
+            data_dir=f"{n}.{type}"
+        ) for n in files for type in ["rels","conllu"]
     ]
-
+    def __init__(self,*args,**kwargs):
+        self.BUILDER_CONFIG_CLASS.__post_init__=lambda x:x
+        base_kwargs_names=get_kwarg_names(super().__init__)
+        gen_kwargs={}
+        for k,v in copy.deepcopy(kwargs).items():
+            # siphon off kwargs the parent __init__ does not accept
+            if k not in base_kwargs_names:
+                gen_kwargs[k]=v
+                del kwargs[k]
+        self.gen_kwargs=gen_kwargs
+        return super().__init__(*args,**kwargs)
+
     def _split_generators(self, dl_manager: datasets.DownloadManager):
-        data_file = dl_manager.download(_URLs)
-        train_key = self.config.data_dir+'-train'
+        cfg_name = self.config.name.rsplit('.', 1)[0]
+        data_dir = self.config.data_dir.replace(".rels","").replace(".conllu","")
+        type = self.config.name.split('.')[-1]
+        # only download the splits that actually exist for this corpus
+        urls={k:v for (k,v) in _URLs.items() if cfg_name in k and requests.get(v).status_code!=404}
+        data_file = dl_manager.download(urls)
+        train_key = data_dir+'-train.'+type  # match the '{task}-{split}.{type}' key format of _URLs
         if train_key in data_file:
             train=[datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_file[train_key]})]
         else:
             train=[]
         return train+[
-            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_file[self.config.data_dir+'-dev']}),
-            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_file[self.config.data_dir+'-test']}),
-
+            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_file[data_dir+'-dev.'+type]}),
+            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_file[data_dir+'-test.'+type]}),
         ]
 
-
-    def _info(self): return datasets.DatasetInfo()
+    def _info(self): return datasets.DatasetInfo(
+        # config names carry a ".rels"/".conllu" suffix; strip it before the citation lookup
+        citation=key_to_entry.get(datasets_and_citations.get(self.config.name.rsplit('.', 1)[0]), None)
+    )
 
     def _generate_examples(self, filepath):
         with open(filepath, encoding="utf-8") as f:
-            reader = csv.DictReader(f,delimiter='\t')
+            if "conllu" in self.config.name:
+                stream=parse_conll_stream(f)
+                for i, row in enumerate(stream):
+                    yield i,row
+                return  # do not fall through to the .rels reader
+            reader = csv.DictReader(f,delimiter='\t',quoting=csv.QUOTE_NONE)
             for id_, row in enumerate(reader):
                 if id_ == 0:
                     continue
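After this change, each corpus exposes two configs, `<corpus>.rels` and `<corpus>.conllu`. A minimal usage sketch; the repo id `sileod/disrpt` is an assumption (it is not stated in the diff), and `trust_remote_code=True` reflects what recent versions of `datasets` require for script-based datasets:

import datasets

# Load the GUM relation-classification data (".rels" config); the repo id is assumed.
rels = datasets.load_dataset("sileod/disrpt", "eng.rst.gum.rels", trust_remote_code=True)
print(rels["validation"][0])

# The same corpus as parsed sentences (".conllu" config).
conllu = datasets.load_dataset("sileod/disrpt", "eng.rst.gum.conllu", trust_remote_code=True)
print(conllu["test"][0]["form"][:10])  # first ten surface tokens of the first sentence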
 
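For reference, this is how the new `parse_conll_stream` helper behaves on a small in-memory fragment (the sentence content is invented for illustration). Note that, as written, the function only yields when it reaches a blank line, so a file whose last sentence lacks a trailing blank line would drop that sentence:

import io

sample = (
    "# sent_id = example-1\n"
    "1\tHello\thello\tINTJ\tUH\t_\t0\troot\t_\t_\n"
    "2\tworld\tworld\tNOUN\tNN\t_\t1\tvocative\t_\t_\n"
    "\n"  # blank line: the sentence boundary that triggers the yield
)

for sent in parse_conll_stream(io.StringIO(sample)):
    print(sent["form"])  # ['Hello', 'world']
    print(sent["upos"])  # ['INTJ', 'NOUN']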
 
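The `__init__` override uses `inspect` to split incoming keyword arguments into those the parent builder understands and those the script keeps for itself. The same trick in isolation, with a hypothetical `base` function standing in for the parent `__init__`:

import inspect

def get_kwarg_names(func):
    # names of parameters that have a default value
    return [k for k, v in inspect.signature(func).parameters.items() if v.default != v.empty]

def base(a, b=1, c=2):  # hypothetical stand-in for the parent __init__
    return a + b + c

kwargs = {"b": 10, "custom_flag": True}  # "custom_flag" is unknown to base()
extras = {k: kwargs.pop(k) for k in list(kwargs) if k not in get_kwarg_names(base)}

print(base(0, **kwargs))  # 12 -- only the recognized kwarg "b" is passed through
print(extras)             # {'custom_flag': True}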
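Finally, the switch to `quoting=csv.QUOTE_NONE` in the `.rels` reader matters because unit text can begin with a bare double quote, which the default csv quoting rules would misread as the start of a quoted field, swallowing the tab delimiter. A small illustration; the header here is a stand-in, not the full `.rels` schema:

import csv
import io

tsv = 'doc\tunit1_txt\tlabel\n' \
      'doc1\t" So I was told\telaboration\n'

rows = list(csv.DictReader(io.StringIO(tsv), delimiter='\t', quoting=csv.QUOTE_NONE))
print(rows[0]['unit1_txt'])  # '" So I was told' -- the quote stays literal
print(rows[0]['label'])      # 'elaboration'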