Update disrpt.py
Browse files
disrpt.py
CHANGED
@@ -4,6 +4,7 @@ import requests
|
|
4 |
import pandas as pd
|
5 |
import inspect
|
6 |
import copy
|
|
|
7 |
|
8 |
|
9 |
key_to_entry = requests.get('https://www.dropbox.com/scl/fi/85pnc7n6e4puoureavtzo/filtered_disrpt.json?rlkey=6cbgbe9vn2549eths7ah8gm7u&dl=1').json()
|
@@ -94,13 +95,28 @@ def get_kwarg_names(func):
|
|
94 |
|
95 |
_URLs = {f'{task}-{split}.{type}':f"https://raw.githubusercontent.com/disrpt/sharedtask2023/main/data/{task}/{task}_{split}.{type}" \
|
96 |
for task in files for split in 'train dev test'.split() for type in ['rels','conllu']}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
97 |
|
98 |
class Dataset(datasets.GeneratorBasedBuilder):
|
99 |
|
100 |
BUILDER_CONFIGS = [
|
101 |
Config(
|
102 |
name=f"{n}.{type}",
|
103 |
-
data_dir=f"{n}.{type}"
|
104 |
) for n in files for type in ["rels","conllu"]
|
105 |
]
|
106 |
def __init__(self,*args,**kwargs):
|
@@ -116,11 +132,13 @@ class Dataset(datasets.GeneratorBasedBuilder):
|
|
116 |
|
117 |
def _split_generators(self, dl_manager: datasets.DownloadManager):
|
118 |
cfg_name = self.config.name.rsplit('.', 1)[0]
|
119 |
-
data_dir = self.config.data_dir
|
|
|
120 |
type = self.config.name.split('.')[-1]
|
121 |
urls={k:v for (k,v) in _URLs.items() if cfg_name in k and requests.get(v).status_code!=404}
|
122 |
data_file = dl_manager.download(urls)
|
123 |
train_key = data_dir+'-train'
|
|
|
124 |
if train_key in data_file:
|
125 |
train=[datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_file[train_key]})]
|
126 |
else:
|
@@ -131,7 +149,8 @@ class Dataset(datasets.GeneratorBasedBuilder):
|
|
131 |
]
|
132 |
|
133 |
def _info(self): return datasets.DatasetInfo(
|
134 |
-
citation=key_to_entry.get(datasets_and_citations.get(self.config.name),None)
|
|
|
135 |
)
|
136 |
|
137 |
def _generate_examples(self, filepath):
|
@@ -139,6 +158,7 @@ class Dataset(datasets.GeneratorBasedBuilder):
|
|
139 |
if "conllu" in self.config.name:
|
140 |
stream=parse_conll_stream(f)
|
141 |
for i, row in enumerate(stream):
|
|
|
142 |
yield i,row
|
143 |
reader = csv.DictReader(f,delimiter='\t',quoting=csv.QUOTE_NONE)
|
144 |
for id_, row in enumerate(reader):
|
|
|
4 |
import pandas as pd
|
5 |
import inspect
|
6 |
import copy
|
7 |
+
import ftfy
|
8 |
|
9 |
|
10 |
# Mapping from citation key to citation entry (used for DatasetInfo.citation).
# NOTE(review): module-level network call — import fails offline; confirm acceptable.
_CITATION_JSON_URL = ('https://www.dropbox.com/scl/fi/85pnc7n6e4puoureavtzo/'
                      'filtered_disrpt.json?rlkey=6cbgbe9vn2549eths7ah8gm7u&dl=1')
key_to_entry = requests.get(_CITATION_JSON_URL).json()
|
|
|
95 |
|
96 |
# Candidate raw-file URLs on the DISRPT 2023 shared-task GitHub repo,
# keyed "<task>-<split>.<ext>".  Combinations that do not exist are
# filtered out later via a 404 check before download.
_URLs = {
    f'{task}-{split}.{type}':
        f"https://raw.githubusercontent.com/disrpt/sharedtask2023/main/data/{task}/{task}_{split}.{type}"
    for task in files
    for split in ['train', 'dev', 'test']
    for type in ['rels', 'conllu']
}
#_URLs = {k:v for k,v in _URLs.items() if requests.get(v).status_code!=404}
|
99 |
+
|
100 |
+
# The CoNLL-U columns exposed per example, plus a derived "seg" BIO column.
conllu_features = ['id', 'form', 'lemma', 'upos', 'xpos', 'feats',
                   'head', 'deprel', 'deps', 'misc', 'seg']

# Columns with a non-default feature type: "seg" is a sequence of BIO class
# labels, "id" is a plain string value; every other column defaults to a
# sequence of strings.
feature_type = {
    "seg": datasets.features.Sequence(
        datasets.features.ClassLabel(names=["O", "B-Segment"])
    ),
    "id": datasets.Value("string"),
}

conllu_features = datasets.Features(
    {col: feature_type.get(col, datasets.Sequence(datasets.Value("string")))
     for col in conllu_features}
)
|
107 |
+
|
108 |
+
def map_seg(x):
    """Map CoNLL-U 'misc' column entries to BIO segmentation labels.

    An entry containing 'beginseg=yes' (case-insensitive) yields
    'B-Segment'; any other entry yields 'O'.
    """
    labels = []
    for misc_value in x:
        if "beginseg=yes" in misc_value.lower():
            labels.append("B-Segment")
        else:
            labels.append("O")
    return labels
|
110 |
+
|
111 |
+
def remove_type(x):
    """Strip a trailing '.rels' or '.conllu' file-type extension.

    Args:
        x: a config/file name such as 'eng.rst.gum.rels'.

    Returns:
        The name without its trailing extension; unchanged if neither
        extension is present.

    Fix: the previous str.replace-based version removed '.rels'/'.conllu'
    occurring *anywhere* in the string, which would mangle any name that
    happened to contain those substrings mid-string; only a trailing
    extension is stripped now.
    """
    for suffix in (".rels", ".conllu"):
        if x.endswith(suffix):
            return x[:-len(suffix)]
    return x
|
113 |
|
114 |
class Dataset(datasets.GeneratorBasedBuilder):
|
115 |
|
116 |
BUILDER_CONFIGS = [
    # One config per corpus and file extension, named "<corpus>.<ext>".
    Config(
        name=f"{corpus}.{ext}",
        data_dir=f"{corpus}.{ext}",
    )
    for corpus in files
    for ext in ["rels", "conllu"]
]
|
122 |
def __init__(self,*args,**kwargs):
|
|
|
132 |
|
133 |
def _split_generators(self, dl_manager: datasets.DownloadManager):
|
134 |
cfg_name = self.config.name.rsplit('.', 1)[0]
|
135 |
+
data_dir = remove_type(self.config.data_dir)
|
136 |
+
print("datadir:",data_dir)
|
137 |
type = self.config.name.split('.')[-1]
|
138 |
urls={k:v for (k,v) in _URLs.items() if cfg_name in k and requests.get(v).status_code!=404}
|
139 |
data_file = dl_manager.download(urls)
|
140 |
train_key = data_dir+'-train'
|
141 |
+
print("datafile:",data_file, self.config.data_dir)
|
142 |
if train_key in data_file:
|
143 |
train=[datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_file[train_key]})]
|
144 |
else:
|
|
|
149 |
]
|
150 |
|
151 |
def _info(self):
    """Return DatasetInfo with the citation for this corpus and, for
    '.conllu' configs, the fixed CoNLL-U feature schema (None for '.rels')."""
    base_name = remove_type(self.config.name)
    citation_key = datasets_and_citations.get(base_name)
    return datasets.DatasetInfo(
        citation=key_to_entry.get(citation_key, None),
        features=None if ".rels" in self.config.name else conllu_features,
    )
|
155 |
|
156 |
def _generate_examples(self, filepath):
|
|
|
158 |
if "conllu" in self.config.name:
|
159 |
stream=parse_conll_stream(f)
|
160 |
for i, row in enumerate(stream):
|
161 |
+
row['seg']=map_seg(row['misc'])
|
162 |
yield i,row
|
163 |
reader = csv.DictReader(f,delimiter='\t',quoting=csv.QUOTE_NONE)
|
164 |
for id_, row in enumerate(reader):
|