Datasets

Modalities: Text
Formats: parquet
Languages: English
Libraries: Datasets, pandas
License:
File size: 2,506 Bytes
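
Since the card lists the Datasets library, a minimal loading sketch follows. The repository id "user/disrpt" is a placeholder (the actual Hub path is not shown on this page), and "eng.rst.gum" is one of the configurations defined by the loading script further down.

from datasets import load_dataset

# "user/disrpt" is a placeholder repository id; substitute the real Hub path.
# "eng.rst.gum" is one of the corpus configurations the script defines.
ds = load_dataset("user/disrpt", "eng.rst.gum", trust_remote_code=True)
print(ds)              # splits: validation and test, plus train when available
print(ds["test"][0])   # one relation row as a dict of column name -> value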
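The loading script below defines one configuration per DISRPT corpus. At import time it probes the disrpt/sharedtask2023 repository on GitHub for each corpus's .rels files, keeps the splits that actually exist, and yields one example per relation row.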
import csv

import datasets
import requests

# BibTeX citation for the dataset; left empty in this script.
citation = '''
'''


class Config(datasets.BuilderConfig):
    # Each corpus gets its own config; all share the same (empty) citation.
    citation = citation

# DISRPT 2023 corpora, identified as <language>.<framework>.<corpus>.
files = [
    "eng.dep.covdtb",
    "eng.dep.scidtb",
    "eng.pdtb.pdtb",
    "eng.pdtb.tedm",
    "eng.rst.gum",
    "eng.rst.rstdt",
    "eng.sdrt.stac",
    "deu.rst.pcc",
    "eus.rst.ert",
    "fas.rst.prstc",
    "fra.sdrt.annodis",
    "ita.pdtb.luna",
    "nld.rst.nldt",
    "por.pdtb.crpc",
    "por.pdtb.tedm",
    "por.rst.cstn",
    "rus.rst.rrt",
    "spa.rst.rststb",
    "spa.rst.sctb",
    "tha.pdtb.tdtb",
    "tur.pdtb.tdb",
    "tur.pdtb.tedm",
    "zho.dep.scidtb",
    "zho.pdtb.cdtb",
    "zho.rst.gcdt",
    "zho.rst.sctb",
]

# Corpora from the 2021 edition of the shared task; kept for reference
# but not used by the builder below.
files_2021 = [
    "deu.rst.pcc",
    "eng.pdtb.pdtb",
    "eng.rst.gum",
    "eng.rst.rstdt",
    "eng.sdrt.stac",
    "eus.rst.ert",
    "fas.rst.prstc",
    "fra.sdrt.annodis",
    "nld.rst.nldt",
    "por.rst.cstn",
    "rus.rst.rrt",
    "spa.rst.rststb",
    "spa.rst.sctb",
    "tur.pdtb.tdb",
    "zho.pdtb.cdtb",
    "zho.rst.sctb",
]


# Candidate URLs for every corpus/split combination in the 2023 repository.
_URLs = {f'{task}-{split}': f"https://raw.githubusercontent.com/disrpt/sharedtask2023/main/data/{task}/{task}_{split}.rels"
         for task in files for split in 'train dev test'.split()}
# Probe each URL and keep only the files that exist; some corpora have no train split.
_URLs = {k: v for k, v in _URLs.items() if requests.get(v).status_code != 404}

class Dataset(datasets.GeneratorBasedBuilder):
    # One builder configuration per corpus in `files`.
    BUILDER_CONFIGS = [
        Config(
            name=n,
            data_dir=n,
        ) for n in files
    ]

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        data_file = dl_manager.download(_URLs)
        train_key = self.config.data_dir + '-train'
        # Not every corpus ships a train split, so only emit one when its
        # file was found during the URL probe above.
        if train_key in data_file:
            train = [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_file[train_key]})]
        else:
            train = []
        return train + [
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_file[self.config.data_dir + '-dev']}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_file[self.config.data_dir + '-test']}),
        ]

    def _info(self):
        # Minimal metadata; features are inferred from the generated rows.
        return datasets.DatasetInfo()
        
    def _generate_examples(self, filepath):
        with open(filepath, encoding="utf-8") as f:
            # The .rels files are tab-separated with a header line, which
            # DictReader consumes as field names; every row it yields is data.
            reader = csv.DictReader(f, delimiter='\t')
            for id_, row in enumerate(reader):
                yield id_, row
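
One consequence of building _URLs at import time is that the script issues an HTTP GET for every corpus/split pair (26 corpora times 3 splits) on each load, transferring file bodies just to test for 404s. A lighter variant, sketched below on the assumption that raw.githubusercontent.com answers HEAD requests for raw files, probes without downloading:

import requests

def split_exists(task: str, split: str) -> bool:
    # A HEAD request checks existence without transferring the file body.
    url = (f"https://raw.githubusercontent.com/disrpt/sharedtask2023/"
           f"main/data/{task}/{task}_{split}.rels")
    return requests.head(url, allow_redirects=True).status_code == 200

# Usage: the builder above relies on the same existence check, since
# some corpora lack a train split.
print(split_exists("eng.rst.gum", "train"))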