File size: 2,123 Bytes
623e6df
 
012094a
623e6df
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3e5dbd1
165453a
3e5dbd1
63f86ef
3e5dbd1
 
623e6df
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
import datasets
import json
import os

# BibTeX entry for the FOLIO paper (arXiv:2209.00840); exposed on the
# builder config so downstream users can cite the dataset.
citation='''
@article{han2022folio,
  title={FOLIO: Natural Language Reasoning with First-Order Logic},
  author = {Han, Simeng and Schoelkopf, Hailey and Zhao, Yilun and Qi, Zhenting and Riddell, Martin and Benson, Luke and Sun, Lucy and Zubova, Ekaterina and Qiao, Yujie and Burtell, Matthew and Peng, David and Fan, Jonathan and Liu, Yixin and Wong, Brian and Sailor, Malcolm and Ni, Ansong and Nan, Linyong and Kasai, Jungo and Yu, Tao and Zhang, Rui and Joty, Shafiq and Fabbri, Alexander R. and Kryscinski, Wojciech and Lin, Xi Victoria and Xiong, Caiming and Radev, Dragomir},
  journal={arXiv preprint arXiv:2209.00840},
  url = {https://arxiv.org/abs/2209.00840},
  year={2022}
}
'''

class FolioConfig(datasets.BuilderConfig):
    """BuilderConfig for FOLIO that carries the paper's BibTeX citation."""

    citation = citation

files = ["folio-train.jsonl", "folio-validation.jsonl"]

_URLs = {f:f"https://huggingface.co/datasets/metaeval/folio/resolve/main/{f}" for f in files}

class Folio(datasets.GeneratorBasedBuilder):
    """Loader for the FOLIO first-order-logic reasoning dataset.

    Downloads the train/validation JSONL files from the Hugging Face Hub
    and yields one example per line, restricted to the columns of interest.
    """

    BUILDER_CONFIGS = [
            FolioConfig(
                # Config name is the file name without its .jsonl extension.
                name='.'.join(n.split('.')[:-1]),
                data_dir=''
            ) for n in files
    ]

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Download both data files and map them to their splits.

        BUG FIX: `download()` called with a dict returns a dict with the
        same keys (file names here), each mapped to a local *file* path.
        The original code indexed this dict with `self.config.data_dir`
        (always ''), a key that does not exist, and then joined a file
        name onto what is already a file path — so it could never run.
        """
        data_files = dl_manager.download(_URLs)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_files["folio-train.jsonl"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": data_files["folio-validation.jsonl"]},
            ),
        ]

    def _info(self):
        # Attach the citation so it surfaces on the loaded dataset's info
        # instead of being silently dropped.
        return datasets.DatasetInfo(citation=citation)

    def _generate_examples(self, filepath):
        """Yields (id, example) pairs, keeping only the relevant columns."""
        cols = ["conclusion", "premises", "label", "premises-FOL"]
        with open(filepath, "r", encoding="utf-8") as f:
            for id_, line in enumerate(f):
                record = json.loads(line)
                yield id_, {k: v for k, v in record.items() if k in cols}