Datasets:

Languages:
Polish
ArXiv:
License:
File size: 2,643 Bytes
4557dcf
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
import json
import os

import datasets

# Module-level logger, obtained via the `datasets` library's logging helper
# so verbosity follows the library-wide configuration.
logger = datasets.logging.get_logger(__name__)

_CORPUS = "corpus"
_QUERIES = "queries"
_QRELS = "qrels"

URL = ""
_URLs = {
    _CORPUS: f"corpus.jsonl",
    _QUERIES: f"queries.jsonl",
    _QRELS: f"qrels/test.tsv",
}


class PuggIr(datasets.GeneratorBasedBuilder):
    """Loader for a BEIR-style information-retrieval dataset.

    Exposes three configurations:
      * ``corpus``  -- JSONL documents with ``_id`` / ``title`` / ``text``.
      * ``queries`` -- JSONL queries with ``_id`` / ``query``.
      * ``qrels``   -- TSV relevance judgments (query-id, corpus-id, score).
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=_CORPUS,
        ),
        datasets.BuilderConfig(
            name=_QUERIES,
        ),
        datasets.BuilderConfig(
            name=_QRELS,
        ),
    ]

    def _info(self):
        """Return the :class:`datasets.DatasetInfo` (feature schema) for the
        active configuration.

        Raises:
            ValueError: if the configuration name is not one of the three
                known subsets (previously this fell through and produced a
                confusing ``NameError`` on ``features``).
        """
        if self.config.name == _CORPUS:
            features = datasets.Features(
                {
                    "_id": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            )
        elif self.config.name == _QUERIES:
            features = datasets.Features(
                {
                    "_id": datasets.Value("string"),
                    "query": datasets.Value("string"),
                }
            )
        elif self.config.name == _QRELS:
            features = datasets.Features(
                {
                    "query-id": datasets.Value("string"),
                    "corpus-id": datasets.Value("string"),
                    "score": datasets.Value("int32"),
                }
            )
        else:
            raise ValueError(f"Unknown configuration name: {self.config.name!r}")

        return datasets.DatasetInfo(
            features=features,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators.

        Downloads (or resolves locally) the single file backing the active
        configuration and exposes it as the ``test`` split.
        """
        my_urls = _URLs[self.config.name]
        data_dir = dl_manager.download_and_extract(my_urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": data_dir},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields examples.

        For the JSONL configurations each non-empty line is one example; for
        qrels the TSV header row is skipped and each row becomes one
        judgment. Blank lines (e.g. a trailing newline at EOF) are skipped
        instead of crashing ``json.loads`` / tuple unpacking.
        """
        if self.config.name in [_CORPUS, _QUERIES]:
            with open(filepath, encoding="utf-8") as f:
                for i, line in enumerate(f):
                    if not line.strip():
                        continue  # Tolerate blank/trailing lines.
                    data = json.loads(line)
                    yield i, data

        elif self.config.name == _QRELS:
            with open(filepath, encoding="utf-8") as f:
                for i, line in enumerate(f):
                    if i == 0:
                        continue  # Skip header
                    row = line.strip()
                    if not row:
                        continue  # Tolerate blank/trailing lines.
                    query_id, corpus_id, score = row.split("\t")
                    yield i, {
                        "query-id": query_id,
                        "corpus-id": corpus_id,
                        "score": int(score),
                    }