Modalities: Text
Formats: parquet
Sub-tasks: extractive-qa
Languages: Russian
Libraries: Datasets, pandas
albertvillanova committed
Commit daf6abd
1 parent: ba2117f

Delete loading script

Files changed (1)
sberquad.py +0 -104
sberquad.py DELETED
@@ -1,104 +0,0 @@
- # coding=utf-8
- """SberQUAD: Sber Question Answering Dataset."""
-
- import os
- import json
-
- import datasets
- from datasets.tasks import QuestionAnsweringExtractive
-
-
- logger = datasets.logging.get_logger(__name__)
-
- _CITATION = """\
- @article{Efimov_2020,
-     title={SberQuAD – Russian Reading Comprehension Dataset: Description and Analysis},
-     ISBN={9783030582197},
-     ISSN={1611-3349},
-     url={http://dx.doi.org/10.1007/978-3-030-58219-7_1},
-     DOI={10.1007/978-3-030-58219-7_1},
-     journal={Experimental IR Meets Multilinguality, Multimodality, and Interaction},
-     publisher={Springer International Publishing},
-     author={Efimov, Pavel and Chertok, Andrey and Boytsov, Leonid and Braslavski, Pavel},
-     year={2020},
-     pages={3–15}
- }
- """
-
-
- _DESCRIPTION = """\
- Sber Question Answering Dataset (SberQuAD) is a reading comprehension \
- dataset, consisting of questions posed by crowdworkers on a set of Wikipedia \
- articles, where the answer to every question is a segment of text, or span, \
- from the corresponding reading passage, or the question might be unanswerable. \
- Russian original analogue presented in Sberbank Data Science Journey 2017.
- """
-
- _URLS = {"train": os.path.join("data", "train_v1.0.json.gz"), "dev": os.path.join("data", "dev_v1.0.json.gz"), "test": os.path.join("data", "origin_test.json.gz")}
-
-
- class Sberquad(datasets.GeneratorBasedBuilder):
-     """SberQUAD: Sber Question Answering Dataset. Version 1.0."""
-
-     VERSION = datasets.Version("1.0.0")
-     BUILDER_CONFIGS = [datasets.BuilderConfig(name="sberquad", version=VERSION, description=_DESCRIPTION)]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "id": datasets.Value("int32"),
-                     "title": datasets.Value("string"),
-                     "context": datasets.Value("string"),
-                     "question": datasets.Value("string"),
-                     "answers": datasets.features.Sequence(
-                         {
-                             "text": datasets.Value("string"),
-                             "answer_start": datasets.Value("int32"),
-                         }
-                     ),
-                 }
-             ),
-             supervised_keys=None,
-             homepage="",
-             citation=_CITATION,
-             task_templates=[
-                 QuestionAnsweringExtractive(
-                     question_column="question", context_column="context", answers_column="answers"
-                 )
-             ],
-         )
-
-     def _split_generators(self, dl_manager):
-         downloaded_files = dl_manager.download_and_extract(_URLS)
-         return [
-             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
-             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
-             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
-         ]
-
-     def _generate_examples(self, filepath):
-         """This function returns the examples in the raw (text) form."""
-         logger.info("generating examples from = %s", filepath)
-         key = 0
-         with open(filepath, encoding="utf-8") as f:
-             squad = json.load(f)
-             for article in squad["data"]:
-                 title = article.get("title", "")
-                 for paragraph in article["paragraphs"]:
-                     context = paragraph["context"]
-                     for qa in paragraph["qas"]:
-                         answer_starts = [answer["answer_start"] for answer in qa["answers"]]
-                         answers = [answer["text"] for answer in qa["answers"]]
-                         yield key, {
-                             "title": title,
-                             "context": context,
-                             "question": qa["question"],
-                             "id": qa["id"],
-                             "answers": {
-                                 "answer_start": answer_starts,
-                                 "text": answers,
-                             },
-                         }
-                         key += 1
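
With the script gone, the Hub serves SberQuAD from its Parquet files, and the standard datasets API loads it without executing any repository code. A minimal sketch, assuming the bare repository id "sberquad" (use the fully qualified namespace/sberquad id if the dataset lives under a user or organization):

# Minimal sketch: loading SberQuAD after the loading-script deletion.
# Assumes the repository id "sberquad"; splits and feature schema
# mirror the deleted script (train/validation/test, SQuAD-style fields).
from datasets import load_dataset

ds = load_dataset("sberquad")
print(ds)                 # DatasetDict with train/validation/test splits
example = ds["train"][0]  # fields: id, title, context, question, answers
print(example["question"], example["answers"]["text"])

The Parquet conversion keeps the features declared in _info above, so downstream code that reads answers["text"] and answers["answer_start"] should continue to work unchanged.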