azizmatin committed
Commit
e400ca0
1 Parent(s): 9cd78c7

Delete question_answering.py

Files changed (1)
  1. question_answering.py +0 -91
question_answering.py DELETED
@@ -1,91 +0,0 @@
- import json
- import datasets
-
- _DESCRIPTION = """\
- This dataset is a reading-comprehension dataset on Persian Wikipedia.
- The crowd-sourced dataset consists of more than 9,000 entries. Each entry is either an unanswerable question or a question with one or more answers spanning the passage (the context) from which the questioner posed it. Much like SQuAD2.0, the unanswerable questions can be used to build a system that "knows that it doesn't know the answer".
- """
-
- _URL = "https://raw.githubusercontent.com/MatinChangiz/Question_answering/main/dataset/"
- _URLS = {
-     "train": _URL + "pqa_train.json",
-     "test": _URL + "pqa_test.json",
- }
-
- class QuestionAnsweringConfig(datasets.BuilderConfig):
-     """BuilderConfig for dataset."""
-     def __init__(self, **kwargs):
-         """BuilderConfig for dataset.
-         Args:
-             **kwargs: keyword arguments forwarded to super.
-         """
-         super(QuestionAnsweringConfig, self).__init__(**kwargs)
-
- class datasetQA(datasets.GeneratorBasedBuilder):
-     BUILDER_CONFIGS = [
-         QuestionAnsweringConfig(name="dataset_qa", version=datasets.Version("1.0.0"), description="datasetQA version 1"),
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # datasets.features.FeatureConnectors
-             features=datasets.Features(
-                 {
-                     "id": datasets.Value("int32"),
-                     "title": datasets.Value("string"),
-                     "context": datasets.Value("string"),
-                     "question": datasets.Value("string"),
-                     "answers": datasets.features.Sequence(
-                         {
-                             "text": datasets.Value("string"),
-                             "answer_start": datasets.Value("int32"),
-                         }
-                     ),
-                 }
-             ),
-             supervised_keys=None,
-             # Homepage of the dataset for documentation
-             homepage="https://github.com/MatinChangiz/Question_answering",
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         # dl_manager is a datasets.download.DownloadManager that can be used
-         # to download and extract URLs.
-         urls_to_download = _URLS
-         downloaded_files = dl_manager.download_and_extract(urls_to_download)
-
-         return [
-             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
-             # Note: the "test" file is exposed as the VALIDATION split.
-             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["test"]}),
-         ]
-
-     def _generate_examples(self, filepath):
-         """Yields (key, example) tuples from the SQuAD-style JSON file."""
-         with open(filepath, encoding="utf-8") as f:
-             squad = json.load(f)
-             for example in squad["data"]:
-                 title = example.get("title", "").strip()
-                 for paragraph in example["paragraphs"]:
-                     context = paragraph["context"].strip()
-                     for qa in paragraph["qas"]:
-                         question = qa["question"].strip()
-                         id_ = qa["id"]
-                         # Unanswerable questions have an empty "answers" list,
-                         # which yields empty text/answer_start sequences.
-                         answer_starts = [answer["answer_start"] for answer in qa["answers"]]
-                         answers = [answer["text"].strip() for answer in qa["answers"]]
-
-                         yield id_, {
-                             "title": title,
-                             "context": context,
-                             "question": question,
-                             "id": id_,
-                             "answers": {
-                                 "answer_start": answer_starts,
-                                 "text": answers,
-                             },
-                         }
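
For reference, `_generate_examples` above walks a SQuAD-style JSON layout. A minimal sketch of the structure the deleted script expected (field names come from the code above; the values are invented placeholders):

# Sketch of the SQuAD-style JSON that _generate_examples expects.
# Field names come from the code above; values are invented placeholders.
expected = {
    "data": [
        {
            "title": "Some article title",
            "paragraphs": [
                {
                    "context": "The passage text...",
                    "qas": [
                        {
                            "id": 1,
                            "question": "A question about the passage?",
                            # An empty "answers" list marks an unanswerable question.
                            "answers": [{"text": "answer span", "answer_start": 4}],
                        }
                    ],
                }
            ],
        }
    ]
}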
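And a minimal usage sketch, assuming a local copy of the script (the Hub repo id is not shown in this commit, so the path below is hypothetical; older versions of the datasets library load such community scripts directly). Note that, per `_split_generators`, the test file comes back as the `validation` split:

from datasets import load_dataset

# Hypothetical local path to the deleted script.
ds = load_dataset("./question_answering.py", name="dataset_qa")
print(ds["train"][0]["question"])
print(ds["validation"].num_rows)  # the "test" file, per _split_generators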