Evelyn18 committed on
Commit 88c3614
1 Parent(s): 4aa2be6

Upload becas.py

Files changed (1)
  1. becas.py +154 -0
becas.py ADDED
@@ -0,0 +1,154 @@
+ from datasets.utils import version
+ """TODO(squad_es): Add a description here."""
+
+
+ import json
+
+ import datasets
+
+
+ # TODO(squad_es): BibTeX citation
+ _CITATION = """\
+ @article{2016arXiv160605250R,
+        author = {Casimiro Pio, Carrino and Marta R., Costa-jussa and Jose A. R., Fonollosa},
+         title = "{Automatic Spanish Translation of the SQuAD Dataset for Multilingual
+                  Question Answering}",
+       journal = {arXiv e-prints},
+          year = 2019,
+           eid = {arXiv:1912.05200v1},
+         pages = {arXiv:1912.05200v1},
+ archivePrefix = {arXiv},
+        eprint = {1912.05200v2},
+ }
+ """
+
+ # TODO(squad_es_v1):
+ _DESCRIPTION = """\
+ automatic translation of the Stanford Question Answering Dataset (SQuAD) v2 into Spanish
+ """
+
+ _URL = "https://raw.githubusercontent.com/Leo646/Posgrados/master/"
+ _URLS_V1 = {
+     "train": _URL + "Squad_posgrados/train.json"
+     # "dev": _URL + "SQuAD-es-v1.1/dev-v1.1-es.json",
+ }
+ # _URLS_V2 = {
+ #     "train": _URL + "SQuAD-es-v2.0/train-v2.0-es.json",
+ #     "dev": _URL + "SQuAD-es-v2.0/dev-v2.0-es.json",
+ # }
+
+
+ class SquadEsConfig(datasets.BuilderConfig):
+     """BuilderConfig for SQUADEsV2."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for SQUADEsV2.
+         Args:
+           **kwargs: keyword arguments forwarded to super.
+         """
+         super(SquadEsConfig, self).__init__(**kwargs)
+
+
+ class SquadEs(datasets.GeneratorBasedBuilder):
+     """TODO(squad_es): Short description of my dataset."""
+
+     # TODO(squad_es): Set up version.
+     VERSION = datasets.Version("0.1.0")
+     BUILDER_CONFIGS = [
+         SquadEsConfig(
+             name="v1.1.0",
+             version=datasets.Version("1.1.0", ""),
+             description="Plain text Spanish squad version 1",
+         ),
+         # SquadEsConfig(
+         #     name="v2.0.0",
+         #     version=datasets.Version("2.0.0", ""),
+         #     description="Plain text Spanish squad version 2",
+         # ),
+     ]
+
+     def _info(self):
+         # TODO(squad_es): Specifies the datasets.DatasetInfo object
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # datasets.features.FeatureConnectors
+             features=datasets.Features(
+                 {
+
+                     # These are the features of your dataset like images, labels ...
+                     "id": datasets.Value("string"),
+                     "title": datasets.Value("string"),
+                     "context": datasets.Value("string"),
+                     "question": datasets.Value("string"),
+                     "answers": datasets.features.Sequence(
+                         {
+                             "text": datasets.Value("string"),
+                             "answer_start": datasets.Value("int32"),
+                         }
+                     ),
+                 }
+             ),
+             # If there's a common (input, target) tuple from the features,
+             # specify them here. They'll be used if as_supervised=True in
+             # builder.as_dataset.
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage="https://github.com/Leo646/Posgrados/tree/master/",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # TODO(squad_es): Downloads the data and defines the splits
+         # dl_manager is a datasets.download.DownloadManager that can be used to
+
+         # download and extract URLs
+         if self.config.name == "v1.1.0":
+             dl_dir = dl_manager.download_and_extract(_URLS_V1)
+             print(dl_dir)
+         # elif self.config.name == "v2.0.0":
+         #     dl_dir = dl_manager.download_and_extract(_URLS_V2)
+         else:
+             raise Exception("version does not match any existing one")
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"filepath": dl_dir["train"]},
+             ),
+             # datasets.SplitGenerator(
+             #     name=datasets.Split.VALIDATION,
+             #     These kwargs will be passed to _generate_examples
+             #     gen_kwargs={"filepath": dl_dir["dev"]},
+             # ),
+         ]
+
+     def _generate_examples(self, filepath):
+         """Yields examples."""
+         # TODO(squad_es): Yields (key, example) tuples from the dataset
+         with open(filepath, encoding="utf-8") as f:
+             data = json.load(f)
+             for example in data["data"]:
+                 title = example.get("title", "").strip()
+                 for paragraph in example["paragraphs"]:
+                     context = paragraph["context"].strip()
+                     for qa in paragraph["qas"]:
+                         question = qa["question"].strip()
+                         id_ = qa["id"]
+
+                         answer_starts = [answer["answer_start"] for answer in qa["answers"]]
+                         answers = [answer["text"].strip() for answer in qa["answers"]]
+
+                         yield id_, {
+                             "title": title,
+                             "context": context,
+                             "question": question,
+                             "id": id_,
+                             "answers": {
+                                 "answer_start": answer_starts,
+                                 "text": answers,
+
+                             },
+
+                         }
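For reference, a minimal usage sketch (not part of the uploaded file): with becas.py in the repository, the single "v1.1.0" config defined above can be loaded through the Hugging Face datasets library. The local script path below is illustrative.

# Hypothetical usage sketch: load the "v1.1.0" config of this loading script.
from datasets import load_dataset

becas = load_dataset("./becas.py", "v1.1.0")  # illustrative path to the script uploaded in this commit
print(becas["train"][0])  # {"id": ..., "title": ..., "context": ..., "question": ..., "answers": {...}}

Only the train split is produced, since the dev URL and the v2.0.0 config are commented out in the script.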