Modalities: Text
Formats: parquet
Languages: English
Libraries: Datasets, pandas
albertvillanova committed
Commit 089b5ac (1 parent: faea15e)

Delete loading script

Files changed (1):
  dbpedia_14.py +0 -150
dbpedia_14.py DELETED
@@ -1,150 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""The DBpedia dataset for text classification."""
-
-
-import csv
-
-import datasets
-
-
-# TODO: Add BibTeX citation
-# Find for instance the citation on arxiv or on the dataset repo/website
-_CITATION = """\
-@article{lehmann2015dbpedia,
-  title={DBpedia--a large-scale, multilingual knowledge base extracted from Wikipedia},
-  author={Lehmann, Jens and Isele, Robert and Jakob, Max and Jentzsch, Anja and Kontokostas,
-  Dimitris and Mendes, Pablo N and Hellmann, Sebastian and Morsey, Mohamed and Van Kleef,
-  Patrick and Auer, S{\"o}ren and others},
-  journal={Semantic web},
-  volume={6},
-  number={2},
-  pages={167--195},
-  year={2015},
-  publisher={IOS Press}
-}
-"""
-
-_DESCRIPTION = """\
-The DBpedia ontology classification dataset is constructed by picking 14 non-overlapping classes
-from DBpedia 2014. They are listed in classes.txt. From each of these 14 ontology classes, we
-randomly choose 40,000 training samples and 5,000 testing samples. Therefore, the total size
-of the training dataset is 560,000 and testing dataset 70,000.
-There are 3 columns in the dataset (same for train and test splits), corresponding to class index
-(1 to 14), title and content. The title and content are escaped using double quotes ("), and any
-internal double quote is escaped by 2 double quotes (""). There are no new lines in title or content.
-"""
-
-_HOMEPAGE = "https://wiki.dbpedia.org/develop/datasets"
-
-_LICENSE = "Creative Commons Attribution-ShareAlike 3.0 and the GNU Free Documentation License"
-
-_URLs = {
-    "dbpedia_14": "https://s3.amazonaws.com/fast-ai-nlp/dbpedia_csv.tgz",
-}
-
-
-class DBpedia14Config(datasets.BuilderConfig):
-    """BuilderConfig for DBpedia."""
-
-    def __init__(self, **kwargs):
-        """BuilderConfig for DBpedia.
-
-        Args:
-            **kwargs: keyword arguments forwarded to super.
-        """
-        super(DBpedia14Config, self).__init__(**kwargs)
-
-
-class DBpedia14(datasets.GeneratorBasedBuilder):
-    """DBpedia 2014 Ontology Classification Dataset."""
-
-    VERSION = datasets.Version("2.0.0")
-
-    BUILDER_CONFIGS = [
-        DBpedia14Config(
-            name="dbpedia_14", version=VERSION, description="DBpedia 2014 Ontology Classification Dataset."
-        ),
-    ]
-
-    def _info(self):
-        features = datasets.Features(
-            {
-                "label": datasets.features.ClassLabel(
-                    names=[
-                        "Company",
-                        "EducationalInstitution",
-                        "Artist",
-                        "Athlete",
-                        "OfficeHolder",
-                        "MeanOfTransportation",
-                        "Building",
-                        "NaturalPlace",
-                        "Village",
-                        "Animal",
-                        "Plant",
-                        "Album",
-                        "Film",
-                        "WrittenWork",
-                    ]
-                ),
-                "title": datasets.Value("string"),
-                "content": datasets.Value("string"),
-            }
-        )
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        my_urls = _URLs[self.config.name]
-        archive = dl_manager.download(my_urls)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "filepath": "dbpedia_csv/train.csv",
-                    "files": dl_manager.iter_archive(archive),
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "filepath": "dbpedia_csv/test.csv",
-                    "files": dl_manager.iter_archive(archive),
-                },
-            ),
-        ]
-
-    def _generate_examples(self, filepath, files):
-        """Yields examples."""
-
-        for path, f in files:
-            if path == filepath:
-                lines = (line.decode("utf-8") for line in f)
-                data = csv.reader(lines, delimiter=",", quoting=csv.QUOTE_NONNUMERIC)
-                for id_, row in enumerate(data):
-                    yield id_, {
-                        "title": row[1],
-                        "content": row[2],
-                        "label": int(row[0]) - 1,
-                    }
-                break
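
With the loading script gone, the Hub serves this dataset from the Parquet files noted in the repo metadata, so `load_dataset` no longer executes any Python from the repository. A minimal sketch of loading after this commit, assuming the `dbpedia_14` repo ID (taken from the config name in the deleted script; the canonical Hub ID may differ):

from datasets import load_dataset

# Splits per the deleted script's description: train (560,000), test (70,000).
ds = load_dataset("dbpedia_14")
example = ds["train"][0]
print(example["title"], example["content"][:80])

# "label" is a ClassLabel holding the 14 names from the deleted _info(),
# stored as integers 0-13; int2str recovers the human-readable class name.
label_feature = ds["train"].features["label"]
print(label_feature.int2str(example["label"]))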
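For anyone working from the raw dbpedia_csv.tgz archive instead, the parsing that the deleted _generate_examples performed can be reproduced standalone. A sketch under those assumptions (the local path and helper name are hypothetical):

import csv

def read_dbpedia_csv(path="dbpedia_csv/train.csv"):
    with open(path, encoding="utf-8", newline="") as f:
        # QUOTE_NONNUMERIC makes csv parse the unquoted class index as a float,
        # while the quoted title/content columns (inner quotes doubled, per
        # _DESCRIPTION above) stay strings.
        reader = csv.reader(f, delimiter=",", quoting=csv.QUOTE_NONNUMERIC)
        for class_index, title, content in reader:
            # The class index is 1-14 on disk; the script shifted it to 0-13.
            yield {"label": int(class_index) - 1, "title": title, "content": content}

if __name__ == "__main__":
    first = next(read_dbpedia_csv())
    print(first["label"], first["title"])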