ju-resplande committed
Commit c114718
1 parent: bb36a31

Upload fakebr.py

Files changed (1)
  1. fakebr.py +202 -0
fakebr.py ADDED
@@ -0,0 +1,202 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Fake.br dataset"""
+
+
+ import os
+
+ import datasets
+
+
+ _CITATION = """\
+ @article{silva:20,
+     title = "Towards automatically filtering fake news in Portuguese",
+     journal = "Expert Systems with Applications",
+     volume = "146",
+     pages = "113199",
+     year = "2020",
+     issn = "0957-4174",
+     doi = "https://doi.org/10.1016/j.eswa.2020.113199",
+     url = "http://www.sciencedirect.com/science/article/pii/S0957417420300257",
+     author = "Renato M. Silva and Roney L.S. Santos and Tiago A. Almeida and Thiago A.S. Pardo",
+ }
+ """
+
+
+ _DESCRIPTION = """\
+ Fake.Br Corpus is composed of aligned true and fake news written in Brazilian Portuguese.
+ """
+
+ _HOMEPAGE = "https://github.com/roneysco/Fake.br-Corpus"
+
+ # TODO: Add the license for the dataset here if you can find it
+ _LICENSE = ""
+
+
+ _URL = "https://github.com/roneysco/Fake.br-Corpus/archive/refs/heads/master.zip"
+
+ # Column names in the metadata files: each *-meta.txt stores one value per line, in this order.
+ _METADATA_COLS = [
+     "author",
+     "link",
+     "category",
+     "date of publication",
+     "number of tokens",
+     "number of words without punctuation",
+     "number of types",
+     "number of links inside the news",
+     "number of words in upper case",
+     "number of verbs",
+     "number of subjunctive and imperative verbs",
+     "number of nouns",
+     "number of adjectives",
+     "number of adverbs",
+     "number of modal verbs (mainly auxiliary verbs)",
+     "number of singular first and second personal pronouns",
+     "number of plural first personal pronouns",
+     "number of pronouns",
+     "pausality",
+     "number of characters",
+     "average sentence length",
+     "average word length",
+     "percentage of news with spelling errors",
+     "emotiveness",
+     "diversity",
+ ]
+
+
+ class Fakebr(datasets.GeneratorBasedBuilder):
+     """Fake.Br Corpus is composed of aligned true and fake news written in Brazilian Portuguese."""
+
+     VERSION = datasets.Version("1.0.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name="full_texts",
+             version=VERSION,
+             description="full texts, as collected from their websites",
+         ),
+         datasets.BuilderConfig(
+             name="size_normalized_texts",
+             version=VERSION,
+             description="in each fake-true pair, the longer text is truncated (in number of words) to the size of the shorter text",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "full_texts"
+
+     def _info(self):
+         if self.config.name == "full_texts":
+             features = datasets.Features(
+                 {
+                     "text": datasets.Value("string"),
+                     "label": datasets.ClassLabel(num_classes=2, names=["fake", "true"]),
+                     "author": datasets.Value("string"),
+                     "link": datasets.Value("string"),
+                     "category": datasets.Value("string"),
+                     "date of publication": datasets.Value("string"),
+                     "number of tokens": datasets.Value("int32"),
+                     "number of words without punctuation": datasets.Value("int32"),
+                     "number of types": datasets.Value("int32"),
+                     "number of links inside the news": datasets.Value("int32"),
+                     "number of words in upper case": datasets.Value("int32"),
+                     "number of verbs": datasets.Value("int32"),
+                     "number of subjunctive and imperative verbs": datasets.Value(
+                         "int32"
+                     ),
+                     "number of nouns": datasets.Value("int32"),
+                     "number of adjectives": datasets.Value("int32"),
+                     "number of adverbs": datasets.Value("int32"),
+                     "number of modal verbs (mainly auxiliary verbs)": datasets.Value(
+                         "int32"
+                     ),
+                     "number of singular first and second personal pronouns": datasets.Value(
+                         "int32"
+                     ),
+                     "number of plural first personal pronouns": datasets.Value("int32"),
+                     "number of pronouns": datasets.Value("int32"),
+                     "pausality": datasets.Value("float"),
+                     "number of characters": datasets.Value("int32"),
+                     "average sentence length": datasets.Value("float"),
+                     "average word length": datasets.Value("float"),
+                     "percentage of news with spelling errors": datasets.Value("float"),
+                     "emotiveness": datasets.Value("float"),
+                     "diversity": datasets.Value("float"),
+                 }
+             )
+         elif self.config.name == "size_normalized_texts":
+             features = datasets.Features(
+                 {
+                     "text": datasets.Value("string"),
+                     "label": datasets.ClassLabel(num_classes=2, names=["fake", "true"]),
+                 }
+             )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=("text", "label"),
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         urls = _URL
+         data_dir = dl_manager.download_and_extract(urls)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "data_dir": os.path.join(data_dir, "Fake.br-Corpus-master"),
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, data_dir):
+         config_dir = os.path.join(data_dir, self.config.name)
+
+         for label in ["fake", "true"]:
+             label_dir = os.path.join(config_dir, label)
+
+             for example in sorted(os.listdir(label_dir)):
+                 key = label + "_" + example.replace(".txt", "")
+                 example_path = os.path.join(label_dir, example)
+
+                 with open(example_path, "r", encoding="utf-8") as f:
+                     text = f.read()
+
+                 row = {"text": text, "label": label}
+
+                 if self.config.name == "full_texts":
+                     metadata_path = os.path.join(
+                         config_dir,
+                         f"{label}-meta-information",
+                         example.replace(".txt", "-meta.txt"),
+                     )
+
+                     with open(metadata_path, "r", encoding="utf-8") as f:
+                         metadata = f.read().split("\n")
+
+                     metadata = dict(zip(_METADATA_COLS, metadata))
+
+                     # Missing values are stored as the literal string "None".
+                     if metadata["author"] == "None":
+                         metadata["author"] = ""
+
+                     if metadata["number of links inside the news"] == "None":
+                         metadata["number of links inside the news"] = "0"
+
+                     row.update(metadata)
+
+                 yield key, row
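
A minimal usage sketch (not part of the commit), assuming the uploaded script is available as a local file and a datasets version that still supports script-based loading; newer releases may additionally require trust_remote_code=True:

from datasets import load_dataset

# "fakebr.py" is a local path to the script uploaded above (an assumption;
# a Hub repo id pointing at this repository would work the same way).
full = load_dataset("fakebr.py", "full_texts", split="train")
normalized = load_dataset("fakebr.py", "size_normalized_texts", split="train")

# Metadata columns such as "category" exist only in the full_texts config;
# "label" is a ClassLabel, so it comes back as an integer index.
print(full[0]["label"], full[0]["category"])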