holylovenia committed on
Commit bfe256a
1 Parent(s): ee10963

Upload unisent.py with huggingface_hub

Files changed (1)
  1. unisent.py +292 -0
unisent.py ADDED
@@ -0,0 +1,292 @@
# coding=utf-8


from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@inproceedings{asgari2020unisent,
  title={UniSent: Universal Adaptable Sentiment Lexica for 1000+ Languages},
  author={Asgari, Ehsaneddin and Braune, Fabienne and Ringlstetter, Christoph and Mofrad, Mohammad RK},
  booktitle={Proceedings of the International Conference on Language Resources and Evaluation (LREC-2020)},
  year={2020},
  organization={European Language Resources Association (ELRA)}
}
"""
_DATASETNAME = "unisent"
_DESCRIPTION = """\
UniSent is a set of universal sentiment lexica for 1000+ languages.
To build UniSent, the authors use a massively parallel Bible
corpus to project sentiment information from English to other
languages for sentiment analysis on Twitter data. 173 of the 1404
languages are spoken in Southeast Asia.
"""
_URLS = "https://raw.githubusercontent.com/ehsanasgari/UniSent/master/unisent_lexica_v1/{}_unisent_lexicon.txt"
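# The URL template is filled with a language code, e.g. _URLS.format("ind")
# points at .../unisent_lexica_v1/ind_unisent_lexicon.txt.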
_HOMEPAGE = "https://github.com/ehsanasgari/UniSent"
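# ISO 639-3 codes of the 173 Southeast Asian languages covered by UniSent.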
_LANGUAGES = [
    'aaz', 'abx', 'ace', 'acn', 'agn', 'agt', 'ahk', 'akb', 'alj', 'alp',
    'amk', 'aoz', 'atb', 'atd', 'att', 'ban', 'bbc', 'bcl', 'bgr', 'bgs',
    'bgz', 'bhp', 'bkd', 'bku', 'blw', 'blz', 'bnj', 'bpr', 'bps', 'bru',
    'btd', 'bth', 'bto', 'bts', 'btx', 'bug', 'bvz', 'bzi', 'cbk', 'ceb',
    'cfm', 'cgc', 'clu', 'cmo', 'cnh', 'cnw', 'csy', 'ctd', 'czt', 'dgc',
    'dtp', 'due', 'duo', 'ebk', 'fil', 'gbi', 'gdg', 'gor', 'heg', 'hil',
    'hlt', 'hnj', 'hnn', 'hvn', 'iba', 'ifa', 'ifb', 'ifk', 'ifu', 'ify',
    'ilo', 'ind', 'iry', 'isd', 'itv', 'ium', 'ivb', 'ivv', 'jav', 'jra',
    'kac', 'khm', 'kix', 'kje', 'kmk', 'kne', 'kqe', 'krj', 'ksc', 'ksw',
    'kxm', 'lao', 'lbk', 'lew', 'lex', 'lhi', 'lhu', 'ljp', 'lsi', 'lus',
    'mad', 'mak', 'mbb', 'mbd', 'mbf', 'mbi', 'mbs', 'mbt', 'mej', 'mkn',
    'mmn', 'mnb', 'mnx', 'mog', 'mqj', 'mqy', 'mrw', 'msb', 'msk', 'msm',
    'mta', 'mtg', 'mtj', 'mvp', 'mwq', 'mwv', 'mya', 'nbe', 'nfa', 'nia',
    'nij', 'nlc', 'npy', 'obo', 'pag', 'pam', 'plw', 'pmf', 'pne', 'ppk',
    'prf', 'prk', 'pse', 'ptu', 'pww', 'sas', 'sbl', 'sda', 'sgb', 'smk',
    'sml', 'sun', 'sxn', 'szb', 'tbl', 'tby', 'tcz', 'tdt', 'tgl', 'tha',
    'tih', 'tlb', 'twu', 'urk', 'vie', 'war', 'whk', 'wrs', 'xbr', 'yli',
    'yva', 'zom', 'zyp',
]

_LICENSE = Licenses.CC_BY_NC_ND_4_0.value  # cc-by-nc-nd-4.0
_LOCAL = False

_SUPPORTED_TASKS = [Tasks.SENTIMENT_ANALYSIS]

_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"


class UniSentDataset(datasets.GeneratorBasedBuilder):
    LABELS = ["NEGATIVE", "POSITIVE"]

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_{lang}_source",
            version=datasets.Version(_SOURCE_VERSION),
            description=_DESCRIPTION,
            schema="source",
            subset_id=f"{_DATASETNAME}_{lang}",
        )
        for lang in _LANGUAGES
    ] + [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_{lang}_seacrowd_text",
            version=datasets.Version(_SEACROWD_VERSION),
            description=_DESCRIPTION,
            schema="seacrowd_text",
            subset_id=f"{_DATASETNAME}_{lang}",
        )
        for lang in _LANGUAGES
    ]
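    # Each language thus yields two configs, e.g. "unisent_ind_source" and
    # "unisent_ind_seacrowd_text" for Indonesian (ind).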

    def _info(self) -> datasets.DatasetInfo:

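        # "source" exposes the raw (word, lexicon-score) pairs; "seacrowd_text"
        # maps them onto the shared SEACrowd text-classification schema.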
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "word": datasets.Value("string"),
                    "lexicon": datasets.Value("string"),
                }
            )
        elif self.config.schema == "seacrowd_text":
            features = schemas.text_features(label_names=self.LABELS)
        else:
            raise Exception(f"Unsupported schema: {self.config.schema}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        lang = self.config.subset_id.split("_")[-1]
        url = _URLS.format(lang)
        data_dir = dl_manager.download_and_extract(url)
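        # The lexicon is a single plain-text file, so download_and_extract
        # simply returns the local path of the downloaded file.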
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir,
                },
            ),
        ]

    def _generate_examples(self, filepath: Path) -> Tuple[int, Dict]:
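        # Each lexicon line holds two tab-separated fields: the word and its
        # polarity score (-1 for negative, +1 for positive).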
        with open(filepath, "r", encoding="utf-8") as filein:
            data_instances = [inst.strip("\n").split("\t") for inst in filein.readlines()]

        for di_idx, data_instance in enumerate(data_instances):
            word, lexicon = data_instance
            if self.config.schema == "source":
                yield di_idx, {"word": word, "lexicon": lexicon}
            elif self.config.schema == "seacrowd_text":
                yield di_idx, {"id": di_idx, "text": word, "label": self.LABELS[self._clip_label(int(lexicon))]}
            else:
                raise Exception(f"Unsupported schema: {self.config.schema}")

    @staticmethod
    def _clip_label(label: int) -> int:
        """
        Original labels are -1, +1.
        Clip the label to 0 or 1 to get the right index into LABELS.
        """
        return 0 if int(label) < 0 else 1
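

# A minimal usage sketch, assuming this script is saved locally as unisent.py,
# the seacrowd utilities are importable, and a `datasets` version that still
# supports loading scripts; the config name picks the Indonesian (ind) subset
# in the SEACrowd text schema.
if __name__ == "__main__":
    dataset = datasets.load_dataset("unisent.py", name="unisent_ind_seacrowd_text")
    print(dataset["train"][0])  # first lexicon entry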