parquet-converter
committed on
Commit • cfe8ab5
1 Parent(s): 7c06415
Update parquet files
Browse files
- .gitattributes +0 -27
- README.md +0 -987
- dataset_infos.json +0 -1
- emoji/tweet_eval-test.parquet +3 -0
- emoji/tweet_eval-train.parquet +3 -0
- emoji/tweet_eval-validation.parquet +3 -0
- emotion/tweet_eval-test.parquet +3 -0
- emotion/tweet_eval-train.parquet +3 -0
- emotion/tweet_eval-validation.parquet +3 -0
- hate/tweet_eval-test.parquet +3 -0
- hate/tweet_eval-train.parquet +3 -0
- hate/tweet_eval-validation.parquet +3 -0
- irony/tweet_eval-test.parquet +3 -0
- irony/tweet_eval-train.parquet +3 -0
- irony/tweet_eval-validation.parquet +3 -0
- offensive/tweet_eval-test.parquet +3 -0
- offensive/tweet_eval-train.parquet +3 -0
- offensive/tweet_eval-validation.parquet +3 -0
- sentiment/tweet_eval-test.parquet +3 -0
- sentiment/tweet_eval-train.parquet +3 -0
- sentiment/tweet_eval-validation.parquet +3 -0
- stance_abortion/tweet_eval-test.parquet +3 -0
- stance_abortion/tweet_eval-train.parquet +3 -0
- stance_abortion/tweet_eval-validation.parquet +3 -0
- stance_atheism/tweet_eval-test.parquet +3 -0
- stance_atheism/tweet_eval-train.parquet +3 -0
- stance_atheism/tweet_eval-validation.parquet +3 -0
- stance_climate/tweet_eval-test.parquet +3 -0
- stance_climate/tweet_eval-train.parquet +3 -0
- stance_climate/tweet_eval-validation.parquet +3 -0
- stance_feminist/tweet_eval-test.parquet +3 -0
- stance_feminist/tweet_eval-train.parquet +3 -0
- stance_feminist/tweet_eval-validation.parquet +3 -0
- stance_hillary/tweet_eval-test.parquet +3 -0
- stance_hillary/tweet_eval-train.parquet +3 -0
- stance_hillary/tweet_eval-validation.parquet +3 -0
- tweet_eval.py +0 -249
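
With this conversion, each config/split pair listed above is a standalone Parquet file, so the data can be read without the deleted loading script. A minimal illustrative sketch, assuming the files have been downloaded locally and that pandas with pyarrow is installed:

```
# Read one of the new Parquet splits directly (path taken from the listing above).
import pandas as pd

train = pd.read_parquet("emotion/tweet_eval-train.parquet")
print(train.columns.tolist())   # expected columns: ['text', 'label']
print(train.head())
```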
.gitattributes
DELETED
@@ -1,27 +0,0 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zstandard filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
DELETED
@@ -1,987 +0,0 @@
---
annotations_creators:
- found
language_creators:
- found
language:
- en
license:
- unknown
multilinguality:
- monolingual
size_categories:
- 100K<n<1M
- 10K<n<100K
- 1K<n<10K
- n<1K
source_datasets:
- extended|other-tweet-datasets
task_categories:
- text-classification
task_ids:
- intent-classification
- multi-class-classification
- sentiment-classification
paperswithcode_id: tweeteval
pretty_name: TweetEval
train-eval-index:
- config: emotion
  task: text-classification
  task_id: multi_class_classification
  splits:
    train_split: train
    eval_split: test
  col_mapping:
    text: text
    label: target
  metrics:
  - type: accuracy
    name: Accuracy
  - type: f1
    name: F1 macro
    args:
      average: macro
  - type: f1
    name: F1 micro
    args:
      average: micro
  - type: f1
    name: F1 weighted
    args:
      average: weighted
  - type: precision
    name: Precision macro
    args:
      average: macro
  - type: precision
    name: Precision micro
    args:
      average: micro
  - type: precision
    name: Precision weighted
    args:
      average: weighted
  - type: recall
    name: Recall macro
    args:
      average: macro
  - type: recall
    name: Recall micro
    args:
      average: micro
  - type: recall
    name: Recall weighted
    args:
      average: weighted
- config: hate
  task: text-classification
  task_id: binary_classification
  splits:
    train_split: train
    eval_split: test
  col_mapping:
    text: text
    label: target
  metrics:
  - type: accuracy
    name: Accuracy
  - type: f1
    name: F1 binary
    args:
      average: binary
  - type: precision
    name: Precision macro
    args:
      average: macro
  - type: precision
    name: Precision micro
    args:
      average: micro
  - type: precision
    name: Precision weighted
    args:
      average: weighted
  - type: recall
    name: Recall macro
    args:
      average: macro
  - type: recall
    name: Recall micro
    args:
      average: micro
  - type: recall
    name: Recall weighted
    args:
      average: weighted
- config: irony
  task: text-classification
  task_id: binary_classification
  splits:
    train_split: train
    eval_split: test
  col_mapping:
    text: text
    label: target
  metrics:
  - type: accuracy
    name: Accuracy
  - type: f1
    name: F1 binary
    args:
      average: binary
  - type: precision
    name: Precision macro
    args:
      average: macro
  - type: precision
    name: Precision micro
    args:
      average: micro
  - type: precision
    name: Precision weighted
    args:
      average: weighted
  - type: recall
    name: Recall macro
    args:
      average: macro
  - type: recall
    name: Recall micro
    args:
      average: micro
  - type: recall
    name: Recall weighted
    args:
      average: weighted
- config: offensive
  task: text-classification
  task_id: binary_classification
  splits:
    train_split: train
    eval_split: test
  col_mapping:
    text: text
    label: target
  metrics:
  - type: accuracy
    name: Accuracy
  - type: f1
    name: F1 binary
    args:
      average: binary
  - type: precision
    name: Precision macro
    args:
      average: macro
  - type: precision
    name: Precision micro
    args:
      average: micro
  - type: precision
    name: Precision weighted
    args:
      average: weighted
  - type: recall
    name: Recall macro
    args:
      average: macro
  - type: recall
    name: Recall micro
    args:
      average: micro
  - type: recall
    name: Recall weighted
    args:
      average: weighted
- config: sentiment
  task: text-classification
  task_id: multi_class_classification
  splits:
    train_split: train
    eval_split: test
  col_mapping:
    text: text
    label: target
  metrics:
  - type: accuracy
    name: Accuracy
  - type: f1
    name: F1 macro
    args:
      average: macro
  - type: f1
    name: F1 micro
    args:
      average: micro
  - type: f1
    name: F1 weighted
    args:
      average: weighted
  - type: precision
    name: Precision macro
    args:
      average: macro
  - type: precision
    name: Precision micro
    args:
      average: micro
  - type: precision
    name: Precision weighted
    args:
      average: weighted
  - type: recall
    name: Recall macro
    args:
      average: macro
  - type: recall
    name: Recall micro
    args:
      average: micro
  - type: recall
    name: Recall weighted
    args:
      average: weighted
configs:
- emoji
- emotion
- hate
- irony
- offensive
- sentiment
- stance_abortion
- stance_atheism
- stance_climate
- stance_feminist
- stance_hillary
dataset_info:
- config_name: emoji
  features:
  - name: text
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          0: ❤
          1: 😍
          2: 😂
          3: 💕
          4: 🔥
          5: 😊
          6: 😎
          7: ✨
          8: 💙
          9: 😘
          10: 📷
          11: 🇺🇸
          12: ☀
          13: 💜
          14: 😉
          15: 💯
          16: 😁
          17: 🎄
          18: 📸
          19: 😜
  splits:
  - name: train
    num_bytes: 3803187
    num_examples: 45000
  - name: test
    num_bytes: 4255921
    num_examples: 50000
  - name: validation
    num_bytes: 396083
    num_examples: 5000
  download_size: 7628721
  dataset_size: 8455191
- config_name: emotion
  features:
  - name: text
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          0: anger
          1: joy
          2: optimism
          3: sadness
  splits:
  - name: train
    num_bytes: 338875
    num_examples: 3257
  - name: test
    num_bytes: 146649
    num_examples: 1421
  - name: validation
    num_bytes: 38277
    num_examples: 374
  download_size: 483813
  dataset_size: 523801
- config_name: hate
  features:
  - name: text
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          0: non-hate
          1: hate
  splits:
  - name: train
    num_bytes: 1223654
    num_examples: 9000
  - name: test
    num_bytes: 428938
    num_examples: 2970
  - name: validation
    num_bytes: 154148
    num_examples: 1000
  download_size: 1703208
  dataset_size: 1806740
- config_name: irony
  features:
  - name: text
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          0: non_irony
          1: irony
  splits:
  - name: train
    num_bytes: 259191
    num_examples: 2862
  - name: test
    num_bytes: 75901
    num_examples: 784
  - name: validation
    num_bytes: 86021
    num_examples: 955
  download_size: 385613
  dataset_size: 421113
- config_name: offensive
  features:
  - name: text
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          0: non-offensive
          1: offensive
  splits:
  - name: train
    num_bytes: 1648069
    num_examples: 11916
  - name: test
    num_bytes: 135477
    num_examples: 860
  - name: validation
    num_bytes: 192421
    num_examples: 1324
  download_size: 1863383
  dataset_size: 1975967
- config_name: sentiment
  features:
  - name: text
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          0: negative
          1: neutral
          2: positive
  splits:
  - name: train
    num_bytes: 5425142
    num_examples: 45615
  - name: test
    num_bytes: 1279548
    num_examples: 12284
  - name: validation
    num_bytes: 239088
    num_examples: 2000
  download_size: 6465841
  dataset_size: 6943778
- config_name: stance_abortion
  features:
  - name: text
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          0: none
          1: against
          2: favor
  splits:
  - name: train
    num_bytes: 68698
    num_examples: 587
  - name: test
    num_bytes: 33175
    num_examples: 280
  - name: validation
    num_bytes: 7661
    num_examples: 66
  download_size: 102062
  dataset_size: 109534
- config_name: stance_atheism
  features:
  - name: text
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          0: none
          1: against
          2: favor
  splits:
  - name: train
    num_bytes: 54779
    num_examples: 461
  - name: test
    num_bytes: 25720
    num_examples: 220
  - name: validation
    num_bytes: 6324
    num_examples: 52
  download_size: 80947
  dataset_size: 86823
- config_name: stance_climate
  features:
  - name: text
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          0: none
          1: against
          2: favor
  splits:
  - name: train
    num_bytes: 40253
    num_examples: 355
  - name: test
    num_bytes: 19929
    num_examples: 169
  - name: validation
    num_bytes: 4805
    num_examples: 40
  download_size: 60463
  dataset_size: 64987
- config_name: stance_feminist
  features:
  - name: text
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          0: none
          1: against
          2: favor
  splits:
  - name: train
    num_bytes: 70513
    num_examples: 597
  - name: test
    num_bytes: 33309
    num_examples: 285
  - name: validation
    num_bytes: 8039
    num_examples: 67
  download_size: 104257
  dataset_size: 111861
- config_name: stance_hillary
  features:
  - name: text
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          0: none
          1: against
          2: favor
  splits:
  - name: train
    num_bytes: 69600
    num_examples: 620
  - name: test
    num_bytes: 34491
    num_examples: 295
  - name: validation
    num_bytes: 7536
    num_examples: 69
  download_size: 103745
  dataset_size: 111627
---

# Dataset Card for tweet_eval

## Table of Contents
- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
  - [Languages](#languages)
- [Dataset Structure](#dataset-structure)
  - [Data Instances](#data-instances)
  - [Data Fields](#data-fields)
  - [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
  - [Curation Rationale](#curation-rationale)
  - [Source Data](#source-data)
  - [Annotations](#annotations)
  - [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
  - [Social Impact of Dataset](#social-impact-of-dataset)
  - [Discussion of Biases](#discussion-of-biases)
  - [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
  - [Dataset Curators](#dataset-curators)
  - [Licensing Information](#licensing-information)
  - [Citation Information](#citation-information)
  - [Contributions](#contributions)

## Dataset Description

- **Homepage:** [Needs More Information]
- **Repository:** [GitHub](https://github.com/cardiffnlp/tweeteval)
- **Paper:** [EMNLP Paper](https://arxiv.org/pdf/2010.12421.pdf)
- **Leaderboard:** [GitHub Leaderboard](https://github.com/cardiffnlp/tweeteval)
- **Point of Contact:** [Needs More Information]

### Dataset Summary

TweetEval consists of seven heterogeneous tasks in Twitter, all framed as multi-class tweet classification. The tasks include irony, hate, offensive, stance, emoji, emotion, and sentiment. All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.

### Supported Tasks and Leaderboards

- `text_classification`: each config can be used to train a sequence classification model from HuggingFace `transformers` (see the sketch below).

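The following is an illustrative sketch of that workflow, not part of the original card: it assumes the dataset is hosted on the Hub under the id `tweet_eval`, that `datasets` and `transformers` are installed, and it uses `bert-base-uncased` purely as an example checkpoint.

```
# Hypothetical fine-tuning setup for one TweetEval config.
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForSequenceClassification

dataset = load_dataset("tweet_eval", "emotion")               # assumed Hub id
num_labels = dataset["train"].features["label"].num_classes   # 4 for the emotion config

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained(
    "bert-base-uncased", num_labels=num_labels
)

# Tokenize the tweets; the encoded dataset can then be passed to
# transformers' Trainer or to a custom training loop.
encoded = dataset.map(
    lambda batch: tokenizer(batch["text"], truncation=True),
    batched=True,
)
```
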
### Languages

The text in the dataset is in English, as spoken by Twitter users.

## Dataset Structure

### Data Instances

An instance from `emoji` config:

```
{'label': 12, 'text': 'Sunday afternoon walking through Venice in the sun with @user ️ ️ ️ @ Abbot Kinney, Venice'}
```

An instance from `emotion` config:

```
{'label': 2, 'text': "“Worry is a down payment on a problem you may never have'. \xa0Joyce Meyer. #motivation #leadership #worry"}
```

An instance from `hate` config:

```
{'label': 0, 'text': '@user nice new signage. Are you not concerned by Beatlemania -style hysterical crowds crongregating on you…'}
```

An instance from `irony` config:

```
{'label': 1, 'text': 'seeing ppl walking w/ crutches makes me really excited for the next 3 weeks of my life'}
```

An instance from `offensive` config:

```
{'label': 0, 'text': '@user Bono... who cares. Soon people will understand that they gain nothing from following a phony celebrity. Become a Leader of your people instead or help and support your fellow countrymen.'}
```

An instance from `sentiment` config:

```
{'label': 2, 'text': '"QT @user In the original draft of the 7th book, Remus Lupin survived the Battle of Hogwarts. #HappyBirthdayRemusLupin"'}
```

An instance from `stance_abortion` config:

```
{'label': 1, 'text': 'we remind ourselves that love means to be willing to give until it hurts - Mother Teresa'}
```

An instance from `stance_atheism` config:

```
{'label': 1, 'text': '@user Bless Almighty God, Almighty Holy Spirit and the Messiah. #SemST'}
```

An instance from `stance_climate` config:

```
{'label': 0, 'text': 'Why Is The Pope Upset? via @user #UnzippedTruth #PopeFrancis #SemST'}
```

An instance from `stance_feminist` config:

```
{'label': 1, 'text': "@user @user is the UK's answer to @user and @user #GamerGate #SemST"}
```

An instance from `stance_hillary` config:

```
{'label': 1, 'text': "If a man demanded staff to get him an ice tea he'd be called a sexists elitist pig.. Oink oink #Hillary #SemST"}
```

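A minimal sketch of how such instances can be inspected with the `datasets` library (illustrative only, assuming the Hub id `tweet_eval`):

```
# Load one config and look at a single example and its feature types.
from datasets import load_dataset

emoji = load_dataset("tweet_eval", "emoji")   # assumed Hub id
print(emoji["train"][0])                      # {'text': ..., 'label': ...}
print(emoji["train"].features)                # text: string, label: 20-way ClassLabel
```
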
### Data Fields
For `emoji` config:

- `text`: a `string` feature containing the tweet.

- `label`: an `int` classification label with the following mapping:

`0`: ❤

`1`: 😍

`2`: 😂

`3`: 💕

`4`: 🔥

`5`: 😊

`6`: 😎

`7`: ✨

`8`: 💙

`9`: 😘

`10`: 📷

`11`: 🇺🇸

`12`: ☀

`13`: 💜

`14`: 😉

`15`: 💯

`16`: 😁

`17`: 🎄

`18`: 📸

`19`: 😜

For `emotion` config:

- `text`: a `string` feature containing the tweet.

- `label`: an `int` classification label with the following mapping:

`0`: anger

`1`: joy

`2`: optimism

`3`: sadness

For `hate` config:

- `text`: a `string` feature containing the tweet.

- `label`: an `int` classification label with the following mapping:

`0`: non-hate

`1`: hate

For `irony` config:

- `text`: a `string` feature containing the tweet.

- `label`: an `int` classification label with the following mapping:

`0`: non_irony

`1`: irony

For `offensive` config:

- `text`: a `string` feature containing the tweet.

- `label`: an `int` classification label with the following mapping:

`0`: non-offensive

`1`: offensive

For `sentiment` config:

- `text`: a `string` feature containing the tweet.

- `label`: an `int` classification label with the following mapping:

`0`: negative

`1`: neutral

`2`: positive

For `stance_abortion` config:

- `text`: a `string` feature containing the tweet.

- `label`: an `int` classification label with the following mapping:

`0`: none

`1`: against

`2`: favor

For `stance_atheism` config:

- `text`: a `string` feature containing the tweet.

- `label`: an `int` classification label with the following mapping:

`0`: none

`1`: against

`2`: favor

For `stance_climate` config:

- `text`: a `string` feature containing the tweet.

- `label`: an `int` classification label with the following mapping:

`0`: none

`1`: against

`2`: favor

For `stance_feminist` config:

- `text`: a `string` feature containing the tweet.

- `label`: an `int` classification label with the following mapping:

`0`: none

`1`: against

`2`: favor

For `stance_hillary` config:

- `text`: a `string` feature containing the tweet.

- `label`: an `int` classification label with the following mapping:

`0`: none

`1`: against

`2`: favor

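These integer-to-name mappings are stored in each config's `ClassLabel` feature, so they can also be recovered programmatically; an illustrative sketch, again assuming the Hub id `tweet_eval`:

```
# Convert between integer labels and label names for one config.
from datasets import load_dataset

hate = load_dataset("tweet_eval", "hate")      # assumed Hub id
label = hate["train"].features["label"]
print(label.names)                             # ['non-hate', 'hate']
print(label.int2str(1))                        # 'hate'
print(label.str2int("non-hate"))               # 0
```
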
### Data Splits

| name            | train | validation | test  |
| --------------- | ----- | ---------- | ----- |
| emoji           | 45000 | 5000       | 50000 |
| emotion         | 3257  | 374        | 1421  |
| hate            | 9000  | 1000       | 2970  |
| irony           | 2862  | 955        | 784   |
| offensive       | 11916 | 1324       | 860   |
| sentiment       | 45615 | 2000       | 12284 |
| stance_abortion | 587   | 66         | 280   |
| stance_atheism  | 461   | 52         | 220   |
| stance_climate  | 355   | 40         | 169   |
| stance_feminist | 597   | 67         | 285   |
| stance_hillary  | 620   | 69         | 295   |

## Dataset Creation

### Curation Rationale

[Needs More Information]

### Source Data

#### Initial Data Collection and Normalization

[Needs More Information]

#### Who are the source language producers?

[Needs More Information]

### Annotations

#### Annotation process

[Needs More Information]

#### Who are the annotators?

[Needs More Information]

### Personal and Sensitive Information

[Needs More Information]

## Considerations for Using the Data

### Social Impact of Dataset

[Needs More Information]

### Discussion of Biases

[Needs More Information]

### Other Known Limitations

[Needs More Information]

## Additional Information

### Dataset Curators

Francesco Barbieri, Jose Camacho-Collados, Luis Espinosa-Anke and Leonardo Neves through Cardiff NLP.

### Licensing Information

This is not a single dataset; therefore, each subset has its own license (the collection itself does not have additional restrictions).

All of the datasets require complying with the Twitter [Terms Of Service](https://twitter.com/tos) and the Twitter API [Terms Of Service](https://developer.twitter.com/en/developer-terms/agreement-and-policy).

Additionally, the licenses are:
- emoji: Undefined
- emotion (EmoInt): Undefined
- hate (HateEval): Need permission [here](http://hatespeech.di.unito.it/hateval.html)
- irony: Undefined
- Offensive: Undefined
- Sentiment: [Creative Commons Attribution 3.0 Unported License](https://groups.google.com/g/semevaltweet/c/k5DDcvVb_Vo/m/zEOdECFyBQAJ)
- Stance: Undefined

### Citation Information

```
@inproceedings{barbieri2020tweeteval,
  title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},
  author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},
  booktitle={Proceedings of Findings of EMNLP},
  year={2020}
}
```

If you use any of the TweetEval datasets, please cite their original publications:

#### Emotion Recognition:
```
@inproceedings{mohammad2018semeval,
  title={Semeval-2018 task 1: Affect in tweets},
  author={Mohammad, Saif and Bravo-Marquez, Felipe and Salameh, Mohammad and Kiritchenko, Svetlana},
  booktitle={Proceedings of the 12th international workshop on semantic evaluation},
  pages={1--17},
  year={2018}
}
```

#### Emoji Prediction:
```
@inproceedings{barbieri2018semeval,
  title={Semeval 2018 task 2: Multilingual emoji prediction},
  author={Barbieri, Francesco and Camacho-Collados, Jose and Ronzano, Francesco and Espinosa-Anke, Luis and Ballesteros, Miguel and Basile, Valerio and Patti, Viviana and Saggion, Horacio},
  booktitle={Proceedings of The 12th International Workshop on Semantic Evaluation},
  pages={24--33},
  year={2018}
}
```

#### Irony Detection:
```
@inproceedings{van2018semeval,
  title={Semeval-2018 task 3: Irony detection in english tweets},
  author={Van Hee, Cynthia and Lefever, Els and Hoste, V{\'e}ronique},
  booktitle={Proceedings of The 12th International Workshop on Semantic Evaluation},
  pages={39--50},
  year={2018}
}
```

#### Hate Speech Detection:
```
@inproceedings{basile-etal-2019-semeval,
  title = "{S}em{E}val-2019 Task 5: Multilingual Detection of Hate Speech Against Immigrants and Women in {T}witter",
  author = "Basile, Valerio and Bosco, Cristina and Fersini, Elisabetta and Nozza, Debora and Patti, Viviana and Rangel Pardo, Francisco Manuel and Rosso, Paolo and Sanguinetti, Manuela",
  booktitle = "Proceedings of the 13th International Workshop on Semantic Evaluation",
  year = "2019",
  address = "Minneapolis, Minnesota, USA",
  publisher = "Association for Computational Linguistics",
  url = "https://www.aclweb.org/anthology/S19-2007",
  doi = "10.18653/v1/S19-2007",
  pages = "54--63"
}
```

#### Offensive Language Identification:
```
@inproceedings{zampieri2019semeval,
  title={SemEval-2019 Task 6: Identifying and Categorizing Offensive Language in Social Media (OffensEval)},
  author={Zampieri, Marcos and Malmasi, Shervin and Nakov, Preslav and Rosenthal, Sara and Farra, Noura and Kumar, Ritesh},
  booktitle={Proceedings of the 13th International Workshop on Semantic Evaluation},
  pages={75--86},
  year={2019}
}
```

#### Sentiment Analysis:
```
@inproceedings{rosenthal2017semeval,
  title={SemEval-2017 task 4: Sentiment analysis in Twitter},
  author={Rosenthal, Sara and Farra, Noura and Nakov, Preslav},
  booktitle={Proceedings of the 11th international workshop on semantic evaluation (SemEval-2017)},
  pages={502--518},
  year={2017}
}
```

#### Stance Detection:
```
@inproceedings{mohammad2016semeval,
  title={Semeval-2016 task 6: Detecting stance in tweets},
  author={Mohammad, Saif and Kiritchenko, Svetlana and Sobhani, Parinaz and Zhu, Xiaodan and Cherry, Colin},
  booktitle={Proceedings of the 10th International Workshop on Semantic Evaluation (SemEval-2016)},
  pages={31--41},
  year={2016}
}
```

### Contributions

Thanks to [@gchhablani](https://github.com/gchhablani) and [@abhishekkrthakur](https://github.com/abhishekkrthakur) for adding this dataset.
dataset_infos.json
DELETED
@@ -1 +0,0 @@
{"emoji": {"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n", "citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n", "homepage": "https://github.com/cardiffnlp/tweeteval", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 20, "names": ["\u2764", "\ud83d\ude0d", "\ud83d\ude02", "\ud83d\udc95", "\ud83d\udd25", "\ud83d\ude0a", "\ud83d\ude0e", "\u2728", "\ud83d\udc99", "\ud83d\ude18", "\ud83d\udcf7", "\ud83c\uddfa\ud83c\uddf8", "\u2600", "\ud83d\udc9c", "\ud83d\ude09", "\ud83d\udcaf", "\ud83d\ude01", "\ud83c\udf84", "\ud83d\udcf8", "\ud83d\ude1c"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": " tweet_eval", "config_name": "emoji", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3803187, "num_examples": 45000, "dataset_name": " tweet_eval"}, "test": {"name": "test", "num_bytes": 4255921, "num_examples": 50000, "dataset_name": " tweet_eval"}, "validation": {"name": "validation", "num_bytes": 396083, "num_examples": 5000, "dataset_name": " tweet_eval"}}, "download_checksums": {"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/emoji/train_text.txt": {"num_bytes": 3353167, "checksum": "eacb6b0ee1fe2803d72a009c2e731fe07659f604318a979951d2f07c23c564a1"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/emoji/train_labels.txt": {"num_bytes": 102760, "checksum": "daee7da826683dbfa50ad3a29c60bc527e498f06c70eabee3745a99cc37ab3a5"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/emoji/test_text.txt": {"num_bytes": 3705901, "checksum": "e4de11de1597842c431dd67868e83322f5a432564dfd8558889ed8ac6a1a5e09"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/emoji/test_labels.txt": {"num_bytes": 114435, "checksum": "c1662b84788f36674ab8f0106f3e2e7d3e258ddf4959086ac7cc75b1e68dd1f6"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/emoji/val_text.txt": {"num_bytes": 341079, "checksum": "3bc3742d6af404cea792671878684d110f3bc02fd79a2e34643789a521d81a26"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/emoji/val_labels.txt": {"num_bytes": 11379, "checksum": "21ba456f688668d049ff0fb1fa04469ee684cf4e2467c71d2c3fe5ca2ba1bd1a"}}, "download_size": 7628721, "post_processing_size": null, "dataset_size": 8455191, "size_in_bytes": 16083912}, "emotion": {"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. 
All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n", "citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n", "homepage": "https://github.com/cardiffnlp/tweeteval", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 4, "names": ["anger", "joy", "optimism", "sadness"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": " tweet_eval", "config_name": "emotion", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 338875, "num_examples": 3257, "dataset_name": " tweet_eval"}, "test": {"name": "test", "num_bytes": 146649, "num_examples": 1421, "dataset_name": " tweet_eval"}, "validation": {"name": "validation", "num_bytes": 38277, "num_examples": 374, "dataset_name": " tweet_eval"}}, "download_checksums": {"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/emotion/train_text.txt": {"num_bytes": 306630, "checksum": "2c62f67aeb3eac1aea0e5a9c3d0f4bc337992581f3f858061786a1fb4d79d95e"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/emotion/train_labels.txt": {"num_bytes": 6514, "checksum": "987e767d8679e18abdf7de37a6d2bcd0a40a296ddd704e8d515cf0e3033c8d9c"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/emotion/test_text.txt": {"num_bytes": 132523, "checksum": "7e1070f5d3e3fcece5bc73680bff9981e90d8f7b2f1009bfe7a01d059d1c6091"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/emotion/test_labels.txt": {"num_bytes": 2842, "checksum": "245072348c711961785be6d395997f97cf7fcda3effeae7805664171dc75f913"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/emotion/val_text.txt": {"num_bytes": 34556, "checksum": "e2e30c86b8cbb97944d6543aedc06eace3bb275cb2f381aba787b838b4f23ca5"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/emotion/val_labels.txt": {"num_bytes": 748, "checksum": "313730630160b7e0a6b4235b800c76683f4aeeb72d094eb69646630cd5cfe338"}}, "download_size": 483813, "post_processing_size": null, "dataset_size": 523801, "size_in_bytes": 1007614}, "hate": {"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. 
All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n", "citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n", "homepage": "https://github.com/cardiffnlp/tweeteval", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["non-hate", "hate"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": " tweet_eval", "config_name": "hate", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1223654, "num_examples": 9000, "dataset_name": " tweet_eval"}, "test": {"name": "test", "num_bytes": 428938, "num_examples": 2970, "dataset_name": " tweet_eval"}, "validation": {"name": "validation", "num_bytes": 154148, "num_examples": 1000, "dataset_name": " tweet_eval"}}, "download_checksums": {"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/hate/train_text.txt": {"num_bytes": 1133852, "checksum": "6572bb3a42143128a5dfa99af8debeb0668e637c34b2d1e3140dac47316fe2c2"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/hate/train_labels.txt": {"num_bytes": 18000, "checksum": "4e8fde025a453a25c94632794254131dedeac4e57228ad64157c41571cc88f71"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/hate/test_text.txt": {"num_bytes": 399242, "checksum": "bc4762876a8dd8baa55c3cd7b03108e3231a5d691e80b8b1ef97c5be31b9da9a"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/hate/test_labels.txt": {"num_bytes": 5940, "checksum": "c14adca6b3627616a835c5ccea8a1cceb0235cd79417257f093eb0e16a69c62f"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/hate/val_text.txt": {"num_bytes": 144174, "checksum": "1ff78b1ed4c5ce43284b9eba32eb7d60c6d45d0d1b3b4d6df456ae01640764f1"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/hate/val_labels.txt": {"num_bytes": 2000, "checksum": "5092badf1a0e70036ea6264bcd0b78afc07d0f4a512fa6af34c2c4973600656b"}}, "download_size": 1703208, "post_processing_size": null, "dataset_size": 1806740, "size_in_bytes": 3509948}, "irony": {"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. 
All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n", "citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n", "homepage": "https://github.com/cardiffnlp/tweeteval", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["non_irony", "irony"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": " tweet_eval", "config_name": "irony", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 259191, "num_examples": 2862, "dataset_name": " tweet_eval"}, "test": {"name": "test", "num_bytes": 75901, "num_examples": 784, "dataset_name": " tweet_eval"}, "validation": {"name": "validation", "num_bytes": 86021, "num_examples": 955, "dataset_name": " tweet_eval"}}, "download_checksums": {"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/irony/train_text.txt": {"num_bytes": 231594, "checksum": "a888125a44f7dfaa25b026318748d0e62cc9a300d20f66eafd62011a19eaea23"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/irony/train_labels.txt": {"num_bytes": 5724, "checksum": "fc69e6106c0f1f433a91536e08f83c71a391d7b219f7684d42f243a8089af77d"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/irony/test_text.txt": {"num_bytes": 68057, "checksum": "53103da934a7308eee82f05f2a9781a8ea3e88604fdc1e02d3101108505c64be"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/irony/test_labels.txt": {"num_bytes": 1568, "checksum": "08e2095e1725e74907a380614c220204e356bb46e3e8c93deb74e83e5b15ab38"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/irony/val_text.txt": {"num_bytes": 76760, "checksum": "8806cf3793e300a485cfae34892fc3a0a2f9a183deb06c750c6531515c83051e"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/irony/val_labels.txt": {"num_bytes": 1910, "checksum": "ccf429f63b4e8d0e7f425ca09445f7c31f7cea8a1b7c283b015b117c4002fd07"}}, "download_size": 385613, "post_processing_size": null, "dataset_size": 421113, "size_in_bytes": 806726}, "offensive": {"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. 
All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n", "citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n", "homepage": "https://github.com/cardiffnlp/tweeteval", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["non-offensive", "offensive"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": " tweet_eval", "config_name": "offensive", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1648069, "num_examples": 11916, "dataset_name": " tweet_eval"}, "test": {"name": "test", "num_bytes": 135477, "num_examples": 860, "dataset_name": " tweet_eval"}, "validation": {"name": "validation", "num_bytes": 192421, "num_examples": 1324, "dataset_name": " tweet_eval"}}, "download_checksums": {"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/offensive/train_text.txt": {"num_bytes": 1529074, "checksum": "78a7a32e38b10af7d8970b008bf17f661c8d0a90dad145fa0fa6a944669650db"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/offensive/train_labels.txt": {"num_bytes": 23832, "checksum": "c0b7d6ebdaa4ebcf6fc557ef1e775d92eda160218a0e3b1dd48eb8234dc892a6"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/offensive/test_text.txt": {"num_bytes": 126921, "checksum": "25b08c3333c26190f1023961c4508ec9aab24d4722b1a3ea7a6040724c120547"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/offensive/test_labels.txt": {"num_bytes": 1720, "checksum": "41d05a7aa0b01f5dafab21b95adb4f979cb4226c046ff315702774d10dac1605"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/offensive/val_text.txt": {"num_bytes": 179188, "checksum": "816f36d180c35f15a5104838cb73856a0bef42043482fe738f3481b06242a55c"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/offensive/val_labels.txt": {"num_bytes": 2648, "checksum": "ed2deb776bd1c52fb8221fadd3360e32d9dfe46842d78053528126e46363a258"}}, "download_size": 1863383, "post_processing_size": null, "dataset_size": 1975967, "size_in_bytes": 3839350}, "sentiment": {"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. 
All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n", "citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n", "homepage": "https://github.com/cardiffnlp/tweeteval", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["negative", "neutral", "positive"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": " tweet_eval", "config_name": "sentiment", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 5425142, "num_examples": 45615, "dataset_name": " tweet_eval"}, "test": {"name": "test", "num_bytes": 1279548, "num_examples": 12284, "dataset_name": " tweet_eval"}, "validation": {"name": "validation", "num_bytes": 239088, "num_examples": 2000, "dataset_name": " tweet_eval"}}, "download_checksums": {"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/sentiment/train_text.txt": {"num_bytes": 4970029, "checksum": "368f01052ea6fd8ffc408a2a2e6ac9669e31542581a0396ef16591ea26eb98a6"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/sentiment/train_labels.txt": {"num_bytes": 91230, "checksum": "122bfb1732fb6995b0e5c5f726c0ba457c469c3b6e60513007ce5037f23e65d4"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/sentiment/test_text.txt": {"num_bytes": 1156877, "checksum": "09a93a55c63fd93f97485ef7302889d7edb4091cd49733aa37da094f0bfa0675"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/sentiment/test_labels.txt": {"num_bytes": 24568, "checksum": "6afb4afe9374d1f983bcf9a7c79b108d0f37fdf020a83f30488309bed215db9d"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/sentiment/val_text.txt": {"num_bytes": 219137, "checksum": "e5b021e6fc45064c260b09814b803d8f56cada519c4d952d72f43d48a350a964"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/sentiment/val_labels.txt": {"num_bytes": 4000, "checksum": "b4566926c72e2e4e2916c864def94e76c4cdde52446af2c7ba4fc2006e057e51"}}, "download_size": 6465841, "post_processing_size": null, "dataset_size": 6943778, "size_in_bytes": 13409619}, "stance_abortion": {"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. 
All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n", "citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n", "homepage": "https://github.com/cardiffnlp/tweeteval", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["none", "against", "favor"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": " tweet_eval", "config_name": "stance_abortion", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 68698, "num_examples": 587, "dataset_name": " tweet_eval"}, "test": {"name": "test", "num_bytes": 33175, "num_examples": 280, "dataset_name": " tweet_eval"}, "validation": {"name": "validation", "num_bytes": 7661, "num_examples": 66, "dataset_name": " tweet_eval"}}, "download_checksums": {"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/abortion/train_text.txt": {"num_bytes": 62828, "checksum": "a421d5b8fd9f972970b9275b83f65745bf81986d2a412b4caa2ba071f3efa916"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/abortion/train_labels.txt": {"num_bytes": 1174, "checksum": "e6786a594bd9a083c524a0f420c690351140b52af288f487cb4772d29675b014"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/abortion/test_text.txt": {"num_bytes": 30371, "checksum": "bf0e16a0b8ca4cf0ab90efbc560db3151c288fc842f5e3c6554e8589d521556a"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/abortion/test_labels.txt": {"num_bytes": 560, "checksum": "c90e6d36d863f876d6661620d37b613b4b07858a5277c8d6623713ee59ca451c"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/abortion/val_text.txt": {"num_bytes": 6997, "checksum": "0428ab3f2894936f2445a9020763c2bd19ed42986872168bb65886dede5843fd"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/abortion/val_labels.txt": {"num_bytes": 132, "checksum": "8df57a50823d5f3683ecf75d824a42e3b08eb52e25e3e2d6928f523097a0c050"}}, "download_size": 102062, "post_processing_size": null, "dataset_size": 109534, "size_in_bytes": 211596}, "stance_atheism": {"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. 
All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n", "citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n", "homepage": "https://github.com/cardiffnlp/tweeteval", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["none", "against", "favor"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": " tweet_eval", "config_name": "stance_atheism", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 54779, "num_examples": 461, "dataset_name": " tweet_eval"}, "test": {"name": "test", "num_bytes": 25720, "num_examples": 220, "dataset_name": " tweet_eval"}, "validation": {"name": "validation", "num_bytes": 6324, "num_examples": 52, "dataset_name": " tweet_eval"}}, "download_checksums": {"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/atheism/train_text.txt": {"num_bytes": 50165, "checksum": "0e82f1d4a16d79a38a68aee761762cf8a846bc8f7f9395670ca44e2ecf2f58f7"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/atheism/train_labels.txt": {"num_bytes": 922, "checksum": "a764aac1a75ccb32c4ffc4c03c66dc365cb50f013d3e94549bf775636cbc8373"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/atheism/test_text.txt": {"num_bytes": 23516, "checksum": "16c5336b2cba606ca63a6afcc50241be63a8fccf021628c6505449439b9d54b3"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/atheism/test_labels.txt": {"num_bytes": 440, "checksum": "4ef7c9398d265cfac625092c834e43cef9da9cb318e563493abb64f65dfe1b52"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/atheism/val_text.txt": {"num_bytes": 5800, "checksum": "5fe14c4c01f87a45dba640dddbb1d1909a893f9565f159c48fa1ba35bb46c209"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/atheism/val_labels.txt": {"num_bytes": 104, "checksum": "638095b3582f927fd1481cdb8d1f9f670f8d27880baf32c0b26c5946fd8f8292"}}, "download_size": 80947, "post_processing_size": null, "dataset_size": 86823, "size_in_bytes": 167770}, "stance_climate": {"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. 
All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n", "citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n", "homepage": "https://github.com/cardiffnlp/tweeteval", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["none", "against", "favor"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": " tweet_eval", "config_name": "stance_climate", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 40253, "num_examples": 355, "dataset_name": " tweet_eval"}, "test": {"name": "test", "num_bytes": 19929, "num_examples": 169, "dataset_name": " tweet_eval"}, "validation": {"name": "validation", "num_bytes": 4805, "num_examples": 40, "dataset_name": " tweet_eval"}}, "download_checksums": {"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/climate/train_text.txt": {"num_bytes": 36699, "checksum": "4803211832d318026323a8e5014cff1b95e1c8c3854378101e5d1a8c82582eb7"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/climate/train_labels.txt": {"num_bytes": 710, "checksum": "d6274f55bc95f5a7f2ae591b886c1414a7664aaf4e0c609f4ba6cf377929af18"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/climate/test_text.txt": {"num_bytes": 18235, "checksum": "41ee8ee2ad3c36e0629654fdb271f37775197c79be8b299adbeadd2003b63c53"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/climate/test_labels.txt": {"num_bytes": 338, "checksum": "193c9f2358f61d9efe558324ec89ecaf08e600a44b68128f47838c01d9f98dfd"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/climate/val_text.txt": {"num_bytes": 4401, "checksum": "fc5714703add266801ee2fd98296ea20ec0879e89cdb9f906d9812d9f640f2ba"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/climate/val_labels.txt": {"num_bytes": 80, "checksum": "0cb133ab9b137292f075210db45f7e293dc52798a4e21e59037bfcfe66c97aa6"}}, "download_size": 60463, "post_processing_size": null, "dataset_size": 64987, "size_in_bytes": 125450}, "stance_feminist": {"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. 
All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n", "citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n", "homepage": "https://github.com/cardiffnlp/tweeteval", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["none", "against", "favor"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": " tweet_eval", "config_name": "stance_feminist", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 70513, "num_examples": 597, "dataset_name": " tweet_eval"}, "test": {"name": "test", "num_bytes": 33309, "num_examples": 285, "dataset_name": " tweet_eval"}, "validation": {"name": "validation", "num_bytes": 8039, "num_examples": 67, "dataset_name": " tweet_eval"}}, "download_checksums": {"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/feminist/train_text.txt": {"num_bytes": 64539, "checksum": "c176e6663973c8e78bfa92ba1e8874a70cc5358567d71584a90943bc6525eaab"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/feminist/train_labels.txt": {"num_bytes": 1194, "checksum": "abd4f196d801423bb0daba8c0ecf5b3efba1f10e8f410c3dfa360b50c8b9c685"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/feminist/test_text.txt": {"num_bytes": 30455, "checksum": "1bfdbdc2af64fd62dcc775d1288e192ac8ff805ef27ccf3aaac54a98616eefda"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/feminist/test_labels.txt": {"num_bytes": 570, "checksum": "ddbde6d253ee47c5d5ef8bc5386270fde45cf088d3be70bba9c382b8a024897a"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/feminist/val_text.txt": {"num_bytes": 7365, "checksum": "3518b2ddcf696626a7243d7cea720a975718c7a52a5a086931be87897c1de58b"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/feminist/val_labels.txt": {"num_bytes": 134, "checksum": "399e0d468d0e4ead7a445f69efdf35876c835acf4cefc00a16f451a5d42e5c13"}}, "download_size": 104257, "post_processing_size": null, "dataset_size": 111861, "size_in_bytes": 216118}, "stance_hillary": {"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. 
All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n", "citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n", "homepage": "https://github.com/cardiffnlp/tweeteval", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["none", "against", "favor"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": " tweet_eval", "config_name": "stance_hillary", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 69600, "num_examples": 620, "dataset_name": " tweet_eval"}, "test": {"name": "test", "num_bytes": 34491, "num_examples": 295, "dataset_name": " tweet_eval"}, "validation": {"name": "validation", "num_bytes": 7536, "num_examples": 69, "dataset_name": " tweet_eval"}}, "download_checksums": {"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/hillary/train_text.txt": {"num_bytes": 63398, "checksum": "0bd735de895cb74d63c224e64e3d955cac99be97aa225f803fe4d2f5978a2c99"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/hillary/train_labels.txt": {"num_bytes": 1240, "checksum": "0ea5753d13a717a9e91581d1d89c0b5206c8f905f0a717b2b27d02dbf419250d"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/hillary/test_text.txt": {"num_bytes": 31537, "checksum": "5c4e020285a62cfd88f264849e1db242ded356c171b1a68dd0050b76635053aa"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/hillary/test_labels.txt": {"num_bytes": 590, "checksum": "068468f6a72b85dfb65bf10e45f2453fa082d1ea9d7a40e7f560d5b6d75027f3"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/hillary/val_text.txt": {"num_bytes": 6842, "checksum": "9714b7dcc8617e095433d7b63df8aa155eb84216b9ac9195105ab83d85cd248d"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/hillary/val_labels.txt": {"num_bytes": 138, "checksum": "e5d44c771b7349a4a74309f56ca072fdf8f1c015068d519ca2ed3a931c833606"}}, "download_size": 103745, "post_processing_size": null, "dataset_size": 111627, "size_in_bytes": 215372}}
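The metadata above closes out the stance_* entries of dataset_infos.json: every stance config declares the same three-way ClassLabel (none, against, favor) and fixed train/test/validation splits whose sizes are recorded under num_examples. A minimal sketch of how that metadata surfaces through the datasets library, assuming the library is installed and the Hub dataset id is tweet_eval:

from datasets import load_dataset

# Load one stance config and compare what comes back against the metadata above.
ds = load_dataset("tweet_eval", "stance_abortion")

print(ds["train"].features["label"].names)          # expected: ['none', 'against', 'favor']
print({split: ds[split].num_rows for split in ds})  # expected per num_examples above: train 587, test 280, validation 66
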
emoji/tweet_eval-test.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e0163905e7235a1abcbd0f5781993c8a932aa13bb78db6578f5d7f03f6459f14
+size 3047340

emoji/tweet_eval-train.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:05a726d556b0e17ab434cb43f531805363f8b26a5a96492d792fa192390d607b
+size 2609972

emoji/tweet_eval-validation.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3df64aea4ffe4c8caa840c901adc18c9f754c60ab1dbc66644d20d4d2579e320
+size 281993

emotion/tweet_eval-test.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:85030000f0a7288ad2afb4b272a8c4efe8d2f40edad90078de8d064647d88cf0
+size 105420

emotion/tweet_eval-train.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a68d61d65c2abbd5ae9d5807758b45fd1b03e17a62f44b245f6db532dc39cd8b
+size 233003

emotion/tweet_eval-validation.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:953c3d7141b3425888b182e4d51fcc070be3ca529c8df3132ead0f0fb97785c5
+size 28590

hate/tweet_eval-test.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:abec003e21016abb0ad27d52d34494e89e389ceff9f1dcc6a9c4c6b47fb8efb4
+size 277845

hate/tweet_eval-train.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef06c882e4211f4e8031c98ab45b9f87a89c4075234283a093b510b2a163d581
+size 815721

hate/tweet_eval-validation.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:645318c59bb2ebc2ccefef3c937ed19b3483c7af92ea41437093d9d16923f0e1
+size 102777

irony/tweet_eval-test.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ffe0d79c90cfe4f95ff2b59fd5686237d1293f820233cec01f20f04a1cbdeeb
+size 53983

irony/tweet_eval-train.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1de97a3b1c4e168e82f6935ad18771889214d6c1c88935225aa9907312a33a27
+size 182570

irony/tweet_eval-validation.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2268f2342a81532d239e50b4f76525cc68d7c9d3ff85056278d874d2999eb01c
+size 61091

offensive/tweet_eval-test.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d1211a9fc79ffa2eff099f79e3190d8604ff76262251e278b961bf6cb69d87d7
+size 93730

offensive/tweet_eval-train.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b6f7720430a0a7dfe01687c1956177c34c67979113cfb050869f21502db50d5
+size 1019131

offensive/tweet_eval-validation.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0653b8d5ea22e90183c304a1d35a5aa5040e35436e2b09a5957bacc73ddcfdf9
+size 121664

sentiment/tweet_eval-test.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:40d0084ff2345011509781ed52f317e427a23d7b2897b3f65e4b605040dcdbf5
+size 900552

sentiment/tweet_eval-train.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bde3fb4a398fcf9228e7a3274325ec6c7f48edfe514066e9cc6b75d13996385f
+size 3781981

sentiment/tweet_eval-validation.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:71f9ee60637ec5e910b88174b72f03960f8dc67fa63a68b91230119f1ce5d618
+size 167139

stance_abortion/tweet_eval-test.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b47e3934cc311ecb21ce4470229e7b7ab97945603e46c5ed6760dbe6b41d8625
+size 22512

stance_abortion/tweet_eval-train.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e372eb7412a5c74c0bbf9349e16efa52cdaea522eaffe29811484f7f2aff5e59
+size 43711

stance_abortion/tweet_eval-validation.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:870cee37f3ce7342bb5ba6d847f525a47f1ce42eb55d2d05c4803206503d11fc
+size 7291

stance_atheism/tweet_eval-test.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d68af728a95ca7e71133d4dcd52d0761473527976d0b6cd100ce31794765cedd
+size 19357

stance_atheism/tweet_eval-train.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4d5a295f9cb717584da6738d679a4d5157719bf5ab061063fdee25d83715fb8d
+size 36466

stance_atheism/tweet_eval-validation.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b7f2226730c2fc0acf93cb29482f9fa044b1f8cab85ffa679123f2a2d57933a
+size 6439

stance_climate/tweet_eval-test.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a6f421815161bb5c5740c14b86367afab0df9ef988f58a873406a26975c0d9c4
+size 14896

stance_climate/tweet_eval-train.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8704f73765bba4f3a8c9c53745384d8a5a4158a89ae07f9d345d45aa0a0f110f
+size 28126

stance_climate/tweet_eval-validation.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b8eb748c973c5a7f3fe6d7cc5b7a4cc5406ab83a31f7328a6d1f1be0f659b05c
+size 5468

stance_feminist/tweet_eval-test.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:436ce5cdbf5072c9359a3a05a79a82673ac329ddc1aee6b53b4dec5b602c92e2
+size 23415

stance_feminist/tweet_eval-train.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5d1d39992ffd9ff4df053683645da466c5eefed157522328c6fd56cd000b60e
+size 45300

stance_feminist/tweet_eval-validation.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:78e7e962e257b9d67189f3e0edd452ad20661cd0bcb968191747ee66c20917a8
+size 7627

stance_hillary/tweet_eval-test.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0cf4926de16e7e7026fda04fa48145bb54325065922494dc9466df2f35f00ab3
+size 23515

stance_hillary/tweet_eval-train.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c89d0003a040624ee7e630f8af25a7363103ddecbcf1abfac1be3cd7e6734f2
+size 43296

stance_hillary/tweet_eval-validation.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:283a80338f9d59c7150ef6a41345cb903f51142fea4d51ecc5ddcf761d77531e
+size 7243

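The parquet entries above are git-lfs pointer files: each records only the LFS spec version, the sha256 of the real payload, and its size in bytes, while the data itself is fetched on demand (for example via git lfs pull). A minimal sketch of reading one split directly, assuming a local clone with the LFS objects materialized and pandas with a parquet engine (pyarrow) installed; per the features recorded in dataset_infos.json, each file should hold a text column and an integer label column:

import pandas as pd

# Relative path inside a hypothetical local checkout; the layout matches the files added above.
df = pd.read_parquet("emoji/tweet_eval-test.parquet")

print(df.columns.tolist())  # expected: ['text', 'label']
print(len(df))              # should match the emoji test-split num_examples in dataset_infos.json
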
tweet_eval.py
DELETED
@@ -1,249 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""The Tweet Eval Datasets"""
-
-
-import datasets
-
-
-_CITATION = """\
-@inproceedings{barbieri2020tweeteval,
-    title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},
-    author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},
-    booktitle={Proceedings of Findings of EMNLP},
-    year={2020}
-}
-"""
-
-_DESCRIPTION = """\
-TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.
-"""
-
-_HOMEPAGE = "https://github.com/cardiffnlp/tweeteval"
-
-_LICENSE = ""
-
-URL = "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/"
-
-_URLs = {
-    "emoji": {
-        "train_text": URL + "emoji/train_text.txt",
-        "train_labels": URL + "emoji/train_labels.txt",
-        "test_text": URL + "emoji/test_text.txt",
-        "test_labels": URL + "emoji/test_labels.txt",
-        "val_text": URL + "emoji/val_text.txt",
-        "val_labels": URL + "emoji/val_labels.txt",
-    },
-    "emotion": {
-        "train_text": URL + "emotion/train_text.txt",
-        "train_labels": URL + "emotion/train_labels.txt",
-        "test_text": URL + "emotion/test_text.txt",
-        "test_labels": URL + "emotion/test_labels.txt",
-        "val_text": URL + "emotion/val_text.txt",
-        "val_labels": URL + "emotion/val_labels.txt",
-    },
-    "hate": {
-        "train_text": URL + "hate/train_text.txt",
-        "train_labels": URL + "hate/train_labels.txt",
-        "test_text": URL + "hate/test_text.txt",
-        "test_labels": URL + "hate/test_labels.txt",
-        "val_text": URL + "hate/val_text.txt",
-        "val_labels": URL + "hate/val_labels.txt",
-    },
-    "irony": {
-        "train_text": URL + "irony/train_text.txt",
-        "train_labels": URL + "irony/train_labels.txt",
-        "test_text": URL + "irony/test_text.txt",
-        "test_labels": URL + "irony/test_labels.txt",
-        "val_text": URL + "irony/val_text.txt",
-        "val_labels": URL + "irony/val_labels.txt",
-    },
-    "offensive": {
-        "train_text": URL + "offensive/train_text.txt",
-        "train_labels": URL + "offensive/train_labels.txt",
-        "test_text": URL + "offensive/test_text.txt",
-        "test_labels": URL + "offensive/test_labels.txt",
-        "val_text": URL + "offensive/val_text.txt",
-        "val_labels": URL + "offensive/val_labels.txt",
-    },
-    "sentiment": {
-        "train_text": URL + "sentiment/train_text.txt",
-        "train_labels": URL + "sentiment/train_labels.txt",
-        "test_text": URL + "sentiment/test_text.txt",
-        "test_labels": URL + "sentiment/test_labels.txt",
-        "val_text": URL + "sentiment/val_text.txt",
-        "val_labels": URL + "sentiment/val_labels.txt",
-    },
-    "stance": {
-        "abortion": {
-            "train_text": URL + "stance/abortion/train_text.txt",
-            "train_labels": URL + "stance/abortion/train_labels.txt",
-            "test_text": URL + "stance/abortion/test_text.txt",
-            "test_labels": URL + "stance/abortion/test_labels.txt",
-            "val_text": URL + "stance/abortion/val_text.txt",
-            "val_labels": URL + "stance/abortion/val_labels.txt",
-        },
-        "atheism": {
-            "train_text": URL + "stance/atheism/train_text.txt",
-            "train_labels": URL + "stance/atheism/train_labels.txt",
-            "test_text": URL + "stance/atheism/test_text.txt",
-            "test_labels": URL + "stance/atheism/test_labels.txt",
-            "val_text": URL + "stance/atheism/val_text.txt",
-            "val_labels": URL + "stance/atheism/val_labels.txt",
-        },
-        "climate": {
-            "train_text": URL + "stance/climate/train_text.txt",
-            "train_labels": URL + "stance/climate/train_labels.txt",
-            "test_text": URL + "stance/climate/test_text.txt",
-            "test_labels": URL + "stance/climate/test_labels.txt",
-            "val_text": URL + "stance/climate/val_text.txt",
-            "val_labels": URL + "stance/climate/val_labels.txt",
-        },
-        "feminist": {
-            "train_text": URL + "stance/feminist/train_text.txt",
-            "train_labels": URL + "stance/feminist/train_labels.txt",
-            "test_text": URL + "stance/feminist/test_text.txt",
-            "test_labels": URL + "stance/feminist/test_labels.txt",
-            "val_text": URL + "stance/feminist/val_text.txt",
-            "val_labels": URL + "stance/feminist/val_labels.txt",
-        },
-        "hillary": {
-            "train_text": URL + "stance/hillary/train_text.txt",
-            "train_labels": URL + "stance/hillary/train_labels.txt",
-            "test_text": URL + "stance/hillary/test_text.txt",
-            "test_labels": URL + "stance/hillary/test_labels.txt",
-            "val_text": URL + "stance/hillary/val_text.txt",
-            "val_labels": URL + "stance/hillary/val_labels.txt",
-        },
-    },
-}
-
-
-class TweetEvalConfig(datasets.BuilderConfig):
-    def __init__(self, *args, type=None, sub_type=None, **kwargs):
-        super().__init__(
-            *args,
-            name=f"{type}" if type != "stance" else f"{type}_{sub_type}",
-            **kwargs,
-        )
-        self.type = type
-        self.sub_type = sub_type
-
-
-class TweetEval(datasets.GeneratorBasedBuilder):
-    """TweetEval Dataset."""
-
-    BUILDER_CONFIGS = [
-        TweetEvalConfig(
-            type=key,
-            sub_type=None,
-            version=datasets.Version("1.1.0"),
-            description=f"This part of my dataset covers {key} part of TweetEval Dataset.",
-        )
-        for key in list(_URLs.keys())
-        if key != "stance"
-    ] + [
-        TweetEvalConfig(
-            type="stance",
-            sub_type=key,
-            version=datasets.Version("1.1.0"),
-            description=f"This part of my dataset covers stance_{key} part of TweetEval Dataset.",
-        )
-        for key in list(_URLs["stance"].keys())
-    ]
-
-    def _info(self):
-        if self.config.type == "stance":
-            names = ["none", "against", "favor"]
-        elif self.config.type == "sentiment":
-            names = ["negative", "neutral", "positive"]
-        elif self.config.type == "offensive":
-            names = ["non-offensive", "offensive"]
-        elif self.config.type == "irony":
-            names = ["non_irony", "irony"]
-        elif self.config.type == "hate":
-            names = ["non-hate", "hate"]
-        elif self.config.type == "emoji":
-            names = [
-                "❤",
-                "😍",
-                "😂",
-                "💕",
-                "🔥",
-                "😊",
-                "😎",
-                "✨",
-                "💙",
-                "😘",
-                "📷",
-                "🇺🇸",
-                "☀",
-                "💜",
-                "😉",
-                "💯",
-                "😁",
-                "🎄",
-                "📸",
-                "😜",
-            ]
-
-        else:
-            names = ["anger", "joy", "optimism", "sadness"]
-
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {"text": datasets.Value("string"), "label": datasets.features.ClassLabel(names=names)}
-            ),
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        if self.config.type != "stance":
-            my_urls = _URLs[self.config.type]
-        else:
-            my_urls = _URLs[self.config.type][self.config.sub_type]
-        data_dir = dl_manager.download_and_extract(my_urls)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"text_path": data_dir["train_text"], "labels_path": data_dir["train_labels"]},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"text_path": data_dir["test_text"], "labels_path": data_dir["test_labels"]},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"text_path": data_dir["val_text"], "labels_path": data_dir["val_labels"]},
-            ),
-        ]
-
-    def _generate_examples(self, text_path, labels_path):
-        """Yields examples."""
-
-        with open(text_path, encoding="utf-8") as f:
-            texts = f.readlines()
-        with open(labels_path, encoding="utf-8") as f:
-            labels = f.readlines()
-        for i, text in enumerate(texts):
-            yield i, {"text": text.strip(), "label": int(labels[i].strip())}
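The deleted script above built each config by downloading raw *_text.txt and *_labels.txt files from the cardiffnlp GitHub repository and pairing them line by line in _generate_examples. With this commit the same configs are served from the parquet files added earlier, so the usual loading call should keep working without the custom builder; a minimal sketch under that assumption:

from datasets import load_dataset

# Same call as before the conversion; the data is now read from the
# repository's parquet files instead of being rebuilt by the deleted script.
ds = load_dataset("tweet_eval", "emoji")
print(ds)               # DatasetDict with train/validation/test splits
print(ds["train"][0])   # a single example, e.g. {'text': '...', 'label': <class id>}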