tweet_eval / dataset_infos.json
{
"emoji": {
"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n",
"citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n",
"homepage": "https://github.com/cardiffnlp/tweeteval",
"license": "",
"features": {
"text": {
"dtype": "string",
"_type": "Value"
},
"label": {
"names": [
"\u2764",
"\ud83d\ude0d",
"\ud83d\ude02",
"\ud83d\udc95",
"\ud83d\udd25",
"\ud83d\ude0a",
"\ud83d\ude0e",
"\u2728",
"\ud83d\udc99",
"\ud83d\ude18",
"\ud83d\udcf7",
"\ud83c\uddfa\ud83c\uddf8",
"\u2600",
"\ud83d\udc9c",
"\ud83d\ude09",
"\ud83d\udcaf",
"\ud83d\ude01",
"\ud83c\udf84",
"\ud83d\udcf8",
"\ud83d\ude1c"
],
"_type": "ClassLabel"
}
},
"builder_name": "tweet_eval",
"dataset_name": "tweet_eval",
"config_name": "emoji",
"version": {
"version_str": "1.1.0",
"major": 1,
"minor": 1,
"patch": 0
},
"splits": {
"train": {
"name": "train",
"num_bytes": 3803167,
"num_examples": 45000,
"dataset_name": null
},
"test": {
"name": "test",
"num_bytes": 4255901,
"num_examples": 50000,
"dataset_name": null
},
"validation": {
"name": "validation",
"num_bytes": 396079,
"num_examples": 5000,
"dataset_name": null
}
},
"download_size": 5939308,
"dataset_size": 8455147,
"size_in_bytes": 14394455
},
"emotion": {
"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n",
"citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n",
"homepage": "https://github.com/cardiffnlp/tweeteval",
"license": "",
"features": {
"text": {
"dtype": "string",
"_type": "Value"
},
"label": {
"names": [
"anger",
"joy",
"optimism",
"sadness"
],
"_type": "ClassLabel"
}
},
"builder_name": "tweet_eval",
"dataset_name": "tweet_eval",
"config_name": "emotion",
"version": {
"version_str": "1.1.0",
"major": 1,
"minor": 1,
"patch": 0
},
"splits": {
"train": {
"name": "train",
"num_bytes": 338871,
"num_examples": 3257,
"dataset_name": null
},
"test": {
"name": "test",
"num_bytes": 146645,
"num_examples": 1421,
"dataset_name": null
},
"validation": {
"name": "validation",
"num_bytes": 38273,
"num_examples": 374,
"dataset_name": null
}
},
"download_size": 367016,
"dataset_size": 523789,
"size_in_bytes": 890805
},
"hate": {
"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n",
"citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n",
"homepage": "https://github.com/cardiffnlp/tweeteval",
"license": "",
"features": {
"text": {
"dtype": "string",
"_type": "Value"
},
"label": {
"names": [
"non-hate",
"hate"
],
"_type": "ClassLabel"
}
},
"builder_name": "tweet_eval",
"dataset_name": "tweet_eval",
"config_name": "hate",
"version": {
"version_str": "1.1.0",
"major": 1,
"minor": 1,
"patch": 0
},
"splits": {
"train": {
"name": "train",
"num_bytes": 1223650,
"num_examples": 9000,
"dataset_name": null
},
"test": {
"name": "test",
"num_bytes": 428934,
"num_examples": 2970,
"dataset_name": null
},
"validation": {
"name": "validation",
"num_bytes": 154144,
"num_examples": 1000,
"dataset_name": null
}
},
"download_size": 1196346,
"dataset_size": 1806728,
"size_in_bytes": 3003074
},
"irony": {
"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n",
"citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n",
"homepage": "https://github.com/cardiffnlp/tweeteval",
"license": "",
"features": {
"text": {
"dtype": "string",
"_type": "Value"
},
"label": {
"names": [
"non_irony",
"irony"
],
"_type": "ClassLabel"
}
},
"builder_name": "tweet_eval",
"dataset_name": "tweet_eval",
"config_name": "irony",
"version": {
"version_str": "1.1.0",
"major": 1,
"minor": 1,
"patch": 0
},
"splits": {
"train": {
"name": "train",
"num_bytes": 259187,
"num_examples": 2862,
"dataset_name": null
},
"test": {
"name": "test",
"num_bytes": 75897,
"num_examples": 784,
"dataset_name": null
},
"validation": {
"name": "validation",
"num_bytes": 86017,
"num_examples": 955,
"dataset_name": null
}
},
"download_size": 297647,
"dataset_size": 421101,
"size_in_bytes": 718748
},
"offensive": {
"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n",
"citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n",
"homepage": "https://github.com/cardiffnlp/tweeteval",
"license": "",
"features": {
"text": {
"dtype": "string",
"_type": "Value"
},
"label": {
"names": [
"non-offensive",
"offensive"
],
"_type": "ClassLabel"
}
},
"builder_name": "tweet_eval",
"dataset_name": "tweet_eval",
"config_name": "offensive",
"version": {
"version_str": "1.1.0",
"major": 1,
"minor": 1,
"patch": 0
},
"splits": {
"train": {
"name": "train",
"num_bytes": 1648061,
"num_examples": 11916,
"dataset_name": null
},
"test": {
"name": "test",
"num_bytes": 135473,
"num_examples": 860,
"dataset_name": null
},
"validation": {
"name": "validation",
"num_bytes": 192417,
"num_examples": 1324,
"dataset_name": null
}
},
"download_size": 1234528,
"dataset_size": 1975951,
"size_in_bytes": 3210479
},
"sentiment": {
"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n",
"citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n",
"homepage": "https://github.com/cardiffnlp/tweeteval",
"license": "",
"features": {
"text": {
"dtype": "string",
"_type": "Value"
},
"label": {
"names": [
"negative",
"neutral",
"positive"
],
"_type": "ClassLabel"
}
},
"builder_name": "tweet_eval",
"dataset_name": "tweet_eval",
"config_name": "sentiment",
"version": {
"version_str": "1.1.0",
"major": 1,
"minor": 1,
"patch": 0
},
"splits": {
"train": {
"name": "train",
"num_bytes": 5425122,
"num_examples": 45615,
"dataset_name": null
},
"test": {
"name": "test",
"num_bytes": 1279540,
"num_examples": 12284,
"dataset_name": null
},
"validation": {
"name": "validation",
"num_bytes": 239084,
"num_examples": 2000,
"dataset_name": null
}
},
"download_size": 4849675,
"dataset_size": 6943746,
"size_in_bytes": 11793421
},
"stance_abortion": {
"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n",
"citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n",
"homepage": "https://github.com/cardiffnlp/tweeteval",
"license": "",
"features": {
"text": {
"dtype": "string",
"_type": "Value"
},
"label": {
"names": [
"none",
"against",
"favor"
],
"_type": "ClassLabel"
}
},
"builder_name": "tweet_eval",
"dataset_name": "tweet_eval",
"config_name": "stance_abortion",
"version": {
"version_str": "1.1.0",
"major": 1,
"minor": 1,
"patch": 0
},
"splits": {
"train": {
"name": "train",
"num_bytes": 68694,
"num_examples": 587,
"dataset_name": null
},
"test": {
"name": "test",
"num_bytes": 33171,
"num_examples": 280,
"dataset_name": null
},
"validation": {
"name": "validation",
"num_bytes": 7657,
"num_examples": 66,
"dataset_name": null
}
},
"download_size": 73517,
"dataset_size": 109522,
"size_in_bytes": 183039
},
"stance_atheism": {
"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n",
"citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n",
"homepage": "https://github.com/cardiffnlp/tweeteval",
"license": "",
"features": {
"text": {
"dtype": "string",
"_type": "Value"
},
"label": {
"names": [
"none",
"against",
"favor"
],
"_type": "ClassLabel"
}
},
"builder_name": "tweet_eval",
"dataset_name": "tweet_eval",
"config_name": "stance_atheism",
"version": {
"version_str": "1.1.0",
"major": 1,
"minor": 1,
"patch": 0
},
"splits": {
"train": {
"name": "train",
"num_bytes": 54775,
"num_examples": 461,
"dataset_name": null
},
"test": {
"name": "test",
"num_bytes": 25716,
"num_examples": 220,
"dataset_name": null
},
"validation": {
"name": "validation",
"num_bytes": 6320,
"num_examples": 52,
"dataset_name": null
}
},
"download_size": 62265,
"dataset_size": 86811,
"size_in_bytes": 149076
},
"stance_climate": {
"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n",
"citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n",
"homepage": "https://github.com/cardiffnlp/tweeteval",
"license": "",
"features": {
"text": {
"dtype": "string",
"id": null,
"_type": "Value"
},
"label": {
"num_classes": 3,
"names": [
"none",
"against",
"favor"
],
"names_file": null,
"id": null,
"_type": "ClassLabel"
}
},
"post_processed": null,
"supervised_keys": null,
"builder_name": " tweet_eval",
"config_name": "stance_climate",
"version": {
"version_str": "1.1.0",
"description": null,
"major": 1,
"minor": 1,
"patch": 0
},
"splits": {
"train": {
"name": "train",
"num_bytes": 40253,
"num_examples": 355,
"dataset_name": " tweet_eval"
},
"test": {
"name": "test",
"num_bytes": 19929,
"num_examples": 169,
"dataset_name": " tweet_eval"
},
"validation": {
"name": "validation",
"num_bytes": 4805,
"num_examples": 40,
"dataset_name": " tweet_eval"
}
},
"download_checksums": {
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/climate/train_text.txt": {
"num_bytes": 36699,
"checksum": "4803211832d318026323a8e5014cff1b95e1c8c3854378101e5d1a8c82582eb7"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/climate/train_labels.txt": {
"num_bytes": 710,
"checksum": "d6274f55bc95f5a7f2ae591b886c1414a7664aaf4e0c609f4ba6cf377929af18"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/climate/test_text.txt": {
"num_bytes": 18235,
"checksum": "41ee8ee2ad3c36e0629654fdb271f37775197c79be8b299adbeadd2003b63c53"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/climate/test_labels.txt": {
"num_bytes": 338,
"checksum": "193c9f2358f61d9efe558324ec89ecaf08e600a44b68128f47838c01d9f98dfd"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/climate/val_text.txt": {
"num_bytes": 4401,
"checksum": "fc5714703add266801ee2fd98296ea20ec0879e89cdb9f906d9812d9f640f2ba"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/climate/val_labels.txt": {
"num_bytes": 80,
"checksum": "0cb133ab9b137292f075210db45f7e293dc52798a4e21e59037bfcfe66c97aa6"
}
},
"download_size": 60463,
"post_processing_size": null,
"dataset_size": 64987,
"size_in_bytes": 125450
},
"stance_feminist": {
"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n",
"citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n",
"homepage": "https://github.com/cardiffnlp/tweeteval",
"license": "",
"features": {
"text": {
"dtype": "string",
"id": null,
"_type": "Value"
},
"label": {
"num_classes": 3,
"names": [
"none",
"against",
"favor"
],
"names_file": null,
"id": null,
"_type": "ClassLabel"
}
},
"post_processed": null,
"supervised_keys": null,
"builder_name": " tweet_eval",
"config_name": "stance_feminist",
"version": {
"version_str": "1.1.0",
"description": null,
"major": 1,
"minor": 1,
"patch": 0
},
"splits": {
"train": {
"name": "train",
"num_bytes": 70513,
"num_examples": 597,
"dataset_name": " tweet_eval"
},
"test": {
"name": "test",
"num_bytes": 33309,
"num_examples": 285,
"dataset_name": " tweet_eval"
},
"validation": {
"name": "validation",
"num_bytes": 8039,
"num_examples": 67,
"dataset_name": " tweet_eval"
}
},
"download_checksums": {
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/feminist/train_text.txt": {
"num_bytes": 64539,
"checksum": "c176e6663973c8e78bfa92ba1e8874a70cc5358567d71584a90943bc6525eaab"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/feminist/train_labels.txt": {
"num_bytes": 1194,
"checksum": "abd4f196d801423bb0daba8c0ecf5b3efba1f10e8f410c3dfa360b50c8b9c685"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/feminist/test_text.txt": {
"num_bytes": 30455,
"checksum": "1bfdbdc2af64fd62dcc775d1288e192ac8ff805ef27ccf3aaac54a98616eefda"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/feminist/test_labels.txt": {
"num_bytes": 570,
"checksum": "ddbde6d253ee47c5d5ef8bc5386270fde45cf088d3be70bba9c382b8a024897a"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/feminist/val_text.txt": {
"num_bytes": 7365,
"checksum": "3518b2ddcf696626a7243d7cea720a975718c7a52a5a086931be87897c1de58b"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/feminist/val_labels.txt": {
"num_bytes": 134,
"checksum": "399e0d468d0e4ead7a445f69efdf35876c835acf4cefc00a16f451a5d42e5c13"
}
},
"download_size": 104257,
"post_processing_size": null,
"dataset_size": 111861,
"size_in_bytes": 216118
},
"stance_hillary": {
"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n",
"citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n",
"homepage": "https://github.com/cardiffnlp/tweeteval",
"license": "",
"features": {
"text": {
"dtype": "string",
"id": null,
"_type": "Value"
},
"label": {
"num_classes": 3,
"names": [
"none",
"against",
"favor"
],
"names_file": null,
"id": null,
"_type": "ClassLabel"
}
},
"post_processed": null,
"supervised_keys": null,
"builder_name": " tweet_eval",
"config_name": "stance_hillary",
"version": {
"version_str": "1.1.0",
"description": null,
"major": 1,
"minor": 1,
"patch": 0
},
"splits": {
"train": {
"name": "train",
"num_bytes": 69600,
"num_examples": 620,
"dataset_name": " tweet_eval"
},
"test": {
"name": "test",
"num_bytes": 34491,
"num_examples": 295,
"dataset_name": " tweet_eval"
},
"validation": {
"name": "validation",
"num_bytes": 7536,
"num_examples": 69,
"dataset_name": " tweet_eval"
}
},
"download_checksums": {
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/hillary/train_text.txt": {
"num_bytes": 63398,
"checksum": "0bd735de895cb74d63c224e64e3d955cac99be97aa225f803fe4d2f5978a2c99"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/hillary/train_labels.txt": {
"num_bytes": 1240,
"checksum": "0ea5753d13a717a9e91581d1d89c0b5206c8f905f0a717b2b27d02dbf419250d"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/hillary/test_text.txt": {
"num_bytes": 31537,
"checksum": "5c4e020285a62cfd88f264849e1db242ded356c171b1a68dd0050b76635053aa"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/hillary/test_labels.txt": {
"num_bytes": 590,
"checksum": "068468f6a72b85dfb65bf10e45f2453fa082d1ea9d7a40e7f560d5b6d75027f3"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/hillary/val_text.txt": {
"num_bytes": 6842,
"checksum": "9714b7dcc8617e095433d7b63df8aa155eb84216b9ac9195105ab83d85cd248d"
},
"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/hillary/val_labels.txt": {
"num_bytes": 138,
"checksum": "e5d44c771b7349a4a74309f56ca072fdf8f1c015068d519ca2ed3a931c833606"
}
},
"download_size": 103745,
"post_processing_size": null,
"dataset_size": 111627,
"size_in_bytes": 215372
}
}
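
The metadata above can be cross-checked programmatically. A minimal sketch, assuming the Hugging Face datasets library is installed and the tweet_eval repository is reachable on the Hub; the config names are taken from this file, while the rest is standard datasets API (newer library versions may resolve the split metadata from the Hub's README/Parquet files rather than from this local file):

from datasets import load_dataset_builder

CONFIGS = [
    "emoji", "emotion", "hate", "irony", "offensive", "sentiment",
    "stance_abortion", "stance_atheism", "stance_climate",
    "stance_feminist", "stance_hillary",
]

for config in CONFIGS:
    # load_dataset_builder reads the dataset metadata without downloading the data files
    builder = load_dataset_builder("tweet_eval", config)
    info = builder.info
    label_names = info.features["label"].names  # ClassLabel names, e.g. ["anger", "joy", ...]
    split_sizes = {name: s.num_examples for name, s in (info.splits or {}).items()}
    print(config, f"{len(label_names)} classes", split_sizes)

Each printed split_sizes entry should match the num_examples values recorded in the corresponding splits block above, and label_names should match the ClassLabel names listed for that config.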