---
license: cc-by-4.0
task_categories:
- text-generation
- text-classification
- token-classification
- question-answering
- zero-shot-classification
- translation
- summarization
language:
- en
size_categories:
- 10M<n<100M
---

# Task Names

- **FLAN-2021 -> 70 tasks**

```json
{
  "ag_news_subset": null,
  "ai2_arc_ARC-Challenge": null,
  "ai2_arc_ARC-Easy": null,
  "aeslc": null,
  "anli_r1": null,
  "anli_r2": null,
  "anli_r3": null,
  "bool_q": null,
  "cnn_dailymail": null,
  "coqa": null,
  "cosmos_qa": null,
  "definite_pronoun_resolution": null,
  "drop": null,
  "fix_punct": null,
  "gem_common_gen": null,
  "gem_dart": null,
  "gem_e2e_nlg": null,
  "gem_web_nlg_en": null,
  "gem_wiki_lingua_english_en": null,
  "gigaword": null,
  "glue_cola": null,
  "glue_mnli": null,
  "glue_mrpc": null,
  "glue_qnli": null,
  "glue_qqp": null,
  "glue_sst2": null,
  "glue_stsb": null,
  "glue_wnli": null,
  "hellaswag": null,
  "huggingface_xsum": null,
  "imdb_reviews_plain_text": null,
  "lambada": null,
  "math_dataset_algebra__linear_1d": null,
  "multi_news": null,
  "natural_questions_open": null,
  "newsroom": null,
  "openbookqa": null,
  "opinion_abstracts_idebate": null,
  "opinion_abstracts_rotten_tomatoes": null,
  "para_crawl_enes": null,
  "paws_wiki": null,
  "piqa": null,
  "quac": null,
  "samsum": null,
  "sentiment140": null,
  "snli": null,
  "squad_v1_1": null,
  "squad_v2_0": null,
  "story_cloze_2016": null,
  "super_glue_cb": null,
  "super_glue_copa": null,
  "super_glue_multirc": null,
  "super_glue_record": null,
  "super_glue_rte": null,
  "super_glue_wic": null,
  "super_glue_wsc_fixed": null,
  "trec": null,
  "trivia_qa_rc": null,
  "true_case": null,
  "unified_qa_science_inst": null,
  "winogrande": null,
  "word_segment": null,
  "wmt14_translate_fr-en": null,
  "wmt16_translate_cs-en": null,
  "wmt16_translate_de-en": null,
  "wmt16_translate_fi-en": null,
  "wmt16_translate_ro-en": null,
  "wmt16_translate_ru-en": null,
  "wmt16_translate_tr-en": null,
  "yelp_polarity_reviews": null
}
```
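
As a quick sanity check, the mapping above can be validated programmatically. A minimal sketch, assuming the JSON object is saved locally as `flan2021_tasks.json` (a hypothetical filename, not a file shipped with this dataset):

```python
import json

# Hypothetical local copy of the JSON mapping above.
with open("flan2021_tasks.json") as f:
    tasks = json.load(f)

# FLAN-2021 contributes exactly 70 tasks; the values are placeholders.
assert len(tasks) == 70
assert all(value is None for value in tasks.values())
print(f"Loaded {len(tasks)} FLAN-2021 task names")
```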

The 70 tasks can be grouped by task type as follows:

```python
text_classification = [
    "ag_news_subset",
    "glue_cola",
    "glue_sst2",
    "imdb_reviews_plain_text",
    "trec",
    "yelp_polarity_reviews"
]

question_answering = [
    "ai2_arc_ARC-Challenge",
    "ai2_arc_ARC-Easy",
    "bool_q",
    "coqa",
    "cosmos_qa",
    "drop",
    "natural_questions_open",
    "openbookqa",
    "quac",
    "squad_v1_1",
    "squad_v2_0",
    "trivia_qa_rc"
]

text_generation = [
    "aeslc",
    "cnn_dailymail",
    "gem_common_gen",
    "gem_dart",
    "gem_e2e_nlg",
    "gem_web_nlg_en",
    "gem_wiki_lingua_english_en",
    "gigaword",
    "huggingface_xsum",
    "lambada",
    "multi_news",
    "newsroom",
    "samsum"
]

translation = [
    "wmt14_translate_fr-en",
    "wmt16_translate_cs-en",
    "wmt16_translate_de-en",
    "wmt16_translate_fi-en",
    "wmt16_translate_ro-en",
    "wmt16_translate_ru-en",
    "wmt16_translate_tr-en"
]

sentiment_analysis = [
    "sentiment140"
]

textual_entailment = [
    "anli_r1",
    "anli_r2",
    "anli_r3",
    "glue_mnli",
    "glue_wnli",
    "snli",
    "super_glue_cb",
    "super_glue_rte"
]

paraphrase_detection = [
    "glue_mrpc",
    "glue_qqp",
    "paws_wiki"
]

commonsense_reasoning = [
    "definite_pronoun_resolution",
    "hellaswag",
    "piqa",
    "super_glue_copa",
    "super_glue_multirc",
    "super_glue_record",
    "super_glue_wic",
    "super_glue_wsc_fixed",
    "winogrande"
]

textual_similarity = [
    "glue_stsb"
]

text_correction = [
    "fix_punct",
    "true_case"
]

text_segmentation = [
    "word_segment"
]

argument_mining = [
    "opinion_abstracts_idebate",
    "opinion_abstracts_rotten_tomatoes"
]

machine_reading_comprehension = [
    "glue_qnli"
]

language_modelling = [
    "story_cloze_2016"
]

math_problem_solving = [
    "math_dataset_algebra__linear_1d",
    "unified_qa_science_inst"
]

cross_lingual_information_retrieval = [
    "para_crawl_enes"
]
```
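
The category lists above are meant to partition the 70 FLAN-2021 tasks, with each task appearing in exactly one category. Continuing from the lists above, a minimal sketch of that consistency check (`flan2021_tasks.json` is the same hypothetical filename as before):

```python
import json
from itertools import chain

# Every category list defined above, in one place.
all_groups = [
    text_classification, question_answering, text_generation, translation,
    sentiment_analysis, textual_entailment, paraphrase_detection,
    commonsense_reasoning, textual_similarity, text_correction,
    text_segmentation, argument_mining, machine_reading_comprehension,
    language_modelling, math_problem_solving,
    cross_lingual_information_retrieval,
]

grouped = list(chain.from_iterable(all_groups))

# No task should appear in two categories, and the union should be all 70.
assert len(grouped) == len(set(grouped)) == 70

# The grouped tasks should match the keys of the JSON mapping exactly.
with open("flan2021_tasks.json") as f:
    assert set(grouped) == set(json.load(f))
```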