Support streaming
#5
by albertvillanova

Files changed:
- data/triviaqa-rc.zip +3 -0
- data/triviaqa-unfiltered.zip +3 -0
- dataset_infos.json +0 -1
- trivia_qa.py +1 -1
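This change replaces the original `tar.gz` downloads from nlp.cs.washington.edu with `zip` archives hosted directly in the repository via Git LFS, and points the loading script at them. Unlike gzip-compressed tar files, zip archives support random access to individual members, which is what makes streaming possible. The stale `dataset_infos.json`, whose recorded checksums reference the old URLs, is removed.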
data/triviaqa-rc.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f5c28a667a3cace893b0dafef8293891dd26b03144a53fc045d6db90aa0ea734
+size 2875302453
data/triviaqa-unfiltered.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1954fcd73069aa31f93d274ddc32823de077b9a84056c85a2576ee2bc45be9a5
+size 632550542
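Both archives are committed as Git LFS pointers: the file in the repo only records the object hash (`oid`) and byte size, while the actual data lives in LFS storage. Hosting the data as zip matters for streaming because a zip's central directory lets a reader open one member without fetching the whole archive. A minimal sketch of that property, assuming a hypothetical member path and repo URL (neither is stated in this PR):

```python
# Sketch only: the member name inside the archive and the resolve URL
# are assumptions for illustration, not taken from this PR.
import fsspec

url = "https://huggingface.co/datasets/trivia_qa/resolve/main/data/triviaqa-unfiltered.zip"

# fsspec's chained "zip://member::remote-url" protocol opens a single
# member of a remote zip, using HTTP range requests to seek within the
# archive instead of downloading all ~630 MB.
with fsspec.open(f"zip://unfiltered-web-dev.json::{url}", "rb") as f:
    head = f.read(1024)  # read only the first kilobyte of the member
print(head[:80])
```

A gzip-compressed tar offers no such index, so the same trick cannot work on the original `triviaqa-unfiltered.tar.gz`.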
dataset_infos.json
DELETED
@@ -1 +0,0 @@
{"rc": {"description": "TriviaqQA is a reading comprehension dataset containing over 650K\nquestion-answer-evidence triples. TriviaqQA includes 95K question-answer\npairs authored by trivia enthusiasts and independently gathered evidence\ndocuments, six per question on average, that provide high quality distant\nsupervision for answering the questions.\n", "citation": "\n@article{2017arXivtriviaqa,\n author = {{Joshi}, Mandar and {Choi}, Eunsol and {Weld},\n Daniel and {Zettlemoyer}, Luke},\n title = \"{triviaqa: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension}\",\n journal = {arXiv e-prints},\n year = 2017,\n eid = {arXiv:1705.03551},\n pages = {arXiv:1705.03551},\narchivePrefix = {arXiv},\n eprint = {1705.03551},\n}\n", "homepage": "http://nlp.cs.washington.edu/triviaqa/", "license": "", "features": {"question": {"dtype": "string", "id": null, "_type": "Value"}, "question_id": {"dtype": "string", "id": null, "_type": "Value"}, "question_source": {"dtype": "string", "id": null, "_type": "Value"}, "entity_pages": {"feature": {"doc_source": {"dtype": "string", "id": null, "_type": "Value"}, "filename": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "wiki_context": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "search_results": {"feature": {"description": {"dtype": "string", "id": null, "_type": "Value"}, "filename": {"dtype": "string", "id": null, "_type": "Value"}, "rank": {"dtype": "int32", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}, "search_context": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "answer": {"aliases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "normalized_aliases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "matched_wiki_entity_name": {"dtype": "string", "id": null, "_type": "Value"}, "normalized_matched_wiki_entity_name": {"dtype": "string", "id": null, "_type": "Value"}, "normalized_value": {"dtype": "string", "id": null, "_type": "Value"}, "type": {"dtype": "string", "id": null, "_type": "Value"}, "value": {"dtype": "string", "id": null, "_type": "Value"}}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "trivia_qa", "config_name": "rc", "version": {"version_str": "1.2.0", "description": null, "major": 1, "minor": 2, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 12749652867, "num_examples": 138384, "dataset_name": "trivia_qa"}, "validation": {"name": "validation", "num_bytes": 1662321436, "num_examples": 17944, "dataset_name": "trivia_qa"}, "test": {"name": "test", "num_bytes": 1577710751, "num_examples": 17210, "dataset_name": "trivia_qa"}}, "download_checksums": {"http://nlp.cs.washington.edu/triviaqa/data/triviaqa-rc.tar.gz": {"num_bytes": 2665779500, "checksum": "ef94fac6db0541e5bb5b27020d067a8b13b1c1ffc52717e836832e02aaed87b9"}}, "download_size": 2665779500, "post_processing_size": null, "dataset_size": 15989685054, "size_in_bytes": 18655464554}, "rc.nocontext": {"description": "TriviaqQA is a reading comprehension dataset containing over 650K\nquestion-answer-evidence triples. 
TriviaqQA includes 95K question-answer\npairs authored by trivia enthusiasts and independently gathered evidence\ndocuments, six per question on average, that provide high quality distant\nsupervision for answering the questions.\n", "citation": "\n@article{2017arXivtriviaqa,\n author = {{Joshi}, Mandar and {Choi}, Eunsol and {Weld},\n Daniel and {Zettlemoyer}, Luke},\n title = \"{triviaqa: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension}\",\n journal = {arXiv e-prints},\n year = 2017,\n eid = {arXiv:1705.03551},\n pages = {arXiv:1705.03551},\narchivePrefix = {arXiv},\n eprint = {1705.03551},\n}\n", "homepage": "http://nlp.cs.washington.edu/triviaqa/", "license": "", "features": {"question": {"dtype": "string", "id": null, "_type": "Value"}, "question_id": {"dtype": "string", "id": null, "_type": "Value"}, "question_source": {"dtype": "string", "id": null, "_type": "Value"}, "entity_pages": {"feature": {"doc_source": {"dtype": "string", "id": null, "_type": "Value"}, "filename": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "wiki_context": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "search_results": {"feature": {"description": {"dtype": "string", "id": null, "_type": "Value"}, "filename": {"dtype": "string", "id": null, "_type": "Value"}, "rank": {"dtype": "int32", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}, "search_context": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "answer": {"aliases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "normalized_aliases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "matched_wiki_entity_name": {"dtype": "string", "id": null, "_type": "Value"}, "normalized_matched_wiki_entity_name": {"dtype": "string", "id": null, "_type": "Value"}, "normalized_value": {"dtype": "string", "id": null, "_type": "Value"}, "type": {"dtype": "string", "id": null, "_type": "Value"}, "value": {"dtype": "string", "id": null, "_type": "Value"}}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "trivia_qa", "config_name": "rc.nocontext", "version": {"version_str": "1.2.0", "description": null, "major": 1, "minor": 2, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 106884466, "num_examples": 138384, "dataset_name": "trivia_qa"}, "validation": {"name": "validation", "num_bytes": 14060078, "num_examples": 17944, "dataset_name": "trivia_qa"}, "test": {"name": "test", "num_bytes": 3668151, "num_examples": 17210, "dataset_name": "trivia_qa"}}, "download_checksums": {"http://nlp.cs.washington.edu/triviaqa/data/triviaqa-rc.tar.gz": {"num_bytes": 2665779500, "checksum": "ef94fac6db0541e5bb5b27020d067a8b13b1c1ffc52717e836832e02aaed87b9"}}, "download_size": 2665779500, "post_processing_size": null, "dataset_size": 124612695, "size_in_bytes": 2790392195}, "unfiltered": {"description": "TriviaqQA is a reading comprehension dataset containing over 650K\nquestion-answer-evidence triples. 
TriviaqQA includes 95K question-answer\npairs authored by trivia enthusiasts and independently gathered evidence\ndocuments, six per question on average, that provide high quality distant\nsupervision for answering the questions.\n", "citation": "\n@article{2017arXivtriviaqa,\n author = {{Joshi}, Mandar and {Choi}, Eunsol and {Weld},\n Daniel and {Zettlemoyer}, Luke},\n title = \"{triviaqa: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension}\",\n journal = {arXiv e-prints},\n year = 2017,\n eid = {arXiv:1705.03551},\n pages = {arXiv:1705.03551},\narchivePrefix = {arXiv},\n eprint = {1705.03551},\n}\n", "homepage": "http://nlp.cs.washington.edu/triviaqa/", "license": "", "features": {"question": {"dtype": "string", "id": null, "_type": "Value"}, "question_id": {"dtype": "string", "id": null, "_type": "Value"}, "question_source": {"dtype": "string", "id": null, "_type": "Value"}, "entity_pages": {"feature": {"doc_source": {"dtype": "string", "id": null, "_type": "Value"}, "filename": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "wiki_context": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "search_results": {"feature": {"description": {"dtype": "string", "id": null, "_type": "Value"}, "filename": {"dtype": "string", "id": null, "_type": "Value"}, "rank": {"dtype": "int32", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}, "search_context": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "answer": {"aliases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "normalized_aliases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "matched_wiki_entity_name": {"dtype": "string", "id": null, "_type": "Value"}, "normalized_matched_wiki_entity_name": {"dtype": "string", "id": null, "_type": "Value"}, "normalized_value": {"dtype": "string", "id": null, "_type": "Value"}, "type": {"dtype": "string", "id": null, "_type": "Value"}, "value": {"dtype": "string", "id": null, "_type": "Value"}}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "trivia_qa", "config_name": "unfiltered", "version": {"version_str": "1.2.0", "description": null, "major": 1, "minor": 2, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 25019623548, "num_examples": 87622, "dataset_name": "trivia_qa"}, "validation": {"name": "validation", "num_bytes": 3038803991, "num_examples": 11313, "dataset_name": "trivia_qa"}, "test": {"name": "test", "num_bytes": 2906455559, "num_examples": 10832, "dataset_name": "trivia_qa"}}, "download_checksums": {"http://nlp.cs.washington.edu/triviaqa/data/triviaqa-rc.tar.gz": {"num_bytes": 2665779500, "checksum": "ef94fac6db0541e5bb5b27020d067a8b13b1c1ffc52717e836832e02aaed87b9"}, "http://nlp.cs.washington.edu/triviaqa/data/triviaqa-unfiltered.tar.gz": {"num_bytes": 632549060, "checksum": "7f0c94207fd0a1eb7bba3d52cdfe0735c98de850b9e5aa4e75f2776ef9a531d0"}}, "download_size": 3298328560, "post_processing_size": null, "dataset_size": 30964883098, "size_in_bytes": 34263211658}, "unfiltered.nocontext": {"description": "TriviaqQA is a reading comprehension dataset containing over 650K\nquestion-answer-evidence triples. 
TriviaqQA includes 95K question-answer\npairs authored by trivia enthusiasts and independently gathered evidence\ndocuments, six per question on average, that provide high quality distant\nsupervision for answering the questions.\n", "citation": "\n@article{2017arXivtriviaqa,\n author = {{Joshi}, Mandar and {Choi}, Eunsol and {Weld},\n Daniel and {Zettlemoyer}, Luke},\n title = \"{triviaqa: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension}\",\n journal = {arXiv e-prints},\n year = 2017,\n eid = {arXiv:1705.03551},\n pages = {arXiv:1705.03551},\narchivePrefix = {arXiv},\n eprint = {1705.03551},\n}\n", "homepage": "http://nlp.cs.washington.edu/triviaqa/", "license": "", "features": {"question": {"dtype": "string", "id": null, "_type": "Value"}, "question_id": {"dtype": "string", "id": null, "_type": "Value"}, "question_source": {"dtype": "string", "id": null, "_type": "Value"}, "entity_pages": {"feature": {"doc_source": {"dtype": "string", "id": null, "_type": "Value"}, "filename": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "wiki_context": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "search_results": {"feature": {"description": {"dtype": "string", "id": null, "_type": "Value"}, "filename": {"dtype": "string", "id": null, "_type": "Value"}, "rank": {"dtype": "int32", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}, "search_context": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "answer": {"aliases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "normalized_aliases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "matched_wiki_entity_name": {"dtype": "string", "id": null, "_type": "Value"}, "normalized_matched_wiki_entity_name": {"dtype": "string", "id": null, "_type": "Value"}, "normalized_value": {"dtype": "string", "id": null, "_type": "Value"}, "type": {"dtype": "string", "id": null, "_type": "Value"}, "value": {"dtype": "string", "id": null, "_type": "Value"}}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "trivia_qa", "config_name": "unfiltered.nocontext", "version": {"version_str": "1.2.0", "description": null, "major": 1, "minor": 2, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 63301342, "num_examples": 87622, "dataset_name": "trivia_qa"}, "validation": {"name": "validation", "num_bytes": 8297118, "num_examples": 11313, "dataset_name": "trivia_qa"}, "test": {"name": "test", "num_bytes": 2320908, "num_examples": 10832, "dataset_name": "trivia_qa"}}, "download_checksums": {"http://nlp.cs.washington.edu/triviaqa/data/triviaqa-unfiltered.tar.gz": {"num_bytes": 632549060, "checksum": "7f0c94207fd0a1eb7bba3d52cdfe0735c98de850b9e5aa4e75f2776ef9a531d0"}}, "download_size": 632549060, "post_processing_size": null, "dataset_size": 73919368, "size_in_bytes": 706468428}, "rc.web": {"description": "TriviaqQA is a reading comprehension dataset containing over 650K\nquestion-answer-evidence triples. 
TriviaqQA includes 95K question-answer\npairs authored by trivia enthusiasts and independently gathered evidence\ndocuments, six per question on average, that provide high quality distant\nsupervision for answering the questions.\n", "citation": "\n@article{2017arXivtriviaqa,\n author = {{Joshi}, Mandar and {Choi}, Eunsol and {Weld},\n Daniel and {Zettlemoyer}, Luke},\n title = \"{triviaqa: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension}\",\n journal = {arXiv e-prints},\n year = 2017,\n eid = {arXiv:1705.03551},\n pages = {arXiv:1705.03551},\narchivePrefix = {arXiv},\n eprint = {1705.03551},\n}\n", "homepage": "http://nlp.cs.washington.edu/triviaqa/", "license": "", "features": {"question": {"dtype": "string", "id": null, "_type": "Value"}, "question_id": {"dtype": "string", "id": null, "_type": "Value"}, "question_source": {"dtype": "string", "id": null, "_type": "Value"}, "entity_pages": {"feature": {"doc_source": {"dtype": "string", "id": null, "_type": "Value"}, "filename": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "wiki_context": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "search_results": {"feature": {"description": {"dtype": "string", "id": null, "_type": "Value"}, "filename": {"dtype": "string", "id": null, "_type": "Value"}, "rank": {"dtype": "int32", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}, "search_context": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "answer": {"aliases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "normalized_aliases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "matched_wiki_entity_name": {"dtype": "string", "id": null, "_type": "Value"}, "normalized_matched_wiki_entity_name": {"dtype": "string", "id": null, "_type": "Value"}, "normalized_value": {"dtype": "string", "id": null, "_type": "Value"}, "type": {"dtype": "string", "id": null, "_type": "Value"}, "value": {"dtype": "string", "id": null, "_type": "Value"}}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "trivia_qa", "config_name": "rc.web", "version": {"version_str": "1.2.0", "description": null, "major": 1, "minor": 2, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 9408852131, "num_examples": 76496, "dataset_name": "trivia_qa"}, "validation": {"name": "validation", "num_bytes": 1232155262, "num_examples": 9951, "dataset_name": "trivia_qa"}, "test": {"name": "test", "num_bytes": 1171664123, "num_examples": 9509, "dataset_name": "trivia_qa"}}, "download_checksums": {"http://nlp.cs.washington.edu/triviaqa/data/triviaqa-rc.tar.gz": {"num_bytes": 2665779500, "checksum": "ef94fac6db0541e5bb5b27020d067a8b13b1c1ffc52717e836832e02aaed87b9"}}, "download_size": 2665779500, "post_processing_size": null, "dataset_size": 11812671516, "size_in_bytes": 14478451016}, "rc.web.nocontext": {"description": "TriviaqQA is a reading comprehension dataset containing over 650K\nquestion-answer-evidence triples. 
TriviaqQA includes 95K question-answer\npairs authored by trivia enthusiasts and independently gathered evidence\ndocuments, six per question on average, that provide high quality distant\nsupervision for answering the questions.\n", "citation": "\n@article{2017arXivtriviaqa,\n author = {{Joshi}, Mandar and {Choi}, Eunsol and {Weld},\n Daniel and {Zettlemoyer}, Luke},\n title = \"{triviaqa: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension}\",\n journal = {arXiv e-prints},\n year = 2017,\n eid = {arXiv:1705.03551},\n pages = {arXiv:1705.03551},\narchivePrefix = {arXiv},\n eprint = {1705.03551},\n}\n", "homepage": "http://nlp.cs.washington.edu/triviaqa/", "license": "", "features": {"question": {"dtype": "string", "id": null, "_type": "Value"}, "question_id": {"dtype": "string", "id": null, "_type": "Value"}, "question_source": {"dtype": "string", "id": null, "_type": "Value"}, "entity_pages": {"feature": {"doc_source": {"dtype": "string", "id": null, "_type": "Value"}, "filename": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "wiki_context": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "search_results": {"feature": {"description": {"dtype": "string", "id": null, "_type": "Value"}, "filename": {"dtype": "string", "id": null, "_type": "Value"}, "rank": {"dtype": "int32", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}, "search_context": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "answer": {"aliases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "normalized_aliases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "matched_wiki_entity_name": {"dtype": "string", "id": null, "_type": "Value"}, "normalized_matched_wiki_entity_name": {"dtype": "string", "id": null, "_type": "Value"}, "normalized_value": {"dtype": "string", "id": null, "_type": "Value"}, "type": {"dtype": "string", "id": null, "_type": "Value"}, "value": {"dtype": "string", "id": null, "_type": "Value"}}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "trivia_qa", "config_name": "rc.web.nocontext", "version": {"version_str": "1.2.0", "description": null, "major": 1, "minor": 2, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 58524077, "num_examples": 76496, "dataset_name": "trivia_qa"}, "validation": {"name": "validation", "num_bytes": 7694681, "num_examples": 9951, "dataset_name": "trivia_qa"}, "test": {"name": "test", "num_bytes": 2024871, "num_examples": 9509, "dataset_name": "trivia_qa"}}, "download_checksums": {"http://nlp.cs.washington.edu/triviaqa/data/triviaqa-rc.tar.gz": {"num_bytes": 2665779500, "checksum": "ef94fac6db0541e5bb5b27020d067a8b13b1c1ffc52717e836832e02aaed87b9"}}, "download_size": 2665779500, "post_processing_size": null, "dataset_size": 68243629, "size_in_bytes": 2734023129}, "unfiltered.web": {"description": "TriviaqQA is a reading comprehension dataset containing over 650K\nquestion-answer-evidence triples. 
TriviaqQA includes 95K question-answer\npairs authored by trivia enthusiasts and independently gathered evidence\ndocuments, six per question on average, that provide high quality distant\nsupervision for answering the questions.\n", "citation": "\n@article{2017arXivtriviaqa,\n author = {{Joshi}, Mandar and {Choi}, Eunsol and {Weld},\n Daniel and {Zettlemoyer}, Luke},\n title = \"{triviaqa: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension}\",\n journal = {arXiv e-prints},\n year = 2017,\n eid = {arXiv:1705.03551},\n pages = {arXiv:1705.03551},\narchivePrefix = {arXiv},\n eprint = {1705.03551},\n}\n", "homepage": "http://nlp.cs.washington.edu/triviaqa/", "license": "", "features": {"question": {"dtype": "string", "id": null, "_type": "Value"}, "question_id": {"dtype": "string", "id": null, "_type": "Value"}, "question_source": {"dtype": "string", "id": null, "_type": "Value"}, "entity_pages": {"feature": {"doc_source": {"dtype": "string", "id": null, "_type": "Value"}, "filename": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "wiki_context": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "search_results": {"feature": {"description": {"dtype": "string", "id": null, "_type": "Value"}, "filename": {"dtype": "string", "id": null, "_type": "Value"}, "rank": {"dtype": "int32", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}, "search_context": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "answer": {"aliases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "normalized_aliases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "matched_wiki_entity_name": {"dtype": "string", "id": null, "_type": "Value"}, "normalized_matched_wiki_entity_name": {"dtype": "string", "id": null, "_type": "Value"}, "normalized_value": {"dtype": "string", "id": null, "_type": "Value"}, "type": {"dtype": "string", "id": null, "_type": "Value"}, "value": {"dtype": "string", "id": null, "_type": "Value"}}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "trivia_qa", "config_name": "unfiltered.web", "version": {"version_str": "1.2.0", "description": null, "major": 1, "minor": 2, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 0, "num_examples": 0, "dataset_name": "trivia_qa"}, "validation": {"name": "validation", "num_bytes": 0, "num_examples": 0, "dataset_name": "trivia_qa"}, "test": {"name": "test", "num_bytes": 0, "num_examples": 0, "dataset_name": "trivia_qa"}}, "download_checksums": {"http://nlp.cs.washington.edu/triviaqa/data/triviaqa-rc.tar.gz": {"num_bytes": 2665779500, "checksum": "ef94fac6db0541e5bb5b27020d067a8b13b1c1ffc52717e836832e02aaed87b9"}, "http://nlp.cs.washington.edu/triviaqa/data/triviaqa-unfiltered.tar.gz": {"num_bytes": 632549060, "checksum": "7f0c94207fd0a1eb7bba3d52cdfe0735c98de850b9e5aa4e75f2776ef9a531d0"}}, "download_size": 3298328560, "post_processing_size": null, "dataset_size": 0, "size_in_bytes": 3298328560}, "unfiltered.web.nocontext": {"description": "TriviaqQA is a reading comprehension dataset containing over 650K\nquestion-answer-evidence triples. 
TriviaqQA includes 95K question-answer\npairs authored by trivia enthusiasts and independently gathered evidence\ndocuments, six per question on average, that provide high quality distant\nsupervision for answering the questions.\n", "citation": "\n@article{2017arXivtriviaqa,\n author = {{Joshi}, Mandar and {Choi}, Eunsol and {Weld},\n Daniel and {Zettlemoyer}, Luke},\n title = \"{triviaqa: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension}\",\n journal = {arXiv e-prints},\n year = 2017,\n eid = {arXiv:1705.03551},\n pages = {arXiv:1705.03551},\narchivePrefix = {arXiv},\n eprint = {1705.03551},\n}\n", "homepage": "http://nlp.cs.washington.edu/triviaqa/", "license": "", "features": {"question": {"dtype": "string", "id": null, "_type": "Value"}, "question_id": {"dtype": "string", "id": null, "_type": "Value"}, "question_source": {"dtype": "string", "id": null, "_type": "Value"}, "entity_pages": {"feature": {"doc_source": {"dtype": "string", "id": null, "_type": "Value"}, "filename": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "wiki_context": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "search_results": {"feature": {"description": {"dtype": "string", "id": null, "_type": "Value"}, "filename": {"dtype": "string", "id": null, "_type": "Value"}, "rank": {"dtype": "int32", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}, "search_context": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "answer": {"aliases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "normalized_aliases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "matched_wiki_entity_name": {"dtype": "string", "id": null, "_type": "Value"}, "normalized_matched_wiki_entity_name": {"dtype": "string", "id": null, "_type": "Value"}, "normalized_value": {"dtype": "string", "id": null, "_type": "Value"}, "type": {"dtype": "string", "id": null, "_type": "Value"}, "value": {"dtype": "string", "id": null, "_type": "Value"}}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "trivia_qa", "config_name": "unfiltered.web.nocontext", "version": {"version_str": "1.2.0", "description": null, "major": 1, "minor": 2, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 0, "num_examples": 0, "dataset_name": "trivia_qa"}, "validation": {"name": "validation", "num_bytes": 0, "num_examples": 0, "dataset_name": "trivia_qa"}, "test": {"name": "test", "num_bytes": 0, "num_examples": 0, "dataset_name": "trivia_qa"}}, "download_checksums": {"http://nlp.cs.washington.edu/triviaqa/data/triviaqa-unfiltered.tar.gz": {"num_bytes": 632549060, "checksum": "7f0c94207fd0a1eb7bba3d52cdfe0735c98de850b9e5aa4e75f2776ef9a531d0"}}, "download_size": 632549060, "post_processing_size": null, "dataset_size": 0, "size_in_bytes": 632549060}, "rc.wikipedia": {"description": "TriviaqQA is a reading comprehension dataset containing over 650K\nquestion-answer-evidence triples. 
TriviaqQA includes 95K question-answer\npairs authored by trivia enthusiasts and independently gathered evidence\ndocuments, six per question on average, that provide high quality distant\nsupervision for answering the questions.\n", "citation": "\n@article{2017arXivtriviaqa,\n author = {{Joshi}, Mandar and {Choi}, Eunsol and {Weld},\n Daniel and {Zettlemoyer}, Luke},\n title = \"{triviaqa: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension}\",\n journal = {arXiv e-prints},\n year = 2017,\n eid = {arXiv:1705.03551},\n pages = {arXiv:1705.03551},\narchivePrefix = {arXiv},\n eprint = {1705.03551},\n}\n", "homepage": "http://nlp.cs.washington.edu/triviaqa/", "license": "", "features": {"question": {"dtype": "string", "id": null, "_type": "Value"}, "question_id": {"dtype": "string", "id": null, "_type": "Value"}, "question_source": {"dtype": "string", "id": null, "_type": "Value"}, "entity_pages": {"feature": {"doc_source": {"dtype": "string", "id": null, "_type": "Value"}, "filename": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "wiki_context": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "search_results": {"feature": {"description": {"dtype": "string", "id": null, "_type": "Value"}, "filename": {"dtype": "string", "id": null, "_type": "Value"}, "rank": {"dtype": "int32", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}, "search_context": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "answer": {"aliases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "normalized_aliases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "matched_wiki_entity_name": {"dtype": "string", "id": null, "_type": "Value"}, "normalized_matched_wiki_entity_name": {"dtype": "string", "id": null, "_type": "Value"}, "normalized_value": {"dtype": "string", "id": null, "_type": "Value"}, "type": {"dtype": "string", "id": null, "_type": "Value"}, "value": {"dtype": "string", "id": null, "_type": "Value"}}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "trivia_qa", "config_name": "rc.wikipedia", "version": {"version_str": "1.2.0", "description": null, "major": 1, "minor": 2, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3340800860, "num_examples": 61888, "dataset_name": "trivia_qa"}, "validation": {"name": "validation", "num_bytes": 430166174, "num_examples": 7993, "dataset_name": "trivia_qa"}, "test": {"name": "test", "num_bytes": 406046628, "num_examples": 7701, "dataset_name": "trivia_qa"}}, "download_checksums": {"http://nlp.cs.washington.edu/triviaqa/data/triviaqa-rc.tar.gz": {"num_bytes": 2665779500, "checksum": "ef94fac6db0541e5bb5b27020d067a8b13b1c1ffc52717e836832e02aaed87b9"}}, "download_size": 2665779500, "post_processing_size": null, "dataset_size": 4177013662, "size_in_bytes": 6842793162}, "rc.wikipedia.nocontext": {"description": "TriviaqQA is a reading comprehension dataset containing over 650K\nquestion-answer-evidence triples. 
TriviaqQA includes 95K question-answer\npairs authored by trivia enthusiasts and independently gathered evidence\ndocuments, six per question on average, that provide high quality distant\nsupervision for answering the questions.\n", "citation": "\n@article{2017arXivtriviaqa,\n author = {{Joshi}, Mandar and {Choi}, Eunsol and {Weld},\n Daniel and {Zettlemoyer}, Luke},\n title = \"{triviaqa: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension}\",\n journal = {arXiv e-prints},\n year = 2017,\n eid = {arXiv:1705.03551},\n pages = {arXiv:1705.03551},\narchivePrefix = {arXiv},\n eprint = {1705.03551},\n}\n", "homepage": "http://nlp.cs.washington.edu/triviaqa/", "license": "", "features": {"question": {"dtype": "string", "id": null, "_type": "Value"}, "question_id": {"dtype": "string", "id": null, "_type": "Value"}, "question_source": {"dtype": "string", "id": null, "_type": "Value"}, "entity_pages": {"feature": {"doc_source": {"dtype": "string", "id": null, "_type": "Value"}, "filename": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "wiki_context": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "search_results": {"feature": {"description": {"dtype": "string", "id": null, "_type": "Value"}, "filename": {"dtype": "string", "id": null, "_type": "Value"}, "rank": {"dtype": "int32", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}, "search_context": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "answer": {"aliases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "normalized_aliases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "matched_wiki_entity_name": {"dtype": "string", "id": null, "_type": "Value"}, "normalized_matched_wiki_entity_name": {"dtype": "string", "id": null, "_type": "Value"}, "normalized_value": {"dtype": "string", "id": null, "_type": "Value"}, "type": {"dtype": "string", "id": null, "_type": "Value"}, "value": {"dtype": "string", "id": null, "_type": "Value"}}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "trivia_qa", "config_name": "rc.wikipedia.nocontext", "version": {"version_str": "1.2.0", "description": null, "major": 1, "minor": 2, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 48360513, "num_examples": 61888, "dataset_name": "trivia_qa"}, "validation": {"name": "validation", "num_bytes": 6365397, "num_examples": 7993, "dataset_name": "trivia_qa"}, "test": {"name": "test", "num_bytes": 1643280, "num_examples": 7701, "dataset_name": "trivia_qa"}}, "download_checksums": {"http://nlp.cs.washington.edu/triviaqa/data/triviaqa-rc.tar.gz": {"num_bytes": 2665779500, "checksum": "ef94fac6db0541e5bb5b27020d067a8b13b1c1ffc52717e836832e02aaed87b9"}}, "download_size": 2665779500, "post_processing_size": null, "dataset_size": 56369190, "size_in_bytes": 2722148690}, "unfiltered.wikipedia": {"description": "TriviaqQA is a reading comprehension dataset containing over 650K\nquestion-answer-evidence triples. 
TriviaqQA includes 95K question-answer\npairs authored by trivia enthusiasts and independently gathered evidence\ndocuments, six per question on average, that provide high quality distant\nsupervision for answering the questions.\n", "citation": "\n@article{2017arXivtriviaqa,\n author = {{Joshi}, Mandar and {Choi}, Eunsol and {Weld},\n Daniel and {Zettlemoyer}, Luke},\n title = \"{triviaqa: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension}\",\n journal = {arXiv e-prints},\n year = 2017,\n eid = {arXiv:1705.03551},\n pages = {arXiv:1705.03551},\narchivePrefix = {arXiv},\n eprint = {1705.03551},\n}\n", "homepage": "http://nlp.cs.washington.edu/triviaqa/", "license": "", "features": {"question": {"dtype": "string", "id": null, "_type": "Value"}, "question_id": {"dtype": "string", "id": null, "_type": "Value"}, "question_source": {"dtype": "string", "id": null, "_type": "Value"}, "entity_pages": {"feature": {"doc_source": {"dtype": "string", "id": null, "_type": "Value"}, "filename": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "wiki_context": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "search_results": {"feature": {"description": {"dtype": "string", "id": null, "_type": "Value"}, "filename": {"dtype": "string", "id": null, "_type": "Value"}, "rank": {"dtype": "int32", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}, "search_context": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "answer": {"aliases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "normalized_aliases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "matched_wiki_entity_name": {"dtype": "string", "id": null, "_type": "Value"}, "normalized_matched_wiki_entity_name": {"dtype": "string", "id": null, "_type": "Value"}, "normalized_value": {"dtype": "string", "id": null, "_type": "Value"}, "type": {"dtype": "string", "id": null, "_type": "Value"}, "value": {"dtype": "string", "id": null, "_type": "Value"}}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "trivia_qa", "config_name": "unfiltered.wikipedia", "version": {"version_str": "1.2.0", "description": null, "major": 1, "minor": 2, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 0, "num_examples": 0, "dataset_name": "trivia_qa"}, "validation": {"name": "validation", "num_bytes": 0, "num_examples": 0, "dataset_name": "trivia_qa"}, "test": {"name": "test", "num_bytes": 0, "num_examples": 0, "dataset_name": "trivia_qa"}}, "download_checksums": {"http://nlp.cs.washington.edu/triviaqa/data/triviaqa-rc.tar.gz": {"num_bytes": 2665779500, "checksum": "ef94fac6db0541e5bb5b27020d067a8b13b1c1ffc52717e836832e02aaed87b9"}, "http://nlp.cs.washington.edu/triviaqa/data/triviaqa-unfiltered.tar.gz": {"num_bytes": 632549060, "checksum": "7f0c94207fd0a1eb7bba3d52cdfe0735c98de850b9e5aa4e75f2776ef9a531d0"}}, "download_size": 3298328560, "post_processing_size": null, "dataset_size": 0, "size_in_bytes": 3298328560}, "unfiltered.wikipedia.nocontext": {"description": "TriviaqQA is a reading comprehension dataset containing over 650K\nquestion-answer-evidence triples. 
TriviaqQA includes 95K question-answer\npairs authored by trivia enthusiasts and independently gathered evidence\ndocuments, six per question on average, that provide high quality distant\nsupervision for answering the questions.\n", "citation": "\n@article{2017arXivtriviaqa,\n author = {{Joshi}, Mandar and {Choi}, Eunsol and {Weld},\n Daniel and {Zettlemoyer}, Luke},\n title = \"{triviaqa: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension}\",\n journal = {arXiv e-prints},\n year = 2017,\n eid = {arXiv:1705.03551},\n pages = {arXiv:1705.03551},\narchivePrefix = {arXiv},\n eprint = {1705.03551},\n}\n", "homepage": "http://nlp.cs.washington.edu/triviaqa/", "license": "", "features": {"question": {"dtype": "string", "id": null, "_type": "Value"}, "question_id": {"dtype": "string", "id": null, "_type": "Value"}, "question_source": {"dtype": "string", "id": null, "_type": "Value"}, "entity_pages": {"feature": {"doc_source": {"dtype": "string", "id": null, "_type": "Value"}, "filename": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "wiki_context": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "search_results": {"feature": {"description": {"dtype": "string", "id": null, "_type": "Value"}, "filename": {"dtype": "string", "id": null, "_type": "Value"}, "rank": {"dtype": "int32", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}, "search_context": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "answer": {"aliases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "normalized_aliases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "matched_wiki_entity_name": {"dtype": "string", "id": null, "_type": "Value"}, "normalized_matched_wiki_entity_name": {"dtype": "string", "id": null, "_type": "Value"}, "normalized_value": {"dtype": "string", "id": null, "_type": "Value"}, "type": {"dtype": "string", "id": null, "_type": "Value"}, "value": {"dtype": "string", "id": null, "_type": "Value"}}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "trivia_qa", "config_name": "unfiltered.wikipedia.nocontext", "version": {"version_str": "1.2.0", "description": null, "major": 1, "minor": 2, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 0, "num_examples": 0, "dataset_name": "trivia_qa"}, "validation": {"name": "validation", "num_bytes": 0, "num_examples": 0, "dataset_name": "trivia_qa"}, "test": {"name": "test", "num_bytes": 0, "num_examples": 0, "dataset_name": "trivia_qa"}}, "download_checksums": {"http://nlp.cs.washington.edu/triviaqa/data/triviaqa-unfiltered.tar.gz": {"num_bytes": 632549060, "checksum": "7f0c94207fd0a1eb7bba3d52cdfe0735c98de850b9e5aa4e75f2776ef9a531d0"}}, "download_size": 632549060, "post_processing_size": null, "dataset_size": 0, "size_in_bytes": 632549060}}
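Deleting `dataset_infos.json` is consistent with the URL change: its `download_checksums` and `download_size` fields record the old `nlp.cs.washington.edu` tar.gz downloads and would fail verification against the in-repo zips. If exact sizes and checksums are wanted again, they can presumably be regenerated with the library's own tooling (at the time, something like `datasets-cli test trivia_qa --save_infos --all_configs`).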
trivia_qa.py
CHANGED
@@ -40,7 +40,7 @@ archivePrefix = {arXiv},
 eprint = {1705.03551},
 }
 """
-_DOWNLOAD_URL_TMPL = "http://nlp.cs.washington.edu/triviaqa/data/triviaqa-{}.tar.gz"
+_DOWNLOAD_URL_TMPL = "data/triviaqa-{}.zip"
 _WEB_EVIDENCE_DIR = "evidence/web"
 _WIKI_EVIDENCE_DIR = "evidence/wikipedia"
 
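With `_DOWNLOAD_URL_TMPL` now a relative path, the download manager resolves it against the repository itself rather than the external host, and the script can serve examples lazily. A minimal usage sketch, assuming the canonical `trivia_qa` dataset name:

```python
# Stream TriviaQA without materializing the multi-GB archives on disk.
from datasets import load_dataset

ds = load_dataset("trivia_qa", "rc", split="train", streaming=True)

# IterableDataset.take yields the first n examples as they stream in.
for example in ds.take(3):
    print(example["question_id"], example["question"])
```

The `question` and `question_id` fields used above match the features declared in the (now removed) `dataset_infos.json`.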