beir_trec-covid / beir_trec-covid.py
"""
""" # TODO
try:
import ir_datasets
except ImportError as e:
raise ImportError('ir-datasets package missing; `pip install ir-datasets`')
import datasets
IRDS_ID = 'beir/trec-covid'
IRDS_ENTITY_TYPES = {
    'docs': {'doc_id': 'string', 'text': 'string', 'title': 'string', 'url': 'string', 'pubmed_id': 'string'},
    'queries': {'query_id': 'string', 'text': 'string', 'query': 'string', 'narrative': 'string'},
    'qrels': {'query_id': 'string', 'doc_id': 'string', 'relevance': 'int64', 'iteration': 'string'},
}
_CITATION = '''@article{Wang2020Cord19,
  title={CORD-19: The Covid-19 Open Research Dataset},
  author={Lucy Lu Wang and Kyle Lo and Yoganand Chandrasekhar and Russell Reas and Jiangjiang Yang and Darrin Eide and K. Funk and Rodney Michael Kinney and Ziyang Liu and W. Merrill and P. Mooney and D. Murdick and Devvret Rishi and Jerry Sheehan and Zhihong Shen and B. Stilson and A. Wade and K. Wang and Christopher Wilhelm and Boya Xie and D. Raymond and Daniel S. Weld and Oren Etzioni and Sebastian Kohlmeier},
  journal={ArXiv},
  year={2020}
}
@article{Voorhees2020TrecCovid,
  title={TREC-COVID: Constructing a Pandemic Information Retrieval Test Collection},
  author={E. Voorhees and Tasmeer Alam and Steven Bedrick and Dina Demner-Fushman and W. Hersh and Kyle Lo and Kirk Roberts and I. Soboroff and Lucy Lu Wang},
  journal={ArXiv},
  year={2020},
  volume={abs/2005.04474}
}
@article{Thakur2021Beir,
  title = "BEIR: A Heterogenous Benchmark for Zero-shot Evaluation of Information Retrieval Models",
  author = "Thakur, Nandan and Reimers, Nils and Rücklé, Andreas and Srivastava, Abhishek and Gurevych, Iryna",
  journal = "arXiv preprint arXiv:2104.08663",
  month = "4",
  year = "2021",
  url = "https://arxiv.org/abs/2104.08663",
}'''
_DESCRIPTION = "" # TODO
class beir_trec_covid(datasets.GeneratorBasedBuilder):
    # One builder config per entity type: 'docs', 'queries', and 'qrels'.
    BUILDER_CONFIGS = [datasets.BuilderConfig(name=e) for e in IRDS_ENTITY_TYPES]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({k: datasets.Value(v) for k, v in IRDS_ENTITY_TYPES[self.config.name].items()}),
            homepage="https://ir-datasets.com/beir#beir/trec-covid",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # A single split named after the config; ir_datasets handles downloading the data.
        return [datasets.SplitGenerator(name=self.config.name)]
    def _generate_examples(self):
        dataset = ir_datasets.load(IRDS_ID)
        # Stream records (docs, queries, or qrels) from ir_datasets and yield them as dicts,
        # keyed by their natural id where one exists.
        for i, item in enumerate(getattr(dataset, self.config.name)):
            key = i
            if self.config.name == 'docs':
                key = item.doc_id
            elif self.config.name == 'queries':
                key = item.query_id
            yield key, item._asdict()
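    # For reference, a sketch of inspecting the same stream directly with ir_datasets
    # (field names match IRDS_ENTITY_TYPES above; values are illustrative):
    #
    #     import ir_datasets
    #     ds = ir_datasets.load('beir/trec-covid')
    #     query = next(iter(ds.queries_iter()))
    #     query._asdict()  # -> {'query_id': ..., 'text': ..., 'query': ..., 'narrative': ...}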
    def as_dataset(self, split=None, *args, **kwargs):
        # Always return the split corresponding to this config, to avoid wrapping the
        # result in a redundant DatasetDict layer.
        split = self.config.name
        return super().as_dataset(split, *args, **kwargs)
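# Usage sketch, assuming the script is hosted on the Hub under a repo id like
# 'irds/beir_trec-covid' (substitute the actual repo id or a local path; recent
# `datasets` releases may additionally require trust_remote_code=True):
#
#     import datasets
#     docs = datasets.load_dataset('irds/beir_trec-covid', 'docs')
#     queries = datasets.load_dataset('irds/beir_trec-covid', 'queries')
#     qrels = datasets.load_dataset('irds/beir_trec-covid', 'qrels')
#
# Because as_dataset() above pins the split to the config name, each call returns a
# single Dataset rather than a DatasetDict.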