"""Hugging Face `datasets` loading script for the LoCoV1 query sets.

Each builder config corresponds to one LoCoV1 sub-dataset and loads its
queries from a `<name>_test.jsonl` file hosted on the Hugging Face Hub.
"""

import csv
import json
import os
import zipfile

import datasets
import pandas as pd

_CITATION = """ """

_DESCRIPTION = """\
LoCoV1 Dataset - Queries
"""

_HOMEPAGE = "https://huggingface.co/datasets/hazyresearch/LoCoV1-Queries"

_LICENSE = ""

# Per-config base URL; `_split_generators` appends "_test.jsonl" to form the file URL.
_URL = {
    "summ_screen_fd": "https://huggingface.co/datasets/hazyresearch/LoCoV1-Queries/raw/main/queries/summ_screen_fd",
    "gov_report": "https://huggingface.co/datasets/hazyresearch/LoCoV1-Queries/raw/main/queries/gov_report",
    "qmsum": "https://huggingface.co/datasets/hazyresearch/LoCoV1-Queries/raw/main/queries/qmsum",
    "qasper_title": "https://huggingface.co/datasets/hazyresearch/LoCoV1-Queries/raw/main/queries/qasper_title",
    "qasper_abstract": "https://huggingface.co/datasets/hazyresearch/LoCoV1-Queries/raw/main/queries/qasper_abstract",
    "multifieldqa": "https://huggingface.co/datasets/hazyresearch/LoCoV1-Queries/raw/main/queries/multifieldqa",
    "2wikimqa": "https://huggingface.co/datasets/hazyresearch/LoCoV1-Queries/raw/main/queries/2wikimqa",
    "passage_retrieval": "https://huggingface.co/datasets/hazyresearch/LoCoV1-Queries/raw/main/queries/passage_retrieval",
    "courtlistener_HTML": "https://huggingface.co/datasets/hazyresearch/LoCoV1-Queries/raw/main/queries/courtlistener_HTML",
    "courtlistener_Plain_Text": "https://huggingface.co/datasets/hazyresearch/LoCoV1-Queries/raw/main/queries/courtlistener_Plain_Text",
    "legal_case_reports": "https://huggingface.co/datasets/hazyresearch/LoCoV1-Queries/raw/main/queries/legal_case_reports",
    "stackoverflow": "https://huggingface.co/datasets/hazyresearch/LoCoV1-Queries/raw/main/queries/stackoverflow",
}


class NewDataset(datasets.GeneratorBasedBuilder):
    """Builder exposing one config per LoCoV1 sub-dataset.

    Each example is one query record: ``qid``, ``dataset``, ``query`` and
    ``answer_pids``, read line-by-line from the sub-dataset's JSONL file.
    """

    VERSION = datasets.Version("1.1.0")

    # NOTE: a class-body list comprehension could not see VERSION (class scope
    # is not visible inside comprehensions), so the configs stay explicit.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="summ_screen_fd", version=VERSION, description=""),
        datasets.BuilderConfig(name="gov_report", version=VERSION, description=""),
        datasets.BuilderConfig(name="qmsum", version=VERSION, description=""),
        datasets.BuilderConfig(name="qasper_title", version=VERSION, description=""),
        datasets.BuilderConfig(name="qasper_abstract", version=VERSION, description=""),
        datasets.BuilderConfig(name="multifieldqa", version=VERSION, description=""),
        datasets.BuilderConfig(name="2wikimqa", version=VERSION, description=""),
        datasets.BuilderConfig(name="passage_retrieval", version=VERSION, description=""),
        datasets.BuilderConfig(name="courtlistener_HTML", version=VERSION, description=""),
        datasets.BuilderConfig(name="courtlistener_Plain_Text", version=VERSION, description=""),
        datasets.BuilderConfig(name="legal_case_reports", version=VERSION, description=""),
        datasets.BuilderConfig(name="stackoverflow", version=VERSION, description=""),
    ]

    DEFAULT_CONFIG_NAME = "summ_screen_fd"  # It's not mandatory to have a default configuration. Just use one if it make sense.

    def _info(self):
        """Return the dataset metadata, including the feature schema."""
        features = datasets.Features(
            {
                "qid": datasets.Value("string"),
                "dataset": datasets.Value("string"),
                "query": datasets.Value("string"),
                # NOTE(review): `_generate_examples` yields data["answer_pids"]
                # verbatim; if the JSONL field is a list of pids this "string"
                # declaration will fail casting — confirm against the data and
                # consider datasets.Sequence(datasets.Value("string")).
                "answer_pids": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,  # Here we define them above because they are different between the two configurations
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the config's test JSONL and declare the single test split."""
        _URLS = {
            "test": _URL[self.config.name] + "_test.jsonl",
        }
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            # datasets.Split.TEST stringifies to "test", matching the original name.
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"]},
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath):
        """Yield (key, example) pairs, one per JSON line of `filepath`."""
        with open(filepath, encoding="utf-8") as f:
            for key, row in enumerate(f):
                data = json.loads(row)
                yield key, {
                    "qid": data["qid"],
                    "dataset": data["dataset"],
                    "query": data["query"],
                    "answer_pids": data["answer_pids"],
                }