Tasks: Text Retrieval
Modalities: Text
Sub-tasks: entity-linking-retrieval
Size: 10M - 100M
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MedWiki is a large-scale sentence dataset collected from Wikipedia with medical entity (UMLS) annotations. This dataset is intended for pretraining."""
import json
import os

import datasets
_CITATION = """\
@inproceedings{medwiki,
  title={Cross-Domain Data Integration for Named Entity Disambiguation in Biomedical Text},
  author={Maya Varma and Laurel Orr and Sen Wu and Megan Leszczynski and Xiao Ling and Christopher Ré},
  year={2021},
  booktitle={Findings of the Association for Computational Linguistics: EMNLP 2021}
}
"""

_DESCRIPTION = """\
MedWiki is a large-scale sentence dataset collected from Wikipedia with medical entity (UMLS) annotations. This dataset is intended for pretraining.
"""

_HOMEPAGE = ""

_LICENSE = ""

_URLs = {"medwiki_full": "medwiki_full.zip", "medwiki_hq": "medwiki_hq.zip"}
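# Note (assumption): the archive paths above are relative, so dl_manager
# resolves them against the dataset repository; both zip files are expected
# to sit next to this script.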


class MedWiki(datasets.GeneratorBasedBuilder):
    """MedWiki: A Large-Scale Sentence Dataset with Medical Entity (UMLS) Annotations"""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="medwiki_full",
            version=VERSION,
            description="MedWiki (Full): A Large-Scale Sentence Dataset with Medical Entity (UMLS) Annotations.",
        ),
        datasets.BuilderConfig(
            name="medwiki_hq",
            version=VERSION,
            description="MedWiki (HQ): A Large-Scale Sentence Dataset with Medical Entity (UMLS) Annotations. The HQ (high quality) subset of MedWiki includes a portion of the dataset with higher-quality entity annotations.",
        ),
    ]
    def _info(self):
        features = datasets.Features(
            {
                "mentions": datasets.Sequence(datasets.Value("string")),
                "entities": datasets.Sequence(datasets.Value("string")),
                "entity_titles": datasets.Sequence(datasets.Value("string")),
                "types": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
                "spans": datasets.Sequence(datasets.Sequence(datasets.Value("int32"))),
                "sentence": datasets.Value("string"),
                "sent_idx_unq": datasets.Value("int32"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
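    # For orientation, a single record roughly takes this shape (the values
    # below are hypothetical, not drawn from the dataset):
    # {
    #     "mentions": ["atrial fibrillation"],
    #     "entities": ["C0004238"],
    #     "entity_titles": ["Atrial fibrillation"],
    #     "types": [["Disease or Syndrome"]],
    #     "spans": [[27, 46]],
    #     "sentence": "Warfarin is prescribed for atrial fibrillation.",
    #     "sent_idx_unq": 12345
    # }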

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        my_urls = _URLs[self.config.name]
        data_dir = dl_manager.download_and_extract(my_urls)

        # Adjust filenames for the medwiki_hq subset
        ext = ""
        if self.config.name == "medwiki_hq":
            ext = "_hq"

        # Load splits
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, f"train{ext}.jsonl"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, f"test{ext}.jsonl"),
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, f"dev{ext}.jsonl"),
                    "split": "dev",
                },
            ),
        ]
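    # Each SplitGenerator's gen_kwargs are forwarded verbatim as keyword
    # arguments to _generate_examples below.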

    def _generate_examples(self, filepath, split):
        """Yields examples as (key, example) tuples."""
        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                data = json.loads(row)
                yield id_, {
                    "mentions": data["mentions"],
                    "entities": data["entities"],
                    "entity_titles": data["entity_titles"],
                    "types": data["types"],
                    "spans": data["spans"],
                    "sentence": data["sentence"],
                    "sent_idx_unq": data["sent_idx_unq"],
                }
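
For reference, here is a minimal sketch of loading the dataset through this script. It assumes the script is saved locally as medwiki.py with the two zip archives next to it (the filename and layout are assumptions, not part of the script itself); depending on your datasets version, loading a script may also require passing trust_remote_code=True, and very recent versions drop script-based loading entirely.

from datasets import load_dataset

# Pick a configuration: "medwiki_full" for the complete dataset, or
# "medwiki_hq" for the higher-quality annotation subset.
dataset = load_dataset("medwiki.py", name="medwiki_full")

# Splits are exposed as train/validation/test, and each example follows the
# feature schema declared in _info().
example = dataset["train"][0]
print(example["sentence"])
print(list(zip(example["mentions"], example["entities"])))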