from __future__ import absolute_import, division, print_function

import json
import os

import datasets

_BASE_URL = "https://huggingface.co/datasets/EMBO/SourceData/resolve/main/"
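
# This script is meant to be consumed through `datasets.load_dataset`.
# A minimal usage sketch (configuration names are defined in the builder
# below):
#
#     import datasets
#     ds = datasets.load_dataset("EMBO/SourceData", "NER")
#
# Available configurations: "NER", "PANELIZATION", "ROLES_GP", "ROLES_SM",
# "ROLES_MULTI", and "FULL".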


class SourceData(datasets.GeneratorBasedBuilder):
    """SourceDataNLP provides datasets to train NLP tasks in cell and molecular biology."""
    _NER_LABEL_NAMES = [
        "O",
        "B-SMALL_MOLECULE",
        "I-SMALL_MOLECULE",
        "B-GENEPROD",
        "I-GENEPROD",
        "B-SUBCELLULAR",
        "I-SUBCELLULAR",
        "B-CELL_TYPE",
        "I-CELL_TYPE",
        "B-TISSUE",
        "I-TISSUE",
        "B-ORGANISM",
        "I-ORGANISM",
        "B-EXP_ASSAY",
        "I-EXP_ASSAY",
        "B-DISEASE",
        "I-DISEASE",
        "B-CELL_LINE",
        "I-CELL_LINE",
    ]
    _SEMANTIC_ROLES = [
        "O",
        "B-CONTROLLED_VAR",
        "I-CONTROLLED_VAR",
        "B-MEASURED_VAR",
        "I-MEASURED_VAR",
    ]
    _PANEL_START_NAMES = ["O", "B-PANEL_START", "I-PANEL_START"]
    _ROLES_MULTI = ["O", "GENEPROD", "SMALL_MOLECULE"]

    _CITATION = """\
    @article{abreu2023sourcedata,
        title={The SourceData-NLP dataset: integrating curation into scientific publishing
        for training large language models},
        author={Abreu-Vicente, Jorge and Sonntag, Hannah and Eidens, Thomas and Lemberger, Thomas},
        journal={arXiv preprint arXiv:2310.20440},
        year={2023}
    }
    """

    _DESCRIPTION = """\
    This dataset is based on the SourceData database and is intended to facilitate training of NLP tasks in the cell and molecular biology domain.
    """

    _HOMEPAGE = "https://huggingface.co/datasets/EMBO/SourceData"

    _LICENSE = "CC-BY 4.0"

    DEFAULT_CONFIG_NAME = "NER"

    _LATEST_VERSION = "2.0.3"

    def _info(self):
        VERSION = (
            self.config.version
            if self.config.version not in ["0.0.0", "latest"]
            else self._LATEST_VERSION
        )
        # URLs are composed with f-strings; os.path.join is meant for
        # filesystem paths and is platform dependent, so it is avoided here.
        self._URLS = {
            "NER": f"{_BASE_URL}token_classification/v_{VERSION}/ner/",
            "PANELIZATION": f"{_BASE_URL}token_classification/v_{VERSION}/panelization/",
            "ROLES_GP": f"{_BASE_URL}token_classification/v_{VERSION}/roles_gene/",
            "ROLES_SM": f"{_BASE_URL}token_classification/v_{VERSION}/roles_small_mol/",
            "ROLES_MULTI": f"{_BASE_URL}token_classification/v_{VERSION}/roles_multi/",
            "FULL": f"{_BASE_URL}bigbio/",
        }
        self.BUILDER_CONFIGS = [
            datasets.BuilderConfig(
                name="NER",
                version=VERSION,
                description="Dataset for named-entity recognition.",
            ),
            datasets.BuilderConfig(
                name="PANELIZATION",
                version=VERSION,
                description="Dataset to separate figure captions into panels.",
            ),
            datasets.BuilderConfig(
                name="ROLES_GP",
                version=VERSION,
                description="Dataset for semantic roles of gene products.",
            ),
            datasets.BuilderConfig(
                name="ROLES_SM",
                version=VERSION,
                description="Dataset for semantic roles of small molecules.",
            ),
            datasets.BuilderConfig(
                name="ROLES_MULTI",
                version=VERSION,
                description="Dataset to train ROLES_GP and ROLES_SM at once.",
            ),
            datasets.BuilderConfig(
                name="FULL",
                version=VERSION,
                description="Full dataset including all NER and entity-linking annotations, links to figure images, etc.",
            ),
        ]
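
        # The version can be overridden at load time; a sketch (assuming the
        # standard `datasets` config-override mechanism):
        #     datasets.load_dataset("EMBO/SourceData", "NER", version="2.0.3")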

        if self.config.name in ["NER", "default"]:
            features = datasets.Features(
                {
                    "words": datasets.Sequence(feature=datasets.Value("string")),
                    "labels": datasets.Sequence(
                        feature=datasets.ClassLabel(
                            num_classes=len(self._NER_LABEL_NAMES),
                            names=self._NER_LABEL_NAMES,
                        )
                    ),
                    "tag_mask": datasets.Sequence(feature=datasets.Value("int8")),
                    "text": datasets.Value("string"),
                }
            )
        elif self.config.name in ["ROLES_GP", "ROLES_SM"]:
            # Both single-category role configurations share the same schema.
            features = datasets.Features(
                {
                    "words": datasets.Sequence(feature=datasets.Value("string")),
                    "labels": datasets.Sequence(
                        feature=datasets.ClassLabel(
                            num_classes=len(self._SEMANTIC_ROLES),
                            names=self._SEMANTIC_ROLES,
                        )
                    ),
                    "tag_mask": datasets.Sequence(feature=datasets.Value("int8")),
                    "text": datasets.Value("string"),
                }
            )
        elif self.config.name == "ROLES_MULTI":
            features = datasets.Features(
                {
                    "words": datasets.Sequence(feature=datasets.Value("string")),
                    "labels": datasets.Sequence(
                        feature=datasets.ClassLabel(
                            num_classes=len(self._SEMANTIC_ROLES),
                            names=self._SEMANTIC_ROLES,
                        )
                    ),
                    "is_category": datasets.Sequence(
                        feature=datasets.ClassLabel(
                            num_classes=len(self._ROLES_MULTI), names=self._ROLES_MULTI
                        )
                    ),
                    "tag_mask": datasets.Sequence(feature=datasets.Value("int8")),
                    "text": datasets.Value("string"),
                }
            )
        elif self.config.name == "PANELIZATION":
            features = datasets.Features(
                {
                    "words": datasets.Sequence(feature=datasets.Value("string")),
                    "labels": datasets.Sequence(
                        feature=datasets.ClassLabel(
                            num_classes=len(self._PANEL_START_NAMES),
                            names=self._PANEL_START_NAMES,
                        )
                    ),
                    "tag_mask": datasets.Sequence(feature=datasets.Value("int8")),
                }
            )
        elif self.config.name == "FULL":
            features = datasets.Features(
                {
                    "doi": datasets.Value("string"),
                    "abstract": datasets.Value("string"),
                    "figures": [
                        {
                            "fig_id": datasets.Value("string"),
                            "label": datasets.Value("string"),
                            "fig_graphic_url": datasets.Value("string"),
                            "panels": [
                                {
                                    "panel_id": datasets.Value("string"),
                                    "text": datasets.Value("string"),
                                    "panel_graphic_url": datasets.Value("string"),
                                    "entities": [
                                        {
                                            "annotation_id": datasets.Value("string"),
                                            "source": datasets.Value("string"),
                                            "category": datasets.Value("string"),
                                            "entity_type": datasets.Value("string"),
                                            "role": datasets.Value("string"),
                                            "text": datasets.Value("string"),
                                            "ext_ids": datasets.Value("string"),
                                            "norm_text": datasets.Value("string"),
                                            "ext_dbs": datasets.Value("string"),
                                            "in_caption": datasets.Value("bool"),
                                            "ext_names": datasets.Value("string"),
                                            "ext_tax_ids": datasets.Value("string"),
                                            "ext_tax_names": datasets.Value("string"),
                                            "ext_urls": datasets.Value("string"),
                                            "offsets": [datasets.Value("int64")],
                                        }
                                    ],
                                }
                            ],
                        }
                    ],
                }
            )

        return datasets.DatasetInfo(
            description=self._DESCRIPTION,
            features=features,
            supervised_keys=("words", "labels"),
            homepage=self._HOMEPAGE,
            license=self._LICENSE,
            citation=self._CITATION,
        )
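
    # Note: ClassLabel features store labels as integer ids. A sketch of the
    # reverse mapping for the NER config:
    #     features["labels"].feature.int2str(3)  # -> "B-GENEPROD"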

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Returns SplitGenerators.

        Uses local files if a data_dir is specified. Otherwise downloads the files from their official URLs.
        """
        config_name = self.config.name if self.config.name != "default" else "NER"
        # Fail early with a clear message if the configuration is unknown.
        if config_name not in self._URLS:
            raise ValueError(f"unknown config name: {self.config.name}")

        if config_name == "FULL":
            url = self._URLS[config_name] + "source_data_json_splits_2.0.2.zip"
            data_dir = dl_manager.download_and_extract(url)
            data_files = [
                os.path.join(data_dir, filename)
                for filename in ["train.jsonl", "test.jsonl", "validation.jsonl"]
            ]
        else:
            urls = [
                self._URLS[config_name] + "train.jsonl",
                self._URLS[config_name] + "test.jsonl",
                self._URLS[config_name] + "validation.jsonl",
            ]
            data_files = dl_manager.download(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_files[0]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": data_files[1]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": data_files[2]},
            ),
        ]
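
    # The SplitGenerators above surface as the "train", "test", and
    # "validation" splits of the resulting DatasetDict.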

    def _generate_examples(self, filepath):
        """Yields examples. This method receives as argument the `gen_kwargs` defined in the previous `_split_generators` method.

        It is in charge of opening the given file and yielding (key, example) tuples from the dataset.
        The key is not important; it is only there for legacy reasons (legacy from tfds)."""

        # Bookkeeping counters for the FULL config (collected but not reported).
        no_panels = 0
        no_entities = 0
        has_panels = 0
        has_entities = 0

        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                data = json.loads(row.strip())
                if self.config.name in ["NER", "default", "ROLES_GP", "ROLES_SM"]:
                    # These configurations share the same record layout; the
                    # precomputed category mask serves as the tag mask.
                    yield id_, {
                        "words": data["words"],
                        "labels": data["labels"],
                        "tag_mask": data["is_category"],
                        "text": data["text"],
                    }
                elif self.config.name == "ROLES_MULTI":
                    labels = data["labels"]
                    # Mask tokens whose label id is non-zero (i.e. not "O").
                    tag_mask = [1 if t != 0 else 0 for t in labels]
                    yield id_, {
                        "words": data["words"],
                        "labels": data["labels"],
                        "tag_mask": tag_mask,
                        "is_category": data["is_category"],
                        "text": data["text"],
                    }
                elif self.config.name == "PANELIZATION":
                    labels = data["labels"]
                    # Mask only the tokens that open a new panel.
                    tag_mask = [1 if t == "B-PANEL_START" else 0 for t in labels]
                    yield id_, {
                        "words": data["words"],
                        "labels": data["labels"],
                        "tag_mask": tag_mask,
                    }
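                    # E.g. labels ["O", "B-PANEL_START", "I-PANEL_START"] give
                    # tag_mask [0, 1, 0]: only the token opening each panel is
                    # flagged.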
                elif self.config.name == "FULL":
                    doc_figs = data["figures"]
                    all_figures = []
                    for fig in doc_figs:
                        all_panels = []
                        figure = {
                            "fig_id": fig["fig_id"],
                            "label": fig["label"],
                            "fig_graphic_url": fig["fig_graphic_url"],
                        }

                        for p in fig["panels"]:
                            panel = {
                                "panel_id": p["panel_id"],
                                "text": p["text"].strip(),
                                "panel_graphic_url": p["panel_graphic_url"],
                                "entities": [
                                    {
                                        "annotation_id": t["tag_id"],
                                        "source": t["source"],
                                        "category": t["category"],
                                        "entity_type": t["entity_type"],
                                        "role": t["role"],
                                        "text": t["text"],
                                        "ext_ids": t["ext_ids"],
                                        "norm_text": t["norm_text"],
                                        "ext_dbs": t["ext_dbs"],
                                        "in_caption": bool(t["in_caption"]),
                                        "ext_names": t["ext_names"],
                                        "ext_tax_ids": t["ext_tax_ids"],
                                        "ext_tax_names": t["ext_tax_names"],
                                        "ext_urls": t["ext_urls"],
                                        "offsets": t["local_offsets"],
                                    }
                                    for t in p["tags"]
                                ],
                            }
                            for e in panel["entities"]:
                                assert isinstance(e["offsets"], list)
                            # Keep only panels that carry entity annotations.
                            if len(panel["entities"]) == 0:
                                no_entities += 1
                                continue
                            has_entities += 1
                            all_panels.append(panel)

                        figure["panels"] = all_panels

                        # Keep only figures with at least one retained panel.
                        if len(all_panels) == 0:
                            no_panels += 1
                            continue
                        has_panels += 1
                        all_figures.append(figure)

                    output = {
                        "doi": data["doi"],
                        "abstract": data["abstract"],
                        "figures": all_figures,
                    }
                    yield id_, output
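

if __name__ == "__main__":
    # Minimal smoke test; a sketch that assumes this file is used as a local
    # loading script (newer `datasets` releases may additionally require
    # `trust_remote_code=True` for script-based builders).
    ds = datasets.load_dataset(__file__, name="NER")
    print(ds)
    print(ds["train"][0])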