"""Elsevier OA CC-By Corpus Dataset.""" |
|
|
|
|
|
|
|
import json |
|
import glob |
|
import os |
|
import math |
|
|
|
import datasets |
|
|
|
|
|
_CITATION = """ |
|
@article{Kershaw2020ElsevierOC, |
|
title = {Elsevier OA CC-By Corpus}, |
|
author = {Daniel James Kershaw and R. Koeling}, |
|
journal = {ArXiv}, |
|
year = {2020}, |
|
volume = {abs/2008.00774}, |
|
doi = {https://doi.org/10.48550/arXiv.2008.00774}, |
|
url = {https://elsevier.digitalcommonsdata.com/datasets/zm33cdndxs}, |
|
keywords = {Science, Natural Language Processing, Machine Learning, Open Dataset}, |
|
abstract = {We introduce the Elsevier OA CC-BY corpus. This is the first open |
|
corpus of Scientific Research papers which has a representative sample |
|
from across scientific disciplines. This corpus not only includes the |
|
full text of the article, but also the metadata of the documents, |
|
along with the bibliographic information for each reference.} |
|
} |
|
""" |
|
|
|
_DESCRIPTION = """ |
|
Elsevier OA CC-By is a corpus of 40k (40, 091) open access (OA) CC-BY articles |
|
from across Elsevier’s journals and include the full text of the article, the metadata, |
|
the bibliographic information for each reference, and author highlights. |
|
""" |
|
|
|
_HOMEPAGE = "https://elsevier.digitalcommonsdata.com/datasets/zm33cdndxs/3"

_LICENSE = "CC-BY-4.0"

_URL = "https://data.mendeley.com/public-files/datasets/zm33cdndxs/files/4e03ae48-04a7-44d4-b103-ce73e548679c/file_downloaded"

|
class ElsevierOaCcBy(datasets.GeneratorBasedBuilder):
    """Elsevier OA CC-By Dataset."""

    VERSION = datasets.Version("1.0.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="all",
            version=VERSION,
            description="Official Mendeley dataset for Elsevier OA CC-By Corpus",
        ),
    ]

    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        features = datasets.Features(
            {
                "title": datasets.Value("string"),
                "abstract": datasets.Value("string"),
                "subjareas": datasets.Sequence(datasets.Value("string")),
                "keywords": datasets.Sequence(datasets.Value("string")),
                "asjc": datasets.Sequence(datasets.Value("string")),
                "body_text": datasets.Sequence(datasets.Value("string")),
                "author_highlights": datasets.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(_URL)
        corpus_path = os.path.join(data_dir, "json")

        doc_count = len(glob.glob(f"{corpus_path}/*.json"))

        # 80/10/10 split by document index. Each range is a half-open
        # [start, end) interval used to slice the sorted file list, so
        # adjacent ranges must share a boundary: that way no document is
        # skipped or duplicated across splits.
        train_split = [0, doc_count * 80 // 100]
        test_split = [doc_count * 80 // 100, doc_count * 90 // 100]
        validation_split = [doc_count * 90 // 100, doc_count]
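        # A worked example of the arithmetic above, assuming the advertised
        # corpus size of 40,091 documents: train covers indices [0, 32072),
        # test [32072, 36081), and validation [36081, 40091), i.e. splits of
        # 32,072 / 4,009 / 4,010 documents.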

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": corpus_path,
                    "split": "train",
                    "split_range": train_split,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": corpus_path,
                    "split": "test",
                    "split_range": test_split,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": corpus_path,
                    "split": "validation",
                    "split_range": validation_split,
                },
            ),
        ]

    def _generate_examples(self, filepath, split, split_range):
        # Sort the file list so the index-based split ranges are
        # deterministic across filesystems and runs.
        json_files = sorted(glob.glob(f"{filepath}/*.json"))
        for doc in json_files[split_range[0]:split_range[1]]:
            with open(doc, encoding="utf-8") as f:
                paper = json.load(f)

            metadata = paper["metadata"]
            yield paper["docId"], {
                "title": metadata["title"],
                "abstract": paper.get("abstract", ""),
                "subjareas": metadata.get("subjareas", []),
                "keywords": metadata.get("keywords", []),
                "asjc": metadata.get("asjc", []),
                # Sentences are stored individually; restore reading order
                # by section id and character offset.
                "body_text": [
                    sent["sentence"]
                    for sent in sorted(paper["body_text"], key=lambda s: (s["secId"], s["startOffset"]))
                ],
                "author_highlights": [
                    sent["sentence"]
                    for sent in sorted(paper.get("author_highlights", []), key=lambda s: s["startOffset"])
                ],
            }
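
# For reference, a minimal sketch of the per-article JSON layout that
# `_generate_examples` relies on. The field names are taken from the
# accesses above; the values are placeholders, and real documents carry
# further fields (e.g. the bibliographic data mentioned in _DESCRIPTION)
# that this builder does not expose:
#
#   {
#       "docId": "...",
#       "metadata": {
#           "title": "...",
#           "subjareas": ["..."],
#           "keywords": ["..."],
#           "asjc": ["..."]
#       },
#       "abstract": "...",
#       "body_text": [{"sentence": "...", "secId": "...", "startOffset": 0}],
#       "author_highlights": [{"sentence": "...", "startOffset": 0}]
#   }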
|
|
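# A minimal usage sketch, assuming this script is saved locally as
# "elsevier_oa_cc_by.py" (the file name is an assumption; any local path
# accepted by `datasets.load_dataset` works the same way):
#
#   from datasets import load_dataset
#
#   dataset = load_dataset("elsevier_oa_cc_by.py", "all")
#   print(dataset["train"][0]["title"])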