# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import io
import logging
import os
import pickle

import datasets

# General constants:
LABEL_NAME = 'label'
numbers = {
    1: "first", 2: "second", 3: "third", 4: "fourth", 5: "fifth",
    6: "sixth", 7: "seventh", 8: "eighth", 9: "ninth",
}
TEXT_COLUMN_NAME = [f"{numbers[i]}_sentence" for i in range(1, 10)]

# SSPabs constants:
SSPABS = 'SSPabs'
SSPABS_TRAIN_NAME = 'train.txt'
SSPABS_VALID_NAME = 'valid.txt'
SSPABS_TEST_NAME = 'test.txt'
SSPABS_DATA_DIR = 'data/SSP/abs'
SSPABS_LABELS = {
    "0": "Does not belong to abstract",
    "1": "Belongs to abstract",
}
SSPABS_TEXT_COLUMNS = 1

# PDTB constants:
PDTB_I = 'PDTB_I'
PDTB_E = 'PDTB_E'
PDTB_TRAIN_NAME = 'train.txt'
PDTB_VALID_NAME = 'valid.txt'
PDTB_TEST_NAME = 'test.txt'
PDTB_DATA_DIR = 'data/PDTB'
PDTB_DIRS = {PDTB_E: 'Explicit', PDTB_I: 'Implicit'}
PDTB_E_LABELS = [
    'Comparison.Concession',
    'Comparison.Contrast',
    'Contingency.Cause',
    'Contingency.Condition',
    'Contingency.Pragmatic condition',
    'Expansion.Alternative',
    'Expansion.Conjunction',
    'Expansion.Instantiation',
    'Expansion.List',
    'Expansion.Restatement',
    'Temporal.Asynchronous',
    'Temporal.Synchrony',
]
PDTB_E_LABELS = {str(i): label for i, label in enumerate(PDTB_E_LABELS)}
PDTB_I_LABELS = [
    'Comparison.Concession',
    'Comparison.Contrast',
    'Contingency.Cause',
    'Contingency.Pragmatic cause',
    'Expansion.Alternative',
    'Expansion.Conjunction',
    'Expansion.Instantiation',
    'Expansion.List',
    'Expansion.Restatement',
    'Temporal.Asynchronous',
    'Temporal.Synchrony',
]
PDTB_I_LABELS = {str(i): label for i, label in enumerate(PDTB_I_LABELS)}
PDTB_E_TEXT_COLUMNS = 2
PDTB_I_TEXT_COLUMNS = 2

# SP constants:
SPARXIV = 'SParxiv'
SPROCSTORY = 'SProcstory'
SPWIKI = 'SPwiki'
SP_TRAIN_NAME = 'train.txt'
SP_VALID_NAME = 'valid.txt'
SP_TEST_NAME = 'test.txt'
SP_DATA_DIR = 'data/SP'
SP_DIRS = {SPARXIV: 'arxiv', SPROCSTORY: 'rocstory', SPWIKI: 'wiki'}
SP_LABELS = {
    "0": 'First sentence',
    "1": 'Second sentence',
    "2": 'Third sentence',
    "3": 'Fourth sentence',
    "4": 'Fifth sentence',
}
SP_TEXT_COLUMNS = 5

# BSO constants:
BSOARXIV = 'BSOarxiv'
BSOROCSTORY = 'BSOrocstory'
BSOWIKI = 'BSOwiki'
BSO_TRAIN_NAME = 'train.txt'
BSO_VALID_NAME = 'valid.txt'
BSO_TEST_NAME = 'test.txt'
BSO_DATA_DIR = 'data/BSO'
BSO_DIRS = {BSOARXIV: 'arxiv', BSOROCSTORY: 'rocstory', BSOWIKI: 'wiki'}
BSO_LABELS = {
    "0": 'Incorrect order',
    "1": 'Correct order',
}
BSO_TEXT_COLUMNS = 2

# DC constants:
DCCHAT = 'DCchat'
DCWIKI = 'DCwiki'
DC_TRAIN_NAME = 'train.txt'
DC_VALID_NAME = 'valid.txt'
DC_TEST_NAME = 'test.txt'
DC_DATA_DIR = 'data/DC'
DC_DIRS = {DCCHAT: 'chat', DCWIKI: 'wiki'}
DC_LABELS = {
    "0": "Incoherent",
    "1": "Coherent",
}
DC_TEXT_COLUMNS = 6

# RST constants:
RST = 'RST'
RST_TRAIN_NAME = 'RST_TRAIN.pkl'
RST_VALID_NAME = 'RST_DEV.pkl'
RST_TEST_NAME = 'RST_TEST.pkl'
RST_DATA_DIR = 'data/RST'
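# Each RST label pairs the nuclearity pattern of the two spans
# (NS = nucleus-satellite, SN = satellite-nucleus, NN = multinuclear)
# with the coarse-grained rhetorical relation holding between them.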
RST_LABELS = [
    'NS-Explanation', 'NS-Evaluation', 'NN-Condition', 'NS-Summary',
    'SN-Cause', 'SN-Background', 'NS-Background', 'SN-Summary',
    'NS-Topic-Change', 'NN-Explanation', 'SN-Topic-Comment',
    'NS-Elaboration', 'SN-Attribution', 'SN-Manner-Means', 'NN-Evaluation',
    'NS-Comparison', 'NS-Contrast', 'SN-Condition', 'NS-Temporal',
    'NS-Enablement', 'SN-Evaluation', 'NN-Topic-Comment', 'NN-Temporal',
    'NN-Textual-organization', 'NN-Same-unit', 'NN-Comparison',
    'NN-Topic-Change', 'SN-Temporal', 'NN-Joint', 'SN-Enablement',
    'SN-Explanation', 'NN-Contrast', 'NN-Cause', 'SN-Contrast',
    'NS-Attribution', 'NS-Topic-Comment', 'SN-Elaboration', 'SN-Comparison',
    'NS-Cause', 'NS-Condition', 'NS-Manner-Means',
]
RST_TEXT_COLUMNS = 2

DATASET_NAMES = [
    SSPABS, PDTB_I, PDTB_E,
    SPARXIV, SPROCSTORY, SPWIKI,
    BSOARXIV, BSOROCSTORY, BSOWIKI,
    DCCHAT, DCWIKI, RST,
]

_CITATION = """\
@InProceedings{mchen-discoeval-19,
  title = {Evaluation Benchmarks and Learning Criteria for Discourse-Aware Sentence Representations},
  author = {Mingda Chen and Zewei Chu and Kevin Gimpel},
  booktitle = {Proc. of {EMNLP}},
  year = {2019}
}
"""

_DESCRIPTION = """\
This dataset contains all tasks of the DiscoEval benchmark for sentence representation learning.
"""

_HOMEPAGE = "https://github.com/ZeweiChu/DiscoEval"


class DiscoEvalSentence(datasets.GeneratorBasedBuilder):
    """DiscoEval benchmark."""

    VERSION = datasets.Version("1.1.0")
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=SPARXIV,
            version=VERSION,
            description="Sentence positioning dataset from arXiv",
        ),
        datasets.BuilderConfig(
            name=SPROCSTORY,
            version=VERSION,
            description="Sentence positioning dataset from ROCStory",
        ),
        datasets.BuilderConfig(
            name=SPWIKI,
            version=VERSION,
            description="Sentence positioning dataset from Wikipedia",
        ),
        datasets.BuilderConfig(
            name=DCCHAT,
            version=VERSION,
            description="Discourse Coherence dataset from chat",
        ),
        datasets.BuilderConfig(
            name=DCWIKI,
            version=VERSION,
            description="Discourse Coherence dataset from Wikipedia",
        ),
        datasets.BuilderConfig(
            name=RST,
            version=VERSION,
            description="The RST Discourse Treebank dataset",
        ),
        datasets.BuilderConfig(
            name=PDTB_E,
            version=VERSION,
            description="The Penn Discourse Treebank - Explicit dataset.",
        ),
        datasets.BuilderConfig(
            name=PDTB_I,
            version=VERSION,
            description="The Penn Discourse Treebank - Implicit dataset.",
        ),
        datasets.BuilderConfig(
            name=SSPABS,
            version=VERSION,
            description="The SSP dataset.",
        ),
        datasets.BuilderConfig(
            name=BSOARXIV,
            version=VERSION,
            description="The BSO Task with the arxiv dataset.",
        ),
        datasets.BuilderConfig(
            name=BSOWIKI,
            version=VERSION,
            description="The BSO Task with the wiki dataset.",
        ),
        datasets.BuilderConfig(
            name=BSOROCSTORY,
            version=VERSION,
            description="The BSO Task with the rocstory dataset.",
        ),
    ]

    def _info(self):
        if self.config.name in [SPARXIV, SPROCSTORY, SPWIKI]:
            features_dict = {
                TEXT_COLUMN_NAME[i]: datasets.Value('string')
                for i in range(SP_TEXT_COLUMNS)
            }
            features_dict[LABEL_NAME] = datasets.ClassLabel(
                names=list(SP_LABELS.values()),
            )
            features = datasets.Features(features_dict)
        elif self.config.name in [BSOARXIV, BSOWIKI, BSOROCSTORY]:
            features_dict = {
                TEXT_COLUMN_NAME[i]: datasets.Value('string')
                for i in range(BSO_TEXT_COLUMNS)
            }
            features_dict[LABEL_NAME] = datasets.ClassLabel(
                names=list(BSO_LABELS.values())
            )
            features = datasets.Features(features_dict)
        elif self.config.name in [DCCHAT, DCWIKI]:
            features_dict = {
                TEXT_COLUMN_NAME[i]: datasets.Value('string')
                for i in range(DC_TEXT_COLUMNS)
            }
            features_dict[LABEL_NAME] = datasets.ClassLabel(
                names=list(DC_LABELS.values())
            )
            features = datasets.Features(features_dict)
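        # For the RST task each text column holds a list of strings (the
        # units of a discourse span) rather than a single sentence, hence
        # the sequence-valued feature below.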
        elif self.config.name in [RST]:
            features_dict = {
                TEXT_COLUMN_NAME[i]: [datasets.Value('string')]
                for i in range(RST_TEXT_COLUMNS)
            }
            features_dict[LABEL_NAME] = datasets.ClassLabel(
                names=RST_LABELS
            )
            features = datasets.Features(features_dict)
        elif self.config.name in [PDTB_E]:
            features_dict = {
                TEXT_COLUMN_NAME[i]: datasets.Value('string')
                for i in range(PDTB_E_TEXT_COLUMNS)
            }
            features_dict[LABEL_NAME] = datasets.ClassLabel(
                names=list(PDTB_E_LABELS.values())
            )
            features = datasets.Features(features_dict)
        elif self.config.name in [PDTB_I]:
            features_dict = {
                TEXT_COLUMN_NAME[i]: datasets.Value('string')
                for i in range(PDTB_I_TEXT_COLUMNS)
            }
            features_dict[LABEL_NAME] = datasets.ClassLabel(
                names=list(PDTB_I_LABELS.values())
            )
            features = datasets.Features(features_dict)
        elif self.config.name in [SSPABS]:
            features_dict = {
                TEXT_COLUMN_NAME[i]: datasets.Value('string')
                for i in range(SSPABS_TEXT_COLUMNS)
            }
            features_dict[LABEL_NAME] = datasets.ClassLabel(
                names=list(SSPABS_LABELS.values())
            )
            features = datasets.Features(features_dict)
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        if self.config.name in [SPARXIV, SPROCSTORY, SPWIKI]:
            data_dir = SP_DATA_DIR + "/" + SP_DIRS[self.config.name]
            train_name = SP_TRAIN_NAME
            valid_name = SP_VALID_NAME
            test_name = SP_TEST_NAME
        elif self.config.name in [BSOARXIV, BSOWIKI, BSOROCSTORY]:
            data_dir = BSO_DATA_DIR + "/" + BSO_DIRS[self.config.name]
            train_name = BSO_TRAIN_NAME
            valid_name = BSO_VALID_NAME
            test_name = BSO_TEST_NAME
        elif self.config.name in [DCCHAT, DCWIKI]:
            data_dir = DC_DATA_DIR + "/" + DC_DIRS[self.config.name]
            train_name = DC_TRAIN_NAME
            valid_name = DC_VALID_NAME
            test_name = DC_TEST_NAME
        elif self.config.name in [RST]:
            data_dir = RST_DATA_DIR
            train_name = RST_TRAIN_NAME
            valid_name = RST_VALID_NAME
            test_name = RST_TEST_NAME
        elif self.config.name in [PDTB_E, PDTB_I]:
            data_dir = os.path.join(PDTB_DATA_DIR, PDTB_DIRS[self.config.name])
            train_name = PDTB_TRAIN_NAME
            valid_name = PDTB_VALID_NAME
            test_name = PDTB_TEST_NAME
        elif self.config.name in [SSPABS]:
            data_dir = SSPABS_DATA_DIR
            train_name = SSPABS_TRAIN_NAME
            valid_name = SSPABS_VALID_NAME
            test_name = SSPABS_TEST_NAME
        urls_to_download = {
            "train": data_dir + "/" + train_name,
            "valid": data_dir + "/" + valid_name,
            "test": data_dir + "/" + test_name,
        }
        logger = logging.getLogger(__name__)
        # One download_and_extract call resolves each split path to a local
        # file; a second call on its own output would be redundant.
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        logger.info("Data files downloaded to: %s", downloaded_files)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": downloaded_files['train'],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": downloaded_files['valid'],
                    "split": "dev",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": downloaded_files['test'],
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        logger = logging.getLogger(__name__)
        logger.info("Current working dir: %s", os.getcwd())
        logger.info("generating examples from = %s", filepath)
        if self.config.name == RST:
            # RST data is pickled as a list of [label, unit_1, unit_2, ...].
            with open(filepath, "rb") as f:
                data = pickle.load(f)
            for key, line in enumerate(data):
                example = {
                    TEXT_COLUMN_NAME[i]: sent
                    for i, sent in enumerate(line[1:])
                }
                example[LABEL_NAME] = line[0]
                yield key, example
        else:
            with io.open(filepath, mode='r', encoding='utf-8') as f:
                for key, line in enumerate(f):
                    line = line.strip().split("\t")
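                    # Each line is tab-separated: the label id comes first,
                    # followed by the task's text columns.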
                    example = {
                        TEXT_COLUMN_NAME[i]: sent
                        for i, sent in enumerate(line[1:])
                    }
                    if self.config.name == PDTB_E:
                        example[LABEL_NAME] = PDTB_E_LABELS[line[0]]
                    elif self.config.name == PDTB_I:
                        example[LABEL_NAME] = PDTB_I_LABELS[line[0]]
                    elif self.config.name == SSPABS:
                        example[LABEL_NAME] = SSPABS_LABELS[line[0]]
                    elif self.config.name in (DCCHAT, DCWIKI):
                        example[LABEL_NAME] = DC_LABELS[line[0]]
                    elif self.config.name in (SPWIKI, SPROCSTORY, SPARXIV):
                        example[LABEL_NAME] = SP_LABELS[line[0]]
                    elif self.config.name in (BSOARXIV, BSOWIKI, BSOROCSTORY):
                        example[LABEL_NAME] = BSO_LABELS[line[0]]
                    yield key, example
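
if __name__ == "__main__":
    # Minimal usage sketch, not part of the loader itself: it shows how one
    # configuration of the benchmark could be loaded through the standard
    # `datasets` API. It assumes this script is run from a checkout that also
    # contains the `data/` directories referenced above; on recent versions
    # of `datasets`, passing `trust_remote_code=True` may also be required.
    logging.basicConfig(level=logging.INFO)
    dataset = datasets.load_dataset(__file__, SPARXIV)
    print(dataset)
    print(dataset["train"][0])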