OfekGlick commited on
Commit
61d6bb3
1 Parent(s): ae30e34

Uploaded all data files and script builder

Browse files
Files changed (49) hide show
  1. .gitattributes +1 -1
  2. DiscoEval.py +257 -0
  3. README.md +177 -0
  4. constants.py +138 -0
  5. data/BSO/arxiv/test.txt +0 -0
  6. data/BSO/arxiv/train.txt +0 -0
  7. data/BSO/arxiv/valid.txt +0 -0
  8. data/BSO/rocstory/test.txt +0 -0
  9. data/BSO/rocstory/train.txt +0 -0
  10. data/BSO/rocstory/valid.txt +0 -0
  11. data/BSO/wiki/test.txt +0 -0
  12. data/BSO/wiki/train.txt +0 -0
  13. data/BSO/wiki/valid.txt +0 -0
  14. data/DC/chat/test.txt +0 -0
  15. data/DC/chat/train.txt +0 -0
  16. data/DC/chat/valid.txt +0 -0
  17. data/DC/wiki/test.txt +0 -0
  18. data/DC/wiki/train.txt +0 -0
  19. data/DC/wiki/valid.txt +0 -0
  20. data/PDTB/Explicit/labelset.txt +12 -0
  21. data/PDTB/Explicit/test.txt +0 -0
  22. data/PDTB/Explicit/train.txt +0 -0
  23. data/PDTB/Explicit/valid.txt +0 -0
  24. data/PDTB/Implicit/labelset.txt +11 -0
  25. data/PDTB/Implicit/test.txt +0 -0
  26. data/PDTB/Implicit/train.txt +0 -0
  27. data/PDTB/Implicit/valid.txt +0 -0
  28. data/RST/RST_DEV.pkl +3 -0
  29. data/RST/RST_TEST.pkl +3 -0
  30. data/RST/RST_TRAIN.pkl +3 -0
  31. data/RST/cmds.txt +1 -0
  32. data/RST/py2/RST_DEV.pkl +3 -0
  33. data/RST/py2/RST_TEST.pkl +3 -0
  34. data/RST/py2/RST_TRAIN.pkl +3 -0
  35. data/RST/py3/RST_DEV.pkl +3 -0
  36. data/RST/py3/RST_TEST.pkl +3 -0
  37. data/RST/py3/RST_TRAIN.pkl +3 -0
  38. data/SP/arxiv/test.txt +0 -0
  39. data/SP/arxiv/train.txt +0 -0
  40. data/SP/arxiv/valid.txt +0 -0
  41. data/SP/rocstory/test.txt +0 -0
  42. data/SP/rocstory/train.txt +0 -0
  43. data/SP/rocstory/valid.txt +0 -0
  44. data/SP/wiki/test.txt +0 -0
  45. data/SP/wiki/train.txt +0 -0
  46. data/SP/wiki/valid.txt +0 -0
  47. data/SSP/abs/test.txt +0 -0
  48. data/SSP/abs/train.txt +0 -0
  49. data/SSP/abs/valid.txt +0 -0
.gitattributes CHANGED
@@ -1,4 +1,4 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
  *.arrow filter=lfs diff=lfs merge=lfs -text
3
  *.bin filter=lfs diff=lfs merge=lfs -text
4
  *.bz2 filter=lfs diff=lfs merge=lfs -text
 
1
+ .7z filter=lfs diff=lfs merge=lfs -text
2
  *.arrow filter=lfs diff=lfs merge=lfs -text
3
  *.bin filter=lfs diff=lfs merge=lfs -text
4
  *.bz2 filter=lfs diff=lfs merge=lfs -text
DiscoEval.py ADDED
@@ -0,0 +1,257 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import os
16
+ import io
17
+ import datasets
18
+ import constants
19
+ import pickle
20
+ import logging
21
+
22
+ _CITATION = """\
23
+ @InProceedings{mchen-discoeval-19,
24
+ title = {Evaluation Benchmarks and Learning Criteria for Discourse-Aware Sentence Representations},
25
+ author = {Mingda Chen and Zewei Chu and Kevin Gimpel},
26
+ booktitle = {Proc. of {EMNLP}},
27
+ year={2019}
28
+ }
29
+ """
30
+
31
+ _DESCRIPTION = """\
32
+ This dataset contains all tasks of the DiscoEval benchmark for sentence representation learning.
33
+ """
34
+
35
+ _HOMEPAGE = "https://github.com/ZeweiChu/DiscoEval"
36
+
37
+
class DiscoEvalSentence(datasets.GeneratorBasedBuilder):
    """Builder for the DiscoEval benchmark.

    Each configuration corresponds to one DiscoEval task/source pair:
    sentence positioning (SP), binary sentence ordering (BSO), discourse
    coherence (DC), discourse relations (RST / PDTB explicit & implicit),
    and sentence section prediction (SSPabs).  Every example consists of
    one or more text columns plus a single classification label.
    """

    VERSION = datasets.Version("1.1.0")
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=constants.SPARXIV,
            version=VERSION,
            description="Sentence positioning dataset from arXiv",
        ),
        datasets.BuilderConfig(
            name=constants.SPROCSTORY,
            version=VERSION,
            description="Sentence positioning dataset from ROCStory",
        ),
        datasets.BuilderConfig(
            name=constants.SPWIKI,
            version=VERSION,
            description="Sentence positioning dataset from Wikipedia",
        ),
        datasets.BuilderConfig(
            name=constants.DCCHAT,
            version=VERSION,
            description="Discourse Coherence dataset from chat",
        ),
        datasets.BuilderConfig(
            name=constants.DCWIKI,
            version=VERSION,
            description="Discourse Coherence dataset from Wikipedia",
        ),
        datasets.BuilderConfig(
            name=constants.RST,
            version=VERSION,
            description="The RST Discourse Treebank dataset ",
        ),
        datasets.BuilderConfig(
            name=constants.PDTB_E,
            version=VERSION,
            description="The Penn Discourse Treebank - Explicit dataset.",
        ),
        datasets.BuilderConfig(
            name=constants.PDTB_I,
            version=VERSION,
            description="The Penn Discourse Treebank - Implicit dataset.",
        ),
        datasets.BuilderConfig(
            name=constants.SSPABS,
            version=VERSION,
            description="The SSP dataset.",
        ),
        datasets.BuilderConfig(
            name=constants.BSOARXIV,
            version=VERSION,
            description="The BSO Task with the arxiv dataset.",
        ),
        datasets.BuilderConfig(
            name=constants.BSOWIKI,
            version=VERSION,
            description="The BSO Task with the wiki dataset.",
        ),
        datasets.BuilderConfig(
            name=constants.BSOROCSTORY,
            version=VERSION,
            description="The BSO Task with the rocstory dataset.",
        ),
    ]

    def _task_features(self):
        """Return (num_text_columns, label_names, column_is_sequence) for the active config.

        `column_is_sequence` is True only for RST, whose text columns hold
        lists of strings rather than single strings.

        Raises:
            ValueError: if the config name is not one of the known tasks.
        """
        name = self.config.name
        if name in (constants.SPARXIV, constants.SPROCSTORY, constants.SPWIKI):
            return constants.SP_TEXT_COLUMNS, constants.SP_LABELS, False
        if name in (constants.BSOARXIV, constants.BSOWIKI, constants.BSOROCSTORY):
            return constants.BSO_TEXT_COLUMNS, constants.BSO_LABELS, False
        if name in (constants.DCCHAT, constants.DCWIKI):
            return constants.DC_TEXT_COLUMNS, constants.DC_LABELS, False
        if name == constants.RST:
            return constants.RST_TEXT_COLUMNS, constants.RST_LABELS, True
        if name == constants.PDTB_E:
            return constants.PDTB_E_TEXT_COLUMNS, constants.PDTB_E_LABELS, False
        if name == constants.PDTB_I:
            return constants.PDTB_I_TEXT_COLUMNS, constants.PDTB_I_LABELS, False
        if name == constants.SSPABS:
            return constants.SSPABS_TEXT_COLUMNS, constants.SSPABS_LABELS, False
        # Original code silently fell through here and crashed later with a
        # NameError; fail fast with a clear message instead.
        raise ValueError(f"Unknown DiscoEval config: {name!r}")

    def _info(self):
        """Build the DatasetInfo (features, description, citation) for the active config."""
        n_columns, label_names, is_sequence = self._task_features()
        column_type = datasets.Value('string')
        if is_sequence:
            # A list feature: each RST column is a sequence of sentences.
            column_type = [column_type]
        features_dict = {
            constants.TEXT_COLUMN_NAME[i]: column_type
            for i in range(n_columns)
        }
        features_dict[constants.LABEL_NAME] = datasets.ClassLabel(names=label_names)
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features_dict),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _task_paths(self):
        """Return repo-relative (train, valid, test) file paths for the active config.

        Paths are joined with "/" because they are hub-repo-relative URLs,
        not local filesystem paths.

        Raises:
            ValueError: if the config name is not one of the known tasks.
        """
        name = self.config.name
        if name in (constants.SPARXIV, constants.SPROCSTORY, constants.SPWIKI):
            data_dir = constants.SP_DATA_DIR + "/" + constants.SP_DIRS[name]
            split_names = (constants.SP_TRAIN_NAME, constants.SP_VALID_NAME, constants.SP_TEST_NAME)
        elif name in (constants.BSOARXIV, constants.BSOWIKI, constants.BSOROCSTORY):
            data_dir = constants.BSO_DATA_DIR + "/" + constants.BSO_DIRS[name]
            split_names = (constants.BSO_TRAIN_NAME, constants.BSO_VALID_NAME, constants.BSO_TEST_NAME)
        elif name in (constants.DCCHAT, constants.DCWIKI):
            data_dir = constants.DC_DATA_DIR + "/" + constants.DC_DIRS[name]
            split_names = (constants.DC_TRAIN_NAME, constants.DC_VALID_NAME, constants.DC_TEST_NAME)
        elif name == constants.RST:
            data_dir = constants.RST_DATA_DIR
            split_names = (constants.RST_TRAIN_NAME, constants.RST_VALID_NAME, constants.RST_TEST_NAME)
        elif name in (constants.PDTB_E, constants.PDTB_I):
            data_dir = constants.PDTB_DATA_DIR + "/" + constants.PDTB_DIRS[name]
            split_names = (constants.PDTB_TRAIN_NAME, constants.PDTB_VALID_NAME, constants.PDTB_TEST_NAME)
        elif name == constants.SSPABS:
            data_dir = constants.SSPABS_DATA_DIR
            split_names = (constants.SSPABS_TRAIN_NAME, constants.SSPABS_VALID_NAME, constants.SSPABS_TEST_NAME)
        else:
            raise ValueError(f"Unknown DiscoEval config: {name!r}")
        return tuple(data_dir + "/" + split_name for split_name in split_names)

    def _split_generators(self, dl_manager):
        """Download the three split files and wire them to SplitGenerators."""
        train_path, valid_path, test_path = self._task_paths()
        urls_to_download = {
            "train": train_path,
            "valid": valid_path,
            "test": test_path,
        }
        logger = logging.getLogger(__name__)
        # Bug fix: the original called download_and_extract twice, feeding the
        # already-downloaded local paths back in.  One call is sufficient.
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        logger.info("Downloaded files: %s", downloaded_files)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": downloaded_files['train'],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": downloaded_files['valid'],
                    "split": "dev",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": downloaded_files['test'],
                    "split": "test"
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield (key, example) pairs from one split file.

        RST ships as a pickle of ``[label, sent_1, sent_2, ...]`` rows;
        every other task is tab-separated text with the label in the first
        field and the sentences in the remaining fields.
        """
        logger = logging.getLogger(__name__)
        logger.info("generating examples from = %s", filepath)
        if self.config.name == constants.RST:
            # NOTE(review): pickle.load can execute arbitrary code; acceptable
            # only because the .pkl files ship inside this dataset repo.
            # Bug fix: the original leaked the file handle (open without close).
            with open(filepath, "rb") as f:
                data = pickle.load(f)
            for key, line in enumerate(data):
                example = {constants.TEXT_COLUMN_NAME[i]: sent for i, sent in enumerate(line[1:])}
                example[constants.LABEL_NAME] = line[0]
                yield key, example
        else:
            with io.open(filepath, mode='r', encoding='utf-8') as f:
                for key, row in enumerate(f):
                    fields = row.strip().split("\t")
                    example = {constants.TEXT_COLUMN_NAME[i]: sent for i, sent in enumerate(fields[1:])}
                    example[constants.LABEL_NAME] = fields[0]
                    yield key, example
README.md ADDED
@@ -0,0 +1,177 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: bsd
3
+ task_categories:
4
+ - text-classification
5
+ language:
6
+ - en
7
+ tags:
8
+ - Discourse
9
+ - Discourse Evaluation
10
+ - NLP
11
+ pretty_name: DiscoEval
12
+ size_categories:
13
+ - 100K<n<1M
14
+ ---
15
+
16
+ # DiscoEval Benchmark Datasets
17
+
18
+ ## Table of Contents
19
+ - [Dataset Description](#dataset-description)
20
+ - [Dataset Summary](#dataset-summary)
21
+ - [Dataset Sources](#dataset-sources)
22
+ - [Supported Tasks](#supported-tasks)
23
+ - [Languages](#languages)
24
+ - [Dataset Structure](#dataset-structure)
25
+ - [Data Instances](#data-instances)
26
+ - [Data Fields](#data-fields)
27
+ - [Data Splits](#data-splits)
28
+ - [Additional Information](#additional-information)
29
+ - [Benchmark Creators](#benchmark-creators)
30
+ - [Citation Information](#citation-information)
31
+ - [Loading Data Examples](#loading-data-examples)
32
+ - [Loading Data for Sentence Positioning Task with the Arxiv data source](#loading-data-for-sentence-positioning-task-with-the-arxiv-data-source)
33
+
34
+ ## Dataset Description
35
+
36
+ - **Repository:** [DiscoEval repository](https://github.com/ZeweiChu/DiscoEval)
37
+ - **Paper:** [Evaluation Benchmarks and Learning Criteria for Discourse-Aware Sentence Representations](https://arxiv.org/pdf/1909.00142)
38
+
39
+ ### Dataset Summary
40
+
41
+ The DiscoEval is an English-language Benchmark that contains a test suite of 7
42
+ tasks to evaluate whether sentence representations include semantic information
43
+ relevant to discourse processing. The benchmark datasets offer a collection of
44
+ tasks designed to evaluate natural language understanding models in the context
45
+ of discourse analysis and coherence.
46
+
47
+ ### Dataset Sources
48
+
49
+ - **Arxiv**: A repository of scientific papers and research articles.
50
+ - **Wikipedia**: An extensive online encyclopedia with articles on diverse topics.
51
+ - **Rocstory**: A dataset consisting of fictional stories.
52
+ - **Ubuntu IRC channel**: Conversational data extracted from the Ubuntu Internet Relay Chat (IRC) channel.
53
+ - **PeerRead**: A dataset of scientific papers frequently used for discourse-related tasks.
54
+ - **RST Discourse Treebank**: A dataset annotated with Rhetorical Structure Theory (RST) discourse relations.
55
+ - **Penn Discourse Treebank**: Another dataset with annotated discourse relations, facilitating the study of discourse structure.
56
+
57
+
58
+ ### Supported Tasks
59
+
60
+ 1. **Sentence Positioning**
61
+ - **Datasets Sources**: Arxiv, Wikipedia, Rocstory
62
+ - **Description**: Determine the correct placement of a sentence within a given context of five sentences. To form the input when training classifiers, encode the five sentences to vector representations \\(x_i\\). As input to the classifier we include \\(x_1\\) and the concatenation of \\(x_1 - x_i\\) for all \\(i\\): \\([x_1, x_1 - x_2, x_1-x_3,x_1-x_4,x_1-x_5]\\)
63
+
64
+ 2. **Binary Sentence Ordering**
65
+ - **Datasets Sources**: Arxiv, Wikipedia, Rocstory
66
+ - **Description**: Determining whether two sentences are in the correct consecutive order, identifying the more coherent structure. To form the input when training classifiers, we concatenate the embeddings of both sentences with their element-wise difference: \\([x_1, x_2, x_1-x_2]\\)
67
+
68
+ 3. **Discourse Coherence**
69
+ - **Datasets Sources**: Ubuntu IRC channel, Wikipedia
70
+ - **Description**: Determine whether a sequence of six sentences form a coherent paragraph. To form the input when training classifiers, encode all sentences to vector representations and concatenate all of them: \\([x_1, x_2, x_3, x_4, x_5, x_6]\\)
71
+
72
+ 4. **Sentence Section Prediction**
73
+ - **Datasets Sources**: Constructed from PeerRead
74
+ - **Description**: Determine the section or category to which a sentence belongs within a scientific paper, based on the content and context. To form the input when training classifiers, simply input the sentence embedding.
75
+
76
+ 5. **Discourse Relations**
77
+ - **Datasets Sources**: RST Discourse Treebank, Penn Discourse Treebank
78
+ - **Description**: Identify and classify discourse relations between sentences or text segments, helping to reveal the structure and flow of discourse. To form the input when training classifiers, refer to the [original paper](https://arxiv.org/pdf/1909.00142) for instructions
79
+
80
+
81
+ ### Languages
82
+
83
+ The text in all datasets is in English. The associated BCP-47 code is `en`.
84
+
85
+
86
+ ## Dataset Structure
87
+
88
+ ### Data Instances
89
+
90
+ All tasks are classification tasks, and they differ by the number of sentences per example and the type of label.
91
+
92
+ An example from the Sentence Positioning task would look as follows:
93
+ ```
94
+ {
95
+ 'sentence_1': 'Dan was overweight as well.',
96
+ 'sentence_2': 'Dan's parents were overweight.',
97
+ 'sentence_3': 'The doctors told his parents it was unhealthy.',
98
+ 'sentence_4': 'His parents understood and decided to make a change.',
99
+ 'sentence_5': 'They got themselves and Dan on a diet.'
100
+ 'label': '1'
101
+ }
102
+ ```
103
+ The label is '1' since the first sentence should go at position number 1 (counting from zero)
104
+
105
+ Another example from the Binary Sentence Ordering task would look as follows:
106
+ ```
107
+ {
108
+ 'sentence_1': 'When she walked in, she felt awkward.',
109
+ 'sentence_2': 'Janet decided to go to her high school's party.',
110
+ 'label': '0'
111
+ }
112
+ ```
113
+ The label is '0' because this is not the correct order of the sentences. It should be sentence_2 and then sentence_1.
114
+
115
+ For more examples, you can refer to the [original paper](https://arxiv.org/pdf/1909.00142).
116
+
117
+ ### Data Fields
118
+ In this benchmark, all data fields are string, including the labels.
119
+
120
+ ### Data Splits
121
+
122
+ The data is split into training, validation and test set for each of the tasks in the benchmark.
123
+
124
+ | Task and Dataset | Train | Valid | Test |
125
+ | ----- | ------ | ----- | ---- |
126
+ | Sentence Positioning: Arxiv| 10000 | 4000 | 4000|
127
+ | Sentence Positioning: Rocstory| 10000 | 4000 | 4000|
128
+ | Sentence Positioning: Wiki| 10000 | 4000 | 4000|
129
+ | Binary Sentence Ordering: Arxiv| 20000 | 8000 | 8000|
130
+ | Binary Sentence Ordering: Rocstory| 20000 | 8000 | 8000|
131
+ | Binary Sentence Ordering: Wiki| 20000 | 8000 | 8000|
132
+ | Discourse Coherence: Chat| 5816 | 1834 | 2418|
133
+ | Discourse Coherence: Wiki| 10000 | 4000 | 4000|
134
+ | Sentence Section Prediction | 10000 | 4000 | 4000 |
135
+ | Discourse Relation: Penn Discourse Tree Bank: Implicit | 8693 | 2972 | 3024 |
136
+ | Discourse Relation: Penn Discourse Tree Bank: Explicit | 9383 | 3613 | 3758 |
137
+ | Discourse Relation: RST Discourse Tree Bank | 17051 | 2045 | 2308 |
138
+
139
+ ## Additional Information
140
+
141
+ ### Benchmark Creators
142
+
143
+ This benchmark was created by Mingda Chen, Zewei Chu and Kevin Gimpel during work done at the University of Chicago and the Toyota Technological Institute at Chicago.
144
+
145
+ ### Citation Information
146
+
147
+ ```
148
+ @inproceedings{mchen-discoeval-19,
149
+ title = {Evaluation Benchmarks and Learning Criteria for Discourse-Aware Sentence Representations},
150
+ author = {Mingda Chen and Zewei Chu and Kevin Gimpel},
151
+ booktitle = {Proc. of {EMNLP}},
152
+ year={2019}
153
+ }
154
+ ```
155
+
156
+ ## Loading Data Examples
157
+
158
+ ### Loading Data for Sentence Positioning Task with the Arxiv data source
159
+
160
+ ```python
161
+ from datasets import load_dataset
162
+
163
+ # Load the Sentence Positioning dataset
164
+ dataset = load_dataset(path="OfekGlick/DiscoEval", name="SParxiv")
165
+
166
+ # Access the train, validation, and test splits
167
+ train_data = dataset["train"]
168
+ validation_data = dataset["validation"]
169
+ test_data = dataset["test"]
170
+
171
+ # Example usage: Print the first few training examples
172
+ for example in train_data[:5]:
173
+ print(example)
174
+ ```
175
+
176
+ The other possible inputs for the `name` parameter are:
177
+ `SParxiv`, `SProcstory`, `SPwiki`, `SSPabs`, `PDTB-I`, `PDTB-E`, `BSOarxiv`, `BSOrocstory`, `BSOwiki`, `DCchat`, `DCwiki`, `RST`
constants.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Shared constants for the DiscoEval dataset builder.

Groups, per task: config names, split file names, data directories,
label sets, and the number of text columns per example.
"""

# --- General ---------------------------------------------------------------
LABEL_NAME = "label"
# Feature names for up to nine text columns ("sentence_1" .. "sentence_9").
TEXT_COLUMN_NAME = ["sentence_%d" % idx for idx in range(1, 10)]

# --- Sentence Section Prediction (SSPabs) ----------------------------------
SSPABS = "SSPabs"
SSPABS_TRAIN_NAME = "train.txt"
SSPABS_VALID_NAME = "valid.txt"
SSPABS_TEST_NAME = "test.txt"
SSPABS_DATA_DIR = "data/SSP/abs"
SSPABS_LABELS = ["0", "1"]
SSPABS_TEXT_COLUMNS = 1

# --- Penn Discourse Treebank (PDTB) ----------------------------------------
PDTB_I = "PDTB-I"
PDTB_E = "PDTB-E"
PDTB_TRAIN_NAME = "train.txt"
PDTB_VALID_NAME = "valid.txt"
PDTB_TEST_NAME = "test.txt"
PDTB_DATA_DIR = "data/PDTB"
PDTB_DIRS = {PDTB_E: "Explicit", PDTB_I: "Implicit"}
# Order matters: ClassLabel ids are assigned by list position.
PDTB_E_LABELS = [
    "Comparison.Concession",
    "Comparison.Contrast",
    "Contingency.Cause",
    "Contingency.Condition",
    "Contingency.Pragmatic condition",
    "Expansion.Alternative",
    "Expansion.Conjunction",
    "Expansion.Instantiation",
    "Expansion.List",
    "Expansion.Restatement",
    "Temporal.Asynchronous",
    "Temporal.Synchrony",
]
PDTB_I_LABELS = [
    "Comparison.Concession",
    "Comparison.Contrast",
    "Contingency.Cause",
    "Contingency.Pragmatic cause",
    "Expansion.Alternative",
    "Expansion.Conjunction",
    "Expansion.Instantiation",
    "Expansion.List",
    "Expansion.Restatement",
    "Temporal.Asynchronous",
    "Temporal.Synchrony",
]
PDTB_E_TEXT_COLUMNS = 2
PDTB_I_TEXT_COLUMNS = 2

# --- Sentence Positioning (SP) ---------------------------------------------
SPARXIV = "SParxiv"
SPROCSTORY = "SProcstory"
SPWIKI = "SPwiki"
SP_TRAIN_NAME = "train.txt"
SP_VALID_NAME = "valid.txt"
SP_TEST_NAME = "test.txt"
SP_DATA_DIR = "data/SP"
SP_DIRS = {SPARXIV: "arxiv", SPROCSTORY: "rocstory", SPWIKI: "wiki"}
SP_LABELS = ["0", "1", "2", "3", "4"]
SP_TEXT_COLUMNS = 5

# --- Binary Sentence Ordering (BSO) ----------------------------------------
BSOARXIV = "BSOarxiv"
BSOROCSTORY = "BSOrocstory"
BSOWIKI = "BSOwiki"
BSO_TRAIN_NAME = "train.txt"
BSO_VALID_NAME = "valid.txt"
BSO_TEST_NAME = "test.txt"
BSO_DATA_DIR = "data/BSO"
BSO_DIRS = {BSOARXIV: "arxiv", BSOROCSTORY: "rocstory", BSOWIKI: "wiki"}
BSO_LABELS = ["0", "1"]
BSO_TEXT_COLUMNS = 2

# --- Discourse Coherence (DC) ----------------------------------------------
DCCHAT = "DCchat"
DCWIKI = "DCwiki"
DC_TRAIN_NAME = "train.txt"
DC_VALID_NAME = "valid.txt"
DC_TEST_NAME = "test.txt"
DC_DATA_DIR = "data/DC"
DC_DIRS = {DCCHAT: "chat", DCWIKI: "wiki"}
DC_LABELS = ["0", "1"]
DC_TEXT_COLUMNS = 6

# --- RST Discourse Treebank ------------------------------------------------
RST = "RST"
RST_TRAIN_NAME = "RST_TRAIN.pkl"
RST_VALID_NAME = "RST_DEV.pkl"
RST_TEST_NAME = "RST_TEST.pkl"
RST_DATA_DIR = "data/RST"
# Nuclearity (NS/SN/NN) + relation labels; order matters for ClassLabel ids.
RST_LABELS = [
    "NS-Explanation",
    "NS-Evaluation",
    "NN-Condition",
    "NS-Summary",
    "SN-Cause",
    "SN-Background",
    "NS-Background",
    "SN-Summary",
    "NS-Topic-Change",
    "NN-Explanation",
    "SN-Topic-Comment",
    "NS-Elaboration",
    "SN-Attribution",
    "SN-Manner-Means",
    "NN-Evaluation",
    "NS-Comparison",
    "NS-Contrast",
    "SN-Condition",
    "NS-Temporal",
    "NS-Enablement",
    "SN-Evaluation",
    "NN-Topic-Comment",
    "NN-Temporal",
    "NN-Textual-organization",
    "NN-Same-unit",
    "NN-Comparison",
    "NN-Topic-Change",
    "SN-Temporal",
    "NN-Joint",
    "SN-Enablement",
    "SN-Explanation",
    "NN-Contrast",
    "NN-Cause",
    "SN-Contrast",
    "NS-Attribution",
    "NS-Topic-Comment",
    "SN-Elaboration",
    "SN-Comparison",
    "NS-Cause",
    "NS-Condition",
    "NS-Manner-Means",
]
RST_TEXT_COLUMNS = 2
data/BSO/arxiv/test.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/BSO/arxiv/train.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/BSO/arxiv/valid.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/BSO/rocstory/test.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/BSO/rocstory/train.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/BSO/rocstory/valid.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/BSO/wiki/test.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/BSO/wiki/train.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/BSO/wiki/valid.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/DC/chat/test.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/DC/chat/train.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/DC/chat/valid.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/DC/wiki/test.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/DC/wiki/train.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/DC/wiki/valid.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/PDTB/Explicit/labelset.txt ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Comparison.Concession
2
+ Comparison.Contrast
3
+ Contingency.Cause
4
+ Contingency.Condition
5
+ Contingency.Pragmatic condition
6
+ Expansion.Alternative
7
+ Expansion.Conjunction
8
+ Expansion.Instantiation
9
+ Expansion.List
10
+ Expansion.Restatement
11
+ Temporal.Asynchronous
12
+ Temporal.Synchrony
data/PDTB/Explicit/test.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/PDTB/Explicit/train.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/PDTB/Explicit/valid.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/PDTB/Implicit/labelset.txt ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Comparison.Concession
2
+ Comparison.Contrast
3
+ Contingency.Cause
4
+ Contingency.Pragmatic cause
5
+ Expansion.Alternative
6
+ Expansion.Conjunction
7
+ Expansion.Instantiation
8
+ Expansion.List
9
+ Expansion.Restatement
10
+ Temporal.Asynchronous
11
+ Temporal.Synchrony
data/PDTB/Implicit/test.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/PDTB/Implicit/train.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/PDTB/Implicit/valid.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/RST/RST_DEV.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1ccce6a70b54d5aa8c6b07718eb5d609216842b6b86109d84ecd1e5ca4a12931
3
+ size 288410
data/RST/RST_TEST.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:441da3fbb3a4a8fc9b84e07c7897decca76ea043c789ceb918955b9be7232d1f
3
+ size 324662
data/RST/RST_TRAIN.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2da537c3259a007d3f7e3ef7c08953b522768b4fd2fc2b930c1856704f639f78
3
+ size 2467975
data/RST/cmds.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ cp /share/data/speech/mingda/data/for-zewei/SentEval/data/*.pkl .
data/RST/py2/RST_DEV.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1ccce6a70b54d5aa8c6b07718eb5d609216842b6b86109d84ecd1e5ca4a12931
3
+ size 288410
data/RST/py2/RST_TEST.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:441da3fbb3a4a8fc9b84e07c7897decca76ea043c789ceb918955b9be7232d1f
3
+ size 324662
data/RST/py2/RST_TRAIN.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2da537c3259a007d3f7e3ef7c08953b522768b4fd2fc2b930c1856704f639f78
3
+ size 2467975
data/RST/py3/RST_DEV.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c77fc13ce8f31220182a93a97372e6c8c160869349af3c98d0f3c3f04b5f1dab
3
+ size 288410
data/RST/py3/RST_TEST.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f0c7ff5e45cde0a339598dfae57d93a7283159d66a0186cd4028987cb65649b5
3
+ size 324662
data/RST/py3/RST_TRAIN.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:190971e116ee66a356e3fbe897a8c6f9e895920daf72d77cecea53e3722b133c
3
+ size 2467975
data/SP/arxiv/test.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/SP/arxiv/train.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/SP/arxiv/valid.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/SP/rocstory/test.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/SP/rocstory/train.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/SP/rocstory/valid.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/SP/wiki/test.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/SP/wiki/train.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/SP/wiki/valid.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/SSP/abs/test.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/SSP/abs/train.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/SSP/abs/valid.txt ADDED
The diff for this file is too large to render. See raw diff