Datasets:
Upload 3 files
Browse files- Biosses.py +128 -0
- README.md +170 -1
- test_dataset.py +17 -0
Biosses.py
ADDED
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8

"""BIOSSES: a semantic sentence similarity estimation system for the biomedical domain"""

import datasets
from pathlib import Path

import pandas as pd

# Module-level logger following the `datasets` library convention.
logger = datasets.logging.get_logger(__name__)

# BibTeX entry for the original BIOSSES paper (Bioinformatics, 2017).
# NOTE(review): the triple-quoted strings below are runtime data surfaced on the
# dataset card; their text is reproduced as-is from the source (only the line
# layout inside them is an assumption of the reconstruction — confirm).
_CITATION = """
@article{10.1093/bioinformatics/btx238,
author = {Soğancıoğlu, Gizem and Öztürk, Hakime and Özgür, Arzucan},
title = "{BIOSSES: a semantic sentence similarity estimation system for the biomedical domain}",
journal = {Bioinformatics},
volume = {33},
number = {14},
pages = {i49-i58},
year = {2017},
month = {07},
abstract = "{The amount of information available in textual format is rapidly increasing in the biomedical domain. Therefore, natural language processing (NLP) applications are becoming increasingly important to facilitate the retrieval and analysis of these data. Computing the semantic similarity between sentences is an important component in many NLP tasks including text retrieval and summarization. A number of approaches have been proposed for semantic sentence similarity estimation for generic English. However, our experiments showed that such approaches do not effectively cover biomedical knowledge and produce poor results for biomedical text.We propose several approaches for sentence-level semantic similarity computation in the biomedical domain, including string similarity measures and measures based on the distributed vector representations of sentences learned in an unsupervised manner from a large biomedical corpus. In addition, ontology-based approaches are presented that utilize general and domain-specific ontologies. Finally, a supervised regression based model is developed that effectively combines the different similarity computation metrics. A benchmark data set consisting of 100 sentence pairs from the biomedical literature is manually annotated by five human experts and used for evaluating the proposed methods.The experiments showed that the supervised semantic sentence similarity computation approach obtained the best performance (0.836 correlation with gold standard human annotations) and improved over the state-of-the-art domain-independent systems up to 42.6\\% in terms of the Pearson correlation metric.A web-based system for biomedical semantic sentence similarity computation, the source code, and the annotated benchmark data set are available at: http://tabilab.cmpe.boun.edu.tr/BIOSSES/.}",
issn = {1367-4803},
doi = {10.1093/bioinformatics/btx238},
url = {https://doi.org/10.1093/bioinformatics/btx238},
eprint = {https://academic.oup.com/bioinformatics/article-pdf/33/14/i49/25157316/btx238.pdf},
}
"""

# License under which the BIOSSES data is distributed.
_LICENSE = """
GNU General Public License v3.0
"""

# Human-readable summary surfaced via DatasetInfo / the dataset card.
_DESCRIPTION = """
BIOSSES is a benchmark dataset for biomedical sentence similarity estimation.
The dataset comprises 100 sentence pairs, in which each sentence was selected
from the TAC (Text Analysis Conference) Biomedical Summarization Track Training
Dataset containing articles from the biomedical domain. The sentence pairs in
BIOSSES were selected from citing sentences, i.e. sentences that have a citation
to a reference article.

The sentence pairs were evaluated by five different human experts that judged
their similarity and gave scores ranging from 0 (no relation) to 4 (equivalent).
In the original paper the mean of the scores assigned by the five human annotators
was taken as the gold standard. The Pearson correlation between the gold standard
scores and the scores estimated by the models was used as the evaluation metric.
The strength of correlation can be assessed by the general guideline proposed by
Evans (1996) as follows:

very strong: 0.80–1.00
strong: 0.60–0.79
moderate: 0.40–0.59
weak: 0.20–0.39
very weak: 0.00–0.19
"""

# Official BIOSSES homepage.
_HOMEPAGE = "https://tabilab.cmpe.boun.edu.tr/BIOSSES/"

# BLUE benchmark release archive bundling the BIOSSES train/dev/test TSV files.
_URL = "https://github.com/ncbi-nlp/BLUE_Benchmark/releases/download/0.1/data_v0.2.zip"
|
60 |
+
|
61 |
+
class Biosses(datasets.GeneratorBasedBuilder):
    """BIOSSES: a semantic sentence similarity estimation system for the biomedical domain.

    Loads the 100 sentence pairs (with mean expert similarity scores) from the
    BLUE benchmark release archive, split into train/validation/test TSV files.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="default",
            version=datasets.Version("0.2.0"),
            # Was: f"The Biosses corpora" — an f-string with no placeholders.
            description="The Biosses corpora",
        )
    ]

    DEFAULT_CONFIG_NAME = "default"

    def _info(self):
        """Return dataset metadata: features, homepage, citation and license."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "sentence1": datasets.Value("string"),
                    "sentence2": datasets.Value("string"),
                    "score": datasets.Value("float"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the BLUE archive and map each split to its TSV file."""
        data_dir = Path(dl_manager.download_and_extract(_URL))

        # One SplitGenerator per split; only the TSV filename differs, so the
        # three previously copy-pasted stanzas are collapsed into a table.
        split_files = {
            datasets.Split.TRAIN: "train.tsv",
            datasets.Split.VALIDATION: "dev.tsv",
            datasets.Split.TEST: "test.tsv",
        }
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={"corpus_path": data_dir / "data" / "BIOSSES" / filename},
            )
            for split, filename in split_files.items()
        ]

    def _generate_examples(self, corpus_path: Path):
        """Yield (key, example) pairs from one tab-separated split file.

        Args:
            corpus_path: path to a BIOSSES TSV file with columns
                `index`, `sentence1`, `sentence2`, `score`.
        """
        df = pd.read_csv(corpus_path, sep="\t")

        for _, row in df.iterrows():
            key = int(row["index"])
            yield key, {
                # `_info` declares "id" as a string feature, so cast explicitly
                # instead of yielding a raw int and relying on implicit coercion.
                "id": str(key),
                "sentence1": row["sentence1"],
                "sentence2": row["sentence2"],
                "score": float(row["score"]),
            }
|
README.md
CHANGED
@@ -1,3 +1,172 @@
|
|
1 |
---
|
2 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
3 |
---
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
---
|
2 |
+
annotations_creators:
|
3 |
+
- expert-generated
|
4 |
+
language_creators:
|
5 |
+
- found
|
6 |
+
language:
|
7 |
+
- en
|
8 |
+
license:
|
9 |
+
- gpl-3.0
|
10 |
+
multilinguality:
|
11 |
+
- monolingual
|
12 |
+
size_categories:
|
13 |
+
- n<1K
|
14 |
+
source_datasets:
|
15 |
+
- original
|
16 |
+
task_categories:
|
17 |
+
- text-classification
|
18 |
+
task_ids:
|
19 |
+
- text-scoring
|
20 |
+
- semantic-similarity-scoring
|
21 |
+
paperswithcode_id: biosses
|
22 |
+
pretty_name: BIOSSES
|
23 |
+
dataset_info:
|
24 |
+
features:
|
25 |
+
- name: sentence1
|
26 |
+
dtype: string
|
27 |
+
- name: sentence2
|
28 |
+
dtype: string
|
29 |
+
- name: score
|
30 |
+
dtype: float32
|
31 |
+
splits:
|
32 |
+
- name: train
|
33 |
+
num_bytes: 32783
|
34 |
+
num_examples: 100
|
35 |
+
download_size: 36324
|
36 |
+
dataset_size: 32783
|
37 |
---
|
38 |
+
|
39 |
+
# Dataset Card for BIOSSES
|
40 |
+
|
41 |
+
## Table of Contents
|
42 |
+
- [Dataset Description](#dataset-description)
|
43 |
+
- [Dataset Summary](#dataset-summary)
|
44 |
+
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
|
45 |
+
- [Languages](#languages)
|
46 |
+
- [Dataset Structure](#dataset-structure)
|
47 |
+
- [Data Instances](#data-instances)
|
48 |
+
- [Data Fields](#data-fields)
|
49 |
+
- [Data Splits](#data-splits)
|
50 |
+
- [Dataset Creation](#dataset-creation)
|
51 |
+
- [Curation Rationale](#curation-rationale)
|
52 |
+
- [Source Data](#source-data)
|
53 |
+
- [Annotations](#annotations)
|
54 |
+
- [Personal and Sensitive Information](#personal-and-sensitive-information)
|
55 |
+
- [Considerations for Using the Data](#considerations-for-using-the-data)
|
56 |
+
- [Social Impact of Dataset](#social-impact-of-dataset)
|
57 |
+
- [Discussion of Biases](#discussion-of-biases)
|
58 |
+
- [Other Known Limitations](#other-known-limitations)
|
59 |
+
- [Additional Information](#additional-information)
|
60 |
+
- [Dataset Curators](#dataset-curators)
|
61 |
+
- [Licensing Information](#licensing-information)
|
62 |
+
- [Citation Information](#citation-information)
|
63 |
+
- [Contributions](#contributions)
|
64 |
+
|
65 |
+
## Dataset Description
|
66 |
+
|
67 |
+
- **Homepage:** https://tabilab.cmpe.boun.edu.tr/BIOSSES/DataSet.html
|
68 |
+
- **Repository:** https://github.com/gizemsogancioglu/biosses
|
69 |
+
- **Paper:** [BIOSSES: a semantic sentence similarity estimation system for the biomedical domain](https://academic.oup.com/bioinformatics/article/33/14/i49/3953954)
|
70 |
+
- **Point of Contact:** [Gizem Soğancıoğlu](gizemsogancioglu@gmail.com) and [Arzucan Özgür](gizemsogancioglu@gmail.com)
|
71 |
+
|
72 |
+
### Dataset Summary
|
73 |
+
|
74 |
+
BIOSSES is a benchmark dataset for biomedical sentence similarity estimation. The dataset comprises 100 sentence pairs, in which each sentence was selected from the [TAC (Text Analysis Conference) Biomedical Summarization Track Training Dataset](https://tac.nist.gov/2014/BiomedSumm/) containing articles from the biomedical domain. The sentence pairs in BIOSSES were selected from citing sentences, i.e. sentences that have a citation to a reference article.
|
75 |
+
|
76 |
+
The sentence pairs were evaluated by five different human experts that judged their similarity and gave scores ranging from 0 (no relation) to 4 (equivalent). In the original paper the mean of the scores assigned by the five human annotators was taken as the gold standard. The Pearson correlation between the gold standard scores and the scores estimated by the models was used as the evaluation metric. The strength of correlation can be assessed by the general guideline proposed by Evans (1996) as follows:
|
77 |
+
|
78 |
+
- very strong: 0.80–1.00
|
79 |
+
- strong: 0.60–0.79
|
80 |
+
- moderate: 0.40–0.59
|
81 |
+
- weak: 0.20–0.39
|
82 |
+
- very weak: 0.00–0.19
|
83 |
+
|
84 |
+
### Supported Tasks and Leaderboards
|
85 |
+
|
86 |
+
Biomedical Semantic Similarity Scoring.
|
87 |
+
|
88 |
+
### Languages
|
89 |
+
|
90 |
+
English.
|
91 |
+
|
92 |
+
## Dataset Structure
|
93 |
+
|
94 |
+
### Data Instances
|
95 |
+
|
96 |
+
For each instance, there are two sentences (i.e. sentence 1 and 2), and its corresponding similarity score (the mean of the scores assigned by the five human annotators).
|
97 |
+
|
98 |
+
```json
|
99 |
+
{
|
100 |
+
"id": "0",
|
101 |
+
"sentence1": "Centrosomes increase both in size and in microtubule-nucleating capacity just before mitotic entry.",
|
102 |
+
"sentence2": "Functional studies showed that, when introduced into cell lines, miR-146a was found to promote cell proliferation in cervical cancer cells, which suggests that miR-146a works as an oncogenic miRNA in these cancers.",
|
103 |
+
"score": 0.0
|
104 |
+
}
|
105 |
+
```
|
106 |
+
|
107 |
+
### Data Fields
|
108 |
+
|
109 |
+
- `sentence1`: string
|
110 |
+
- `sentence2`: string
|
111 |
+
- `score`: float ranging from 0 (no relation) to 4 (equivalent)
|
112 |
+
|
113 |
+
## Dataset Creation
|
114 |
+
|
115 |
+
### Curation Rationale
|
116 |
+
|
117 |
+
### Source Data
|
118 |
+
|
119 |
+
The [TAC (Text Analysis Conference) Biomedical Summarization Track Training Dataset](https://tac.nist.gov/2014/BiomedSumm/).
|
120 |
+
|
121 |
+
### Annotations
|
122 |
+
|
123 |
+
#### Annotation process
|
124 |
+
|
125 |
+
The sentence pairs were evaluated by five different human experts that judged their similarity and gave scores ranging from 0 (no relation) to 4 (equivalent). The score range was described based on the guidelines of SemEval 2012 Task 6 on STS (Agirre et al., 2012). Besides the annotation instructions, example sentences from the biomedical literature were provided to the annotators for each of the similarity degrees.
|
126 |
+
|
127 |
+
The table below shows the Pearson correlation of the scores of each annotator with respect to the average scores of the remaining four annotators. It is observed that there is strong association among the scores of the annotators. The lowest correlations are 0.902, which can be considered as an upper bound for an algorithmic measure evaluated on this dataset.
|
128 |
+
|
129 |
+
| |Correlation r |
|
130 |
+
|----------:|--------------:|
|
131 |
+
|Annotator A| 0.952|
|
132 |
+
|Annotator B| 0.958|
|
133 |
+
|Annotator C| 0.917|
|
134 |
+
|Annotator D| 0.902|
|
135 |
+
|Annotator E| 0.941|
|
136 |
+
|
137 |
+
## Additional Information
|
138 |
+
|
139 |
+
### Dataset Curators
|
140 |
+
|
141 |
+
- Gizem Soğancıoğlu, gizemsogancioglu@gmail.com
|
142 |
+
- Hakime Öztürk, hakime.ozturk@boun.edu.tr
|
143 |
+
- Arzucan Özgür, gizemsogancioglu@gmail.com
|
144 |
+
Bogazici University, Istanbul, Turkey
|
145 |
+
|
146 |
+
### Licensing Information
|
147 |
+
|
148 |
+
BIOSSES is made available under the terms of the [GNU General Public License v3.0](https://www.gnu.org/licenses/gpl-3.0.en.html).
|
149 |
+
|
150 |
+
### Citation Information
|
151 |
+
|
152 |
+
```bibtex
|
153 |
+
@article{10.1093/bioinformatics/btx238,
|
154 |
+
author = {Soğancıoğlu, Gizem and Öztürk, Hakime and Özgür, Arzucan},
|
155 |
+
title = "{BIOSSES: a semantic sentence similarity estimation system for the biomedical domain}",
|
156 |
+
journal = {Bioinformatics},
|
157 |
+
volume = {33},
|
158 |
+
number = {14},
|
159 |
+
pages = {i49-i58},
|
160 |
+
year = {2017},
|
161 |
+
month = {07},
|
162 |
+
abstract = "{The amount of information available in textual format is rapidly increasing in the biomedical domain. Therefore, natural language processing (NLP) applications are becoming increasingly important to facilitate the retrieval and analysis of these data. Computing the semantic similarity between sentences is an important component in many NLP tasks including text retrieval and summarization. A number of approaches have been proposed for semantic sentence similarity estimation for generic English. However, our experiments showed that such approaches do not effectively cover biomedical knowledge and produce poor results for biomedical text.We propose several approaches for sentence-level semantic similarity computation in the biomedical domain, including string similarity measures and measures based on the distributed vector representations of sentences learned in an unsupervised manner from a large biomedical corpus. In addition, ontology-based approaches are presented that utilize general and domain-specific ontologies. Finally, a supervised regression based model is developed that effectively combines the different similarity computation metrics. A benchmark data set consisting of 100 sentence pairs from the biomedical literature is manually annotated by five human experts and used for evaluating the proposed methods.The experiments showed that the supervised semantic sentence similarity computation approach obtained the best performance (0.836 correlation with gold standard human annotations) and improved over the state-of-the-art domain-independent systems up to 42.6\\% in terms of the Pearson correlation metric.A web-based system for biomedical semantic sentence similarity computation, the source code, and the annotated benchmark data set are available at: http://tabilab.cmpe.boun.edu.tr/BIOSSES/.}",
|
163 |
+
issn = {1367-4803},
|
164 |
+
doi = {10.1093/bioinformatics/btx238},
|
165 |
+
url = {https://doi.org/10.1093/bioinformatics/btx238},
|
166 |
+
eprint = {https://academic.oup.com/bioinformatics/article-pdf/33/14/i49/25157316/btx238.pdf},
|
167 |
+
}
|
168 |
+
```
|
169 |
+
|
170 |
+
### Contributions
|
171 |
+
|
172 |
+
Thanks to [@qanastek](https://github.com/qanastek) for adding this dataset.
|
test_dataset.py
ADDED
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from collections import Counter

from datasets import load_dataset, set_caching_enabled

# If you need to force clear the cache
# set_caching_enabled(False)


def main():
    """Smoke-test the Biosses loading script: load it, print the splits and one row."""
    source = "Biosses.py"
    # source = "qanastek/Biosses"

    dataset = load_dataset(source)
    # dataset = load_dataset(source, "default")

    print(dataset)

    first_validation_example = dataset["validation"][0]
    print(first_validation_example)


# Guard the entry point so importing this module has no side effects
# (previously the download/print ran at module import time).
if __name__ == "__main__":
    main()
|