gabrielaltay committed · Commit cb69156 · 1 Parent(s): 3b94d17

upload hubscripts/tmvar_v1_hub.py to hub from bigbio repo

Files changed (1):
  1. tmvar_v1.py +249 -0
tmvar_v1.py ADDED
@@ -0,0 +1,249 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ import itertools
+ import os
+ from typing import Dict, Iterator, List, Tuple
+
+ import datasets
+
+ from .bigbiohub import kb_features
+ from .bigbiohub import BigBioConfig
+ from .bigbiohub import Tasks
+
+ _LANGUAGES = ['English']
+ _PUBMED = True
+ _LOCAL = False
+ _CITATION = """\
+ @article{wei2013tmvar,
+     title={tmVar: a text mining approach for extracting sequence variants in biomedical literature},
+     author={Wei, Chih-Hsuan and Harris, Bethany R and Kao, Hung-Yu and Lu, Zhiyong},
+     journal={Bioinformatics},
+     volume={29},
+     number={11},
+     pages={1433--1439},
+     year={2013},
+     publisher={Oxford University Press}
+ }
+ """
+
+ _DATASETNAME = "tmvar_v1"
+ _DISPLAYNAME = "tmVar v1"
+
+ _DESCRIPTION = """This dataset contains 500 PubMed articles manually annotated with mutation mentions of various kinds. It can be used for NER tasks only.
+ The dataset is split into train (334 documents) and test (166 documents) splits."""
+
+ _HOMEPAGE = "https://www.ncbi.nlm.nih.gov/research/bionlp/Tools/tmvar/"
+
+ _LICENSE = 'License information unavailable'
+
+ _URLS = {
+     _DATASETNAME: "https://www.ncbi.nlm.nih.gov/CBBresearch/Lu/Demo/tmTools/download/tmVar/tmVarCorpus.zip",
+ }
+
+ _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _BIGBIO_VERSION = "1.0.0"
+
+ logger = datasets.utils.logging.get_logger(__name__)
+
+
+ class TmvarV1Dataset(datasets.GeneratorBasedBuilder):
+     """
+     The tmVar dataset contains 500 PubMed articles manually annotated with mutation
+     mentions of various kinds.
+     It can be used for biomedical NER tasks
+     """
+
+     DEFAULT_CONFIG_NAME = "tmvar_v1_source"
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+     BUILDER_CONFIGS = []
+     BUILDER_CONFIGS.append(
+         BigBioConfig(
+             name=f"{_DATASETNAME}_source",
+             version=SOURCE_VERSION,
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=f"{_DATASETNAME}",
+         )
+     )
+     BUILDER_CONFIGS.append(
+         BigBioConfig(
+             name=f"{_DATASETNAME}_bigbio_kb",
+             version=BIGBIO_VERSION,
+             description=f"{_DATASETNAME} BigBio schema",
+             schema="bigbio_kb",
+             subset_id=f"{_DATASETNAME}",
+         )
+     )
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "pmid": datasets.Value("string"),
+                     "passages": [
+                         {
+                             "type": datasets.Value("string"),
+                             "text": datasets.Value("string"),
+                             "offsets": [datasets.Value("int32")],
+                         }
+                     ],
+                     "entities": [
+                         {
+                             "text": datasets.Value("string"),
+                             "offsets": [datasets.Value("int32")],
+                             "concept_id": datasets.Value("string"),
+                             "semantic_type_id": datasets.Value("string"),
+                         }
+                     ],
+                 }
+             )
+         elif self.config.schema == "bigbio_kb":
+             features = kb_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+
+         url = _URLS[_DATASETNAME]
+         data_dir = dl_manager.download_and_extract(url)
+         train_filepath = os.path.join(data_dir, "tmVarCorpus", "train.PubTator.txt")
+         test_filepath = os.path.join(data_dir, "tmVarCorpus", "test.PubTator.txt")
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": train_filepath,
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": test_filepath,
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath) -> Iterator[Tuple[int, Dict]]:
+         """Yields examples as (key, example) tuples."""
+         if self.config.schema == "source":
+             with open(filepath, "r", encoding="utf8") as fstream:
+                 for raw_document in self.generate_raw_docs(fstream):
+                     document = self.parse_raw_doc(raw_document)
+                     yield document["pmid"], document
+
+         elif self.config.schema == "bigbio_kb":
+             with open(filepath, "r", encoding="utf8") as fstream:
+                 # running counter: unique ids for documents, entities, and passages
+                 uid = itertools.count(0)
+                 for raw_document in self.generate_raw_docs(fstream):
+                     document = self.parse_raw_doc(raw_document)
+                     pmid = document.pop("pmid")
+                     document["id"] = next(uid)
+                     document["document_id"] = pmid
+
+                     entities_ = []
+                     for entity in document["entities"]:
+                         entities_.append(
+                             {
+                                 "id": next(uid),
+                                 "type": entity["semantic_type_id"],
+                                 "text": [entity["text"]],
+                                 "normalized": [],
+                                 "offsets": [entity["offsets"]],
+                             }
+                         )
+                     for passage in document["passages"]:
+                         passage["id"] = next(uid)
+
+                     document["entities"] = entities_
+                     document["relations"] = []
+                     document["events"] = []
+                     document["coreferences"] = []
+                     yield document["document_id"], document
+
+     def generate_raw_docs(self, fstream):
+         """
+         Yield one raw document at a time from the file stream, where a raw
+         document is the list of stripped non-empty lines between blank lines.
+         """
+         raw_document = []
+         for line in fstream:
+             if line.strip():
+                 raw_document.append(line.strip())
+             elif raw_document:
+                 yield raw_document
+                 raw_document = []
+         if raw_document:
+             yield raw_document
+
+     def parse_raw_doc(self, raw_doc):
+         """
+         Parse one PubTator document: a "pmid|t|title" line, a "pmid|a|abstract"
+         line, then one tab-separated mention line per entity with the fields
+         pmid, start, end, mention, semantic_type_id, entity_id.
+         """
+         # keep the PMID a string, matching the "pmid"/"document_id" string features;
+         # maxsplit guards against pipes inside the title or abstract text
+         pmid, _, title = raw_doc[0].split("|", 2)
+         _, _, abstract = raw_doc[1].split("|", 2)
+
+         if self.config.schema == "source":
+             passages = [
+                 {"type": "title", "text": title, "offsets": [0, len(title)]},
+                 {
+                     "type": "abstract",
+                     "text": abstract,
+                     "offsets": [len(title) + 1, len(title) + len(abstract) + 1],
+                 },
+             ]
+         elif self.config.schema == "bigbio_kb":
+             passages = [
+                 {"type": "title", "text": [title], "offsets": [[0, len(title)]]},
+                 {
+                     "type": "abstract",
+                     "text": [abstract],
+                     "offsets": [[len(title) + 1, len(title) + len(abstract) + 1]],
+                 },
+             ]
+
+         entities = []
+         for line in raw_doc[2:]:
+             (
+                 pmid_,
+                 start_idx,
+                 end_idx,
+                 mention,
+                 semantic_type_id,
+                 entity_id,
+             ) = line.split("\t")
+
+             entity = {
+                 "offsets": [int(start_idx), int(end_idx)],
+                 "text": mention,
+                 "semantic_type_id": semantic_type_id,
+                 "concept_id": entity_id,
+             }
+             entities.append(entity)
+
+         return {"pmid": pmid, "passages": passages, "entities": entities}