holylovenia committed on
Commit
df6f96d
1 Parent(s): 63a6a47

Upload bioner_id.py with huggingface_hub

Files changed (1)
  1. bioner_id.py +166 -0
bioner_id.py ADDED
@@ -0,0 +1,166 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.common_parser import load_conll_data
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Licenses, Tasks
+
+ _CITATION = """\
+ @article{abdillah2023pengenalan,
+   title={Pengenalan Entitas Biomedis dalam Teks Konsultasi Kesehatan Online Berbahasa Indonesia Berbasis Arsitektur Transformers},
+   author={Abdillah, Abid Famasya and Purwitasari, Diana and Juanita, Safitri and Purnomo, Mauridhi Hery},
+   year={2023},
+   month=feb,
+   journal={Jurnal Teknologi Informasi dan Ilmu Komputer},
+   volume={10},
+   number={1},
+   pages={131--140}
+ }
+ """
+
+ _DATASETNAME = "bioner_id"
+
+ _DESCRIPTION = """\
+ This dataset is taken from the online health consultation platform Alodokter.com and has been annotated by two medical doctors. Data were annotated using the IOB scheme in CoNLL format.
+
+ The dataset contains 2600 medical answers written by doctors between 2017 and 2020. Two medical experts were assigned to annotate the data into two entity types: DISORDERS and ANATOMY.
+ The answers cover the topics of diarrhea, HIV-AIDS, nephrolithiasis and TBC, which are marked as high-risk by WHO.
+ """
+
+ _HOMEPAGE = "https://huggingface.co/datasets/abid/indonesia-bioner-dataset"
+
+ _LANGUAGES = ["ind"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
+
+ _LICENSE = Licenses.BSD_3_CLAUSE_CLEAR.value
+
+ _LOCAL = False
+
+ _URLS = {
+     _DATASETNAME: {k: f"https://huggingface.co/datasets/abid/indonesia-bioner-dataset/raw/main/{k}.conll" for k in ["train", "valid", "test"]},
+ }
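+ # For reference, the comprehension above expands to three fixed URLs, e.g.
+ # _URLS["bioner_id"]["train"] == "https://huggingface.co/datasets/abid/indonesia-bioner-dataset/raw/main/train.conll"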
+
+ _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class BioNERIdDataset(datasets.GeneratorBasedBuilder):
+     """2600 conversations between patients and medical doctors from 2017-2020.
+     Two medical experts annotated the data into two entity types: DISORDERS and ANATOMY."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     label_classes = ["B-ANAT", "B-DISO", "I-ANAT", "I-DISO", "O"]
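+     # IOB convention behind label_classes: "B-" opens an entity mention, "I-"
+     # continues it, and "O" marks tokens outside any mention. A hypothetical
+     # tagged fragment (token text invented for illustration):
+     #   nyeri -> B-DISO, dada -> I-DISO, sejak -> O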
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_source",
+             version=SOURCE_VERSION,
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=f"{_DATASETNAME}",
+         ),
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_seacrowd_seq_label",
+             version=SEACROWD_VERSION,
+             description=f"{_DATASETNAME} SEACrowd schema",
+             schema="seacrowd_seq_label",
+             subset_id=f"{_DATASETNAME}",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "sentence": [datasets.Value("string")],
+                     "label": [datasets.Value("string")],
+                 }
+             )
+
+         elif self.config.schema == "seacrowd_seq_label":
+             features = schemas.seq_label_features(self.label_classes)
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
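+
+     # Note: the source schema keeps parallel "sentence"/"label" string lists,
+     # while seacrowd_seq_label re-exposes them as "id"/"tokens"/"labels"
+     # (see _generate_examples below).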
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         urls = _URLS[_DATASETNAME]
+         data_paths = dl_manager.download(urls)
+
+         # def _assert_data(msg):
+         #     cur_data = list(map(
+         #         lambda d: d.split(" "),
+         #         open(fp, "r", encoding="utf8").readlines()
+         #     ))
+         #     assert {1, 4} == set(map(len, cur_data)), msg  # length of 4 is due to uncommon delimiter of " _ _ "
+         #     assert {('_', '_')} == set(map(lambda _: (_[1], _[2]), filter(lambda _: len(_) == 4, cur_data))), msg
+
+         # Convert to tab-separated values
+         for subset in ["train", "valid", "test"]:
+             fp = data_paths[subset]
+             # _assert_data(f"Invalid file for subset '{subset}'")
+             with open(fp, "r", encoding="utf8") as f:
+                 data = f.read()
+             # data_paths[subset] = f"{fp}.tsv"
+             with open(data_paths[subset], "w", encoding="utf8") as f:
+                 f.write(data.replace(" _ _ ", "\t"))
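+         # After this loop each file is tab-separated: a raw line such as
+         # "kepala _ _ B-ANAT" (token invented for illustration) now reads
+         # "kepala\tB-ANAT", ready for load_conll_data below.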
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"filepath": data_paths["train"]},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"filepath": data_paths["test"]},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"filepath": data_paths["valid"]},
+             ),
+         ]
+
+     def _generate_examples(self, filepath: Path) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         data = load_conll_data(filepath)
+
+         if self.config.schema == "source":
+             for key, ex in enumerate(data):
+                 yield key, ex
+
+         elif self.config.schema == "seacrowd_seq_label":
+             for key, ex in enumerate(data):
+                 yield key, {
+                     "id": str(key),
+                     "tokens": ex["sentence"],
+                     "labels": ex["label"],
+                 }
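
For reference, a minimal usage sketch (assumptions: the seacrowd package that provides the imported utilities is installed, the script is saved locally as bioner_id.py, and newer datasets versions may also require trust_remote_code=True; the config names come from BUILDER_CONFIGS above):

    import datasets

    # source schema: parallel "sentence"/"label" lists per example
    ds = datasets.load_dataset("bioner_id.py", name="bioner_id_source")

    # SEACrowd sequence-labeling schema: "id"/"tokens"/"labels" fields
    ds = datasets.load_dataset("bioner_id.py", name="bioner_id_seacrowd_seq_label")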