# coding=utf-8
"""QUAERO Corpus"""
import datasets
from datasets import load_dataset
logger = datasets.logging.get_logger(__name__)
_CITATION = """
@InProceedings{neveol14quaero,
  author       = {Névéol, Aurélie and Grouin, Cyril and Leixa, Jeremy
                  and Rosset, Sophie and Zweigenbaum, Pierre},
  title        = {The {QUAERO} {French} Medical Corpus: A Ressource for
                  Medical Entity Recognition and Normalization},
  OPTbooktitle = {Proceedings of the Fourth Workshop on Building
                  and Evaluating Ressources for Health and Biomedical
                  Text Processing},
  booktitle    = {Proc of BioTextMining Work},
  OPTseries    = {BioTxtM 2014},
  year         = {2014},
  pages        = {24--30},
}
"""
_LICENSE = """
GNU Free Documentation License v1.3
"""
_DESCRIPTION = """
The QUAERO French Medical Corpus was initially developed as a resource for named entity recognition and normalization [1]. It was then improved with the purpose of creating a gold standard set of normalized entities for French biomedical text, which was used in the CLEF eHealth evaluation lab [2][3].
A selection of MEDLINE titles and EMEA documents were manually annotated. The annotation process was guided by concepts in the Unified Medical Language System (UMLS):
1. Ten types of clinical entities, as defined by the following UMLS Semantic Groups (Bodenreider and McCray 2003) were annotated: Anatomy, Chemical and Drugs, Devices, Disorders, Geographic Areas, Living Beings, Objects, Phenomena, Physiology, Procedures.
2. The annotations were made in a comprehensive fashion, so that nested entities were marked, and entities could be mapped to more than one UMLS concept. In particular: (a) If a mention can refer to more than one Semantic Group, all the relevant Semantic Groups should be annotated. For instance, the mention “récidive” (recurrence) in the phrase “prévention des récidives” (recurrence prevention) should be annotated with the category “DISORDER” (CUI C2825055) and the category “PHENOMENON” (CUI C0034897); (b) If a mention can refer to more than one UMLS concept within the same Semantic Group, all the relevant concepts should be annotated. For instance, the mention “maniaques” (obsessive) in the phrase “patients maniaques” (obsessive patients) should be annotated with CUIs C0564408 and C0338831 (category “DISORDER”); (c) Entities which span overlaps with that of another entity should still be annotated. For instance, in the phrase “infarctus du myocarde” (myocardial infarction), the mention “myocarde” (myocardium) should be annotated with category “ANATOMY” (CUI C0027061) and the mention “infarctus du myocarde” should be annotated with category “DISORDER” (CUI C0027051)
The QUAERO French Medical Corpus BioC release comprises a subset of the QUAERO French Medical corpus, as follows:
Training data (BRAT version used in CLEF eHealth 2015 task 1b as training data):
- MEDLINE_train_bioc file: 833 MEDLINE titles, annotated with normalized entities in the BioC format
- EMEA_train_bioc file: 3 EMEA documents, segmented into 11 sub-documents, annotated with normalized entities in the BioC format
Development data (BRAT version used in CLEF eHealth 2015 task 1b as test data and in CLEF eHealth 2016 task 2 as development data):
- MEDLINE_dev_bioc file: 832 MEDLINE titles, annotated with normalized entities in the BioC format
- EMEA_dev_bioc file: 3 EMEA documents, segmented into 12 sub-documents, annotated with normalized entities in the BioC format
Test data (BRAT version used in CLEF eHealth 2016 task 2 as test data):
- MEDLINE_test_bioc folder: 833 MEDLINE titles, annotated with normalized entities in the BioC format
- EMEA_test_bioc folder: 4 EMEA documents, segmented into 15 sub-documents, annotated with normalized entities in the BioC format
This release of the QUAERO French medical corpus comes in the BioC format, obtained through automatic conversion from the original BRAT format with the Brat2BioC tool https://bitbucket.org/nicta_biomed/brat2bioc developed by Jimeno Yepes et al.
Antonio Jimeno Yepes, Mariana Neves, Karin Verspoor
Brat2BioC: conversion tool between brat and BioC
BioCreative IV track 1 - BioC: The BioCreative Interoperability Initiative, 2013
Please note that the original version of the QUAERO corpus distributed in the CLEF eHealth 2015 and 2016 challenges came in the standalone BRAT format. It was distributed with the CLEF eHealth evaluation tool. This original distribution of the QUAERO French Medical corpus is available separately from https://quaerofrenchmed.limsi.fr
All questions regarding the task or data should be addressed to aurelie.neveol@limsi.fr
"""
_LABELS_BASE = ['DISO', 'DEVI', 'CHEM', 'GEOG', 'OBJC', 'PHEN', 'PHYS', 'LIVB', 'PROC', 'ANAT']
class QUAERO(datasets.GeneratorBasedBuilder):
"""QUAERO dataset."""
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="emea", version=VERSION, description="The EMEA QUAERO corpora"),
datasets.BuilderConfig(name="medline", version=VERSION, description="The MEDLINE QUAERO corpora"),
]
DEFAULT_CONFIG_NAME = "emea"
    def _info(self):
        # The EMEA and MEDLINE configurations share the same feature schema. The
        # tags are emitted as plain strings rather than as a ClassLabel because
        # multi-label annotations are encoded as "_"-joined labels (see
        # convert_to_hf_format), so each configuration has its own inventory of
        # base labels (before B-/I- prefixing):
        # emea:    ['O', 'LIVB', 'PROC', 'ANAT', 'DEVI', 'CHEM', 'GEOG', 'PHYS', 'PHEN', 'DISO', 'OBJC', 'CHEM_PHYS', 'ANAT_LIVB', 'ANAT_PROC', 'ANAT_DISO', 'DISO_PHYS', 'CHEM_OBJC', 'CHEM_LIVB', 'LIVB_PHYS', 'CHEM_PROC', 'PHEN_PROC', 'OBJC_PHEN', 'ANAT_CHEM', 'PHEN_PHYS', 'GEOG_LIVB', 'DISO_LIVB', 'CHEM_DISO', 'DISO_PROC', 'ANAT_PHYS', 'CHEM_DISO_PHYS', 'ANAT_DISO_PHYS', 'DISO_LIVB_PHYS']
        # medline: ['O', 'LIVB', 'PROC', 'ANAT', 'DEVI', 'CHEM', 'GEOG', 'PHYS', 'PHEN', 'DISO', 'OBJC', 'CHEM_DEVI', 'CHEM_PHYS', 'ANAT_LIVB', 'ANAT_PROC', 'DEVI_OBJC', 'ANAT_DEVI', 'ANAT_PHEN', 'PHYS_PROC', 'ANAT_DISO', 'DEVI_DISO', 'DISO_PHYS', 'CHEM_OBJC', 'CHEM_LIVB', 'LIVB_PHYS', 'CHEM_PROC', 'LIVB_OBJC', 'PHEN_PROC', 'DISO_OBJC', 'OBJC_PHEN', 'LIVB_PROC', 'ANAT_CHEM', 'ANAT_OBJC', 'PHEN_PHYS', 'GEOG_LIVB', 'DEVI_PROC', 'DEVI_PHEN', 'DISO_LIVB', 'DEVI_PHYS', 'CHEM_PHEN', 'DISO_PHEN', 'CHEM_DISO', 'OBJC_PROC', 'DISO_PROC', 'ANAT_PHYS', 'ANAT_PHYS_PROC', 'CHEM_DISO_LIVB', 'ANAT_DISO_PHYS', 'ANAT_CHEM_PROC', 'ANAT_DISO_LIVB', 'ANAT_DEVI_PROC', 'DISO_PHEN_PHYS', 'DISO_LIVB_PHYS', 'DISO_PHEN_PROC', 'ANAT_PHEN_PROC']
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "document_id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(datasets.Value("string")),
                }
            ),
            supervised_keys=None,
            homepage="https://quaerofrenchmed.limsi.fr/",
            citation=_CITATION,
            license=_LICENSE,
        )
def _split_generators(self, dl_manager):
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"split": "train",
}
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"split": "validation",
}
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"split": "test",
}
),
]
    def split_sentences(self, json_o):
        """
        Split the corpus into shorter sentences so that they fit into
        BERT-style models. The split is performed on the period token ".".
        """
final_json = []
for i in json_o:
ind_punc = [index for index, value in enumerate(i['tokens']) if value=='.'] + [len(i['tokens'])]
# ind_punc = [index for index, value in enumerate(i['tokens']) if value=='.' and not str(i['tokens'][index-1]).isnumeric()]
            for index, value in enumerate(ind_punc):
                start = 0 if index == 0 else ind_punc[index-1] + 1
                tokens = i['tokens'][start:value+1]
                if not tokens:
                    # Skip the empty trailing segment produced when the
                    # document already ends with '.'.
                    continue
                final_json.append({'id': i['id'] + '_' + str(index),
                                   'document_id': i['document_id'] + '_' + str(index),
                                   'ner_tags': i['ner_tags'][start:value+1],
                                   'tokens': tokens
                                   })
return final_json
    def convert_to_prodigy(self, json_object):
        """Convert BigBio source documents to a Prodigy-like format, mapping
        character-offset annotations to whitespace-token indices."""
        new_json = []
        for ex in json_object:
            tokenized_text = ex['text'].split()
            list_spans = []
            for a in ex['text_bound_annotations']:
                for o in range(len(a['offsets'])):
                    offset_start = a['offsets'][o][0]
                    offset_end = a['offsets'][o][1]
                    nb_tokens_annot = len(a['text'][o].split())
                    # The start token index equals the number of whitespace-separated
                    # tokens occurring before the start character offset.
                    nb_tokens_before_annot = len(ex['text'][:offset_start].split())
                    token_start = nb_tokens_before_annot
                    token_end = token_start + nb_tokens_annot - 1
list_spans.append({
'start': offset_start,
'end': offset_end,
'token_start': token_start,
'token_end': token_end,
'label': a['type'],
'id': a['id'],
'text': a['text'][o],
})
res = {
'id': ex['id'],
'document_id': ex['document_id'],
'text': ex['text'],
'tokens': tokenized_text,
'spans': list_spans
}
new_json.append(res)
return new_json
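    # Illustrative sketch of the conversion above (hypothetical document): for
    # the text "infarctus du myocarde" with a DISO annotation at offsets (0, 21)
    # and an ANAT annotation at offsets (13, 21), the resulting spans carry
    # {'token_start': 0, 'token_end': 2, 'label': 'DISO', ...} and
    # {'token_start': 2, 'token_end': 2, 'label': 'ANAT', ...}.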
    def convert_to_hf_format(self, json_object, list_label):
        """
        Convert Prodigy-like records to token/NER-tag lists. Multi-label tokens
        are handled by concatenating the labels with "_" between each label.
        """
dict_out = []
for i in json_object:
nb_tokens = len(i['tokens'])
ner_tags = ['O']*nb_tokens
history = {}
if 'spans' in i:
for j in i['spans']:
for x in range(j['token_start'], j['token_end']+1, 1):
if j['label'] in list_label:
label_id = f"{j['label']}[{j['id']}]"
history[j['id']] = False
                            if ner_tags[x] == 'O':
                                ner_tags[x] = label_id
                            else:
                                # Replace the line below with `pass` to keep only
                                # one label per token.
                                # pass
                                ner_tags[x] = '_'.join(sorted(set(ner_tags[x].split('_') + [label_id])))
                                # ner_tags[x] = '_'.join(sorted(set(ner_tags[x].split('_') + [j['label']])))
new_ner_tags = []
# prev_identifier = "T-1"
for idx, tag in enumerate(ner_tags):
if tag != "O":
tags = []
for subtag in tag.split("_"):
base_tag = subtag.split("[")[0]
identifier_tag = subtag.split("[")[-1].split("]")[0]
# if idx == 0 or ner_tags[idx-1] == "O" or prev_identifier != identifier_tag:
# prefix = "B-"
# elif ner_tags[idx-1] != "O":
# prefix = "I-"
                        if not history[identifier_tag]:
prefix = "B-"
history[identifier_tag] = True
else:
prefix = "I-"
# prev_identifier = identifier_tag
subtag = f"{prefix}{base_tag}"
tags.append(subtag)
tag = "_".join(tags)
new_ner_tags.append(tag)
            logger.debug("%s", ner_tags)
            logger.debug("%s", new_ner_tags)
dict_out.append({
'id': i['id'],
'document_id': i['document_id'],
"ner_tags": new_ner_tags,
# "new_ner_tags": new_ner_tags,
"tokens": i['tokens'],
})
return dict_out
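    # Illustrative sketch of convert_to_hf_format (hypothetical spans, with the
    # DISO span listed first): for tokens ['infarctus', 'du', 'myocarde'] with a
    # DISO span over tokens 0-2 and an ANAT span over token 2, the emitted tags
    # are ['B-DISO', 'I-DISO', 'B-ANAT_I-DISO'].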
    def _generate_examples(self, split):
        # Load the source annotations from the BigBio version of QUAERO hosted
        # on the Hugging Face Hub, then convert them to token/tag records.
        ds = load_dataset("bigbio/quaero", f"quaero_{self.config.name}_source")[split]
if self.config.name == "emea":
ds = self.split_sentences(
self.convert_to_hf_format(
self.convert_to_prodigy(ds),
_LABELS_BASE,
)
)
else:
ds = self.convert_to_hf_format(
self.convert_to_prodigy(ds),
_LABELS_BASE,
)
for d in ds:
yield d["id"], d