Upload huggingface.co_datasets_Dr-BERT_ESSAI_raw_main_ESSAI.py
huggingface.co_datasets_Dr-BERT_ESSAI_raw_main_ESSAI.py
ADDED
@@ -0,0 +1,364 @@
import os
import random

import datasets
import numpy as np

_CITATION = """\
@misc{
    dalloux,
    title={Datasets – Clément Dalloux},
    url={http://clementdalloux.fr/?page_id=28},
    journal={Clément Dalloux},
    author={Dalloux, Clément}
}
"""

_DESCRIPTION = """\
We manually annotated two corpora from the biomedical field. The ESSAI corpus \
contains clinical trial protocols in French. They were mainly obtained from the \
National Cancer Institute. The typical protocol consists of two parts: the \
summary of the trial, which indicates the purpose of the trial and the methods \
applied; and a detailed description of the trial with the inclusion and \
exclusion criteria. The CAS corpus contains clinical cases published in \
scientific literature and training material. They are published in different \
journals from French-speaking countries (France, Belgium, Switzerland, Canada, \
African countries, tropical countries) and are related to various medical \
specialties (cardiology, urology, oncology, obstetrics, pulmonology, \
gastro-enterology). The purpose of clinical cases is to describe clinical \
situations of patients. Hence, their content is close to the content of clinical \
narratives (description of diagnoses, treatments or procedures, evolution, \
family history, expected audience, etc.). In clinical cases, the negation is \
frequently used for describing the patient signs, symptoms, and diagnosis. \
Speculation is present as well but less frequently.

This version only contains the annotated ESSAI corpus.
"""

_HOMEPAGE = "https://clementdalloux.fr/?page_id=28"

_LICENSE = 'Data User Agreement'


class ESSAI(datasets.GeneratorBasedBuilder):

    # NB: no BuilderConfig below is actually named "pos_spec"; the closest
    # defined config is "pos".
    DEFAULT_CONFIG_NAME = "pos_spec"

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="pos", version="1.0.0", description="The ESSAI corpora - POS Speculation task"),
        datasets.BuilderConfig(name="cls", version="1.0.0", description="The ESSAI corpora - CLS Negation / Speculation task"),
        datasets.BuilderConfig(name="ner_spec", version="1.0.0", description="The ESSAI corpora - NER Speculation task"),
        datasets.BuilderConfig(name="ner_neg", version="1.0.0", description="The ESSAI corpora - NER Negation task"),
    ]
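
    # The four configurations cover three task types:
    #   - "pos":      token-level part-of-speech tagging (tokens, lemmas, pos_tags)
    #   - "cls":      sentence-level negation/speculation classification
    #   - "ner_spec": cue/scope tagging for speculation
    #   - "ner_neg":  cue/scope tagging for negation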
    def _info(self):

        if self.config.name.find("pos") != -1:

            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "document_id": datasets.Value("string"),
                    "tokens": [datasets.Value("string")],
                    "lemmas": [datasets.Value("string")],
                    "pos_tags": [datasets.features.ClassLabel(
                        names = ['B-INT', 'B-PRO:POS', 'B-PRP', 'B-SENT', 'B-PRO', 'B-ABR', 'B-VER:pres', 'B-KON', 'B-SYM', 'B-DET:POS', 'B-VER:', 'B-PRO:IND', 'B-NAM', 'B-ADV', 'B-PRO:DEM', 'B-NN', 'B-PRO:PER', 'B-VER:pper', 'B-VER:ppre', 'B-PUN', 'B-VER:simp', 'B-PREF', 'B-NUM', 'B-VER:futu', 'B-NOM', 'B-VER:impf', 'B-VER:subp', 'B-VER:infi', 'B-DET:ART', 'B-PUN:cit', 'B-ADJ', 'B-PRP:det', 'B-PRO:REL', 'B-VER:cond', 'B-VER:subi'],
                    )],
                }
            )

        elif self.config.name.find("cls") != -1:

            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "document_id": datasets.Value("string"),
                    "tokens": [datasets.Value("string")],
                    "label": datasets.features.ClassLabel(
                        names = ['negation_speculation', 'negation', 'neutral', 'speculation'],
                    ),
                }
            )

        elif self.config.name.find("ner") != -1:

            if self.config.name.find("_spec") != -1:
                names = ['O', 'B_cue_spec', 'B_scope_spec', 'I_scope_spec']
            elif self.config.name.find("_neg") != -1:
                names = ['O', 'B_cue_neg', 'B_scope_neg', 'I_scope_neg']

            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "document_id": datasets.Value("string"),
                    "tokens": [datasets.Value("string")],
                    "lemmas": [datasets.Value("string")],
                    "ner_tags": [datasets.features.ClassLabel(
                        names = names,
                    )],
                }
            )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )
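
    # The ClassLabel columns declared above store integer ids; the original
    # string labels can be recovered with ClassLabel.int2str, e.g.
    # features["label"].int2str(2) -> "neutral" for the "cls" config.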
    def _split_generators(self, dl_manager):

        if self.config.data_dir is None:
            raise ValueError("This is a local dataset. Please pass the data_dir kwarg to load_dataset.")

        else:
            data_dir = self.config.data_dir

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "datadir": data_dir,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "datadir": data_dir,
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "datadir": data_dir,
                    "split": "test",
                },
            ),
        ]
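
    # All three SplitGenerators above point at the same data_dir; the actual
    # train/validation/test partition is derived below in _generate_examples
    # from a seeded shuffle of the document ids.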
    def _generate_examples(self, datadir, split):

        all_res = []

        key = 0

        subset = self.config.name.split("_")[-1]

        unique_id_doc = []

        if self.config.name.find("ner") != -1:
            docs = [f"ESSAI_{subset}.txt"]
        else:
            docs = ["ESSAI_neg.txt", "ESSAI_spec.txt"]
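
        # Each corpus file is expected to be tab-separated, CoNLL-style: one
        # token per line with at least five columns (sentence id, token id,
        # token, lemma, tag) and the negation/speculation annotation, when
        # present, in the last column.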
        for file in docs:

            filename = os.path.join(datadir, file)

            if self.config.name.find("pos") != -1:

                id_docs = []
                id_words = []
                words = []
                lemmas = []
                POS_tags = []

                with open(filename) as f:

                    for line in f.readlines():

                        splitted = line.split("\t")

                        if len(splitted) < 5:
                            continue

                        id_doc, id_word, word, lemma, tag = splitted[0:5]
                        if len(splitted) >= 8:
                            tag = splitted[6]

                        if tag == "@card@":
                            print(splitted)

                        if word == "@card@":
                            print(splitted)

                        # Patch a handful of "@card@" placeholders (cardinal
                        # numbers) left over from the tagger.
                        if lemma == "000" and tag == "@card@":
                            tag = "NUM"
                            word = "100 000"
                            lemma = "100 000"
                        elif lemma == "45" and tag == "@card@":
                            tag = "NUM"

                        # if id_doc in id_docs:
                        #     continue

                        id_docs.append(id_doc)
                        id_words.append(id_word)
                        words.append(word)
                        lemmas.append(lemma)
                        POS_tags.append('B-'+tag)

                dic = {
                    "id_docs": np.array(list(map(int, id_docs))),
                    "id_words": id_words,
                    "words": words,
                    "lemmas": lemmas,
                    "POS_tags": POS_tags,
                }

                for doc_id in set(dic["id_docs"]):

                    indexes = np.argwhere(dic["id_docs"] == doc_id)[:, 0]
                    tokens = [dic["words"][id] for id in indexes]
                    text_lemmas = [dic["lemmas"][id] for id in indexes]
                    pos_tags = [dic["POS_tags"][id] for id in indexes]

                    if doc_id not in unique_id_doc:

                        all_res.append({
                            "id": str(doc_id),
                            "document_id": doc_id,
                            "tokens": tokens,
                            "lemmas": text_lemmas,
                            "pos_tags": pos_tags,
                        })
                        unique_id_doc.append(doc_id)

                        # key += 1

            elif self.config.name.find("ner") != -1:

                id_docs = []
                id_words = []
                words = []
                lemmas = []
                ner_tags = []

                with open(filename) as f:

                    for line in f.readlines():

                        if len(line.split("\t")) < 5:
                            continue

                        id_doc, id_word, word, lemma, _ = line.split("\t")[0:5]
                        tag = line.replace("\n","").split("\t")[-1]

                        # Normalize stray annotations into the four-label
                        # cue/scope scheme ("***" and "_" mean unannotated).
                        if tag == "***" or tag == "_":
                            tag = "O"
                        elif tag == "v":
                            tag = "I_scope_spec"
                        elif tag == "z":
                            tag = "O"
                        elif tag == "I_scope_spec_":
                            tag = "I_scope_spec"

                        id_docs.append(id_doc)
                        id_words.append(id_word)
                        words.append(word)
                        lemmas.append(lemma)
                        ner_tags.append(tag)

                dic = {
                    "id_docs": np.array(list(map(int, id_docs))),
                    "id_words": id_words,
                    "words": words,
                    "lemmas": lemmas,
                    "ner_tags": ner_tags,
                }

                for doc_id in set(dic["id_docs"]):

                    indexes = np.argwhere(dic["id_docs"] == doc_id)[:, 0]
                    tokens = [dic["words"][id] for id in indexes]
                    text_lemmas = [dic["lemmas"][id] for id in indexes]
                    ner_tags = [dic["ner_tags"][id] for id in indexes]

                    all_res.append({
                        "id": key,
                        "document_id": doc_id,
                        "tokens": tokens,
                        "lemmas": text_lemmas,
                        "ner_tags": ner_tags,
                    })

                    key += 1

            elif self.config.name.find("cls") != -1:

                f_in = open(filename, "r")
                conll = [
                    [b.split("\t") for b in a.split("\n")]
                    for a in f_in.read().split("\n\n")
                ]
                f_in.close()

                classe = "negation" if filename.find("_neg") != -1 else "speculation"

                for document in conll:

                    if document == [""]:
                        continue

                    identifier = document[0][0]

                    unique = list(set([w[-1] for w in document]))
                    tokens = [sent[2] for sent in document if len(sent) > 1]

                    if "***" in unique:
                        l = "neutral"
                    elif "_" in unique:
                        l = classe

                    if identifier in unique_id_doc and l == 'neutral':
                        continue

                    elif identifier in unique_id_doc and l != 'neutral':

                        # The sentence was already seen in the other file; if it
                        # carried a non-neutral label there as well, it is both
                        # negated and speculative.
                        index_l = unique_id_doc.index(identifier)

                        if all_res[index_l]["label"] != "neutral":
                            l = "negation_speculation"

                        all_res[index_l] = {
                            "id": str(identifier),
                            "document_id": identifier,
                            "tokens": tokens,
                            "label": l,
                        }

                    else:

                        all_res.append({
                            "id": str(identifier),
                            "document_id": identifier,
                            "tokens": tokens,
                            "label": l,
                        })

                        unique_id_doc.append(identifier)

        ids = [r["id"] for r in all_res]

        # Deterministic shuffle so every call reproduces the same partition.
        random.seed(4)
        random.shuffle(ids)
        random.shuffle(ids)
        random.shuffle(ids)

        # 70% train / 10% validation / 20% test.
        train, validation, test = np.split(ids, [int(len(ids)*0.70), int(len(ids)*0.80)])
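
        # e.g. with 1,000 documents: ids[:700] -> train, ids[700:800] ->
        # validation, ids[800:] -> test.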
        if split == "train":
            allowed_ids = list(train)
        elif split == "validation":
            allowed_ids = list(validation)
        elif split == "test":
            allowed_ids = list(test)

        for r in all_res:
            if r["id"] in allowed_ids:
                yield r["id"], r
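
# Usage sketch: the corpus files must be obtained separately under the Data
# User Agreement, and "/path/to/ESSAI" is a placeholder. Depending on the
# installed `datasets` version, trust_remote_code=True may also be required.
#
#     from datasets import load_dataset
#
#     essai = load_dataset("Dr-BERT/ESSAI", name="ner_neg", data_dir="/path/to/ESSAI")
#     print(essai["train"][0])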