parquet-converter committed
Commit · 0917a9a
Parent(s): 325358e
Update parquet files
Browse files
- .gitattributes +0 -54
- bigbiohub.py +0 -556
- muchmore.py +0 -739
- muchmore_bigbio_kb/muchmore-train.parquet +3 -0
- muchmore_bigbio_t2t/muchmore-train.parquet +3 -0
- muchmore_de_bigbio_kb/muchmore-train.parquet +3 -0
- muchmore_en_bigbio_kb/muchmore-train.parquet +3 -0
- muchmore_source/muchmore-train.parquet +3 -0
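
This commit removes the Python loading scripts in favor of pre-built parquet shards. A minimal sketch of reading one of the new files, assuming pandas is installed and the repository was cloned with git-lfs so the pointers below resolve to actual data:

import pandas as pd

# Path added in this commit; the column list in the comment follows the
# bigbio_kb features declared in the deleted bigbiohub.py further down.
df = pd.read_parquet("muchmore_bigbio_kb/muchmore-train.parquet")
print(len(df))              # number of train examples
print(df.columns.tolist())  # e.g. id, document_id, passages, entities, ...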
.gitattributes
DELETED
@@ -1,54 +0,0 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.lz4 filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
-# Audio files - uncompressed
-*.pcm filter=lfs diff=lfs merge=lfs -text
-*.sam filter=lfs diff=lfs merge=lfs -text
-*.raw filter=lfs diff=lfs merge=lfs -text
-# Audio files - compressed
-*.aac filter=lfs diff=lfs merge=lfs -text
-*.flac filter=lfs diff=lfs merge=lfs -text
-*.mp3 filter=lfs diff=lfs merge=lfs -text
-*.ogg filter=lfs diff=lfs merge=lfs -text
-*.wav filter=lfs diff=lfs merge=lfs -text
-# Image files - uncompressed
-*.bmp filter=lfs diff=lfs merge=lfs -text
-*.gif filter=lfs diff=lfs merge=lfs -text
-*.png filter=lfs diff=lfs merge=lfs -text
-*.tiff filter=lfs diff=lfs merge=lfs -text
-# Image files - compressed
-*.jpg filter=lfs diff=lfs merge=lfs -text
-*.jpeg filter=lfs diff=lfs merge=lfs -text
-*.webp filter=lfs diff=lfs merge=lfs -text
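
Every deleted line above bound a filename pattern to the Git LFS filter, which is why the parquet shards added in this commit are stored as pointers. As a rough illustration of how such patterns select paths (a hedged sketch: Python's fnmatch only approximates gitattributes glob semantics, and the pattern list is a hand-picked subset):

import fnmatch

# Subset of the patterns removed above; fnmatch is an approximation of
# gitattributes matching, used purely for illustration.
lfs_patterns = ["*.parquet", "*.tar.*", "*tfevents*", "saved_model/**/*"]

def tracked_by_lfs(path: str) -> bool:
    return any(fnmatch.fnmatch(path, pattern) for pattern in lfs_patterns)

print(tracked_by_lfs("muchmore_source/muchmore-train.parquet"))  # True
print(tracked_by_lfs("muchmore.py"))                             # False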
bigbiohub.py
DELETED
@@ -1,556 +0,0 @@
-from collections import defaultdict
-from dataclasses import dataclass
-from enum import Enum
-import logging
-from pathlib import Path
-from types import SimpleNamespace
-from typing import TYPE_CHECKING, Dict, Iterable, List, Tuple
-
-import datasets
-
-if TYPE_CHECKING:
-    import bioc
-
-logger = logging.getLogger(__name__)
-
-
-BigBioValues = SimpleNamespace(NULL="<BB_NULL_STR>")
-
-
-@dataclass
-class BigBioConfig(datasets.BuilderConfig):
-    """BuilderConfig for BigBio."""
-
-    name: str = None
-    version: datasets.Version = None
-    description: str = None
-    schema: str = None
-    subset_id: str = None
-
-
-class Tasks(Enum):
-    NAMED_ENTITY_RECOGNITION = "NER"
-    NAMED_ENTITY_DISAMBIGUATION = "NED"
-    EVENT_EXTRACTION = "EE"
-    RELATION_EXTRACTION = "RE"
-    COREFERENCE_RESOLUTION = "COREF"
-    QUESTION_ANSWERING = "QA"
-    TEXTUAL_ENTAILMENT = "TE"
-    SEMANTIC_SIMILARITY = "STS"
-    TEXT_PAIRS_CLASSIFICATION = "TXT2CLASS"
-    PARAPHRASING = "PARA"
-    TRANSLATION = "TRANSL"
-    SUMMARIZATION = "SUM"
-    TEXT_CLASSIFICATION = "TXTCLASS"
-
-
-entailment_features = datasets.Features(
-    {
-        "id": datasets.Value("string"),
-        "premise": datasets.Value("string"),
-        "hypothesis": datasets.Value("string"),
-        "label": datasets.Value("string"),
-    }
-)
-
-pairs_features = datasets.Features(
-    {
-        "id": datasets.Value("string"),
-        "document_id": datasets.Value("string"),
-        "text_1": datasets.Value("string"),
-        "text_2": datasets.Value("string"),
-        "label": datasets.Value("string"),
-    }
-)
-
-qa_features = datasets.Features(
-    {
-        "id": datasets.Value("string"),
-        "question_id": datasets.Value("string"),
-        "document_id": datasets.Value("string"),
-        "question": datasets.Value("string"),
-        "type": datasets.Value("string"),
-        "choices": [datasets.Value("string")],
-        "context": datasets.Value("string"),
-        "answer": datasets.Sequence(datasets.Value("string")),
-    }
-)
-
-text_features = datasets.Features(
-    {
-        "id": datasets.Value("string"),
-        "document_id": datasets.Value("string"),
-        "text": datasets.Value("string"),
-        "labels": [datasets.Value("string")],
-    }
-)
-
-text2text_features = datasets.Features(
-    {
-        "id": datasets.Value("string"),
-        "document_id": datasets.Value("string"),
-        "text_1": datasets.Value("string"),
-        "text_2": datasets.Value("string"),
-        "text_1_name": datasets.Value("string"),
-        "text_2_name": datasets.Value("string"),
-    }
-)
-
-kb_features = datasets.Features(
-    {
-        "id": datasets.Value("string"),
-        "document_id": datasets.Value("string"),
-        "passages": [
-            {
-                "id": datasets.Value("string"),
-                "type": datasets.Value("string"),
-                "text": datasets.Sequence(datasets.Value("string")),
-                "offsets": datasets.Sequence([datasets.Value("int32")]),
-            }
-        ],
-        "entities": [
-            {
-                "id": datasets.Value("string"),
-                "type": datasets.Value("string"),
-                "text": datasets.Sequence(datasets.Value("string")),
-                "offsets": datasets.Sequence([datasets.Value("int32")]),
-                "normalized": [
-                    {
-                        "db_name": datasets.Value("string"),
-                        "db_id": datasets.Value("string"),
-                    }
-                ],
-            }
-        ],
-        "events": [
-            {
-                "id": datasets.Value("string"),
-                "type": datasets.Value("string"),
-                # refers to the text_bound_annotation of the trigger
-                "trigger": {
-                    "text": datasets.Sequence(datasets.Value("string")),
-                    "offsets": datasets.Sequence([datasets.Value("int32")]),
-                },
-                "arguments": [
-                    {
-                        "role": datasets.Value("string"),
-                        "ref_id": datasets.Value("string"),
-                    }
-                ],
-            }
-        ],
-        "coreferences": [
-            {
-                "id": datasets.Value("string"),
-                "entity_ids": datasets.Sequence(datasets.Value("string")),
-            }
-        ],
-        "relations": [
-            {
-                "id": datasets.Value("string"),
-                "type": datasets.Value("string"),
-                "arg1_id": datasets.Value("string"),
-                "arg2_id": datasets.Value("string"),
-                "normalized": [
-                    {
-                        "db_name": datasets.Value("string"),
-                        "db_id": datasets.Value("string"),
-                    }
-                ],
-            }
-        ],
-    }
-)
-
-
-def get_texts_and_offsets_from_bioc_ann(ann: "bioc.BioCAnnotation") -> Tuple:
-
-    offsets = [(loc.offset, loc.offset + loc.length) for loc in ann.locations]
-
-    text = ann.text
-
-    if len(offsets) > 1:
-        i = 0
-        texts = []
-        for start, end in offsets:
-            chunk_len = end - start
-            texts.append(text[i : chunk_len + i])
-            i += chunk_len
-            while i < len(text) and text[i] == " ":
-                i += 1
-    else:
-        texts = [text]
-
-    return offsets, texts
-
-
-def remove_prefix(a: str, prefix: str) -> str:
-    if a.startswith(prefix):
-        a = a[len(prefix) :]
-    return a
-
-
-def parse_brat_file(
-    txt_file: Path,
-    annotation_file_suffixes: List[str] = None,
-    parse_notes: bool = False,
-) -> Dict:
-    """
-    Parse a brat file into the schema defined below.
-    `txt_file` should be the path to the brat '.txt' file you want to parse, e.g. 'data/1234.txt'
-    Assumes that the annotations are contained in one or more of the corresponding '.a1', '.a2' or '.ann' files,
-    e.g. 'data/1234.ann' or 'data/1234.a1' and 'data/1234.a2'.
-    Will include annotator notes, when `parse_notes == True`.
-    brat_features = datasets.Features(
-        {
-            "id": datasets.Value("string"),
-            "document_id": datasets.Value("string"),
-            "text": datasets.Value("string"),
-            "text_bound_annotations": [  # T line in brat, e.g. type or event trigger
-                {
-                    "offsets": datasets.Sequence([datasets.Value("int32")]),
-                    "text": datasets.Sequence(datasets.Value("string")),
-                    "type": datasets.Value("string"),
-                    "id": datasets.Value("string"),
-                }
-            ],
-            "events": [  # E line in brat
-                {
-                    "trigger": datasets.Value(
-                        "string"
-                    ),  # refers to the text_bound_annotation of the trigger,
-                    "id": datasets.Value("string"),
-                    "type": datasets.Value("string"),
-                    "arguments": datasets.Sequence(
-                        {
-                            "role": datasets.Value("string"),
-                            "ref_id": datasets.Value("string"),
-                        }
-                    ),
-                }
-            ],
-            "relations": [  # R line in brat
-                {
-                    "id": datasets.Value("string"),
-                    "head": {
-                        "ref_id": datasets.Value("string"),
-                        "role": datasets.Value("string"),
-                    },
-                    "tail": {
-                        "ref_id": datasets.Value("string"),
-                        "role": datasets.Value("string"),
-                    },
-                    "type": datasets.Value("string"),
-                }
-            ],
-            "equivalences": [  # Equiv line in brat
-                {
-                    "id": datasets.Value("string"),
-                    "ref_ids": datasets.Sequence(datasets.Value("string")),
-                }
-            ],
-            "attributes": [  # M or A lines in brat
-                {
-                    "id": datasets.Value("string"),
-                    "type": datasets.Value("string"),
-                    "ref_id": datasets.Value("string"),
-                    "value": datasets.Value("string"),
-                }
-            ],
-            "normalizations": [  # N lines in brat
-                {
-                    "id": datasets.Value("string"),
-                    "type": datasets.Value("string"),
-                    "ref_id": datasets.Value("string"),
-                    "resource_name": datasets.Value(
-                        "string"
-                    ),  # Name of the resource, e.g. "Wikipedia"
-                    "cuid": datasets.Value(
-                        "string"
-                    ),  # ID in the resource, e.g. 534366
-                    "text": datasets.Value(
-                        "string"
-                    ),  # Human readable description/name of the entity, e.g. "Barack Obama"
-                }
-            ],
-            ### OPTIONAL: Only included when `parse_notes == True`
-            "notes": [  # # lines in brat
-                {
-                    "id": datasets.Value("string"),
-                    "type": datasets.Value("string"),
-                    "ref_id": datasets.Value("string"),
-                    "text": datasets.Value("string"),
-                }
-            ],
-        },
-    )
-    """
-
-    example = {}
-    example["document_id"] = txt_file.with_suffix("").name
-    with txt_file.open() as f:
-        example["text"] = f.read()
-
-    # If no specific suffixes of the to-be-read annotation files are given - take standard suffixes
-    # for event extraction
-    if annotation_file_suffixes is None:
-        annotation_file_suffixes = [".a1", ".a2", ".ann"]
-
-    if len(annotation_file_suffixes) == 0:
-        raise AssertionError(
-            "At least one suffix for the to-be-read annotation files should be given!"
-        )
-
-    ann_lines = []
-    for suffix in annotation_file_suffixes:
-        annotation_file = txt_file.with_suffix(suffix)
-        if annotation_file.exists():
-            with annotation_file.open() as f:
-                ann_lines.extend(f.readlines())
-
-    example["text_bound_annotations"] = []
-    example["events"] = []
-    example["relations"] = []
-    example["equivalences"] = []
-    example["attributes"] = []
-    example["normalizations"] = []
-
-    if parse_notes:
-        example["notes"] = []
-
-    for line in ann_lines:
-        line = line.strip()
-        if not line:
-            continue
-
-        if line.startswith("T"):  # Text bound
-            ann = {}
-            fields = line.split("\t")
-
-            ann["id"] = fields[0]
-            ann["type"] = fields[1].split()[0]
-            ann["offsets"] = []
-            span_str = remove_prefix(fields[1], (ann["type"] + " "))
-            text = fields[2]
-            for span in span_str.split(";"):
-                start, end = span.split()
-                ann["offsets"].append([int(start), int(end)])
-
-            # Heuristically split text of discontiguous entities into chunks
-            ann["text"] = []
-            if len(ann["offsets"]) > 1:
-                i = 0
-                for start, end in ann["offsets"]:
-                    chunk_len = end - start
-                    ann["text"].append(text[i : chunk_len + i])
-                    i += chunk_len
-                    while i < len(text) and text[i] == " ":
-                        i += 1
-            else:
-                ann["text"] = [text]
-
-            example["text_bound_annotations"].append(ann)
-
-        elif line.startswith("E"):
-            ann = {}
-            fields = line.split("\t")
-
-            ann["id"] = fields[0]
-
-            ann["type"], ann["trigger"] = fields[1].split()[0].split(":")
-
-            ann["arguments"] = []
-            for role_ref_id in fields[1].split()[1:]:
-                argument = {
-                    "role": (role_ref_id.split(":"))[0],
-                    "ref_id": (role_ref_id.split(":"))[1],
-                }
-                ann["arguments"].append(argument)
-
-            example["events"].append(ann)
-
-        elif line.startswith("R"):
-            ann = {}
-            fields = line.split("\t")
-
-            ann["id"] = fields[0]
-            ann["type"] = fields[1].split()[0]
-
-            ann["head"] = {
-                "role": fields[1].split()[1].split(":")[0],
-                "ref_id": fields[1].split()[1].split(":")[1],
-            }
-            ann["tail"] = {
-                "role": fields[1].split()[2].split(":")[0],
-                "ref_id": fields[1].split()[2].split(":")[1],
-            }
-
-            example["relations"].append(ann)
-
-        # '*' seems to be the legacy way to mark equivalences,
-        # but I couldn't find any info on the current way
-        # this might have to be adapted dependent on the brat version
-        # of the annotation
-        elif line.startswith("*"):
-            ann = {}
-            fields = line.split("\t")
-
-            ann["id"] = fields[0]
-            ann["ref_ids"] = fields[1].split()[1:]
-
-            example["equivalences"].append(ann)
-
-        elif line.startswith("A") or line.startswith("M"):
-            ann = {}
-            fields = line.split("\t")
-
-            ann["id"] = fields[0]
-
-            info = fields[1].split()
-            ann["type"] = info[0]
-            ann["ref_id"] = info[1]
-
-            if len(info) > 2:
-                ann["value"] = info[2]
-            else:
-                ann["value"] = ""
-
-            example["attributes"].append(ann)
-
-        elif line.startswith("N"):
-            ann = {}
-            fields = line.split("\t")
-
-            ann["id"] = fields[0]
-            ann["text"] = fields[2]
-
-            info = fields[1].split()
-
-            ann["type"] = info[0]
-            ann["ref_id"] = info[1]
-            ann["resource_name"] = info[2].split(":")[0]
-            ann["cuid"] = info[2].split(":")[1]
-            example["normalizations"].append(ann)
-
-        elif parse_notes and line.startswith("#"):
-            ann = {}
-            fields = line.split("\t")
-
-            ann["id"] = fields[0]
-            ann["text"] = fields[2] if len(fields) == 3 else BigBioValues.NULL
-
-            info = fields[1].split()
-
-            ann["type"] = info[0]
-            ann["ref_id"] = info[1]
-            example["notes"].append(ann)
-
-    return example
-
-
-def brat_parse_to_bigbio_kb(brat_parse: Dict) -> Dict:
-    """
-    Transform a brat parse (conforming to the standard brat schema) obtained with
-    `parse_brat_file` into a dictionary conforming to the `bigbio-kb` schema (as defined in ../schemas/kb.py)
-    :param brat_parse:
-    """
-
-    unified_example = {}
-
-    # Prefix all ids with document id to ensure global uniqueness,
-    # because brat ids are only unique within their document
-    id_prefix = brat_parse["document_id"] + "_"
-
-    # identical
-    unified_example["document_id"] = brat_parse["document_id"]
-    unified_example["passages"] = [
-        {
-            "id": id_prefix + "_text",
-            "type": "abstract",
-            "text": [brat_parse["text"]],
-            "offsets": [[0, len(brat_parse["text"])]],
-        }
-    ]
-
-    # get normalizations
-    ref_id_to_normalizations = defaultdict(list)
-    for normalization in brat_parse["normalizations"]:
-        ref_id_to_normalizations[normalization["ref_id"]].append(
-            {
-                "db_name": normalization["resource_name"],
-                "db_id": normalization["cuid"],
-            }
-        )
-
-    # separate entities and event triggers
-    unified_example["events"] = []
-    non_event_ann = brat_parse["text_bound_annotations"].copy()
-    for event in brat_parse["events"]:
-        event = event.copy()
-        event["id"] = id_prefix + event["id"]
-        trigger = next(
-            tr
-            for tr in brat_parse["text_bound_annotations"]
-            if tr["id"] == event["trigger"]
-        )
-        if trigger in non_event_ann:
-            non_event_ann.remove(trigger)
-        event["trigger"] = {
-            "text": trigger["text"].copy(),
-            "offsets": trigger["offsets"].copy(),
-        }
-        for argument in event["arguments"]:
-            argument["ref_id"] = id_prefix + argument["ref_id"]
-
-        unified_example["events"].append(event)
-
-    unified_example["entities"] = []
-    anno_ids = [ref_id["id"] for ref_id in non_event_ann]
-    for ann in non_event_ann:
-        entity_ann = ann.copy()
-        entity_ann["id"] = id_prefix + entity_ann["id"]
-        entity_ann["normalized"] = ref_id_to_normalizations[ann["id"]]
-        unified_example["entities"].append(entity_ann)
-
-    # massage relations
-    unified_example["relations"] = []
-    skipped_relations = set()
-    for ann in brat_parse["relations"]:
-        if (
-            ann["head"]["ref_id"] not in anno_ids
-            or ann["tail"]["ref_id"] not in anno_ids
-        ):
-            skipped_relations.add(ann["id"])
-            continue
-        unified_example["relations"].append(
-            {
-                "arg1_id": id_prefix + ann["head"]["ref_id"],
-                "arg2_id": id_prefix + ann["tail"]["ref_id"],
-                "id": id_prefix + ann["id"],
-                "type": ann["type"],
-                "normalized": [],
-            }
-        )
-    if len(skipped_relations) > 0:
-        example_id = brat_parse["document_id"]
-        logger.info(
-            f"Example:{example_id}: The `bigbio_kb` schema allows `relations` only between entities."
-            f" Skip (for now): "
-            f"{list(skipped_relations)}"
-        )
-
-    # get coreferences
-    unified_example["coreferences"] = []
-    for i, ann in enumerate(brat_parse["equivalences"], start=1):
-        is_entity_cluster = True
-        for ref_id in ann["ref_ids"]:
-            if not ref_id.startswith("T"):  # not textbound -> no entity
-                is_entity_cluster = False
-            elif ref_id not in anno_ids:  # event trigger -> no entity
-                is_entity_cluster = False
-        if is_entity_cluster:
-            entity_ids = [id_prefix + i for i in ann["ref_ids"]]
-            unified_example["coreferences"].append(
-                {"id": id_prefix + str(i), "entity_ids": entity_ids}
-            )
-    return unified_example
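
The last two helpers above were designed to be used together; a minimal usage sketch (the data/1234.txt path and its .ann companion are hypothetical):

from pathlib import Path

# Hypothetical brat document: data/1234.txt with annotations in data/1234.ann.
brat_example = parse_brat_file(Path("data/1234.txt"))
kb_example = brat_parse_to_bigbio_kb(brat_example)

# The result follows the bigbio_kb layout declared in kb_features above:
# document_id, passages, entities, events, coreferences, relations.
print(kb_example["document_id"], len(kb_example["entities"]))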
muchmore.py
DELETED
@@ -1,739 +0,0 @@
-# coding=utf-8
-# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-A dataset loader for the MuchMore Springer Bilingual Corpus
-
-homepage
-
-* https://muchmore.dfki.de/resources1.htm
-
-description of annotation format
-
-* https://muchmore.dfki.de/pubs/D4.1.pdf
-
-Four files are distributed
-
-* springer_english_train_plain.tar.gz (english plain text of abstracts)
-* springer_german_train_plain.tar.gz (german plain text of abstracts)
-* springer_english_train_V4.2.tar.gz (annotated xml in english)
-* springer_german_train_V4.2.tar.gz (annotated xml in german)
-
-Each tar file has one member file per abstract.
-There are keys to join the english and german files
-but there is not a 1-1 mapping between them (i.e. some
-english files have no german counterpart and some german
-files have no english counterpart). However, there is a 1-1
-mapping between plain text and annotations for a given language
-(i.e. an abstract in springer_english_train_plain.tar.gz will
-also be found in springer_english_train_V4.2.tar.gz)
-
-Counts,
-
-* 15,631 total abstracts
-* 7,823 english abstracts
-* 7,808 german abstracts
-* 6,374 matched (en/de) abstracts
-* 1,449 english abstracts with no german
-* 1,434 german abstracts with no english
-
-Notes
-
-* Arthroskopie.00130237.eng.abstr.chunkmorph.annotated.xml seems to be empty
-
-
-* entity spans can overlap. an example from the first sample:
-
-    {'id': 'Arthroskopie.00130003.eng.abstr-s1-t1',
-     'type': 'umlsterm',
-     'text': ['posterior'],
-     'offsets': [[4, 13]],
-     'normalized': [{'db_name': 'UMLS', 'db_id': 'C0032009'}]},
-    {'id': 'Arthroskopie.00130003.eng.abstr-s1-t8',
-     'type': 'umlsterm',
-     'text': ['posterior cruciate ligament'],
-     'offsets': [[4, 31]],
-     'normalized': [{'db_name': 'UMLS', 'db_id': 'C0080039'}]},
-    {'id': 'Arthroskopie.00130003.eng.abstr-s1-t2',
-     'type': 'umlsterm',
-     'text': ['ligament'],
-     'offsets': [[23, 31]],
-     'normalized': [{'db_name': 'UMLS', 'db_id': 'C0023685'},
-                    {'db_name': 'UMLS', 'db_id': 'C0023686'}]},
-
-
-* semantic relations are defined beween concepts but entities can
-  have multiple concpets associated with them. in the bigbio
-  schema we skip relations between multiple concept of the
-  same entity. an example of a relation that is kept from the
-  source schema is below,
-
-    In [35]: dsd['train'][0]['sentences'][0]['tokens']
-    Out[35]:
-    [{'id': 'w1', 'pos': 'DT', 'lemma': 'the', 'text': 'The'},
-     {'id': 'w2', 'pos': 'JJ', 'lemma': 'posterior', 'text': 'posterior'},
-     {'id': 'w3', 'pos': 'JJ', 'lemma': 'cruciate', 'text': 'cruciate'},
-     {'id': 'w4', 'pos': 'NN', 'lemma': 'ligament', 'text': 'ligament'},
-     {'id': 'w5', 'pos': 'PUNCT', 'lemma': None, 'text': '('},
-     {'id': 'w6', 'pos': 'NN', 'lemma': None, 'text': 'PCL'},
-     {'id': 'w7', 'pos': 'PUNCT', 'lemma': None, 'text': ')'},
-     {'id': 'w8', 'pos': 'VBZ', 'lemma': 'be', 'text': 'is'},
-     {'id': 'w9', 'pos': 'DT', 'lemma': 'the', 'text': 'the'},
-     {'id': 'w10', 'pos': 'JJS', 'lemma': 'strong', 'text': 'strongest'},
-     {'id': 'w11', 'pos': 'NN', 'lemma': 'ligament', 'text': 'ligament'},
-     {'id': 'w12', 'pos': 'IN', 'lemma': 'of', 'text': 'of'},
-     {'id': 'w13', 'pos': 'DT', 'lemma': 'the', 'text': 'the'},
-     {'id': 'w14', 'pos': 'JJ', 'lemma': 'human', 'text': 'human'},
-     {'id': 'w15', 'pos': 'NN', 'lemma': 'knee', 'text': 'knee'},
-     {'id': 'w16', 'pos': 'JJ', 'lemma': 'joint', 'text': 'joint'},
-     {'id': 'w17', 'pos': 'PUNCT', 'lemma': None, 'text': '.'}]
-
-
-    In [36]: dsd['train'][0]['sentences'][0]['semrels'][0]
-    Out[36]: {'id': 'r1', 'term1': 't3.1', 'term2': 't6.1', 'reltype': 'surrounds'}
-
-    In [37]: dsd['train'][0]['sentences'][0]['umlsterms'][2]
-    Out[37]:
-    {'id': 't3',
-     'from': 'w11',
-     'to': 'w11',
-     'concepts': [{'id': 't3.1',
-       'cui': 'C0023685',
-       'preferred': 'Ligaments',
-       'tui': 'T024',
-       'mshs': [{'code': 'A2.513'}]},
-      {'id': 't3.2',
-       'cui': 'C0023686',
-       'preferred': 'Articular ligaments',
-       'tui': 'T023',
-       'mshs': [{'code': 'A2.513.514'}, {'code': 'A2.835.583.512'}]}]}
-
-    In [38]: dsd['train'][0]['sentences'][0]['umlsterms'][5]
-    Out[38]:
-    {'id': 't6',
-     'from': 'w16',
-     'to': 'w16',
-     'concepts': [{'id': 't6.1',
-       'cui': 'C0022417',
-       'preferred': 'Joints',
-       'tui': 'T030',
-       'mshs': [{'code': 'A2.835.583'}]}]}
-
-"""
-
-import itertools
-import os
-import re
-import tarfile
-import xml.etree.ElementTree as ET
-from collections import defaultdict
-from typing import Dict, List
-from xml.etree.ElementTree import Element
-
-import datasets
-from datasets import Features, Value
-
-# TODO: home page has a list of publications but its not clear which to choose
-# https://muchmore.dfki.de/papers1.htm
-# to start, chose the one below.
-# Buitelaar, Paul / Declerck, Thierry / Sacaleanu, Bogdan / Vintar, Spela / Raileanu, Diana / Crispi, Claudia: A Multi-Layered, XML-Based Approach to the Integration of Linguistic and Semantic Annotations. In: Proceedings of EACL 2003 Workshop on Language Technology and the Semantic Web (NLPXML'03), Budapest, Hungary, April 2003.
-from .bigbiohub import kb_features
-from .bigbiohub import text2text_features
-from .bigbiohub import BigBioConfig
-from .bigbiohub import Tasks
-
-_LANGUAGES = ['English', 'German']
-_PUBMED = True
-_LOCAL = False
-_CITATION = """\
-@inproceedings{buitelaar2003multi,
-  title={A multi-layered, xml-based approach to the integration of linguistic and semantic annotations},
-  author={Buitelaar, Paul and Declerck, Thierry and Sacaleanu, Bogdan and Vintar, {\v{S}}pela and Raileanu, Diana and Crispi, Claudia},
-  booktitle={Proceedings of EACL 2003 Workshop on Language Technology and the Semantic Web (NLPXML'03), Budapest, Hungary},
-  year={2003}
-}
-"""
-
-_DESCRIPTION = """\
-The corpus used in the MuchMore project is a parallel corpus of English-German scientific
-medical abstracts obtained from the Springer Link web site. The corpus consists
-approximately of 1 million tokens for each language. Abstracts are from 41 medical
-journals, each of which constitutes a relatively homogeneous medical sub-domain (e.g.
-Neurology, Radiology, etc.). The corpus of downloaded HTML documents is normalized in
-various ways, in order to produce a clean, plain text version, consisting of a title, abstract
-and keywords. Additionally, the corpus was aligned on the sentence level.
-
-Automatic (!) annotation includes: Part-of-Speech; Morphology (inflection and
-decomposition); Chunks; Semantic Classes (UMLS: Unified Medical Language System,
-MeSH: Medical Subject Headings, EuroWordNet); Semantic Relations from UMLS.
-"""
-
-_DATASETNAME = "muchmore"
-_DISPLAYNAME = "MuchMore"
-
-_HOMEPAGE = "https://muchmore.dfki.de/resources1.htm"
-
-# TODO: website says the following, but don't see a specific license
-# TODO: add to FAQs about what to do in this situation.
-
-# "The cross-lingual information access prototype system for the medical domain
-# will be made publicly accessible through the internet. It provides access to
-# multilingual information on the basis of a domain ontology and classification.
-# For the main task of multilingual domain modelling, the project will focus
-# on German and English. "
-_LICENSE = 'License information unavailable'
-_URLs = {
-    "muchmore_source": [
-        "https://muchmore.dfki.de/pubs/springer_english_train_plain.tar.gz",
-        "https://muchmore.dfki.de/pubs/springer_english_train_V4.2.tar.gz",
-        "https://muchmore.dfki.de/pubs/springer_german_train_plain.tar.gz",
-        "https://muchmore.dfki.de/pubs/springer_german_train_V4.2.tar.gz",
-    ],
-    "muchmore_bigbio_kb": [
-        "https://muchmore.dfki.de/pubs/springer_english_train_V4.2.tar.gz",
-        "https://muchmore.dfki.de/pubs/springer_german_train_V4.2.tar.gz",
-    ],
-    "muchmore_en_bigbio_kb": "https://muchmore.dfki.de/pubs/springer_english_train_V4.2.tar.gz",
-    "muchmore_de_bigbio_kb": "https://muchmore.dfki.de/pubs/springer_german_train_V4.2.tar.gz",
-    "plain": [
-        "https://muchmore.dfki.de/pubs/springer_english_train_plain.tar.gz",
-        "https://muchmore.dfki.de/pubs/springer_german_train_plain.tar.gz",
-    ],
-    "plain_en": "https://muchmore.dfki.de/pubs/springer_english_train_plain.tar.gz",
-    "plain_de": "https://muchmore.dfki.de/pubs/springer_german_train_plain.tar.gz",
-    "muchmore_bigbio_t2t": [
-        "https://muchmore.dfki.de/pubs/springer_english_train_plain.tar.gz",
-        "https://muchmore.dfki.de/pubs/springer_german_train_plain.tar.gz",
-    ],
-}
-
-# took version from annotated file names
-_SOURCE_VERSION = "4.2.0"
-_BIGBIO_VERSION = "1.0.0"
-_SUPPORTED_TASKS = [
-    Tasks.TRANSLATION,
-    Tasks.NAMED_ENTITY_RECOGNITION,
-    Tasks.NAMED_ENTITY_DISAMBIGUATION,
-    Tasks.RELATION_EXTRACTION,
-]
-
-NATIVE_ENCODING = "ISO-8859-1"
-FILE_NAME_PATTERN = r"^(.+?)\.(eng|ger)\.abstr(\.chunkmorph\.annotated\.xml)?$"
-LANG_MAP = {"eng": "en", "ger": "de"}
-
-
-class MuchMoreDataset(datasets.GeneratorBasedBuilder):
-    """MuchMore Springer Bilingual Corpus"""
-
-    DEFAULT_CONFIG_NAME = "muchmore_source"
-    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
-    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
-
-    BUILDER_CONFIGS = [
-        BigBioConfig(
-            name="muchmore_source",
-            version=SOURCE_VERSION,
-            description="MuchMore source schema",
-            schema="source",
-            subset_id="muchmore",
-        ),
-        BigBioConfig(
-            name="muchmore_bigbio_kb",
-            version=BIGBIO_VERSION,
-            description="MuchMore simplified BigBio kb schema",
-            schema="bigbio_kb",
-            subset_id="muchmore",
-        ),
-        BigBioConfig(
-            name="muchmore_en_bigbio_kb",
-            version=BIGBIO_VERSION,
-            description="MuchMore simplified BigBio kb schema",
-            schema="bigbio_kb",
-            subset_id="muchmore_en",
-        ),
-        BigBioConfig(
-            name="muchmore_de_bigbio_kb",
-            version=BIGBIO_VERSION,
-            description="MuchMore simplified BigBio kb schema",
-            schema="bigbio_kb",
-            subset_id="muchmore_de",
-        ),
-        BigBioConfig(
-            name="muchmore_bigbio_t2t",
-            version=BIGBIO_VERSION,
-            description="MuchMore simplified BigBio translation schema",
-            schema="bigbio_t2t",
-            subset_id="muchmore",
-        ),
-    ]
-
-    # default config produces english annotations at the moment
-    def _info(self):
-
-        if self.config.schema == "source":
-            features = Features(
-                {
-                    "sample_id": Value("string"),
-                    "corresp": Value("string"),
-                    "language": Value("string"),
-                    "abstract": Value("string"),
-                    "sentences": [
-                        {
-                            "id": Value("string"),
-                            "corresp": Value("string"),
-                            "umlsterms": [
-                                {
-                                    "id": Value("string"),
-                                    "from": Value("string"),
-                                    "to": Value("string"),
-                                    "concepts": [
-                                        {
-                                            "id": Value("string"),
-                                            "cui": Value("string"),
-                                            "preferred": Value("string"),
-                                            "tui": Value("string"),
-                                            "mshs": [
-                                                {
-                                                    "code": Value("string"),
-                                                }
-                                            ],
-                                        }
-                                    ],
-                                }
-                            ],
-                            "ewnterms": [
-                                {
-                                    "id": Value("string"),
-                                    "to": Value("string"),
-                                    "from": Value("string"),
-                                    "senses": [
-                                        {
-                                            "offset": Value("string"),
-                                        }
-                                    ],
-                                }
-                            ],
-                            "semrels": [
-                                {
-                                    "id": Value("string"),
-                                    "term1": Value("string"),
-                                    "term2": Value("string"),
-                                    "reltype": Value("string"),
-                                }
-                            ],
-                            "chunks": [
-                                {
-                                    "id": Value("string"),
-                                    "to": Value("string"),
-                                    "from": Value("string"),
-                                    "type": Value("string"),
-                                }
-                            ],
-                            "tokens": [
-                                {
-                                    "id": Value("string"),
-                                    "pos": Value("string"),
-                                    "lemma": Value("string"),
-                                    "text": Value("string"),
-                                }
-                            ],
-                        }
-                    ],
-                }
-            )
-
-        elif self.config.schema == "bigbio_kb":
-            features = kb_features
-
-        elif self.config.name in ("plain", "plain_en", "plain_de"):
-            features = Features(
-                {
-                    "sample_id": Value("string"),
-                    "sample_id_prefix": Value("string"),
-                    "language": Value("string"),
-                    "abstract": Value("string"),
-                }
-            )
-
-        elif self.config.schema == "bigbio_t2t":
-            features = text2text_features
-
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            license=str(_LICENSE),
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        my_urls = _URLs[self.config.name]
-        data_dirs = dl_manager.download(my_urls)
-        # ensure that data_dirs is always a list of string paths
-        if isinstance(data_dirs, str):
-            data_dirs = [data_dirs]
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "file_names_and_pointers": itertools.chain(
-                        *[dl_manager.iter_archive(data_dir) for data_dir in data_dirs]
-                    ),
-                    "split": "train",
-                },
-            ),
-        ]
-
-    @staticmethod
-    def _get_umlsterms_from_xsent(xsent: Element) -> List:
-        xumlsterms = xsent.find("./umlsterms")
-
-        umlsterms = []
-        for xumlsterm in xumlsterms.findall("./umlsterm"):
-
-            concepts = []
-            for xconcept in xumlsterm.findall("./concept"):
-
-                mshs = [
-                    {"code": xmsh.get("code")} for xmsh in xconcept.findall("./msh")
-                ]
-
-                concept = {
-                    "id": xconcept.get("id"),
-                    "cui": xconcept.get("cui"),
-                    "preferred": xconcept.get("preferred"),
-                    "tui": xconcept.get("tui"),
-                    "mshs": mshs,
-                }
-                concepts.append(concept)
-
-            umlsterm = {
-                "id": xumlsterm.get("id"),
-                "from": xumlsterm.get("from"),
-                "to": xumlsterm.get("to"),
-                "concepts": concepts,
-            }
-            umlsterms.append(umlsterm)
-
-        return umlsterms
-
-    @staticmethod
-    def _get_ewnterms_from_xsent(xsent: Element) -> List:
-        xewnterms = xsent.find("./ewnterms")
-
-        ewnterms = []
-        for xewnterm in xewnterms.findall("./ewnterm"):
-
-            senses = [
-                {"offset": xsense.get("offset")}
-                for xsense in xewnterm.findall("./sense")
-            ]
-
-            ewnterm = {
-                "id": xewnterm.get("id"),
-                "from": xewnterm.get("from"),
-                "to": xewnterm.get("to"),
-                "senses": senses,
-            }
-            ewnterms.append(ewnterm)
-
-        return ewnterms
-
-    @staticmethod
-    def _get_semrels_from_xsent(xsent: Element) -> List[Dict[str, str]]:
-        xsemrels = xsent.find("./semrels")
-        return [
-            {
-                "id": xsemrel.get("id"),
-                "term1": xsemrel.get("term1"),
-                "term2": xsemrel.get("term2"),
-                "reltype": xsemrel.get("reltype"),
-            }
-            for xsemrel in xsemrels.findall("./semrel")
-        ]
-
-    @staticmethod
-    def _get_chunks_from_xsent(xsent: Element) -> List[Dict[str, str]]:
-        xchunks = xsent.find("./chunks")
-        return [
-            {
-                "id": xchunk.get("id"),
-                "to": xchunk.get("to"),
-                "from": xchunk.get("from"),
-                "type": xchunk.get("type"),
-            }
-            for xchunk in xchunks.findall("./chunk")
-        ]
-
-    @staticmethod
-    def _get_tokens_from_xsent(xsent: Element) -> List[Dict[str, str]]:
-        xtext = xsent.find("./text")
-        return [
-            {
-                "id": xtoken.get("id"),
-                "pos": xtoken.get("pos"),
-                "lemma": xtoken.get("lemma"),
-                "text": xtoken.text,
-            }
-            for xtoken in xtext.findall("./token")
-        ]
-
-    def _generate_original_examples(self, file_names_and_pointers):
-        """Generate something close to the original dataset.
-
-        This will yield one sample per abstract with the plaintext
-        and the annotations combined into one object. If an abstract
-        is available in both english and german each language version
-        will be a distinct example.
-        """
-        abstracts = {}
-        samples = {}
-        for file_name, fp in file_names_and_pointers:
-
-            if file_name.endswith(".abstr"):
-                sample_id = file_name
-                abstracts[sample_id] = fp.read().decode(NATIVE_ENCODING)
-
-            elif file_name.endswith(".abstr.chunkmorph.annotated.xml"):
-                content_bytes = fp.read()
-                content_str = content_bytes.decode(NATIVE_ENCODING)
-                if content_str == "":
-                    continue
-
-                xroot = ET.fromstring(content_str)
-
-                sentences = []
-                for xsent in xroot.findall("./"):
-                    sentence = {
-                        "id": xsent.get("id"),
-                        "corresp": xsent.get("corresp"),
-                        "umlsterms": self._get_umlsterms_from_xsent(xsent),
-                        "ewnterms": self._get_ewnterms_from_xsent(xsent),
-                        "semrels": self._get_semrels_from_xsent(xsent),
-                        "chunks": self._get_chunks_from_xsent(xsent),
-                        "tokens": self._get_tokens_from_xsent(xsent),
-                    }
-                    sentences.append(sentence)
-
-                sample_id = xroot.get("id")
-                samples[sample_id] = {
-                    "sample_id": sample_id,
-                    "corresp": xroot.get("corresp"),
-                    "language": xroot.get("lang"),
-                    "sentences": sentences,
-                }
-
-        for _id, (sample_id, sample) in enumerate(samples.items()):
-            sample["abstract"] = abstracts[sample_id]
-            yield _id, sample
-
-    def _generate_bigbio_kb_examples(self, file_names_and_pointers):
-        """Generate big science biomedical kb examples."""
-
-        def snippets_tokens_from_sents(sentences):
-            snippets = []
-            for sentence in sentences:
-                snippet = [el["text"] for el in sentence["tokens"]]
-                snippets.append(snippet)
-            return snippets
-
-        def sid_to_text_off(sid, snip_txts_lens):
-            ii_sid = int(sid[1:])
-            start = sum(snip_txts_lens[: ii_sid - 1]) + (ii_sid - 1)
-            end = start + snip_txts_lens[ii_sid - 1]
-            return start, end
-
-        def sid_wid_to_text_off(sid, wid, snip_txts_lens, snip_toks_lens):
-            s_start, s_end = sid_to_text_off(sid, snip_txts_lens)
-            ii_sid = int(sid[1:])
-            ii_wid = int(wid[1:])
-            w_start = sum(snip_toks_lens[ii_sid - 1][: ii_wid - 1]) + (ii_wid - 1)
-            start = s_start + w_start
-            end = start + snip_toks_lens[ii_sid - 1][ii_wid - 1]
-            return start, end
-
-        for _id, (file_name, fp) in enumerate(file_names_and_pointers):
-
-            content_bytes = fp.read()
-            content_str = content_bytes.decode(NATIVE_ENCODING)
-            if content_str == "":
-                continue
-
-            xroot = ET.fromstring(content_str)
-
-            sentences = []
-            for xsent in xroot.findall("./"):
-                sentence = {
-                    "id": xsent.get("id"),
-                    "corresp": xsent.get("corresp"),
-                    "umlsterms": self._get_umlsterms_from_xsent(xsent),
-                    "ewnterms": self._get_ewnterms_from_xsent(xsent),
-                    "semrels": self._get_semrels_from_xsent(xsent),
-                    "chunks": self._get_chunks_from_xsent(xsent),
-                    "tokens": self._get_tokens_from_xsent(xsent),
-                }
-                sentences.append(sentence)
-
-            snip_toks = snippets_tokens_from_sents(sentences)
-            snip_txts = [" ".join(snip_tok) for snip_tok in snip_toks]
-            snip_txts_lens = [len(el) for el in snip_txts]
-            snip_toks_lens = [[len(tok) for tok in snip] for snip in snip_toks]
-            text = " ".join(snip_txts)
-            passages = [
-                {
-                    "id": "{}-passage-0".format(xroot.get("id")),
-                    "type": "abstract",
-                    "text": [text],
-                    "offsets": [(0, len(text))],
-                }
-            ]
-
-            entities = []
-            rel_map = {}
-            for sentence in sentences:
-                sid = sentence["id"]
-                ii_sid = int(sid[1:])
-
-                for umlsterm in sentence["umlsterms"]:
-                    umlsterm_id = umlsterm["id"]
-                    entity_id = f"{sid}-{umlsterm_id}"
-                    wid_from = umlsterm["from"]
-                    wid_to = umlsterm["to"]
-                    ii_wid_from = int(wid_from[1:])
-                    ii_wid_to = int(wid_to[1:])
-
-                    tok_text = " ".join(
-                        snip_toks[ii_sid - 1][ii_wid_from - 1 : ii_wid_to]
-                    )
-                    w_from_start, w_from_end = sid_wid_to_text_off(
-                        sid, wid_from, snip_txts_lens, snip_toks_lens
-                    )
-                    w_to_start, w_to_end = sid_wid_to_text_off(
-                        sid, wid_to, snip_txts_lens, snip_toks_lens
-                    )
-
-                    offsets = [(w_from_start, w_to_end)]
-                    main_text = text[w_from_start:w_to_end]
-                    umls_cuis = [el["cui"] for el in umlsterm["concepts"]]
-                    for concept in umlsterm["concepts"]:
-                        rel_map[concept["id"]] = entity_id
-
-                    entity = {
-                        "id": "{}-{}".format(xroot.get("id"), entity_id),
-                        "offsets": offsets,
-                        "text": [tok_text],
-                        "type": "umlsterm",
-                        "normalized": [
-                            {"db_name": "UMLS", "db_id": cui} for cui in umls_cuis
-                        ],
-                    }
-                    entities.append(entity)
-
-            relations = []
-            for sentence in sentences:
-                sid = sentence["id"]
-                for semrel in sentence["semrels"]:
-                    semrel_id = semrel["id"]
-                    rel_id = "{}-{}-{}-{}".format(
-                        sid, semrel_id, semrel["term1"], semrel["term2"],
-                    )
-                    arg1_id = "{}-{}".format(xroot.get("id"), rel_map[semrel["term1"]])
-                    arg2_id = "{}-{}".format(xroot.get("id"), rel_map[semrel["term2"]])
-                    # some semrels are between multiple normalizations of
-                    # a single entity. we skip these. see docstring at top
-                    # of module for more complete description
-                    if arg1_id == arg2_id:
-                        continue
-                    relation = {
-                        "id": "{}-{}".format(xroot.get("id"), rel_id),
-                        "type": semrel["reltype"],
-                        "arg1_id": arg1_id,
-                        "arg2_id": arg2_id,
-                        "normalized": []
-                    }
-                    relations.append(relation)
-
-            yield _id, {
-                "id": xroot.get("id"),
-                "document_id": xroot.get("id"),
-                "passages": passages,
-                "entities": entities,
-                "coreferences": [],
-                "events": [],
-                "relations": relations,
-            }
-
-    def _generate_plain_examples(self, file_names_and_pointers):
-        """Generate plain text abstract examples."""
-        for _id, (file_name, fp) in enumerate(file_names_and_pointers):
-            match = re.match(FILE_NAME_PATTERN, file_name)
-            yield _id, {
-                "sample_id_prefix": match.group(1),
-                "sample_id": file_name,
-                "language": LANG_MAP[match.group(2)],
-                "abstract": fp.read().decode(NATIVE_ENCODING),
-            }
-
-    def _generate_translation_examples(self, file_names_and_pointers):
-        sample_map = defaultdict(list)
-        for file_name, fp in file_names_and_pointers:
-            if file_name.endswith("eng.abstr"):
-                language = "en"
-            elif file_name.endswith("ger.abstr"):
-                language = "de"
-            else:
-                raise ValueError()
-            sample_id_prefix = re.sub(".(eng|ger).abstr$", "", file_name)
-            sample_id = file_name
-            abstract = fp.read().decode(NATIVE_ENCODING)
-            sample_map[sample_id_prefix].append(
-                {"language": language, "sample_id": sample_id, "abstract": abstract}
-            )
-
-        _id = 0
-        for sample_id_prefix, sample_pair in sample_map.items():
-            if len(sample_pair) != 2:
-                continue
-            en_idx = 0 if sample_pair[0]["language"] == "en" else 1
-            de_idx = 0 if en_idx == 1 else 1
-            yield _id, {
-                "id": sample_id_prefix,
-                "document_id": sample_id_prefix,
-                "text_1": sample_pair[en_idx]["abstract"],
-                "text_2": sample_pair[de_idx]["abstract"],
-                "text_1_name": "en",
-                "text_2_name": "de",
-            }
-            _id += 1
-
-    def _generate_examples(self, file_names_and_pointers, split):
-
-        if self.config.schema == "source":
-            genny = self._generate_original_examples(file_names_and_pointers)
-
-        elif self.config.schema == "bigbio_kb":
-            genny = self._generate_bigbio_kb_examples(file_names_and_pointers)
-
-        elif self.config.name in ("plain", "plain_en", "plain_de"):
-            genny = self._generate_plain_examples(file_names_and_pointers)
-
-        elif self.config.schema == "bigbio_t2t":
-            genny = self._generate_translation_examples(file_names_and_pointers)
-
-        for _id, sample in genny:
-            yield _id, sample
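
The trickiest part of the deleted loader is `sid_wid_to_text_off`, which rebuilds character offsets from 1-based sentence/token ids under the convention that sentences and tokens are joined by single spaces. A self-contained sketch of that arithmetic, with made-up tokens:

# Sentences joined by single spaces; tokens within a sentence likewise.
snip_toks = [["The", "posterior", "cruciate", "ligament"], ["It", "is", "strong"]]
snip_txts = [" ".join(toks) for toks in snip_toks]
snip_txts_lens = [len(t) for t in snip_txts]
snip_toks_lens = [[len(tok) for tok in toks] for toks in snip_toks]
text = " ".join(snip_txts)

def sid_wid_to_text_off(sid, wid):
    # "s1"/"w2" style ids, 1-based, as in the loader above; each earlier
    # sentence/token contributes its length plus one joining space.
    ii_sid, ii_wid = int(sid[1:]), int(wid[1:])
    s_start = sum(snip_txts_lens[: ii_sid - 1]) + (ii_sid - 1)
    w_start = sum(snip_toks_lens[ii_sid - 1][: ii_wid - 1]) + (ii_wid - 1)
    start = s_start + w_start
    end = start + snip_toks_lens[ii_sid - 1][ii_wid - 1]
    return start, end

start, end = sid_wid_to_text_off("s1", "w2")
print(text[start:end])  # -> "posterior"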
muchmore_bigbio_kb/muchmore-train.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:21ecb6aec0fb7ff6f8911490aba2567d11c85a4930caf5728fea8e6bd612dcc3
+size 26127505
muchmore_bigbio_t2t/muchmore-train.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:06f692a8fc0ea9bb2afeef613b56031a28a07f759ffa82c94d7f388eebf9da0d
+size 7453621
muchmore_de_bigbio_kb/muchmore-train.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:beff97acdc3cec4e0f657e74a985235eb4065314fb890a7152295a4861d652c2
+size 9711777
muchmore_en_bigbio_kb/muchmore-train.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:70af152cab3ab77d8b55c036877674a9b495023e9cb58726b16ea333a6887826
+size 16432783
muchmore_source/muchmore-train.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:009e0d0d49ac00e49070ba7a0bc9ce7f1dd525c0eaa2ebab19fd86834a8bfb5c
+size 38974300
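
Each added parquet file is checked in as a Git LFS pointer with exactly the three fields shown above. A minimal sketch of reading those fields back (parse_lfs_pointer is an illustrative helper, not a library function):

def parse_lfs_pointer(path: str) -> dict:
    # Pointer files are tiny text files: one "key value" pair per line
    # (version, oid, size), as in the hunks above.
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = parse_lfs_pointer("muchmore_source/muchmore-train.parquet")
print(ptr["size"])  # "38974300" until git-lfs replaces the pointer with real data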