Datasets:

Modalities:
Text
Languages:
English
Size:
< 1K
Libraries:
Datasets
License:
albertvillanova HF staff committed on
Commit
828add6
1 Parent(s): 2d757a8

Use iter_files

Browse files
Files changed (1) hide show
  1. chebi_nactem.py +68 -78
chebi_nactem.py CHANGED
@@ -14,8 +14,9 @@
14
  # limitations under the License.
15
 
16
  import os.path
 
17
  from pathlib import Path
18
- from typing import Dict, List, Tuple
19
 
20
  import datasets
21
 
@@ -25,7 +26,7 @@ from .bigbiohub import Tasks
25
  from .bigbiohub import parse_brat_file
26
 
27
 
28
- _LANGUAGES = ['English']
29
  _PUBMED = True
30
  _LOCAL = False
31
  _CITATION = """\
@@ -66,7 +67,7 @@ and over 6,000 relations between entities.
66
 
67
  _HOMEPAGE = "http://www.nactem.ac.uk/chebi"
68
 
69
- _LICENSE = 'Creative Commons Attribution 4.0 International'
70
 
71
  _URLS = {
72
  _DATASETNAME: "http://www.nactem.ac.uk/chebi/ChEBI.zip",
@@ -154,86 +155,75 @@ class ChebiNactemDatasset(datasets.GeneratorBasedBuilder):
154
  "chebi_nactem_abstr_ann2": os.path.join("ChEBI", "abstracts", "Annotator2"),
155
  "chebi_nactem_fullpaper": os.path.join("ChEBI", "fullpapers"),
156
  }
157
-
158
- subset_dir = Path(data_dir) / subset_paths[self.config.subset_id]
159
-
160
  return [
161
  datasets.SplitGenerator(
162
  name=datasets.Split.TRAIN,
163
- # Whatever you put in gen_kwargs will be passed to _generate_examples
164
- gen_kwargs={
165
- "data_dir": subset_dir,
166
- },
167
  )
168
  ]
169
 
170
- def _generate_examples(self, data_dir: Path) -> Tuple[int, Dict]:
171
  """Yields examples as (key, example) tuples."""
172
 
173
- def uid_gen():
174
- _uid = 0
175
- while True:
176
- yield str(_uid)
177
- _uid += 1
178
-
179
- uid = iter(uid_gen())
180
-
181
- txt_files = (f for f in sorted(os.listdir(data_dir)) if f.endswith(".txt"))
182
- for idx, file_name in enumerate(txt_files):
183
-
184
- brat_file = data_dir / file_name
185
- contents = parse_brat_file(brat_file)
186
-
187
- if self.config.schema == "source":
188
- yield idx, {
189
- "document_id": contents["document_id"],
190
- "text": contents["text"],
191
- "entities": contents["text_bound_annotations"],
192
- "relations": [
193
- {
194
- "id": relation["id"],
195
- "type": relation["type"],
196
- "arg1": relation["head"]["ref_id"],
197
- "arg2": relation["tail"]["ref_id"],
198
- }
199
- for relation in contents["relations"]
200
- ],
201
- }
202
-
203
- elif self.config.schema == "bigbio_kb":
204
- yield idx, {
205
- "id": next(uid),
206
- "document_id": contents["document_id"],
207
- "passages": [
208
- {
209
- "id": next(uid),
210
- "type": "",
211
- "text": [contents["text"]],
212
- "offsets": [(0, len(contents["text"]))],
213
- }
214
- ],
215
- "entities": [
216
- {
217
- "id": f"{idx}_{entity['id']}",
218
- "type": entity["type"],
219
- "offsets": entity["offsets"],
220
- "text": entity["text"],
221
- "normalized": [],
222
- }
223
- for entity in contents["text_bound_annotations"]
224
- ],
225
- "events": [],
226
- "coreferences": [],
227
- "relations": [
228
- {
229
- "id": f"{idx}_{relation['id']}",
230
- "type": relation["type"],
231
- "arg1_id": f"{idx}_{relation['head']['ref_id']}",
232
- "arg2_id": f"{idx}_{relation['tail']['ref_id']}",
233
- "normalized": [],
234
- }
235
- for relation in contents["relations"]
236
- ],
237
- }
238
- else:
239
- raise NotImplementedError(self.config.schema)
 
14
  # limitations under the License.
15
 
16
  import os.path
17
+ from itertools import count
18
  from pathlib import Path
19
+ from typing import Dict, Iterable, List, Tuple
20
 
21
  import datasets
22
 
 
26
  from .bigbiohub import parse_brat_file
27
 
28
 
29
+ _LANGUAGES = ["English"]
30
  _PUBMED = True
31
  _LOCAL = False
32
  _CITATION = """\
 
67
 
68
  _HOMEPAGE = "http://www.nactem.ac.uk/chebi"
69
 
70
+ _LICENSE = "Creative Commons Attribution 4.0 International"
71
 
72
  _URLS = {
73
  _DATASETNAME: "http://www.nactem.ac.uk/chebi/ChEBI.zip",
 
155
  "chebi_nactem_abstr_ann2": os.path.join("ChEBI", "abstracts", "Annotator2"),
156
  "chebi_nactem_fullpaper": os.path.join("ChEBI", "fullpapers"),
157
  }
158
+ subset_dir = os.path.join(data_dir, subset_paths[self.config.subset_id])
 
 
159
  return [
160
  datasets.SplitGenerator(
161
  name=datasets.Split.TRAIN,
162
+ gen_kwargs={"file_paths": dl_manager.iter_files(subset_dir)},
 
 
 
163
  )
164
  ]
165
 
166
+ def _generate_examples(self, file_paths: Iterable[str]) -> Tuple[int, Dict]:
167
  """Yields examples as (key, example) tuples."""
168
 
169
+ uid = count(0)
170
+
171
+ for idx, file_path in enumerate(file_paths):
172
+ if os.path.basename(file_path).endswith(".txt"):
173
+ contents = parse_brat_file(
174
+ Path(file_path), annotation_file_suffixes=[".ann"]
175
+ )
176
+
177
+ if self.config.schema == "source":
178
+ yield idx, {
179
+ "document_id": contents["document_id"],
180
+ "text": contents["text"],
181
+ "entities": contents["text_bound_annotations"],
182
+ "relations": [
183
+ {
184
+ "id": relation["id"],
185
+ "type": relation["type"],
186
+ "arg1": relation["head"]["ref_id"],
187
+ "arg2": relation["tail"]["ref_id"],
188
+ }
189
+ for relation in contents["relations"]
190
+ ],
191
+ }
192
+
193
+ elif self.config.schema == "bigbio_kb":
194
+ yield idx, {
195
+ "id": next(uid),
196
+ "document_id": contents["document_id"],
197
+ "passages": [
198
+ {
199
+ "id": next(uid),
200
+ "type": "",
201
+ "text": [contents["text"]],
202
+ "offsets": [(0, len(contents["text"]))],
203
+ }
204
+ ],
205
+ "entities": [
206
+ {
207
+ "id": f"{idx}_{entity['id']}",
208
+ "type": entity["type"],
209
+ "offsets": entity["offsets"],
210
+ "text": entity["text"],
211
+ "normalized": [],
212
+ }
213
+ for entity in contents["text_bound_annotations"]
214
+ ],
215
+ "events": [],
216
+ "coreferences": [],
217
+ "relations": [
218
+ {
219
+ "id": f"{idx}_{relation['id']}",
220
+ "type": relation["type"],
221
+ "arg1_id": f"{idx}_{relation['head']['ref_id']}",
222
+ "arg2_id": f"{idx}_{relation['tail']['ref_id']}",
223
+ "normalized": [],
224
+ }
225
+ for relation in contents["relations"]
226
+ ],
227
+ }
228
+ else:
229
+ raise NotImplementedError(self.config.schema)