albertvillanova (HF staff) committed
Commit cbbcbbc
Parent: ac17d9c

Update loading script

Files changed (1):
  1. species_800.py (+7 -13)
species_800.py CHANGED
@@ -21,9 +21,6 @@ import os
 import datasets
 
 
-logger = datasets.logging.get_logger(__name__)
-
-
 _CITATION = """\
 @article{pafilis2013species,
   title={The SPECIES and ORGANISMS resources for fast and accurate identification of taxonomic names in text},
@@ -48,7 +45,8 @@ and developed a web resource, ORGANISMS, that makes the results accessible to th
 """
 
 _HOMEPAGE = "https://species.jensenlab.org/"
-_URL = "https://drive.google.com/u/0/uc?id=1OletxmPYNkz2ltOr9pyT0b0iBtUWxslh&export=download/"
+# Source data from: http://nlp.dmis.korea.edu/projects/biobert-2020-checkpoints/NERdata.zip
+_URL = "data/s800.zip"
 _BIOBERT_NER_DATASET_DIRECTORY = "s800"
 _TRAINING_FILE = "train.tsv"
 _DEV_FILE = "devel.tsv"
@@ -98,26 +96,22 @@ class Species800(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
-        urls_to_download = {
-            "biobert_ner_datasets": _URL,
-        }
-        downloaded_files = dl_manager.download_and_extract(urls_to_download)
-        dataset_directory = os.path.join(downloaded_files["biobert_ner_datasets"], _BIOBERT_NER_DATASET_DIRECTORY)
+        dl_dir = dl_manager.download_and_extract(_URL)
+        data_dir = os.path.join(dl_dir, _BIOBERT_NER_DATASET_DIRECTORY)
 
         return [
             datasets.SplitGenerator(
-                name=datasets.Split.TRAIN, gen_kwargs={"filepath": os.path.join(dataset_directory, _TRAINING_FILE)}
+                name=datasets.Split.TRAIN, gen_kwargs={"filepath": os.path.join(data_dir, _TRAINING_FILE)}
             ),
             datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": os.path.join(dataset_directory, _DEV_FILE)}
+                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": os.path.join(data_dir, _DEV_FILE)}
             ),
             datasets.SplitGenerator(
-                name=datasets.Split.TEST, gen_kwargs={"filepath": os.path.join(dataset_directory, _TEST_FILE)}
+                name=datasets.Split.TEST, gen_kwargs={"filepath": os.path.join(data_dir, _TEST_FILE)}
             ),
         ]
 
     def _generate_examples(self, filepath):
-        logger.info("⏳ Generating examples from = %s", filepath)
         with open(filepath, encoding="utf-8") as f:
             guid = 0
             tokens = []
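
For quick verification of the updated script, here is a minimal usage sketch, not part of the commit: it assumes the script is loaded under the dataset name species_800 and that the installed datasets version still executes dataset loading scripts (recent versions may require trust_remote_code=True); the comments about the example fields are illustrative.

from datasets import load_dataset

# Load the SPECIES-800 splits defined by _split_generators above.
# "species_800" as the dataset name is an assumption; adjust it to the
# actual repository id if the script is hosted elsewhere.
dataset = load_dataset("species_800")

# train/validation/test come from train.tsv, devel.tsv and test.tsv
# inside the s800/ directory extracted from data/s800.zip.
print(dataset)
print(dataset["train"][0])  # e.g. the tokens and NER tags of the first sentence

Because _URL now points at data/s800.zip inside the dataset repository rather than an external Google Drive link, download_and_extract resolves it against the repo itself, which is what makes the simpler dl_dir/data_dir handling in the diff possible.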