feat: adapt loading script

agro_nt_tasks.py  CHANGED  (+78 / -112)
@@ -1,14 +1,3 @@
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # TODO: Address all TODOs and remove all explanatory comments
@@ -18,133 +7,114 @@
 import csv
 import json
 import os
+from typing import List
+from Bio import SeqIO
 
 import datasets
 
 
 # TODO: Add BibTeX citation
-# Find for instance the citation on arxiv or on the dataset repo/website
-_CITATION = """\
-@InProceedings{huggingface:dataset,
-title = {A great new dataset},
-author={huggingface, Inc.
-},
-year={2020}
-}
-"""
+_CITATION = ''
+# """\
+# @InProceedings{huggingface:dataset,
+# title = {A great new dataset},
+# author={huggingface, Inc.
+# },
+# year={2020}
+# }
+# """
 
-# TODO: Add description of the dataset here
-# You can copy an official description
 _DESCRIPTION = """\
-This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
+This dataset comprises the various supervised learning tasks considered in the agro-nt
+paper. The task types include binary classification,multi-label classification,
+regression,and multi-output regression. The actual underlying genomic tasks range from
+predicting regulatory features, RNA processing sites, and gene expression values.
 """
 
-# TODO: Add a link to an official homepage for the dataset here
-_HOMEPAGE = ""
 
 # TODO: Add the licence for the dataset here if you can find it
 _LICENSE = ""
 
-# TODO: Add link to the official dataset URLs here
-# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
-# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-_URLS = {
-    "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
-    "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
-}
-
-
-# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
-class NewDataset(datasets.GeneratorBasedBuilder):
-    """TODO: Short description of my dataset."""
+_TASKS = ['poly_a',
+          'splice_site'
+          'lncrna',
+          'chromatin_access'
+          'promoter_strength',
+          'gene_expression',
+          ]
+
+
+class AgroNtTasksConfig(datasets.BuilderConfig):
+    """BuilderConfig for the Agro NT supervised learning tasks dataset."""
+
+    def __init__(self, *args, task: str, **kwargs):
+        """BuilderConfig downstream tasks dataset.
+        Args:
+            task (:obj:`str`): Task name.
+            **kwargs: keyword arguments forwarded to super.
+        """
+        super().__init__(
+            *args,
+            name=f"{task}",
+            **kwargs,
+        )
+        self.task = task
 
-    VERSION = datasets.Version("1.1.0")
+class AgroNtTasks(datasets.GeneratorBasedBuilder):
+    """GeneratorBasedBuilder for the Agro NT supervised learning tasks dataset."""
 
-    # This is an example of a dataset with multiple configurations.
-    # If you don't want/need to define several sub-sets in your dataset,
-    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+    BUILDER_CONFIGS = [AgroNtTasksConfig(task=TASK) for TASK in _TASKS]
 
-    # If you need to make complex sub-parts in the datasets with configurable options
-    # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
-    # BUILDER_CONFIG_CLASS = MyBuilderConfig
+    DEFAULT_CONFIG_NAME = _TASKS[0]
 
-    # You will be able to load one or the other configurations in the following list with
-    # data = datasets.load_dataset('my_dataset', 'first_domain')
-    # data = datasets.load_dataset('my_dataset', 'second_domain')
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
-        datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
-    ]
+    def _info(self):
 
-    DEFAULT_CONFIG_NAME = "first_domain"  # It's not mandatory to have a default configuration. Just use one if it make sense.
+        features = datasets.Features(
+            {
+                "sequence": datasets.Value("string"),
+                "name": datasets.Value("string"),
+                "labels": datasets.Value("int8"),
+            }
+        )
 
-    def _info(self):
-        # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
-        if self.config.name == "first_domain":  # This is the name of the configuration selected in BUILDER_CONFIGS above
-            features = datasets.Features(
-                {
-                    "sentence": datasets.Value("string"),
-                    "option1": datasets.Value("string"),
-                    "answer": datasets.Value("string")
-                    # These are the features of your dataset like images, labels ...
-                }
-            )
-        else:  # This is an example to show how to have different features for "first_domain" and "second_domain"
-            features = datasets.Features(
-                {
-                    "sentence": datasets.Value("string"),
-                    "option2": datasets.Value("string"),
-                    "second_domain_answer": datasets.Value("string")
-                    # These are the features of your dataset like images, labels ...
-                }
-            )
         return datasets.DatasetInfo(
             # This is the description that will appear on the datasets page.
             description=_DESCRIPTION,
             # This defines the different columns of the dataset and their types
-            features=features,  # Here we define them above because they are different between the two configurations
-            # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
-            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
-            # supervised_keys=("sentence", "label"),
-            # Homepage of the dataset for documentation
-            homepage=_HOMEPAGE,
+            features=features,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )
 
-    def _split_generators(self, dl_manager):
-        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+
+        train_file = dl_manager.download_and_extract(self.config.task + "/train.fa")
+        test_file = dl_manager.download_and_extract(self.config.task + "/test.fa")
 
-        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
-        # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
-        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-        urls = _URLS[self.config.name]
-        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
-                    "filepath": os.path.join(data_dir, "train.jsonl"),
+                    "filepath": train_file,
                    "split": "train",
                },
            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, "dev.jsonl"),
-                    "split": "dev",
-                },
-            ),
+            # datasets.SplitGenerator(
+            #     name=datasets.Split.VALIDATION,
+            #     # These kwargs will be passed to _generate_examples
+            #     gen_kwargs={
+            #         "filepath": test_file,
+            #         "split": "dev",
+            #     },
+            # ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
-                    "filepath": os.path.join(data_dir, "test.jsonl"),
+                    "filepath": test_file,
                    "split": "test"
                },
            ),
@@ -152,21 +122,17 @@ class NewDataset(datasets.GeneratorBasedBuilder):
 
    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath, split):
-        # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-        with open(filepath, encoding="utf-8") as f:
-            for key, row in enumerate(f):
-                data = json.loads(row)
-                if self.config.name == "first_domain":
+        key = 0
+        with open(filepath, 'r') as f:
+            for record in SeqIO.parse(f,'fasta'):
                # Yields examples as (key, example) tuples
+
+                split_name = record.name.split("|")
+                name = split_name[0]
+                labels = split_name[1:]
+
                yield key, {
-                    "sentence": data["sentence"],
-                    "option1": data["option1"],
-                    "answer": "" if split == "test" else data["answer"],
-                }
-                else:
-                    yield key, {
-                        "sentence": data["sentence"],
-                        "option2": data["option2"],
-                        "second_domain_answer": "" if split == "test" else data["second_domain_answer"],
-                    }
+                    "sequence": str(record.seq),
+                    "name": name,
+                    "label": labels,
+                }
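
A note on the `_TASKS` list as committed: the entries `'splice_site'` and `'chromatin_access'` have no trailing comma, so Python's implicit string concatenation merges them with the names that follow and the builder ends up exposing four configurations ('poly_a', 'splice_sitelncrna', 'chromatin_accesspromoter_strength', 'gene_expression') instead of six. A minimal corrected sketch, assuming the six task names above are the intended configs:

    # One task name per element, each with its own trailing comma, so that
    # BUILDER_CONFIGS exposes six configurations rather than four.
    _TASKS = [
        'poly_a',
        'splice_site',
        'lncrna',
        'chromatin_access',
        'promoter_strength',
        'gene_expression',
    ]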
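The generator implies FASTA records whose ID line follows a `>name|label` convention: everything after the first `|` in `record.name` is treated as labels. As committed, a few details look inconsistent with `_info` and may be worth checking: `key` is initialised to 0 but never incremented (keys must be unique per example), the example is yielded under "label" while the declared feature is "labels", and the labels stay strings although the feature type is `int8`. A minimal sketch of the generator under those assumptions, for the single-integer-label case (multi-label and regression tasks would need a different feature declaration):

    # Relies on `from Bio import SeqIO`, already imported at the top of the script.
    def _generate_examples(self, filepath, split):
        # Yields examples as (key, example) tuples; enumerate keeps keys unique.
        with open(filepath, 'r') as f:
            for key, record in enumerate(SeqIO.parse(f, 'fasta')):
                name, *labels = record.name.split("|")
                yield key, {
                    "sequence": str(record.seq),
                    "name": name,
                    # Assumes one integer label per record, matching the
                    # int8 "labels" feature declared in _info.
                    "labels": int(labels[0]),
                }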
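For context, relative paths passed to `dl_manager.download_and_extract` are resolved against the dataset repository that hosts the script, so each config expects `<task>/train.fa` and `<task>/test.fa` alongside `agro_nt_tasks.py`, and `biopython` must be installed for the `SeqIO` import. A hypothetical usage sketch (the repository id below is a placeholder, not something this commit establishes):

    from datasets import load_dataset

    # Placeholder repo id -- substitute the Hub repository that actually hosts
    # agro_nt_tasks.py together with the <task>/train.fa and <task>/test.fa files.
    dataset = load_dataset(
        "username/agro_nt_tasks",
        "poly_a",                 # any name from _TASKS
        trust_remote_code=True,   # needed for script-based datasets in recent `datasets` releases
    )
    print(dataset["train"][0])    # first training example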