"""Script for the dataset containing the 28 downstream tasks from the DNABertv2 paper."""
from typing import List
import csv
import datasets
# Placeholders below (citation, description, homepage, license) feed the
# datasets.DatasetInfo returned by the builder's _info() method.
# Find for instance the citation on arXiv or on the dataset repo/website.
_CITATION = ''
# You can copy an official description
_DESCRIPTION = ''
_HOMEPAGE = ""
_LICENSE = ""
_TASKS = [
"splice_reconstructed",
"mouse0",
"mouse1",
"mouse2",
"mouse3",
"mouse4",
'covid',
'prom_core_tata',
'prom_core_notata',
'prom_core_all',
'prom_300_tata',
'prom_300_notata',
'prom_300_all',
'tf0',
'tf1',
'tf2',
'tf3',
'tf4',
'H3',
'H3K14ac',
'H3K36me3',
'H3K4me1',
'H3K4me2',
'H3K4me3',
'H3K79me3',
'H3K9ac',
'H4',
'H4ac',
]
class GUEConfig(datasets.BuilderConfig):
    """BuilderConfig for one GUE downstream-task dataset."""

    def __init__(self, *args, task: str, **kwargs):
        """Build the config for a single task.

        Args:
            task (:obj:`str`): Task name; it is also used as the config's
                ``name`` so that ``load_dataset(..., task)`` selects it.
            **kwargs: keyword arguments forwarded to super.
        """
        kwargs["name"] = task
        super().__init__(*args, **kwargs)
        self.task = task
class GUEDownstreamTasks(datasets.GeneratorBasedBuilder):
    """Builder for the 28 GUE downstream classification tasks.

    Each task ships as ``<task>/train.csv``, ``<task>/dev.csv`` and
    ``<task>/test.csv`` with a header row and ``sequence,label`` columns.
    """

    VERSION = datasets.Version("1.1.0")
    BUILDER_CONFIG_CLASS = GUEConfig
    BUILDER_CONFIGS = [GUEConfig(task=task) for task in _TASKS]
    # Fix: "reconstructed" is not a valid config name (configs are named after
    # entries of _TASKS), so default to the actual first task.
    DEFAULT_CONFIG_NAME = "splice_reconstructed"

    def _info(self):
        # Every task is a sequence-classification problem with an integer label.
        features = datasets.Features(
            {
                "sequence": datasets.Value("string"),
                "label": datasets.Value("int32"),
            }
        )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> List[datasets.SplitGenerator]:
        """Download the task's three CSVs and map them to train/validation/test."""
        train_file = dl_manager.download_and_extract(self.config.task + "/train.csv")
        valid_file = dl_manager.download_and_extract(self.config.task + "/dev.csv")
        test_file = dl_manager.download_and_extract(self.config.task + "/test.csv")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"file": train_file}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"file": valid_file}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"file": test_file}
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, file):
        """Yield ``(key, example)`` pairs from a ``sequence,label`` CSV file.

        Args:
            file: Local path to a CSV file with a header row.
        """
        # newline="" is the documented way to open files for the csv module.
        with open(file, "r", encoding="utf-8", newline="") as f:
            csv_reader = csv.reader(f)
            next(csv_reader)  # skip the header row
            for key, row in enumerate(csv_reader):
                yield key, {
                    "sequence": row[0],
                    # Cast explicitly: the CSV stores labels as strings but the
                    # declared feature type is int32.
                    "label": int(row[1]),
                }