# GUE_public / GUE_public.py
# (Hugging Face Hub file-viewer header preserved as comments — the original
#  lines "fm4bio-ning's picture" / "5487ff8 verified" were scrape artifacts
#  and not valid Python.)
"""Script for the dataset containing the 28 downstream tasks from the DNABertv2 paper."""
from typing import List
import csv
import datasets
# NOTE: this script intentionally uses only the `csv` standard-library module
# (no external parsing dependency), so the Hub dataset viewer can run it.
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = ''
# You can copy an official description
_DESCRIPTION = ''
_HOMEPAGE = ""
_LICENSE = ""
# Names of the GUE downstream tasks. Each name doubles as the builder-config
# name and as the remote directory that holds that task's CSV splits.
_TASKS = [
    "splice_reconstructed",
    "mouse0",
    "mouse1",
    "mouse2",
    "mouse3",
    "mouse4",
    "covid",
    "prom_core_tata",
    "prom_core_notata",
    "prom_core_all",
    "prom_300_tata",
    "prom_300_notata",
    "prom_300_all",
    "tf0",
    "tf1",
    "tf2",
    "tf3",
    "tf4",
    "H3",
    "H3K14ac",
    "H3K36me3",
    "H3K4me1",
    "H3K4me2",
    "H3K4me3",
    "H3K79me3",
    "H3K9ac",
    "H4",
    "H4ac",
]
class GUEConfig(datasets.BuilderConfig):
    """BuilderConfig for one GUE downstream task."""

    def __init__(self, *args, task: str, **kwargs):
        """Create a config whose name is the task it selects.

        Args:
            task (:obj:`str`): Task name (expected to be one of ``_TASKS``).
            **kwargs: keyword arguments forwarded to super.
        """
        # The config is named after the task so `load_dataset(..., name=task)`
        # picks the right CSV directory.
        super().__init__(*args, name=task, **kwargs)
        self.task = task
class GUEDownstreamTasks(datasets.GeneratorBasedBuilder):
    """Builder for the GUE downstream-task datasets.

    Each task directory on the Hub is expected to contain ``train.csv``,
    ``dev.csv`` and ``test.csv``, each with a header row followed by
    ``sequence,label`` rows.
    """

    VERSION = datasets.Version("1.1.0")
    BUILDER_CONFIG_CLASS = GUEConfig
    BUILDER_CONFIGS = [GUEConfig(task=task) for task in _TASKS]
    # Must match an entry in _TASKS; the previous value "reconstructed" did
    # not exist, so default-config resolution failed.
    DEFAULT_CONFIG_NAME = "splice_reconstructed"

    def _info(self):
        """Return the :class:`datasets.DatasetInfo` describing this dataset."""
        features = datasets.Features(
            {
                "sequence": datasets.Value("string"),
                "label": datasets.Value("int32"),
            }
        )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> List[datasets.SplitGenerator]:
        """Download the task's CSV files and map them to the three splits."""
        train_file = dl_manager.download_and_extract(self.config.task + "/train.csv")
        valid_file = dl_manager.download_and_extract(self.config.task + "/dev.csv")
        test_file = dl_manager.download_and_extract(self.config.task + "/test.csv")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"file": train_file}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"file": valid_file}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"file": test_file}
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, file):
        """Yield ``(key, example)`` pairs from one CSV split file.

        The first row is a header and is skipped; each remaining row holds
        the sequence in column 0 and an integer label in column 1.
        """
        # newline="" is the documented way to open files for the csv module.
        with open(file, "r", newline="") as f:
            csv_reader = csv.reader(f)
            next(csv_reader, None)  # skip header; safe on an empty file
            for key, row in enumerate(csv_reader):
                yield key, {
                    "sequence": row[0],
                    # Cast explicitly: the feature schema declares int32.
                    "label": int(row[1]),
                }