astro-classification-redshifts-augmented / train_augmented_dataset.py
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Augmented PLAsTiCC training set: simulated light curves paired with class labels and true redshifts."""
from pathlib import Path

import datasets
import jsonlines
import pandas as pd

from connect_later.split_dataset_into_files import split_augmented_jsonl_dataset
from connect_later.constants import PLASTICC_CLASS_MAPPING, INT_LABELS
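
# Site-specific data locations (these appear to be NERSC Perlmutter scratch
# paths); adjust to wherever the raw and preprocessed PLAsTiCC data live.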
RAW_DATA_PATH = "/pscratch/sd/h/helenqu/plasticc/raw"
DATASET_PATH = "/pscratch/sd/h/helenqu/plasticc/train_augmented_dataset"
ORIG_DATASET_PATH = "/pscratch/sd/h/helenqu/plasticc/raw_train_with_labels"
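
# Reference copy of the class mapping and label ordering imported from
# connect_later.constants above: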
# PLASTICC_CLASS_MAPPING = {
# 90: "SNIa",
# 67: "SNIa-91bg",
# 52: "SNIax",
# 42: "SNII",
# 62: "SNIbc",
# 95: "SLSN-I",
# 15: "TDE",
# 64: "KN",
# 88: "AGN",
# 92: "RRL",
# 65: "M-dwarf",
# 16: "EB",
# 53: "Mira",
# 6: "$\mu$-Lens-Single",
# }
# INT_LABELS = sorted(PLASTICC_CLASS_MAPPING.keys())
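# With this sorted ordering, ClassLabel index 0 corresponds to PLAsTiCC class 6
# ("$\mu$-Lens-Single"), index 1 to class 15 ("TDE"), and so on.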
# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
  title  = {A great new dataset},
  author = {huggingface, Inc.},
  year   = {2020}
}
"""
_DESCRIPTION = """\
Augmented version of the PLAsTiCC (Photometric LSST Astronomical Time-Series
Classification Challenge) training set. Each example is a simulated light curve,
stored as fixed-length (300-row) time/wavelength and flux/flux-error arrays,
together with its class label and true redshift.
"""


class TrainAugmentedDataset(datasets.GeneratorBasedBuilder):
    """Augmented PLAsTiCC training light curves with class labels and redshifts."""

    VERSION = datasets.Version("1.1.0")
    def _info(self):
        features = datasets.Features(
            {
                "objid": datasets.Value("string"),
                "times_wv": datasets.Array2D(shape=(300, 2), dtype="float64"),  # ith row is [time, central wv of band]
                "target": datasets.Array2D(shape=(300, 2), dtype="float64"),  # the time series data, ith row is [flux, flux_err]
                "label": datasets.ClassLabel(
                    num_classes=len(PLASTICC_CLASS_MAPPING),
                    names=[PLASTICC_CLASS_MAPPING[int_label] for int_label in INT_LABELS],
                ),
                "redshift": datasets.Value("float32"),
            }
        )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        dataset_path = Path(DATASET_PATH)
        if not (dataset_path / "train.jsonl").exists():
            # One-time preprocessing: shard the augmented jsonl data into
            # train.jsonl/val.jsonl (0.8 looks like the train fraction)
            print("Splitting dataset into files...")
            split_augmented_jsonl_dataset(
                DATASET_PATH,
                Path(ORIG_DATASET_PATH) / "plasticc_train_lightcurves.csv.jsonl",
                "*.jsonl",
                0.8,
            )
        print(f"int index to label mapping: {INT_LABELS}")
        print(f"label to class name mapping: {PLASTICC_CLASS_MAPPING}")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": dataset_path / "train.jsonl",
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": dataset_path / "val.jsonl",
                    "split": "dev",
                },
            ),
        ]
    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath, split):
        # Yields (key, example) tuples; the key must be unique for each example.
        metadata = pd.read_csv(Path(RAW_DATA_PATH) / "plasticc_train_metadata.csv.gz")
        with jsonlines.open(filepath) as reader:
            for obj in reader:
                # avocado objids are of the form 'plasticc_id{_aug_hash}', so recover
                # the integer PLAsTiCC id to look up the object's metadata row
                if isinstance(obj["object_id"], str):
                    objid = int(obj["object_id"].split("_")[1])
                else:
                    objid = obj["object_id"]
                metadata_obj = metadata[metadata["object_id"] == objid]
                label = list(INT_LABELS).index(metadata_obj.true_target.values[0])
                redshift = metadata_obj.true_z.values[0]
                yield obj["object_id"], {
                    "objid": obj["object_id"],
                    "times_wv": obj["times_wv"],
                    "target": obj["lightcurve"],
                    "label": label,
                    "redshift": redshift,
                }
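

# A minimal smoke test, not part of the original script: with a `datasets`
# release that still supports loading local scripts (pre-3.0; recent 2.x
# releases may additionally need trust_remote_code=True), build the dataset
# and inspect one example. Requires the hardcoded paths above to be reachable.
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__)
    print(ds)
    example = ds["train"][0]
    print(example["objid"], example["label"], example["redshift"])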