import pickle
import datasets
import numpy as np

_DESCRIPTION = """The dataset consists of tuples of (observations, actions, rewards, dones) sampled by agents
interacting with the CityLearn 2022 Phase 1 environment (only the first 5 buildings)."""

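# Each downloaded .pkl file holds a list of trajectory dicts of the form
#   {"observations": [[...], ...], "actions": [[...], ...],
#    "rewards": [...], "dones": [...]}
# (some files store the done flags under "terminals"; see _generate_examples).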
_BASE_URL = "https://huggingface.co/datasets/TobiTob/CityLearn/resolve/main"
_URLS = {
"f_230": f"{_BASE_URL}/f_230x5x38.pkl",
"LSTM": f"{_BASE_URL}/L_2189x5x4.pkl",
"RB1": f"{_BASE_URL}/R1_2189x5x4.pkl",
"RB2": f"{_BASE_URL}/R2_2189x5x4.pkl",
"Merged1": f"{_BASE_URL}/L_R1_2189x5x8.pkl",
"Merged2": f"{_BASE_URL}/L_R1_R2_2189x5x12.pkl",
}
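
# The file names appear to encode sequence length x buildings x number of
# sequences (e.g. 2189x5x4), mirroring the config descriptions below; this
# reading is an inference, not something documented in the repository.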


class DecisionTransformerCityLearnDataset(datasets.GeneratorBasedBuilder):
    # You can load one of the configurations listed below with
    # data = datasets.load_dataset('TobiTob/CityLearn', 'data_name')
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="f_230",
            description="Data sampled from an expert LSTM policy. Sequence length = 230, Buildings = 5, Episodes = 1",
        ),
        datasets.BuilderConfig(
            name="LSTM",
            description="Data sampled from an expert LSTM policy. Sequence length = 2189, Buildings = 5, Episodes = 1",
        ),
        datasets.BuilderConfig(
            name="RB1",
            description="Data sampled from a rule-based policy. Sequence length = 2189, Buildings = 5, Episodes = 1",
        ),
        datasets.BuilderConfig(
            name="RB2",
            description="Data sampled from a rule-based policy. Sequence length = 2189, Buildings = 5, Episodes = 1",
        ),
        datasets.BuilderConfig(
            name="Merged1",
            description="LSTM + RB1. Sequence length = 2189, Buildings = 5, Episodes = 1+1",
        ),
        datasets.BuilderConfig(
            name="Merged2",
            description="LSTM + RB1 + RB2. Sequence length = 2189, Buildings = 5, Episodes = 1+1+1",
        ),
    ]

    def _info(self):
        features = datasets.Features(
            {
                # One observation/action vector per timestep.
                "observations": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
                "actions": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
                # One scalar reward and one done flag per timestep.
                "rewards": datasets.Sequence(datasets.Value("float32")),
                "dones": datasets.Sequence(datasets.Value("bool")),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
        )

    def _split_generators(self, dl_manager):
        # Each configuration maps to a single pickle file.
        url = _URLS[self.config.name]
        data_dir = dl_manager.download_and_extract(url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_dir,
                    "split": "train",
                },
            )
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath, split):
        with open(filepath, "rb") as f:
            trajectories = pickle.load(f)

        # Each trajectory is a dict with keys "observations", "actions",
        # "rewards", and "dones" (some files store the flags as "terminals").
        for idx, traj in enumerate(trajectories):
            yield idx, {
                "observations": traj["observations"],
                "actions": traj["actions"],
                # Reshape the per-timestep scalars from (T,) to (T, 1).
                "rewards": np.expand_dims(traj["rewards"], axis=1),
                "dones": np.expand_dims(traj.get("dones", traj.get("terminals")), axis=1),
            }
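

# ---------------------------------------------------------------------------
# Usage sketch (not part of the loading script itself): how one of the
# configurations above can be loaded and what a record looks like. The field
# layout is read off the features declared in _info(); the printed lengths are
# an assumption based on that declaration, not a documented guarantee.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Downloads the RB1 pickle and builds the "train" split via this script.
    data = datasets.load_dataset("TobiTob/CityLearn", "RB1")
    record = data["train"][0]
    # "observations"/"actions" hold one vector per timestep,
    # "rewards"/"dones" hold one value per timestep.
    print(len(record["observations"]), len(record["observations"][0]))
    print(len(record["rewards"]), len(record["dones"]))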