import pickle

import datasets
import numpy as np

_DESCRIPTION = """\
The dataset consists of tuples of (observations, actions, rewards, dones) sampled by agents
interacting with the CityLearn 2022 Phase 1 environment.
"""

_BASE_URL = "https://huggingface.co/datasets/TobiTob/CityLearn/resolve/main"

_URLS = {
    "s_test": f"{_BASE_URL}/s_test.pkl",
    "s_week": f"{_BASE_URL}/s_week.pkl",
    "s_month": f"{_BASE_URL}/s_month.pkl",
    "s_random": f"{_BASE_URL}/s_random.pkl",
    "s_random2": f"{_BASE_URL}/s_random2.pkl",
    "s_random3": f"{_BASE_URL}/s_random3.pkl",
}


class DecisionTransformerCityLearnDataset(datasets.GeneratorBasedBuilder):
    # You will be able to load one configuration from the following list with
    # data = datasets.load_dataset('TobiTob/CityLearn', 'data_name')
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="s_test",
            description="Small dataset sampled from an expert policy in the CityLearn environment. Data size 10x8",
        ),
        datasets.BuilderConfig(
            name="s_week",
            description="Data sampled from an expert policy in the CityLearn environment. Data size 260x168",
        ),
        datasets.BuilderConfig(
            name="s_month",
            description="Data sampled from an expert policy in the CityLearn environment. Data size 60x720",
        ),
        datasets.BuilderConfig(
            name="s_random",
            description="Random environment interactions in the CityLearn environment. Data size 950x461",
        ),
        datasets.BuilderConfig(
            name="s_random2",
            description="Random environment interactions in the CityLearn environment. Data size 43795x10",
        ),
        datasets.BuilderConfig(
            name="s_random3",
            description="Random environment interactions in the CityLearn environment. Data size 23050x19",
        ),
    ]

    def _info(self):
        # Each example is one trajectory: per-step observation vectors, action
        # vectors, scalar rewards, and episode-termination flags.
        features = datasets.Features(
            {
                "observations": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
                "actions": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
                "rewards": datasets.Sequence(datasets.Value("float32")),
                "dones": datasets.Sequence(datasets.Value("bool")),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
        )

    def _split_generators(self, dl_manager):
        # Download the pickle file that corresponds to the selected configuration.
        urls = _URLS[self.config.name]
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_dir,
                    "split": "train",
                },
            )
        ]

    # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
    def _generate_examples(self, filepath, split):
        with open(filepath, "rb") as f:
            trajectories = pickle.load(f)

        for idx, traj in enumerate(trajectories):
            yield idx, {
                "observations": traj["observations"],
                "actions": traj["actions"],
                # Rewards and dones are stored as 1-D arrays per trajectory;
                # expand to column vectors of shape (T, 1) before casting.
                "rewards": np.expand_dims(traj["rewards"], axis=1),
                # Some source files store the termination flags under
                # "terminals" instead of "dones"; accept either key.
                "dones": np.expand_dims(traj.get("dones", traj.get("terminals")), axis=1),
            }
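
# Example usage (a minimal sketch, not part of the loading script itself): once this
# script is hosted in the TobiTob/CityLearn repository, a configuration can be loaded
# and inspected roughly as follows. The shapes noted in the comments assume the
# "s_test" configuration, whose description reports a data size of 10x8 (presumably
# 10 trajectories of 8 steps each).
if __name__ == "__main__":
    data = datasets.load_dataset("TobiTob/CityLearn", "s_test")
    traj = data["train"][0]  # first trajectory
    print(len(traj["observations"]))     # steps per trajectory (8 for "s_test")
    print(len(traj["observations"][0]))  # observation dimensionality
    print(traj["rewards"][:3])           # first few per-step rewards
    print(traj["dones"][-1])             # termination flag of the final step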