import pickle

import datasets
import numpy as np

_DESCRIPTION = """The dataset consists of tuples of (observations, actions, rewards, dones) sampled by agents
interacting with the CityLearn 2022 Phase 1 environment."""
_BASE_URL = "https://huggingface.co/datasets/TobiTob/CityLearn/resolve/main"
_URLS = {
    "s_test": f"{_BASE_URL}/s_test.pkl",
    "s_week": f"{_BASE_URL}/s_week.pkl",
    "s_month": f"{_BASE_URL}/s_month.pkl",
    "s_random": f"{_BASE_URL}/s_random.pkl",
    "s_random2": f"{_BASE_URL}/s_random2.pkl",
    "s_random3": f"{_BASE_URL}/s_random3.pkl",
    "s_random4": f"{_BASE_URL}/s_random4.pkl",
    "f_50": f"{_BASE_URL}/f_50x5x1750.pkl",
    "f_24": f"{_BASE_URL}/f_24x5x364.pkl",
    "fr_24": f"{_BASE_URL}/fr_24x5x364.pkl",
    "fn_24": f"{_BASE_URL}/fn_24x5x3649.pkl",
    "rb_24": f"{_BASE_URL}/rb_24x5x364.pkl",
    "rb_50": f"{_BASE_URL}/rb_50x5x175.pkl",
    "rb_108": f"{_BASE_URL}/rb_108x5x81.pkl",
    "rb_230": f"{_BASE_URL}/rb_230x5x38.pkl",
    "rb_461": f"{_BASE_URL}/rb_461x5x19.pkl",
    "rb_973": f"{_BASE_URL}/rb_973x5x9.pkl",
    "rb_2189": f"{_BASE_URL}/rb_2189x5x4.pkl",
    "rbn_24": f"{_BASE_URL}/rb_24x5x18247.pkl",
}
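
# Note on file names (convention inferred from the config descriptions below,
# not documented upstream): the pickle names appear to encode
# <sequence length> x <buildings> x <sequences per building>; e.g.
# "rb_24x5x364.pkl" seems to hold length-24 sequences from 5 buildings,
# 364 sequences per building (one year-long episode split into 24-step windows).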


class DecisionTransformerCityLearnDataset(datasets.GeneratorBasedBuilder):
    # Load any one of the configurations listed below with
    # data = datasets.load_dataset('TobiTob/CityLearn', 'data_name')
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="s_test",
            description="Small dataset sampled from an expert policy in the CityLearn environment. Data size 10x8",
        ),
        datasets.BuilderConfig(
            name="s_week",
            description="Data sampled from an expert policy in the CityLearn environment. Data size 260x168",
        ),
        datasets.BuilderConfig(
            name="s_month",
            description="Data sampled from an expert policy in the CityLearn environment. Data size 60x720",
        ),
        datasets.BuilderConfig(
            name="s_random",
            description="Random interactions with the CityLearn environment. Data size 950x461",
        ),
        datasets.BuilderConfig(
            name="s_random2",
            description="Random interactions with the CityLearn environment. Data size 43795x10",
        ),
        datasets.BuilderConfig(
            name="s_random3",
            description="Random interactions with the CityLearn environment. Data size 23050x19",
        ),
        datasets.BuilderConfig(
            name="s_random4",
            description="Random interactions with the CityLearn environment. Data size 437950x1",
        ),
        datasets.BuilderConfig(
            name="f_50",
            description="Data sampled from an expert policy in the CityLearn environment. Sequence length = 50, Buildings = 5, Episodes = 10",
        ),
        datasets.BuilderConfig(
            name="f_24",
            description="Data sampled from an expert policy in the CityLearn environment. Sequence length = 24, Buildings = 5, Episodes = 1",
        ),
        datasets.BuilderConfig(
            name="fr_24",
            description="Data sampled from an expert policy in the CityLearn environment, using the new reward function. Sequence length = 24, Buildings = 5, Episodes = 1",
        ),
        datasets.BuilderConfig(
            name="fn_24",
            description="Data sampled from an expert policy in the CityLearn environment, using the new reward function, with some interactions perturbed by noise. Sequence length = 24, Buildings = 5, Episodes = 10",
        ),
        datasets.BuilderConfig(
            name="rb_24",
            description="Data sampled from a simple rule-based policy, using the new reward function. Sequence length = 24, Buildings = 5, Episodes = 1",
        ),
        datasets.BuilderConfig(
            name="rb_50",
            description="Data sampled from a simple rule-based policy, using the new reward function. Sequence length = 50, Buildings = 5, Episodes = 1",
        ),
        datasets.BuilderConfig(
            name="rb_108",
            description="Data sampled from a simple rule-based policy, using the new reward function. Sequence length = 108, Buildings = 5, Episodes = 1",
        ),
        datasets.BuilderConfig(
            name="rb_230",
            description="Data sampled from a simple rule-based policy, using the new reward function. Sequence length = 230, Buildings = 5, Episodes = 1",
        ),
        datasets.BuilderConfig(
            name="rb_461",
            description="Data sampled from a simple rule-based policy, using the new reward function. Sequence length = 461, Buildings = 5, Episodes = 1",
        ),
        datasets.BuilderConfig(
            name="rb_973",
            description="Data sampled from a simple rule-based policy, using the new reward function. Sequence length = 973, Buildings = 5, Episodes = 1",
        ),
        datasets.BuilderConfig(
            name="rb_2189",
            description="Data sampled from a simple rule-based policy, using the new reward function. Sequence length = 2189, Buildings = 5, Episodes = 1",
        ),
        datasets.BuilderConfig(
            name="rbn_24",
            description="Data sampled from a simple rule-based policy, using the new reward function, with some interactions perturbed by noise. Sequence length = 24, Buildings = 5, Episodes = 50",
        ),
    ]

    def _info(self):
        features = datasets.Features(
            {
                "observations": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
                "actions": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
                "rewards": datasets.Sequence(datasets.Value("float32")),
                "dones": datasets.Sequence(datasets.Value("bool")),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
        )
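
    # Each example is one trajectory. Schematically (shapes inferred from the
    # features above; T = trajectory length, D_obs / D_act = per-step
    # observation / action dimensions):
    #   {
    #       "observations": T x D_obs floats,
    #       "actions":      T x D_act floats,
    #       "rewards":      T floats,
    #       "dones":        T bools,
    #   }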

    def _split_generators(self, dl_manager):
        urls = _URLS[self.config.name]
        data_dir = dl_manager.download_and_extract(urls)
        # All data goes into a single train split.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_dir,
                    "split": "train",
                },
            )
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath, split):
        with open(filepath, "rb") as f:
            trajectories = pickle.load(f)
        for idx, traj in enumerate(trajectories):
            yield idx, {
                "observations": traj["observations"],
                "actions": traj["actions"],
                "rewards": np.expand_dims(traj["rewards"], axis=1),
                # Older pickles store the episode-termination flags under
                # "terminals" instead of "dones", hence the fallback.
                "dones": np.expand_dims(traj.get("dones", traj.get("terminals")), axis=1),
            }
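

if __name__ == "__main__":
    # Minimal smoke test, a sketch rather than part of the loader: run this
    # builder on the smallest configuration ("s_test") and inspect the first
    # trajectory. Assumes the `datasets` library is installed and the Hugging
    # Face Hub is reachable.
    data = datasets.load_dataset("TobiTob/CityLearn", "s_test")
    first = data["train"][0]
    print("timesteps:", len(first["observations"]))
    print("observation dim:", len(first["observations"][0]))
    print("action dim:", len(first["actions"][0]))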