import os
import numpy as np
import pickle
import time
from agents.orderenforcingwrapper import OrderEnforcingAgent
from citylearn.citylearn import CityLearnEnv
"""
This file is used to generate offline data for a decision transformer.
Data is saved as pickle file.
Data structure:
list(
dict(
"observations": nparray(nparray(np.float32)),
"next_observations": nparray(nparray(np.float32)),
"actions": nparray(nparray(np.float32)),
"rewards": nparray(np.oat32),
"terminals": nparray(np.bool_)
)
)
"""


class Constants:
    file_to_save = "non.pkl"
    sequence_length = 720  # steps per stored trajectory (720 hourly steps = 30 days)
    episodes = 1
    state_dim = 28  # size of state space
    action_dim = 1  # size of action space
    schema_path = './data/citylearn_challenge_2022_phase_1/schema.json'


def action_space_to_dict(aspace):
    """ Only for box space """
    return {"high": aspace.high,
            "low": aspace.low,
            "shape": aspace.shape,
            "dtype": str(aspace.dtype)
            }
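
# For a typical CityLearn action space Box(low=-1.0, high=1.0, shape=(1,)),
# the helper above yields roughly (illustrative):
#     {"high": array([1.], dtype=float32), "low": array([-1.], dtype=float32),
#      "shape": (1,), "dtype": "float32"}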


def env_reset(env):
    observations = env.reset()
    action_space = env.action_space
    observation_space = env.observation_space
    building_info = env.get_building_information()
    building_info = list(building_info.values())
    action_space_dicts = [action_space_to_dict(asp) for asp in action_space]
    # The observation spaces are Box spaces as well, so the same helper applies:
    observation_space_dicts = [action_space_to_dict(osp) for osp in observation_space]
    obs_dict = {"action_space": action_space_dicts,
                "observation_space": observation_space_dicts,
                "building_info": building_info,
                "observation": observations}
    return obs_dict
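
# A rough sketch of the dict env_reset produces (values illustrative; the
# "observation" entry holds one row of state_dim floats per building):
#
#     {"action_space": [{"high": ..., "low": ..., "shape": (1,), "dtype": "float32"}, ...],
#      "observation_space": [...],
#      "building_info": [...],
#      "observation": [[...28 floats...], ...]}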


def generate_data():
    print("========================= Start Data Collection ========================")
    env = CityLearnEnv(schema=Constants.schema_path)
    agent = OrderEnforcingAgent()

    dataset = []
    observation_data = []
    next_observation_data = []
    action_data = []
    reward_data = []
    done_data = []

    obs_dict = env_reset(env)
    observations = obs_dict["observation"]

    agent_time_elapsed = 0
    step_start = time.perf_counter()
    actions = agent.register_reset(obs_dict)
    agent_time_elapsed += time.perf_counter() - step_start

    episodes_completed = 0
    sequences_completed = 0
    current_step_total = 0
    current_step_in_sequence = 0
    interrupted = False
    episode_metrics = []

    try:
        while True:
            current_step_in_sequence += 1
            current_step_total += 1
            next_observations, reward, done, info = env.step(actions)
            # An action in [-1, 1] attempts to decrease or increase the electricity
            # stored in the battery by an amount equivalent to action times its
            # maximum capacity.

            # Save environment interactions:
            observation_data.append(observations)
            next_observation_data.append(next_observations)
            action_data.append(actions)
            reward_data.append(reward)
            # Always False: stored sequences are fixed-length slices of a longer
            # episode, not true episode terminations.
            done_data.append(False)
            observations = next_observations  # observations of the next time step

            if current_step_in_sequence >= Constants.sequence_length:  # sequence completed
                current_step_in_sequence = 0
                sequences_completed += 1
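                # The buffers above hold one entry per env step, each covering all
                # buildings. Slice them into one fixed-length trajectory per
                # building, so every stored sample is a single-building sequence.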
                for bi in range(len(env.buildings)):
                    obs_building_i = np.zeros((Constants.sequence_length, Constants.state_dim), dtype=np.float32)
                    n_obs_building_i = np.zeros((Constants.sequence_length, Constants.state_dim), dtype=np.float32)
                    acts_building_i = np.zeros((Constants.sequence_length, Constants.action_dim), dtype=np.float32)
                    rwds_building_i = np.zeros(Constants.sequence_length, dtype=np.float32)
                    dones_building_i = np.zeros(Constants.sequence_length, dtype=np.bool_)
                    for ti in range(Constants.sequence_length):
                        obs_building_i[ti] = np.array(observation_data[ti][bi])
                        n_obs_building_i[ti] = np.array(next_observation_data[ti][bi])
                        acts_building_i[ti] = np.array(action_data[ti][bi])
                        rwds_building_i[ti] = reward_data[ti][bi]
                        dones_building_i[ti] = done_data[ti]
                    dict_building_i = {
                        "observations": obs_building_i,
                        "next_observations": n_obs_building_i,
                        "actions": acts_building_i,
                        "rewards": rwds_building_i,
                        "terminals": dones_building_i
                    }
                    dataset.append(dict_building_i)
                observation_data = []
                next_observation_data = []
                action_data = []
                reward_data = []
                done_data = []
                print("Sequence completed:", sequences_completed)

            if done:
                episodes_completed += 1
                metrics_t = env.evaluate()
                metrics = {"price_cost": metrics_t[0],
                           "emission_cost": metrics_t[1],
                           "grid_cost": metrics_t[2]}
                if np.any(np.isnan(metrics_t)):
                    raise ValueError("Episode metrics are nan, please contact the organizers")
                episode_metrics.append(metrics)
                print(f"Episode complete: {episodes_completed} | Latest episode metrics: {metrics}")

                obs_dict = env_reset(env)
                observations = obs_dict["observation"]

                step_start = time.perf_counter()
                actions = agent.register_reset(obs_dict)
                agent_time_elapsed += time.perf_counter() - step_start
            else:
                step_start = time.perf_counter()
                actions = agent.compute_action(next_observations)
                agent_time_elapsed += time.perf_counter() - step_start

            if current_step_total % 1000 == 0:
                print(f"Num Steps: {current_step_total}, Num episodes: {episodes_completed}")

            if episodes_completed >= Constants.episodes:
                break
    except KeyboardInterrupt:
        print("========================= Stopping Generation ==========================")
        interrupted = True

    if not interrupted:
        print("========================= Generation Completed =========================")

    if len(episode_metrics) > 0:
        print("Agent Performance:")
        print("Average Price Cost:", np.mean([e['price_cost'] for e in episode_metrics]))
        print("Average Emission Cost:", np.mean([e['emission_cost'] for e in episode_metrics]))
        print("Average Grid Cost:", np.mean([e['grid_cost'] for e in episode_metrics]))
    print(f"Total time taken by agent: {agent_time_elapsed}s")
print("========================= Writing Data File ============================")
length = 0
for data in dataset:
if len(data["observations"]) > length:
length = len(data["observations"])
print("Amount Of Sequences: ", len(dataset))
print("Longest Sequence: ", length)
    total_values = (2 * Constants.state_dim + Constants.action_dim + 2) * length * len(dataset)
    print("Total values to store: ", total_values)

    # Create or overwrite the pickle file:
    with open(Constants.file_to_save, "wb") as f:
        pickle.dump(dataset, f)
    print("========================= Writing Completed ============================")

    file_size = os.stat(Constants.file_to_save).st_size
    if file_size > 1e+6:
        string_byte = "(" + str(round(file_size / 1e+6)) + " MB)"
    else:
        string_byte = "(" + str(round(file_size / 1e+3)) + " kB)"
    print("==> Data saved in", Constants.file_to_save, string_byte)


if __name__ == '__main__':
    generate_data()