import os
import pickle
import time

import numpy as np

from agents.orderenforcingwrapper import OrderEnforcingAgent
from citylearn.citylearn import CityLearnEnv

"""
This file is used to generate offline data for a decision transformer.
Data is saved as a pickle file.

Data structure:
list(
    dict(
        "observations": nparray(nparray(np.float32)),
        "next_observations": nparray(nparray(np.float32)),
        "actions": nparray(nparray(np.float32)),
        "rewards": nparray(np.float32),
        "terminals": nparray(np.bool_)
    )
)
"""


class Constants:
    file_to_save = "non.pkl"
    sequence_length = 720
    episodes = 1
    state_dim = 28
    action_dim = 1
    schema_path = './data/citylearn_challenge_2022_phase_1/schema.json'
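
# Note: with CityLearn's hourly timestep, sequence_length = 720 corresponds to
# 30 days of data per stored trajectory.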


def action_space_to_dict(aspace):
    """Only for Box spaces."""
    return {"high": aspace.high,
            "low": aspace.low,
            "shape": aspace.shape,
            "dtype": str(aspace.dtype)
            }


def env_reset(env):
    observations = env.reset()
    action_space = env.action_space
    observation_space = env.observation_space
    building_info = env.get_building_information()
    building_info = list(building_info.values())
    action_space_dicts = [action_space_to_dict(asp) for asp in action_space]
    # The observation spaces are also Box spaces, so the same helper applies.
    observation_space_dicts = [action_space_to_dict(osp) for osp in observation_space]
    obs_dict = {"action_space": action_space_dicts,
                "observation_space": observation_space_dicts,
                "building_info": building_info,
                "observation": observations}
    return obs_dict
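
# For reference, env_reset() returns the obs_dict layout that
# OrderEnforcingAgent.register_reset() expects below: per-building action/observation
# space dicts, building metadata, and the initial observations.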


def generate_data():
    print("========================= Start Data Collection ========================")

    env = CityLearnEnv(schema=Constants.schema_path)
    agent = OrderEnforcingAgent()

    dataset = []
    observation_data = []
    next_observation_data = []
    action_data = []
    reward_data = []
    done_data = []

    obs_dict = env_reset(env)
    observations = obs_dict["observation"]

    agent_time_elapsed = 0

    step_start = time.perf_counter()
    actions = agent.register_reset(obs_dict)
    agent_time_elapsed += time.perf_counter() - step_start

    episodes_completed = 0
    sequences_completed = 0
    current_step_total = 0
    current_step_in_sequence = 0
    interrupted = False
    episode_metrics = []

    try:
        while True:
            current_step_in_sequence += 1
            current_step_total += 1
            next_observations, reward, done, info = env.step(actions)

            observation_data.append(observations)
            next_observation_data.append(next_observations)
            action_data.append(actions)
            reward_data.append(reward)
            done_data.append(done)  # record whether this transition ended the episode

            observations = next_observations

            if current_step_in_sequence >= Constants.sequence_length:
                current_step_in_sequence = 0
                sequences_completed += 1

                # Split the collected sequence into one trajectory per building.
                for bi in range(len(env.buildings)):
                    obs_building_i = np.zeros((Constants.sequence_length, Constants.state_dim), dtype=np.float32)
                    n_obs_building_i = np.zeros((Constants.sequence_length, Constants.state_dim), dtype=np.float32)
                    acts_building_i = np.zeros((Constants.sequence_length, Constants.action_dim), dtype=np.float32)
                    rwds_building_i = np.zeros(Constants.sequence_length, dtype=np.float32)
                    dones_building_i = np.zeros(Constants.sequence_length, dtype=np.bool_)
                    for ti in range(Constants.sequence_length):
                        obs_building_i[ti] = np.array(observation_data[ti][bi])
                        n_obs_building_i[ti] = np.array(next_observation_data[ti][bi])
                        acts_building_i[ti] = np.array(action_data[ti][bi])
                        rwds_building_i[ti] = reward_data[ti][bi]
                        dones_building_i[ti] = done_data[ti]

                    dict_building_i = {
                        "observations": obs_building_i,
                        "next_observations": n_obs_building_i,
                        "actions": acts_building_i,
                        "rewards": rwds_building_i,
                        "terminals": dones_building_i
                    }
                    dataset.append(dict_building_i)

                observation_data = []
                next_observation_data = []
                action_data = []
                reward_data = []
                done_data = []
                print("Sequence completed:", sequences_completed)

            if done:
                episodes_completed += 1

                metrics_t = env.evaluate()
                metrics = {"price_cost": metrics_t[0], "emission_cost": metrics_t[1], "grid_cost": metrics_t[2]}
                if np.any(np.isnan(metrics_t)):
                    raise ValueError("Episode metrics are nan, please contact organizers")
                episode_metrics.append(metrics)
                print(f"Episode complete: {episodes_completed} | Latest episode metrics: {metrics}")

                obs_dict = env_reset(env)
                observations = obs_dict["observation"]

                step_start = time.perf_counter()
                actions = agent.register_reset(obs_dict)
                agent_time_elapsed += time.perf_counter() - step_start
            else:
                step_start = time.perf_counter()
                actions = agent.compute_action(next_observations)
                agent_time_elapsed += time.perf_counter() - step_start

            if current_step_total % 1000 == 0:
                print(f"Num Steps: {current_step_total}, Num episodes: {episodes_completed}")

            if episodes_completed >= Constants.episodes:
                break
    except KeyboardInterrupt:
        print("========================= Stopping Generation ==========================")
        interrupted = True

    if not interrupted:
        print("========================= Generation Completed =========================")

    if len(episode_metrics) > 0:
        print("Agent Performance:")
        print("Average Price Cost:", np.mean([e['price_cost'] for e in episode_metrics]))
        print("Average Emission Cost:", np.mean([e['emission_cost'] for e in episode_metrics]))
        print("Average Grid Cost:", np.mean([e['grid_cost'] for e in episode_metrics]))
    print(f"Total time taken by agent: {agent_time_elapsed}s")

    print("========================= Writing Data File ============================")

    length = 0
    for data in dataset:
        if len(data["observations"]) > length:
            length = len(data["observations"])

    print("Amount Of Sequences: ", len(dataset))
    print("Longest Sequence: ", length)
    # Each timestep stores an observation and a next observation (state_dim values
    # each), an action (action_dim values), a reward, and a terminal flag.
    total_values = (2 * Constants.state_dim + Constants.action_dim + 2) * length * len(dataset)
    print("Total values to store: ", total_values)

    with open(Constants.file_to_save, "wb") as f:
        pickle.dump(dataset, f)

    print("========================= Writing Completed ============================")
    file_size = os.stat(Constants.file_to_save).st_size
    if file_size > 1e+6:
        string_byte = "(" + str(round(file_size / 1e+6)) + " MB)"
    else:
        string_byte = "(" + str(round(file_size / 1e+3)) + " kB)"
    print("==> Data saved in", Constants.file_to_save, string_byte)


if __name__ == '__main__':
    generate_data()