TobiTob committed
Commit 633ce71
1 Parent(s): 8e49326

Upload generate_sequences.py

Files changed (1):
generate_sequences.py +201 -0
generate_sequences.py ADDED
@@ -0,0 +1,201 @@
import os
import numpy as np
import pickle
import time
from agents.orderenforcingwrapper import OrderEnforcingAgent
from citylearn.citylearn import CityLearnEnv

"""
This file is used to generate offline data for a decision transformer.
Data is saved as a pickle file.
Data structure:
list(
    dict(
        "observations": nparray(nparray(np.float32)),
        "next_observations": nparray(nparray(np.float32)),
        "actions": nparray(nparray(np.float32)),
        "rewards": nparray(np.float32),
        "terminals": nparray(np.bool_)
    )
)
"""


class Constants:
    file_to_save = "non.pkl"
    sequence_length = 720
    episodes = 1
    state_dim = 28  # size of state space
    action_dim = 1  # size of action space
    schema_path = './data/citylearn_challenge_2022_phase_1/schema.json'


def action_space_to_dict(aspace):
    """ Only for box space """
    return {"high": aspace.high,
            "low": aspace.low,
            "shape": aspace.shape,
            "dtype": str(aspace.dtype)
            }
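
# For a 1-D Box action space in [-1, 1] the returned dict looks roughly like
# this (values are illustrative, not read from the environment):
#     {"high": array([1.], dtype=float32), "low": array([-1.], dtype=float32),
#      "shape": (1,), "dtype": "float32"}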


def env_reset(env):
    observations = env.reset()
    action_space = env.action_space
    observation_space = env.observation_space
    building_info = env.get_building_information()
    building_info = list(building_info.values())
    action_space_dicts = [action_space_to_dict(asp) for asp in action_space]
    observation_space_dicts = [action_space_to_dict(osp) for osp in observation_space]
    obs_dict = {"action_space": action_space_dicts,
                "observation_space": observation_space_dicts,
                "building_info": building_info,
                "observation": observations}
    return obs_dict
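
# Note: `observations` holds one entry per building, each a flat observation
# vector; the [ti][bi] indexing in generate_data() below relies on this layout.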


def generate_data():
    print("========================= Start Data Collection ========================")

    env = CityLearnEnv(schema=Constants.schema_path)
    agent = OrderEnforcingAgent()

    dataset = []
    observation_data = []
    next_observation_data = []
    action_data = []
    reward_data = []
    done_data = []

    obs_dict = env_reset(env)
    observations = obs_dict["observation"]

    agent_time_elapsed = 0

    step_start = time.perf_counter()
    actions = agent.register_reset(obs_dict)
    agent_time_elapsed += time.perf_counter() - step_start

    episodes_completed = 0
    sequences_completed = 0
    current_step_total = 0
    current_step_in_sequence = 0
    interrupted = False
    episode_metrics = []

    try:
        while True:
            current_step_in_sequence += 1
            current_step_total += 1
            next_observations, reward, done, info = env.step(actions)
            # ACTION [-1, 1] attempts to decrease or increase the electricity stored
            # in the battery by an amount equivalent to action times its maximum capacity.
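            # For example, with a hypothetical battery capacity of 6.4 kWh, an
            # action of 0.5 requests charging by 0.5 * 6.4 = 3.2 kWh (the capacity
            # value here is illustrative only).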
            # Save environment interactions:
            observation_data.append(observations)
            next_observation_data.append(next_observations)
            action_data.append(actions)
            reward_data.append(reward)
            done_data.append(False)  # always False

            observations = next_observations  # observations of next time step

            if current_step_in_sequence >= Constants.sequence_length:  # Sequence completed
                current_step_in_sequence = 0
                sequences_completed += 1

                # Split the completed window into one trajectory per building
                for bi in range(len(env.buildings)):
                    obs_building_i = np.zeros((Constants.sequence_length, Constants.state_dim), dtype=np.float32)
                    n_obs_building_i = np.zeros((Constants.sequence_length, Constants.state_dim), dtype=np.float32)
                    acts_building_i = np.zeros((Constants.sequence_length, Constants.action_dim), dtype=np.float32)
                    rwds_building_i = np.zeros(Constants.sequence_length, dtype=np.float32)
                    dones_building_i = np.zeros(Constants.sequence_length, dtype=np.bool_)
                    for ti in range(Constants.sequence_length):
                        obs_building_i[ti] = np.array(observation_data[ti][bi])
                        n_obs_building_i[ti] = np.array(next_observation_data[ti][bi])
                        acts_building_i[ti] = np.array(action_data[ti][bi])
                        rwds_building_i[ti] = reward_data[ti][bi]
                        dones_building_i[ti] = done_data[ti]

                    dict_building_i = {
                        "observations": obs_building_i,
                        "next_observations": n_obs_building_i,
                        "actions": acts_building_i,
                        "rewards": rwds_building_i,
                        "terminals": dones_building_i
                    }
                    dataset.append(dict_building_i)

                observation_data = []
                next_observation_data = []
                action_data = []
                reward_data = []
                done_data = []
                print("Sequence completed:", sequences_completed)

            if done:
                episodes_completed += 1

                metrics_t = env.evaluate()
                metrics = {"price_cost": metrics_t[0], "emission_cost": metrics_t[1], "grid_cost": metrics_t[2]}
                if np.any(np.isnan(metrics_t)):
                    raise ValueError("Episode metrics are nan, please contact organizers")
                episode_metrics.append(metrics)
                print(f"Episode complete: {episodes_completed} | Latest episode metrics: {metrics}")

                obs_dict = env_reset(env)
                observations = obs_dict["observation"]

                step_start = time.perf_counter()
                actions = agent.register_reset(obs_dict)
                agent_time_elapsed += time.perf_counter() - step_start
            else:
                step_start = time.perf_counter()
                actions = agent.compute_action(next_observations)
                agent_time_elapsed += time.perf_counter() - step_start

            if current_step_total % 1000 == 0:
                print(f"Num Steps: {current_step_total}, Num episodes: {episodes_completed}")

            if episodes_completed >= Constants.episodes:
                break
    except KeyboardInterrupt:
        print("========================= Stopping Generation ==========================")
        interrupted = True

    if not interrupted:
        print("========================= Generation Completed =========================")

    if len(episode_metrics) > 0:
        print("Agent Performance:")
        print("Average Price Cost:", np.mean([e['price_cost'] for e in episode_metrics]))
        print("Average Emission Cost:", np.mean([e['emission_cost'] for e in episode_metrics]))
        print("Average Grid Cost:", np.mean([e['grid_cost'] for e in episode_metrics]))
    print(f"Total time taken by agent: {agent_time_elapsed}s")

    print("========================= Writing Data File ============================")

    length = 0
    for data in dataset:
        if len(data["observations"]) > length:
            length = len(data["observations"])

    print("Number of sequences: ", len(dataset))
    print("Longest sequence: ", length)
    total_values = (2 * Constants.state_dim + Constants.action_dim + 2) * length * len(dataset)
    print("Total values to store: ", total_values)

    # create or overwrite the pickle file
    with open(Constants.file_to_save, "wb") as f:
        pickle.dump(dataset, f)

    print("========================= Writing Completed ============================")
    file_size = os.stat(Constants.file_to_save).st_size
    if file_size > 1e+6:
        string_byte = "(" + str(round(file_size / 1e+6)) + " MB)"
    else:
        string_byte = "(" + str(round(file_size / 1e+3)) + " kB)"
    print("==> Data saved in", Constants.file_to_save, string_byte)


if __name__ == '__main__':
    generate_data()
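
# Usage sketch: run from the repository root so the relative schema path and
# the agents package resolve, e.g.
#     python generate_sequences.py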