TobiTob committed on
Commit
8e49326
1 Parent(s): db1e627

Delete data_generation.py

Files changed (1)
  1. data_generation.py +0 -171
data_generation.py DELETED
@@ -1,171 +0,0 @@
- from ast import Raise
- from re import S
- import re
- import gym
-
- import matplotlib.pyplot as plt
-
- from citylearn.citylearn import CityLearnEnv
- import numpy as np
- import pandas as pd
- import os
-
- from collections import deque
- import argparse
- import random
- # import logger
- import logging
- from sys import stdout
- from copy import deepcopy
-
-
- class Constants:
-     episodes = 3
-     schema_path = '/home/aicrowd/data/citylearn_challenge_2022_phase_1/schema.json'
-     variables_to_forecast = ['solar_generation', 'non_shiftable_load', 'electricity_pricing', 'carbon_intensity', "electricity_consumption_crude",
-                              'hour', 'month']
-
-     additional_variable = ['hour', "month"]
-
-
- # create env from citylearn
- env = CityLearnEnv(schema=Constants.schema_path)
-
- def action_space_to_dict(aspace):
-     """ Only for box space """
-     return { "high": aspace.high,
-              "low": aspace.low,
-              "shape": aspace.shape,
-              "dtype": str(aspace.dtype)
-              }
-
- def env_reset(env):
-     observations = env.reset()
-     action_space = env.action_space
-     observation_space = env.observation_space
-     building_info = env.get_building_information()
-     building_info = list(building_info.values())
-     action_space_dicts = [action_space_to_dict(asp) for asp in action_space]
-     observation_space_dicts = [action_space_to_dict(osp) for osp in observation_space]
-     obs_dict = {"action_space": action_space_dicts,
-                 "observation_space": observation_space_dicts,
-                 "building_info": building_info,
-                 "observation": observations }
-     return obs_dict
-
- ## env wrapper for stable baselines
- class EnvCityGym(gym.Env):
-     """
-     Env wrapper coming from the gym library.
-     """
-     def __init__(self, env):
-         self.env = env
-
-         # get the number of buildings
-         self.num_buildings = len(env.action_space)
-         print("num_buildings: ", self.num_buildings)
-
-         self.action_space = gym.spaces.Box(low=np.array([-0.2]), high=np.array([0.2]), dtype=np.float32)
-
-         self.observation_space = gym.spaces.MultiDiscrete(np.array([25, 13]))
-
-     def reset(self):
-         obs_dict = env_reset(self.env)
-         obs = self.env.reset()
-
-         observation = [o for o in obs]
-
-         return observation
-
-     def step(self, action):
-         """
-         we apply the same action for all the buildings
-         """
-         obs, reward, done, info = self.env.step(action)
-
-         observation = [o for o in obs]
-
-         return observation, reward, done, info
-
-     def render(self, mode='human'):
-         return self.env.render(mode)
-
-
-
-
- def env_run_without_action(actions_all=None):
-     """
-     This function is used to run the environment without applying any action.
-     and return the dataset
-     """
-     # create env from citylearn
-     env = CityLearnEnv(schema=Constants.schema_path)
-
-     # get the number of buildings
-     num_buildings = len(env.action_space)
-     print("num_buildings: ", num_buildings)
-
-     # create env wrapper
-     env = EnvCityGym(env)
-
-     # reset the environment
-     obs = env.reset()
-
-     infos = []
-
-     for id_building in range(num_buildings):
-         # run the environment
-         obs = env.reset()
-
-         for i in range(8759):
-
-             info_tmp = env.env.buildings[id_building].observations.copy()
-
-             if actions_all is not None:
-
-                 action = [[actions_all[i + 8759 * b]] for b in range(num_buildings)]
-
-             else:
-                 # we get the action
-                 action = np.zeros((5, )) # 5 is the number of buildings
-
-                 # reshape action into form like [[0], [0], [0], [0], [0]]
-                 action = [[a] for a in action]
-
-             #print(action)
-
-             obs, reward, done, info = env.step(action)
-
-             info_tmp['reward'] = reward[id_building]
-             info_tmp['building_id'] = id_building
-             infos.append(info_tmp)
-
-             if done:
-                 obs = env.reset()
-
-     # create the data
-     data_pd = {}
-
-     for info in infos:
-         for i, v in info.items():
-             try:
-                 data_pd[i].append(v)
-             except:
-                 data_pd[i] = [v]
-
-     data = pd.DataFrame(infos)
-
-     return data
-
- if __name__ == "__main__":
-
-     # data generation
-     data = env_run_without_action()
-
-     # we only normalize month and hour
-     data['hour'] = data['hour']/24
-     data['month'] = data['month']/12
-
-     # save the data into the data_histo folder into parquet format
-     data.to_parquet("data_histo/data.parquet")
-