Yasaman committed
Commit 2eb0db2 • 1 parent: 1553c5a

Update functions.py

Files changed (1)
  1. functions.py +120 -125
functions.py CHANGED
@@ -1,123 +1,122 @@
  import requests
  import os
  import pandas as pd
  import datetime
  import numpy as np
  from sklearn.preprocessing import OrdinalEncoder
  from dotenv import load_dotenv
- load_dotenv()
-
-
- ## TODO: write a function to display the color coding of the categories, both in the df and as a guide.
- # something like:
- def color_aq(val):
-     color = 'green' if val else 'red'
-     return f'background-color: {color}'
- # but better
-
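The removed TODO above asks for a richer color guide than the binary green/red sketch. One possible shape for it, using pandas' Styler with the same six colors and AQI breakpoints that appear later in this file (the band edges and helper name are illustrative, not part of this commit):

    # Sketch only: map an AQI value to a CSS background color per band,
    # using the common 0/50/100/150/200/300 AQI breakpoints.
    AQI_BANDS = [(50, 'Green'), (100, 'Yellow'), (150, 'DarkOrange'),
                 (200, 'Red'), (300, 'Purple'), (float('inf'), 'DarkRed')]

    def color_aqi(val):
        for upper, color in AQI_BANDS:
            if val <= upper:
                return f'background-color: {color}'
        return ''

    # Usage: df.style.applymap(color_aqi, subset=['aqi'])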
-
- def get_air_quality_data(station_name):
-     AIR_QUALITY_API_KEY = os.getenv('AIR_QUALITY_API_KEY')
-     request_value = f'https://api.waqi.info/feed/{station_name}/?token={AIR_QUALITY_API_KEY}'
-     answer = requests.get(request_value).json()["data"]
-     forecast = answer['forecast']['daily']
-     return [
-         answer["time"]["s"][:10],  # Date
-         int(forecast['pm25'][0]['avg']),  # avg predicted pm25
-         int(forecast['pm10'][0]['avg']),  # avg predicted pm10
-         max(int(forecast['pm25'][0]['avg']), int(forecast['pm10'][0]['avg']))  # avg predicted aqi
-     ]
-
- def get_air_quality_df(data):
-     col_names = [
-         'date',
-         'pm25',
-         'pm10',
-         'aqi'
-     ]
-
-     new_data = pd.DataFrame(
-         data
-     ).T
-     new_data.columns = col_names
-     new_data['pm25'] = pd.to_numeric(new_data['pm25'])
-     new_data['pm10'] = pd.to_numeric(new_data['pm10'])
-     new_data['aqi'] = pd.to_numeric(new_data['aqi'])
-
-     return new_data
-
-
- def get_weather_data_daily(city):
-     WEATHER_API_KEY = os.getenv('WEATHER_API_KEY')
-     answer = requests.get(f'https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/timeline/{city}/today?unitGroup=metric&include=days&key={WEATHER_API_KEY}&contentType=json').json()
-     data = answer['days'][0]
-     return [
-         answer['address'].lower(),
-         data['datetime'],
-         data['tempmax'],
-         data['tempmin'],
-         data['temp'],
-         data['feelslikemax'],
-         data['feelslikemin'],
-         data['feelslike'],
-         data['dew'],
-         data['humidity'],
-         data['precip'],
-         data['precipprob'],
-         data['precipcover'],
-         data['snow'],
-         data['snowdepth'],
-         data['windgust'],
-         data['windspeed'],
-         data['winddir'],
-         data['pressure'],
-         data['cloudcover'],
-         data['visibility'],
-         data['solarradiation'],
-         data['solarenergy'],
-         data['uvindex'],
-         data['conditions']
-     ]
-
  def get_weather_data_weekly(city: str, start_date: datetime) -> pd.DataFrame:
-     WEATHER_API_KEY = os.getenv('WEATHER_API_KEY')
-     end_date = f"{start_date + datetime.timedelta(days=6):%Y-%m-%d}"
-     answer = requests.get(f'https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/timeline/{city}/{start_date}/{end_date}?unitGroup=metric&include=days&key={WEATHER_API_KEY}&contentType=json').json()
-     weather_data = answer['days']
-     final_df = pd.DataFrame()
-
-     for i in range(7):
-         data = weather_data[i]
-         list_of_data = [
-             answer['address'].lower(),
-             data['datetime'],
-             data['tempmax'],
-             data['tempmin'],
-             data['temp'],
-             data['feelslikemax'],
-             data['feelslikemin'],
-             data['feelslike'],
-             data['dew'],
-             data['humidity'],
-             data['precip'],
-             data['precipprob'],
-             data['precipcover'],
-             data['snow'],
-             data['snowdepth'],
-             data['windgust'],
-             data['windspeed'],
-             data['winddir'],
-             data['pressure'],
-             data['cloudcover'],
-             data['visibility'],
-             data['solarradiation'],
-             data['solarenergy'],
-             data['uvindex'],
-             data['conditions']
-         ]
-         weather_df = get_weather_df(list_of_data)
-         final_df = pd.concat([final_df, weather_df])
-     return final_df
 
  def get_weather_df(data):
      col_names = [
@@ -158,23 +157,19 @@ def get_weather_df(data):
 
      return new_data
 
- def data_encoder(X):
-     X.drop(columns=['date', 'name'], inplace=True)
-     X['conditions'] = OrdinalEncoder().fit_transform(X[['conditions']])
-     return X
 
- def get_aplevel(temps: np.ndarray, table: list):
      boundary_list = np.array([0, 50, 100, 150, 200, 300])  # assert temps.shape == [x, 1]
      redf = np.logical_not(temps <= boundary_list)  # temps.shape[0] x boundary_list.shape[0] ndarray
      hift = np.concatenate((np.roll(redf, -1)[:, :-1], np.full((temps.shape[0], 1), False)), axis=1)
      cat = np.nonzero(np.not_equal(redf, hift))
 
-     level = [table[el] for el in cat[1]]
-     return level
-
- def get_color(level: list):
      air_pollution_level = ['Good', 'Moderate', 'Unhealthy for sensitive Groups', 'Unhealthy', 'Very Unhealthy', 'Hazardous']
-     color_list = ["Green", "Yellow", "DarkOrange", "Red", "Purple", "DarkRed"]
-     ind = [air_pollution_level.index(lel) for lel in level]
-     text = [f"color:{color_list[idex]};" for idex in ind]
-     return text
  import requests
  import os
+ import joblib
  import pandas as pd
  import datetime
  import numpy as np
+ import time
  from sklearn.preprocessing import OrdinalEncoder
  from dotenv import load_dotenv
+ load_dotenv(override=True)
+
+
+ def decode_features(df, feature_view):
+     """Decodes features in the input DataFrame using the corresponding Hopsworks Feature Store transformation functions."""
+     df_res = df.copy()
+
+     import inspect
+
+     td_transformation_functions = feature_view._batch_scoring_server._transformation_functions
+
+     res = {}  # note: unused
+     for feature_name in td_transformation_functions:
+         if feature_name in df_res.columns:
+             td_transformation_function = td_transformation_functions[feature_name]
+             sig, foobar_locals = inspect.signature(td_transformation_function.transformation_fn), locals()
+             param_dict = dict([(param.name, param.default) for param in sig.parameters.values() if param.default != inspect._empty])
+             if td_transformation_function.name == "min_max_scaler":
+                 df_res[feature_name] = df_res[feature_name].map(
+                     lambda x: x * (param_dict["max_value"] - param_dict["min_value"]) + param_dict["min_value"])
+
+             elif td_transformation_function.name == "standard_scaler":
+                 df_res[feature_name] = df_res[feature_name].map(
+                     lambda x: x * param_dict['std_dev'] + param_dict["mean"])
+             elif td_transformation_function.name == "label_encoder":
+                 dictionary = param_dict['value_to_index']
+                 dictionary_ = {v: k for k, v in dictionary.items()}
+                 df_res[feature_name] = df_res[feature_name].map(
+                     lambda x: dictionary_[x])
+     return df_res
+
+
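A quick, self-contained sanity check of the min_max_scaler inversion used in decode_features (pure Python, no Hopsworks needed; the bounds are illustrative):

    min_value, max_value = 0.0, 300.0                          # assumed training-time bounds
    x_scaled = (150.0 - min_value) / (max_value - min_value)   # forward min-max transform -> 0.5
    x_decoded = x_scaled * (max_value - min_value) + min_value # inversion, as in decode_features
    assert x_decoded == 150.0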
+ def get_model(project, model_name, evaluation_metric, sort_metrics_by):
+     """Retrieve the desired model, or download it from the Hopsworks Model Registry.
+     In the latter case, it is downloaded into the current working directory."""
+     TARGET_FILE = "model.pkl"
+     list_of_files = [os.path.join(dirpath, filename) for dirpath, _, filenames
+                      in os.walk('.') for filename in filenames if filename == TARGET_FILE]
+
+     if list_of_files:
+         model_path = list_of_files[0]
+         model = joblib.load(model_path)
+     else:
+         if not os.path.exists(TARGET_FILE):
+             mr = project.get_model_registry()
+             # get the best model based on custom metrics
+             model = mr.get_best_model(model_name,
+                                       evaluation_metric,
+                                       sort_metrics_by)
+             model_dir = model.download()
+             model = joblib.load(model_dir + "/model.pkl")
+
+     return model
+
+
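A usage sketch, assuming a Hopsworks login and an already-registered model; the model name and metric below are placeholders, not taken from this commit:

    import hopsworks

    project = hopsworks.login()                        # reads/prompts for the Hopsworks API key
    model = get_model(project,
                      model_name="air_quality_model",  # placeholder name
                      evaluation_metric="rmse",        # placeholder metric
                      sort_metrics_by="min")           # pick the lowest-RMSE version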
  def get_weather_data_weekly(city: str, start_date: datetime) -> pd.DataFrame:
+     # NOTE: the URL below hard-codes Beijing and the next7days range,
+     # so the city and start_date parameters are currently unused.
+     #WEATHER_API_KEY = os.getenv('WEATHER_API_KEY')
+     ##end_date = f"{start_date + datetime.timedelta(days=6):%Y-%m-%d}"
+     next7days_weather = pd.read_csv('https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/timeline/Beijing/next7days?unitGroup=metric&include=days&key=5WNL2M94KKQ4R4F32LFV8DPE4&contentType=csv')
+     #answer = requests.get(f'https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/timeline/{city}/{start_date}/{end_date}?unitGroup=metric&include=days&key={WEATHER_API_KEY}&contentType=json').json()
+
+     df_weather = pd.DataFrame(next7days_weather)
+     df_weather.rename(columns={"datetime": "date"}, inplace=True)
+     df_weather.rename(columns={"name": "city"}, inplace=True)
+     df_weather.rename(columns={"sealevelpressure": "pressure"}, inplace=True)
+     df_weather = df_weather.drop(labels=['city', 'dew', 'precip', 'tempmax', 'pressure', 'tempmin', 'temp', 'feelslikemax', 'feelslikemin', 'feelslike', 'precipprob', 'precipcover', 'snow', 'snowdepth', 'cloudcover', 'severerisk', 'moonphase', 'preciptype', 'sunrise', 'sunset', 'conditions', 'description', 'icon', 'stations'], axis=1)  # drop unused columns
+
+     return df_weather
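A usage sketch (the arguments are placeholders: as noted above, the current implementation always fetches Beijing's next 7 days regardless of them):

    df_next_week = get_weather_data_weekly('beijing', datetime.date.today())
    print(df_next_week.head())  # one row per day, keeping date, humidity, wind, solar and uvindex features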
 
  def get_weather_df(data):
      col_names = [
 
      return new_data
+
+ def get_aplevel(temps: np.ndarray) -> list:
      boundary_list = np.array([0, 50, 100, 150, 200, 300])  # assert temps.shape == [x, 1]
      redf = np.logical_not(temps <= boundary_list)  # temps.shape[0] x boundary_list.shape[0] ndarray
      hift = np.concatenate((np.roll(redf, -1)[:, :-1], np.full((temps.shape[0], 1), False)), axis=1)
      cat = np.nonzero(np.not_equal(redf, hift))
 
      air_pollution_level = ['Good', 'Moderate', 'Unhealthy for sensitive Groups', 'Unhealthy', 'Very Unhealthy', 'Hazardous']
+     level = [air_pollution_level[el] for el in cat[1]]
+     return level
+
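A quick sanity check of the boundary logic above: get_aplevel expects a column vector of AQI values (shape n x 1); 42 falls in the first band and 120 in the third (the values are illustrative):

    aqi = np.array([[42], [120]])
    print(get_aplevel(aqi))  # ['Good', 'Unhealthy for sensitive Groups']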
+ def timestamp_2_time(x):
+     dt_obj = datetime.datetime.strptime(str(x), '%Y-%m-%d')
+     dt_obj = dt_obj.timestamp() * 1000
+     return int(dt_obj)
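A usage sketch: timestamp_2_time parses a YYYY-MM-DD string and returns local midnight as epoch milliseconds, so the exact value depends on the machine's timezone:

    ms = timestamp_2_time('2022-12-01')
    print(ms)  # e.g. 1669849200000 on a UTC+1 machine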