Spaces: Mei000 / air_quality_gb (Build error)
Update functions.py
Browse files: functions.py +120 -125
functions.py CHANGED
@@ -1,123 +1,122 @@
import requests
import os
+import joblib
import pandas as pd
import datetime
import numpy as np
+import time
from sklearn.preprocessing import OrdinalEncoder
from dotenv import load_dotenv
-load_dotenv()
-        data['humidity'],
-        data['precip'],
-        data['precipprob'],
-        data['precipcover'],
-        data['snow'],
-        data['snowdepth'],
-        data['windgust'],
-        data['windspeed'],
-        data['winddir'],
-        data['pressure'],
-        data['cloudcover'],
-        data['visibility'],
-        data['solarradiation'],
-        data['solarenergy'],
-        data['uvindex'],
-        data['conditions']
-    ]
+load_dotenv(override=True)
+
+
+def decode_features(df, feature_view):
+    """Decodes features in the input DataFrame using the corresponding Hopsworks Feature Store transformation functions."""
+    df_res = df.copy()
+
+    import inspect
+
+    td_transformation_functions = feature_view._batch_scoring_server._transformation_functions
+
+    res = {}
+    for feature_name in td_transformation_functions:
+        if feature_name in df_res.columns:
+            td_transformation_function = td_transformation_functions[feature_name]
+            sig, foobar_locals = inspect.signature(td_transformation_function.transformation_fn), locals()
+            param_dict = dict([(param.name, param.default) for param in sig.parameters.values() if param.default != inspect._empty])
+            if td_transformation_function.name == "min_max_scaler":
+                df_res[feature_name] = df_res[feature_name].map(
+                    lambda x: x * (param_dict["max_value"] - param_dict["min_value"]) + param_dict["min_value"])
+            elif td_transformation_function.name == "standard_scaler":
+                df_res[feature_name] = df_res[feature_name].map(
+                    lambda x: x * param_dict['std_dev'] + param_dict["mean"])
+            elif td_transformation_function.name == "label_encoder":
+                dictionary = param_dict['value_to_index']
+                dictionary_ = {v: k for k, v in dictionary.items()}
+                df_res[feature_name] = df_res[feature_name].map(
+                    lambda x: dictionary_[x])
+    return df_res
+
+
+def get_model(project, model_name, evaluation_metric, sort_metrics_by):
+    """Retrieve the desired model from the Hopsworks Model Registry or download it.
+    In the second case, it will be physically downloaded to this directory."""
+    TARGET_FILE = "model.pkl"
+    list_of_files = [os.path.join(dirpath, filename) for dirpath, _, filenames
+                     in os.walk('.') for filename in filenames if filename == TARGET_FILE]
+
+    if list_of_files:
+        model_path = list_of_files[0]
+        model = joblib.load(model_path)
+    else:
+        if not os.path.exists(TARGET_FILE):
+            mr = project.get_model_registry()
+            # get the best model based on custom metrics
+            model = mr.get_best_model(model_name,
+                                      evaluation_metric,
+                                      sort_metrics_by)
+            model_dir = model.download()
+            model = joblib.load(model_dir + "/model.pkl")
+
+    return model
+
+
def get_weather_data_weekly(city: str, start_date: datetime) -> pd.DataFrame:
-    WEATHER_API_KEY = os.getenv('WEATHER_API_KEY')
-    end_date = f"{start_date + datetime.timedelta(days=6):%Y-%m-%d}"
-        data['feelslike'],
-        data['dew'],
-        data['humidity'],
-        data['precip'],
-        data['precipprob'],
-        data['precipcover'],
-        data['snow'],
-        data['snowdepth'],
-        data['windgust'],
-        data['windspeed'],
-        data['winddir'],
-        data['pressure'],
-        data['cloudcover'],
-        data['visibility'],
-        data['solarradiation'],
-        data['solarenergy'],
-        data['uvindex'],
-        data['conditions']
-    ]
-    weather_df = get_weather_df(list_of_data)
-    final_df = pd.concat([final_df, weather_df])
-    return final_df
+    # WEATHER_API_KEY = os.getenv('WEATHER_API_KEY')
+    # end_date = f"{start_date + datetime.timedelta(days=6):%Y-%m-%d}"
+    next7days_weather = pd.read_csv('https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/timeline/Beijing/next7days?unitGroup=metric&include=days&key=5WNL2M94KKQ4R4F32LFV8DPE4&contentType=csv')
+    # answer = requests.get(f'https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/timeline/{city}/{start_date}/{end_date}?unitGroup=metric&include=days&key={WEATHER_API_KEY}&contentType=json').json()
+
+    df_weather = pd.DataFrame(next7days_weather)
+    df_weather.rename(columns={"datetime": "date"}, inplace=True)
+    df_weather.rename(columns={"name": "city"}, inplace=True)
+    df_weather.rename(columns={"sealevelpressure": "pressure"}, inplace=True)
+    df_weather = df_weather.drop(labels=['city','dew','precip','tempmax','pressure','tempmin','temp','feelslikemax','feelslikemin','feelslike','precipprob','precipcover','snow','snowdepth','cloudcover','severerisk','moonphase','preciptype','sunrise','sunset','conditions','description','icon','stations'], axis=1)  # drop the unused columns
+
+    return df_weather

def get_weather_df(data):
    col_names = [
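Note on the hunk above: the rewritten get_weather_data_weekly no longer uses its city and start_date parameters; it always fetches Beijing's next-7-days forecast as CSV, with the Visual Crossing API key embedded in the URL. A minimal sketch of a parameterized variant, assuming the same Visual Crossing timeline endpoint and a WEATHER_API_KEY entry in .env (the helper name fetch_next7days is hypothetical):

import os
import pandas as pd
from dotenv import load_dotenv

load_dotenv()

def fetch_next7days(city: str) -> pd.DataFrame:
    # Same endpoint and query string as the hard-coded call above, but the city
    # is a real parameter and the key stays out of the source tree.
    key = os.getenv('WEATHER_API_KEY')
    url = (f'https://weather.visualcrossing.com/VisualCrossingWebServices'
           f'/rest/services/timeline/{city}/next7days'
           f'?unitGroup=metric&include=days&key={key}&contentType=csv')
    return pd.read_csv(url)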
@@ -158,23 +157,19 @@ def get_weather_df(data):
    return new_data

-def data_encoder(X):
-    X.drop(columns=['date', 'name'], inplace=True)
-    X['conditions'] = OrdinalEncoder().fit_transform(X[['conditions']])
-    return X
+
+def get_aplevel(temps:np.ndarray) -> list:
    boundary_list = np.array([0, 50, 100, 150, 200, 300]) # assert temps.shape == [x, 1]
    redf = np.logical_not(temps<=boundary_list) # temps.shape[0] x boundary_list.shape[0] ndarray
    hift = np.concatenate((np.roll(redf, -1)[:, :-1], np.full((temps.shape[0], 1), False)), axis = 1)
    cat = np.nonzero(np.not_equal(redf,hift))

-    level = [table[el] for el in cat[1]]
-    return level
-
-def get_color(level:list):
    air_pollution_level = ['Good', 'Moderate', 'Unhealthy for sensitive Groups','Unhealthy' ,'Very Unhealthy', 'Hazardous']
+    level = [air_pollution_level[el] for el in cat[1]]
+    return level
+
+def timestamp_2_time(x):
+    dt_obj = datetime.datetime.strptime(str(x), '%Y-%m-%d')
+    dt_obj = dt_obj.timestamp() * 1000
+    return int(dt_obj)
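Note on get_aplevel above: redf marks which boundaries each reading exceeds, hift is redf shifted left by one column, and the single column where the two disagree is the highest boundary crossed; that column index selects the label from air_pollution_level. A reading of 0 or below exceeds no boundary and contributes no entry to the result. A worked example with illustrative AQI values (the import assumes this file is importable as functions):

import numpy as np
from functions import get_aplevel  # hypothetical import of this file

# 35 only exceeds 0; 120 exceeds 0, 50 and 100; 260 exceeds everything up to 200
print(get_aplevel(np.array([[35], [120], [260]])))
# ['Good', 'Unhealthy for sensitive Groups', 'Very Unhealthy']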
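Note on decode_features: it inverts the Feature Store transformations by reading each transformation function's parameters from the defaults in its signature. A standalone illustration of the min_max_scaler inversion it applies (plain Python, no Hopsworks connection; the min/max values here are made up):

# param_dict normally comes from the transformation fn's signature defaults
param_dict = {"min_value": -10.0, "max_value": 40.0}
scaled = 0.5
original = scaled * (param_dict["max_value"] - param_dict["min_value"]) + param_dict["min_value"]
print(original)  # 15.0: halfway between min and max, as expected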
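Note on get_model: it first searches the working tree for a cached model.pkl and only falls back to downloading from the Model Registry. A usage sketch, assuming a Hopsworks login and hypothetical model and metric names:

import hopsworks

project = hopsworks.login()  # reads the Hopsworks API key from the environment
model = get_model(project,
                  model_name="air_quality_model",  # hypothetical
                  evaluation_metric="rmse",        # hypothetical
                  sort_metrics_by="min")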
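Note on timestamp_2_time: it parses a 'YYYY-MM-DD' string at local midnight and returns the epoch time in milliseconds as an int; since strptime is timezone-naive, the exact value depends on the machine's timezone. A quick check (the import assumes this file is importable as functions):

from functions import timestamp_2_time  # hypothetical import of this file

print(timestamp_2_time('2022-12-01'))
# e.g. 1669849200000 on a UTC+1 machine: local midnight in epoch milliseconds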
|