\"wv (m/s)\", |
\"max. wv (m/s)\", |
\"wd (deg)\", |
] |
colors = [ |
\"blue\", |
\"orange\", |
\"green\", |
\"red\", |
\"purple\", |
\"brown\", |
\"pink\", |
\"gray\", |
\"olive\", |
\"cyan\", |
] |
date_time_key = \"Date Time\" |
def show_raw_visualization(data):
    """Plot the raw time series of every feature in a 7 x 2 grid of subplots."""
    time_data = data[date_time_key]
    fig, axes = plt.subplots(
        nrows=7, ncols=2, figsize=(15, 20), dpi=80, facecolor="w", edgecolor="k"
    )
    for i in range(len(feature_keys)):
        key = feature_keys[i]
        c = colors[i % (len(colors))]
        t_data = data[key]
        t_data.index = time_data
        ax = t_data.plot(
            ax=axes[i // 2, i % 2],
            color=c,
            title="{} - {}".format(titles[i], key),
            rot=25,
        )
        ax.legend([titles[i]])
    plt.tight_layout()


show_raw_visualization(df)
[Figure: raw time series plot of each weather feature]
This heat map shows the correlation between different features. |
def show_heatmap(data):
    plt.matshow(data.corr())
    plt.xticks(range(data.shape[1]), data.columns, fontsize=14, rotation=90)
    plt.gca().xaxis.tick_bottom()
    plt.yticks(range(data.shape[1]), data.columns, fontsize=14)

    cb = plt.colorbar()
    cb.ax.tick_params(labelsize=14)
    plt.title("Feature Correlation Heatmap", fontsize=14)
    plt.show()


show_heatmap(df)
[Figure: feature correlation heatmap]
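To back up the visual impression with numbers, here is a small illustrative snippet, not part of the original listing, that prints the most strongly correlated feature pairs. It assumes df and feature_keys are defined as above and imports NumPy for the triangular mask.

import numpy as np

# Keep only the strictly upper triangle so each pair is listed once,
# then show the ten most strongly correlated feature pairs.
corr = df[feature_keys].corr().abs()
upper = corr.where(np.triu(np.ones(corr.shape, dtype=bool), k=1))
print(upper.stack().sort_values(ascending=False).head(10))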
Data Preprocessing |
Here we pick roughly 300,000 data points for training. An observation is recorded every 10 minutes, i.e. 6 times per hour. Since no drastic change is expected within 60 minutes, we resample to one point per hour via the sampling_rate argument of the timeseries_dataset_from_array utility (a sketch of that call is given at the end of this section).
We track data from the past 720 timestamps (720 / 6 = 120 hours) and use it to predict the temperature 72 timestamps (72 / 6 = 12 hours) into the future.
Since the features have widely varying ranges, we normalize them before training the neural network by subtracting the mean and dividing by the standard deviation of each feature, computed on the training split only, so that all features are on a comparable scale.
71.5% of the data will be used for training, i.e. 300,693 rows; split_fraction can be changed to alter this percentage.
The model is shown the first 5 days of data, i.e. 720 raw observations sampled down to one point per hour. The temperature 72 observations (12 hours × 6 observations per hour) beyond that window is used as the label.
split_fraction = 0.715
train_split = int(split_fraction * int(df.shape[0]))
step = 6

past = 720
future = 72
learning_rate = 0.001
batch_size = 256
epochs = 10
def normalize(data, train_split):
    # Use the mean and standard deviation of the training split only,
    # so that no information from the validation data leaks into the scaling.
    data_mean = data[:train_split].mean(axis=0)
    data_std = data[:train_split].std(axis=0)
    return (data - data_mean) / data_std
From the correlation heatmap we can see that a few parameters, such as Relative Humidity and Specific Humidity, are redundant. Hence we will use only a subset of the features rather than all of them.
print(
    "The selected parameters are:",
    ", ".join([titles[i] for i in [0, 1, 5, 7, 8, 10, 11]]),
)
selected_features = [feature_keys[i] for i in [0, 1, 5, 7, 8, 10, 11]]
features = df[selected_features]
features.index = df[date_time_key]
features.head()

features = normalize(features.values, train_split)
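To make the earlier description of the windowing concrete, here is a minimal sketch, not taken from the original listing, of how these parameters might feed into the keras.preprocessing.timeseries_dataset_from_array utility mentioned above. The names x_train, y_train and dataset_train, and the assumption that column 1 of features holds the temperature, are illustrative.

from tensorflow import keras

# Inputs: the first train_split rows of the normalized feature array.
x_train = features[:train_split]

# Label for the window starting at row i: the temperature past + future rows
# later, i.e. roughly 12 hours beyond the end of the 5-day input window.
# (Assumes column 1 of `features` is the temperature.)
y_train = features[past + future : past + future + train_split, 1]

dataset_train = keras.preprocessing.timeseries_dataset_from_array(
    x_train,
    y_train,
    sequence_length=past // step,  # 720 / 6 = 120 hourly points per window
    sampling_rate=step,            # keep one observation per hour
    batch_size=batch_size,
)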