Spaces:
Sleeping
Sleeping
Upload 6 files
Browse files- app2.py +105 -0
- logoAI.png +0 -0
- model.pt +3 -0
- requirements.txt +5 -0
- scaler1.pkl +3 -0
- scaler2.pkl +3 -0
app2.py
ADDED
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
import torch
|
3 |
+
import joblib
|
4 |
+
import numpy as np
|
5 |
+
from PIL import Image
|
6 |
+
import shap
|
7 |
+
import pandas as pd
|
8 |
+
import matplotlib.pyplot as plt
|
9 |
+
|
10 |
+
@st.cache_resource
def load_model_and_scalers():
    """Load the TorchScript model and the two fitted scalers from disk.

    Decorated with ``st.cache_resource`` so the artifacts are read once per
    Streamlit session rather than on every rerun.

    Returns:
        tuple: ``(model, input_scaler, output_scaler)`` — a
        ``torch.jit.ScriptModule`` plus the joblib-pickled scaler used on
        inputs (scaler1) and the one used to de-normalise outputs (scaler2).
    """
    model = torch.jit.load('model.pt')
    input_scaler = joblib.load('scaler1.pkl')
    output_scaler = joblib.load('scaler2.pkl')
    return model, input_scaler, output_scaler

loaded_model, loaded_scaler1, loaded_scaler2 = load_model_and_scalers()
|
19 |
+
|
20 |
+
@st.cache_data
def load_training_data():
    """Read the training set used as the SHAP background distribution.

    NOTE(review): ``peak5.xlsx`` is not among the files in this upload —
    confirm it exists in the deployed Space, otherwise this raises
    FileNotFoundError at import time.

    Returns:
        tuple: ``(X, names)`` — ``X`` is an ``(n_samples, 7)`` ndarray of
        the first seven columns; ``names`` is the list of those headers.
    """
    frame = pd.read_excel("peak5.xlsx")
    features = frame.iloc[:, :7].to_numpy()
    names = list(frame.columns[:7])
    return features, names

X_train, feature_names = load_training_data()
|
28 |
+
|
29 |
+
# SHAP's KernelExplainer needs a plain numpy-in / numpy-out callable.
def f(X):
    """Predict on already-scaled inputs and return de-normalised outputs.

    Args:
        X: 2-D array of model inputs, pre-transformed by ``loaded_scaler1``.

    Returns:
        ndarray: predictions mapped back to physical units via
        ``loaded_scaler2.inverse_transform``.
    """
    tensor_in = torch.as_tensor(X, dtype=torch.float32)
    with torch.no_grad():
        raw = loaded_model(tensor_in).numpy()
    return loaded_scaler2.inverse_transform(raw)
|
35 |
+
|
36 |
+
def add_logo(logo_path, size=(200, 150)):
    """Display a logo image at a fixed pixel size.

    Bug fix: the previous version ignored ``logo_path`` and always opened
    the hard-coded ``'logoAI.png'``. The parameter is now honoured; the
    only existing caller passes ``"logoAI.png"``, so rendered behaviour is
    unchanged while the function is no longer misleading.

    Args:
        logo_path: path to the image file to display.
        size: ``(width, height)`` in pixels the logo is resized to.
    """
    logo = Image.open(logo_path)
    logo = logo.resize(size)
    st.image(logo, use_column_width=False)
|
40 |
+
|
41 |
+
# ---- Page header -----------------------------------------------------------
st.title('Explainable AI (XAI) for Predicting Peak Particle Velocity in Pile Driving on Bangkok Subsoil')
add_logo("logoAI.png")

# ---- User inputs -----------------------------------------------------------
st.header('Enter Input Values:')

# Categorical option lists, shared by the widgets and the numeric encoding.
LOCATION_OPTIONS = ['On ground', 'On foundation', 'On building']
TRIGGER_OPTIONS = ['Longitudinal', 'Transverse', 'Vertical']

pile_width = st.number_input('Pile width (mm)', value=300.0, min_value=260.0, max_value=800.0)
pile_length = st.number_input('Pile length (m)', value=18.0, min_value=15.0, max_value=20.0)
weight = st.number_input('Weight (ton)', value=4.2, min_value=3.0, max_value=6.0)
drop_height = st.number_input('Drop height (m)', value=0.5)
distance = st.number_input('Distance (m)', min_value=3.0, value=9.0)
location = st.selectbox('Location', LOCATION_OPTIONS, index=0)
trigger = st.selectbox('Trigger', TRIGGER_OPTIONS, index=0)

# The model expects 1-based integer codes for the categorical features.
location_value = LOCATION_OPTIONS.index(location) + 1
trigger_value = TRIGGER_OPTIONS.index(trigger) + 1
|
57 |
+
|
58 |
+
# ---- Prediction ------------------------------------------------------------
if st.button('Make Prediction'):
    # Assemble the 7 features in training-column order.
    # (renamed from `input`, which shadowed the Python builtin)
    features = np.array([pile_width, pile_length, weight, drop_height, distance, location_value, trigger_value])
    inputx = np.reshape(features, (1, 7))

    # Normalise with the training-time input scaler, then hand to torch.
    X_test1 = loaded_scaler1.transform(inputx).astype(np.float32)
    X_test1 = torch.from_numpy(X_test1)

    # Forward pass; scaler2 maps the normalised output back to mm/s.
    with torch.no_grad():
        test_outputs = loaded_model(X_test1)
        test_outputs2 = loaded_scaler2.inverse_transform(test_outputs.cpu())

    st.subheader('Prediction Results:')
    st.write(f"Peak Particle Velocity: {test_outputs2[0][0]:.2f} mm/s")

    # ---- SHAP explanation --------------------------------------------------
    st.subheader('Explanation of Prediction:')

    # KernelExplainer over a 100-sample background drawn from the scaled
    # training data. NOTE(review): rebuilt on every click — this is the
    # slow path; consider caching the explainer if latency matters.
    explainer = shap.KernelExplainer(f, shap.sample(loaded_scaler1.transform(X_train), 100))
    shap_values = explainer.shap_values(X_test1.numpy())

    # Single-output model: take the first (only) output's attributions.
    shap_values_single = shap_values[0].flatten()
    expected_value = explainer.expected_value[0]

    # Label the plot with the raw (unscaled) feature values.
    feature_values = [f"{x:.1f}" for x in inputx[0]]

    explanation = shap.Explanation(
        values=shap_values_single,
        base_values=expected_value,
        data=feature_values,
        feature_names=feature_names
    )

    # waterfall draws onto the current matplotlib figure; show=False so
    # Streamlit (not plt.show) renders it.
    fig, ax = plt.subplots()
    shap.plots.waterfall(explanation, show=False)
    st.pyplot(fig)
|
101 |
+
|
102 |
+
|
103 |
+
|
104 |
+
# ---- Sidebar ---------------------------------------------------------------
st.sidebar.header('About')
about_text = 'This app uses a pre-trained PyTorch model to predict peak particle velocity based on user input. It is specifically designed for Bangkok sub-soil conditions.\n paper:https://arxiv.org/abs/2409.05918'
st.sidebar.info(about_text)
|
logoAI.png
ADDED
![]() |
model.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:10fd93e7e130f38f36325646c41d7f6f72e9907041264de10a531084762c4e42
|
3 |
+
size 117093
|
requirements.txt
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
# Fixed: app2.py is a Streamlit app (was listed as gradio) and imports
# shap, pandas, matplotlib, numpy; openpyxl is needed for pd.read_excel.
torch
joblib
streamlit
scikit-learn
pillow
shap
pandas
matplotlib
numpy
openpyxl
scaler1.pkl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:2d7d2394b06f07aced32be1a4c7bb7839693bbe915ff9fb044c39ea453a8512e
|
3 |
+
size 767
|
scaler2.pkl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:d487a9f6ac4e940f5e747c3adae72a467c2e95049293456b7de6f0f21a684028
|
3 |
+
size 623
|