dperales committed
Commit 1715696 · 1 Parent(s): 3f3d080

Update app.py

Files changed (1): app.py (+72, -24)
app.py CHANGED
@@ -1,6 +1,7 @@
 import os
 import pandas as pd
 import numpy as np
+import seaborn as sns
 import matplotlib.pyplot as plt
 import matplotlib as mpl
 import pycaret
@@ -21,8 +22,8 @@ hide_streamlit_style = """
 st.markdown(hide_streamlit_style, unsafe_allow_html=True)
 
 with st.sidebar:
-    image = Image.open('./itaca_logo.png')
-    st.image(image,use_column_width=True)
+    image = Image.open('itaca_logo.png')
+    st.image(image, width=150) #,use_column_width=True)
     page = option_menu(menu_title='Menu',
                        menu_icon="robot",
                        options=["Clustering Analysis",
@@ -32,6 +33,24 @@ with st.sidebar:
                        default_index=0
                        )
 
+    # Additional section below the option menu
+    # st.markdown("---") # Add a separator line
+    st.header("Settings")
+
+    # Define the options for the dropdown list
+    numclusters = [2, 3, 4, 5, 6]
+    # selected_clusters = st.selectbox("Choose a number of clusters", numclusters)
+    selected_clusters = st.slider("Choose a number of clusters", min_value=2, max_value=10, value=4)
+
+    p_remove_multicollinearity = st.checkbox("Remove Multicollinearity", value=False)
+    p_multicollinearity_threshold = st.slider("Choose multicollinearity thresholds", min_value=0.0, max_value=1.0, value=0.9)
+    # p_remove_outliers = st.checkbox("Remove Outliers", value=False)
+    # p_outliers_method = st.selectbox ("Choose an Outlier Method", ["iforest", "ee", "lof"])
+    p_transformation = st.checkbox("Choose Power Transform", value = False)
+    p_normalize = st.checkbox("Choose Normalize", value = False)
+    p_pca = st.checkbox("Choose PCA", value = False)
+    p_pca_method = st.selectbox ("Choose a PCA Method", ["linear", "kernel", "incremental"])
+
 st.title('ITACA Insurance Core AI Module')
 
 if page == "Clustering Analysis":
@@ -52,10 +71,9 @@ if page == "Clustering Analysis":
     all_files = os.listdir(directory)
     # Filter files to only include CSV files
     csv_files = [file for file in all_files if file.endswith(".csv")]
-
     # Select a CSV file from the list
     selected_csv = st.selectbox("Select a CSV file from the list", ["None"] + csv_files)
-
+
     # Upload the CSV file
     uploaded_file = st.file_uploader("Choose a CSV file", type="csv")
 
@@ -63,11 +81,6 @@ if page == "Clustering Analysis":
     clusteringmodel = ['kmeans', 'ap', 'meanshift', 'sc', 'hclust', 'dbscan', 'optics', 'birch']
     selected_model = st.selectbox("Choose a clustering model", clusteringmodel)
 
-    # Define the options for the dropdown list
-    numclusters = [2, 3, 4, 5, 6]
-    # selected_clusters = st.selectbox("Choose a number of clusters", numclusters)
-    selected_clusters = st.slider("Choose a number of clusters", min_value=2, max_value=10, value=4)
-
     # Read and display the CSV file
     if selected_csv != "None" or uploaded_file is not None:
         if uploaded_file:
@@ -80,23 +93,51 @@ if page == "Clustering Analysis":
         else:
             insurance_claims = pd.read_csv(selected_csv)
 
-        s = setup(insurance_claims, session_id = 123)
+        insurance_claims.describe().T
 
-        exp_clustering = ClusteringExperiment()
+        cat_col = insurance_claims.select_dtypes(include=['object']).columns
+        num_col = insurance_claims.select_dtypes(exclude=['object']).columns
 
-        # init setup on exp
-        exp_clustering.setup(insurance_claims, session_id = 123)
+        # insurance_claims[num_col].hist(bins=15, figsize=(20, 15), layout=(5, 4))
+        # Calculate the correlation matrix
+        corr_matrix = insurance_claims[num_col].corr()
+        # Create a Matplotlib figure
+        fig, ax = plt.subplots(figsize=(12, 8))
+        # Create a heatmap using seaborn
+        sns.heatmap(corr_matrix, annot=True, cmap='coolwarm', fmt='.2f', ax=ax)
+        # Set the title for the heatmap
+        ax.set_title('Correlation Heatmap')
+        # Display the heatmap in Streamlit
+        st.pyplot(fig)
+
+        all_columns = insurance_claims.columns.tolist()
+        selected_columns = st.multiselect("Choose columns", all_columns, default=all_columns)
 
         if st.button("Prediction"):
+            insurance_claims = insurance_claims[selected_columns].copy()
+
+            s = setup(insurance_claims, session_id = 123, remove_multicollinearity=p_remove_multicollinearity, multicollinearity_threshold=p_multicollinearity_threshold,
+                      # remove_outliers=p_remove_outliers, outliers_method=p_outliers_method,
+                      transformation=p_transformation,
+                      normalize=p_normalize, pca=p_pca, pca_method=p_pca_method)
+            exp_clustering = ClusteringExperiment()
+            # init setup on exp
+            exp_clustering.setup(insurance_claims, session_id = 123)
+
             with st.spinner("Analyzing..."):
                 # train kmeans model
                 cluster_model = create_model(selected_model, num_clusters = selected_clusters)
 
                 cluster_model_2 = assign_model(cluster_model)
+                # Calculate summary statistics for each cluster
+                cluster_summary = cluster_model_2.groupby('Cluster').agg(['count', 'mean', 'median', 'min', 'max',
+                                    'std', 'var', 'sum', ('quantile_25', lambda x: x.quantile(0.25)),
+                                    ('quantile_75', lambda x: x.quantile(0.75)), 'skew'])
+                cluster_summary
                 cluster_model_2
 
-                all_metrics = get_metrics()
-                all_metrics
+                # all_metrics = get_metrics()
+                # all_metrics
 
                 cluster_results = pull()
                 cluster_results
@@ -137,7 +178,6 @@ elif page == "Anomaly Detection":
     all_files = os.listdir(directory)
     # Filter files to only include CSV files
    csv_files = [file for file in all_files if file.endswith(".csv")]
-
     # Select a CSV file from the list
     selected_csv = st.selectbox("Select a CSV file from the list", ["None"] + csv_files)
 
@@ -159,15 +199,24 @@ elif page == "Anomaly Detection":
             insurance_claims = pd.read_csv (uploaded_file, sep=delimiter, encoding='latin-1')
         else:
             insurance_claims = pd.read_csv(selected_csv)
-
-        s = setup(insurance_claims, session_id = 123)
-
-        exp_anomaly = AnomalyExperiment()
 
-        # init setup on exp
-        exp_anomaly.setup(insurance_claims, session_id = 123)
+        all_columns = insurance_claims.columns.tolist()
+        selected_columns = st.multiselect("Choose columns", all_columns, default=all_columns)
 
         if st.button("Prediction"):
+            insurance_claims = insurance_claims[selected_columns].copy()
+
+            # s = setup(insurance_claims, session_id = 123)
+
+            s = setup(insurance_claims, session_id = 123, remove_multicollinearity=p_remove_multicollinearity, multicollinearity_threshold=p_multicollinearity_threshold,
+                      # remove_outliers=p_remove_outliers, outliers_method=p_outliers_method,
+                      transformation=p_transformation,
+                      normalize=p_normalize, pca=p_pca, pca_method=p_pca_method)
+
+            exp_anomaly = AnomalyExperiment()
+            # init setup on exp
+            exp_anomaly.setup(insurance_claims, session_id = 123)
+
             with st.spinner("Analyzing..."):
                 # train model
                 anomaly_model = create_model(selected_model)
@@ -180,5 +229,4 @@ elif page == "Anomaly Detection":
 
                 # plot
                 plot_model(anomaly_model, plot = 'tsne', display_format = 'streamlit')
-                plot_model(anomaly_model, plot = 'umap', display_format = 'streamlit')
-
+                plot_model(anomaly_model, plot = 'umap', display_format = 'streamlit')
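
For context, the clustering flow this commit wires together can be exercised outside Streamlit. The snippet below is only an illustrative sketch: it assumes PyCaret 3's functional clustering API and a local insurance_claims.csv, and the fixed values stand in for the new sidebar widgets (selected_model, selected_clusters, p_*):

import pandas as pd
from pycaret.clustering import setup, create_model, assign_model, pull

# Stand-in for the uploaded/selected CSV (illustrative file name)
insurance_claims = pd.read_csv("insurance_claims.csv")

# Preprocessing toggles mirror the new p_* settings added in the sidebar
s = setup(insurance_claims, session_id=123,
          remove_multicollinearity=True, multicollinearity_threshold=0.9,
          transformation=True, normalize=True,
          pca=True, pca_method="linear")

cluster_model = create_model("kmeans", num_clusters=4)   # selected_model / selected_clusters
cluster_model_2 = assign_model(cluster_model)            # adds a 'Cluster' label column
cluster_results = pull()                                 # metrics table for the trained model
print(cluster_results)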
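The per-cluster summary added in this commit groups the labeled frame by 'Cluster' and aggregates the numeric columns. An equivalent, slightly more explicit way to get the same statistics from the assign_model output (a sketch, with the quantiles computed separately):

# cluster_model_2 is the labeled frame returned by assign_model()
num_cols = cluster_model_2.drop(columns=["Cluster"]).select_dtypes(exclude=["object"]).columns

grouped = cluster_model_2.groupby("Cluster")[num_cols]
cluster_summary = grouped.agg(["count", "mean", "median", "min", "max", "std", "var", "sum", "skew"])
quantile_25 = grouped.quantile(0.25)   # 25th percentile per cluster
quantile_75 = grouped.quantile(0.75)   # 75th percentile per cluster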
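The anomaly-detection page follows the same pattern with PyCaret's anomaly module. A minimal sketch, again with hardcoded stand-ins for the sidebar settings and an illustrative file name:

import pandas as pd
from pycaret.anomaly import setup, create_model, assign_model, plot_model, pull

insurance_claims = pd.read_csv("insurance_claims.csv")   # illustrative file name

s = setup(insurance_claims, session_id=123,
          remove_multicollinearity=True, multicollinearity_threshold=0.9,
          transformation=True, normalize=True,
          pca=True, pca_method="linear")

anomaly_model = create_model("iforest")        # one of the models offered on the page
anomaly_model_2 = assign_model(anomaly_model)  # adds 'Anomaly' and 'Anomaly_Score' columns
anomaly_results = pull()

# In the Streamlit app the embeddings are rendered in the browser
plot_model(anomaly_model, plot="tsne", display_format="streamlit")
plot_model(anomaly_model, plot="umap", display_format="streamlit")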