Upload 5 files
- app.py +16 -0
- best_model.h5 +3 -0
- eda.py +17 -0
- prediction.py +46 -0
- requirements.txt +4 -0
app.py
ADDED
@@ -0,0 +1,16 @@
+import pandas as pd
+import streamlit as st
+import eda
+import prediction
+
+page = st.sidebar.selectbox("choose page: ", ("Home page","Data exploration","Data Prediction"))
+
+if page == "Home page":
+    st.title("ASL detection")
+    st.write("Name: Dicky Gabriel")
+    st.write("Batch: SBY-002")
+    st.write("Objective : Predict ASL image")
+elif page == "Data exploration":
+    eda.run()
+else:
+    prediction.run()
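A brief note on app.py: it is the Streamlit entry point and routes between eda.run() and prediction.run() through the sidebar selectbox; after installing the pinned requirements, the app would normally be started with streamlit run app.py. The pandas import is unused in this file.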
best_model.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:37a2d5fe1e713c696bb59589651fdc7225cf8da24093378107afc1e692f5ddfe
+size 228635520
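best_model.h5 is stored via Git LFS, so the repository only holds the pointer above (oid and size); the ~228 MB weights are fetched separately. A minimal sketch, using the constants from the pointer, to confirm that the real object rather than the pointer is on disk before tf.keras.models.load_model runs; lfs_object_is_present is a hypothetical helper, not part of this commit:

import hashlib
import os

EXPECTED_SIZE = 228635520
EXPECTED_SHA256 = "37a2d5fe1e713c696bb59589651fdc7225cf8da24093378107afc1e692f5ddfe"

def lfs_object_is_present(path="best_model.h5"):
    # An un-fetched LFS pointer is only ~130 bytes, so the size check
    # already catches the common failure mode; the hash check is exact.
    if not os.path.exists(path) or os.path.getsize(path) != EXPECTED_SIZE:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == EXPECTED_SHA256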
eda.py
ADDED
@@ -0,0 +1,17 @@
+import streamlit as st
+import pandas as pd
+import numpy as np
+import matplotlib.pyplot as plt
+import seaborn as sns
+from matplotlib.ticker import MultipleLocator
+
+def run():
+    st.title("ASL image Prediction")
+
+    st.subheader("Analysis Data for ASL")
+
+    st.write("This page made by Dicky Gabriel")
+    st.markdown("---")
+
+if __name__ == "__main__":
+    run()
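A hedged aside on eda.py: run() currently renders only headings, and the pandas, numpy, matplotlib, seaborn, and MultipleLocator imports are unused. A hypothetical sketch of how those imports could back a simple class-distribution chart (the labels/counts data is illustrative only; this commit ships no dataset for the EDA page):

import matplotlib.pyplot as plt
import seaborn as sns
import streamlit as st
from matplotlib.ticker import MultipleLocator

def plot_class_counts(labels, counts):
    # Bar chart of images per ASL class, rendered into the Streamlit page.
    fig, ax = plt.subplots()
    sns.barplot(x=list(labels), y=list(counts), ax=ax)
    ax.yaxis.set_major_locator(MultipleLocator(100))  # y tick every 100 images
    ax.set_xlabel("ASL class")
    ax.set_ylabel("number of images")
    st.pyplot(fig)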
prediction.py
ADDED
@@ -0,0 +1,46 @@
+import streamlit as st
+import tensorflow as tf
+import cv2
+import numpy as np
+
+def run():
+    # Load the saved model
+    model = tf.keras.models.load_model("best_model.h5")
+
+    # Define the label names
+    label_names = ['a', 'b', 'c', 'd', 'e', 'del', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'not', 'o',
+                   'p', 'q', 'r', 's', 't', 'space', 'u', 'v', 'w', 'x', 'y', 'z']
+
+    # Define the Streamlit app
+    st.title("ASL image Prediction")
+    st.write("Choose an image to classify.")
+
+    # Allow the user to select an image file
+    uploaded_file = st.file_uploader("Choose an image file", type=["jpg", "jpeg", "png"])
+
+    if uploaded_file is not None:
+        # Load the image using TensorFlow and resize it to the model's input size
+        img = tf.keras.utils.load_img(uploaded_file, target_size=(150, 150))
+
+        # Convert the PIL.Image.Image object to a NumPy array
+        x = tf.keras.utils.img_to_array(img)
+
+        # Expand the array to add a batch dimension
+        x = np.expand_dims(x, axis=0)
+
+        # Normalize the image data
+        x = x / 255.0
+
+        # Make the prediction using the loaded model
+        y_pred = model.predict(x)
+
+        # Get the index of the predicted class with the highest probability
+        class_idx = np.argmax(y_pred, axis=1)[0]
+
+        # Display the predicted class label and image to the user
+        st.write(f"Detection for uploaded image: {label_names[class_idx]}")
+        st.image(img, caption=f"{label_names[class_idx]}", use_column_width=True)
+
+
+if __name__ == "__main__":
+    run()
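Two hedged observations on prediction.py. First, the preprocessing (resize to 150x150, scale to [0, 1]) presumably mirrors how best_model.h5 was trained; if the training pipeline differed, predictions would degrade silently. The cv2 import is currently unused. Second, run() calls tf.keras.models.load_model on every Streamlit rerun, i.e. on every widget interaction, which is expensive for a ~228 MB model. A minimal sketch of caching the load with st.cache_resource (available in the pinned streamlit 1.19.0); get_model is a hypothetical helper, not part of this commit:

import streamlit as st
import tensorflow as tf

# Hypothetical helper: load the model once and reuse it across Streamlit
# reruns instead of reloading it on every interaction.
@st.cache_resource
def get_model(path="best_model.h5"):
    return tf.keras.models.load_model(path)

# Inside run(), the existing line
#     model = tf.keras.models.load_model("best_model.h5")
# could then become:
#     model = get_model()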
requirements.txt
ADDED
@@ -0,0 +1,4 @@
+streamlit==1.19.0
+numpy==1.23.5
+tensorflow==2.12.0
+opencv-python==4.5.4.58
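A hedged follow-up on dependencies: app.py and eda.py also import pandas, and eda.py imports matplotlib and seaborn, none of which appear above. If those imports stay, the file would need additional entries along these lines (left unpinned here, since the tested versions are not recorded in this commit):

pandas
matplotlib
seaborn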