Geetansh committed
Commit · 1c15775
Parent(s): 5cb39be
Model_demo_done

Browse files
- app.py +61 -4
- examples/0.jpg +0 -0
- examples/1.jpg +0 -0
- examples/2_high_contrast.jpg +0 -0
- examples/4.jpg +0 -0
- examples/6.jpg +0 -0
- examples/7.jpg +0 -0
- examples/8.jpg +0 -0
- examples/8_high_contrast.jpg +0 -0
- model_final.keras +0 -0
- pipeline_1.pkl +3 -0
app.py CHANGED
@@ -1,7 +1,64 @@
 import gradio as gr
+import numpy as np
+import tensorflow as tf
+from tensorflow.keras.models import load_model
+import pickle
+from PIL import Image
+import pandas as pd
 
-
-
+# Load the trained model
+model = load_model("./model_final.keras")
 
-
-
+# Load the fitted preprocessing pipeline
+with open("pipeline_1.pkl", "rb") as f:
+    pipeline_1 = pickle.load(f)
+
+def preprocess_and_predict(image):
+    """
+    Preprocess the input image using the pipeline and make a prediction.
+    """
+    # Resize to 28x28 and convert the input image to grayscale
+    image = image.resize((28, 28)).convert("L")
+
+    # Flatten the image to a 784-length vector
+    image_array = np.array(image).reshape(1, -1).astype(np.float32)
+
+    # Convert the flattened array to a DataFrame with the column names the pipeline was fitted on
+    image_df = pd.DataFrame(image_array, columns=[f"pixel{i}" for i in range(784)])
+
+    # Transform the input using the fitted pipeline; reshape to (1, 784) because the model expects a batch of row vectors
+    image_array_transformed = pipeline_1.transform(image_df).reshape(1, -1)
+
+    # Make predictions with the model
+    predictions = model.predict(image_array_transformed)
+
+    # Get the predicted digit (the class with the highest probability)
+    predicted_digit = np.argmax(predictions, axis=1)[0]
+
+    return f"Predicted Digit: {predicted_digit}"
+
+# Sample examples: paths to the bundled example images
+examples = [
+    ["./examples/0.jpg"],
+    ["./examples/1.jpg"],
+    ["./examples/2_high_contrast.jpg"],
+    ["./examples/4.jpg"],
+    ["./examples/6.jpg"],
+    ["./examples/7.jpg"],
+    ["./examples/8_high_contrast.jpg"],
+    ["./examples/8.jpg"]
+]
+
+# Define the Gradio interface
+demo = gr.Interface(
+    fn=preprocess_and_predict,    # function to call on each input
+    inputs=gr.Image(type="pil"),  # input type: image
+    outputs="text",               # output type: text
+    title="MNIST Digit Classifier",
+    description=(
+        "Upload an image of a digit (0-9) from the MNIST dataset "
+        "(https://huggingface.co/datasets/ylecun/mnist) and the model will predict the digit. "
+        "Note: the model performs poorly on custom images because it was trained only on "
+        "\"as is\" MNIST images, i.e. (i) roughly centered, (ii) 28x28 pixels, "
+        "(iii) a solid black background, (iv) white digits. A custom image has to be resized "
+        "to 28x28 and may still not meet these conditions."
+    ),
+    examples=examples             # sample inputs shown below the interface
+)
+
+
+# Launch the app
+demo.launch()
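The app assumes pipeline_1 was fitted on a DataFrame with columns pixel0..pixel783, matching what preprocess_and_predict builds. The commit does not show how the pipeline was produced; below is a minimal sketch of one plausible way, assuming a scikit-learn pipeline with a single StandardScaler step (the scaler choice and the OpenML fetch are illustrative assumptions, not the author's actual training code):

import pickle

from sklearn.datasets import fetch_openml
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

# Fetch MNIST as a DataFrame (OpenML names the columns pixel1..pixel784)
X, y = fetch_openml("mnist_784", version=1, return_X_y=True, as_frame=True)

# Rename the columns to pixel0..pixel783 so they match what app.py builds
X.columns = [f"pixel{i}" for i in range(784)]

# Hypothetical preprocessing: standardize each pixel column
pipeline_1 = Pipeline([("scaler", StandardScaler())])
pipeline_1.fit(X)

with open("pipeline_1.pkl", "wb") as f:
    pickle.dump(pipeline_1, f)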
examples/0.jpg ADDED
examples/1.jpg ADDED
examples/2_high_contrast.jpg ADDED
examples/4.jpg ADDED
examples/6.jpg ADDED
examples/7.jpg ADDED
examples/8.jpg ADDED
examples/8_high_contrast.jpg ADDED
model_final.keras ADDED
Binary file (336 kB)
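preprocess_and_predict feeds this model a (1, 784) array, so model_final.keras is presumably a network over flattened pixels with a 10-way output. A quick local sanity check of those assumptions (not part of the commit):

from tensorflow.keras.models import load_model

model = load_model("./model_final.keras")
model.summary()            # expected: input shape (None, 784)
print(model.output_shape)  # expected: (None, 10), one score per digit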
pipeline_1.pkl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:336f8630e221b92815c405abe5cb61d0404bd26f79d1d56bafca881aa35fa80c
+size 34094
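These three lines are a Git LFS pointer, not the pickle itself: the 34 kB file is stored out of band and materialized at checkout. If the repository is cloned without git lfs, pipeline_1.pkl contains exactly this pointer text and pickle.load in app.py fails with a confusing error. A small guard that makes the failure explicit (load_pickle_checked is an illustrative helper, not part of this commit):

import pickle

def load_pickle_checked(path):
    """Unpickle a file, failing clearly if it is an unresolved Git LFS pointer."""
    with open(path, "rb") as f:
        if f.read(42).startswith(b"version https://git-lfs.github.com"):
            raise RuntimeError(f"{path} is a Git LFS pointer; run `git lfs pull` first")
        f.seek(0)
        return pickle.load(f)

pipeline_1 = load_pickle_checked("pipeline_1.pkl")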