Upload 18 files
- .gitattributes +1 -0
- Xeption_fruits.keras +3 -0
- app.py +43 -0
- images/ap1.jpeg +0 -0
- images/ap2.jpeg +0 -0
- images/ap3.jpeg +0 -0
- images/ba1.jpeg +0 -0
- images/ba2.jpeg +0 -0
- images/ba3.jpeg +0 -0
- images/pi1.jpeg +0 -0
- images/pi2.jpeg +0 -0
- images/pi3.jpeg +0 -0
- images/st1.jpeg +0 -0
- images/st2.jpeg +0 -0
- images/st3.jpeg +0 -0
- images/wa1.jpeg +0 -0
- images/wa2.jpeg +0 -0
- images/wa3.jpeg +0 -0
- requirements.txt +1 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+Xeption_fruits.keras filter=lfs diff=lfs merge=lfs -text
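With this line in .gitattributes, Git stores Xeption_fruits.keras as a small text pointer and keeps the ~250 MB binary in LFS storage. A minimal sketch for telling the two apart in a clone made without git-lfs (the file name and spec line come from this commit; the helper itself is illustrative, not part of it):

def is_lfs_pointer(path):
    # An LFS pointer is a tiny text file whose first line is the spec URL;
    # the real model file is a large binary blob.
    with open(path, "rb") as f:
        first = f.readline(256)
    return first.startswith(b"version https://git-lfs.github.com/spec/")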
Xeption_fruits.keras
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:71b9c47c37728a470dee0469d5fb29d39e1f1814c3554c04ef6a33b24d573cb3
+size 250609323
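The pointer records the object's sha256 and byte size, so a fetched copy of the model can be checked against it. A minimal sketch using the oid and size from the pointer above (the helper name is illustrative):

import hashlib

def verify_lfs_object(path, expected_oid, expected_size):
    # Hash the file in 1 MiB chunks and compare against the pointer's oid/size.
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size

# oid and size copied from the pointer file above
print(verify_lfs_object(
    "Xeption_fruits.keras",
    "71b9c47c37728a470dee0469d5fb29d39e1f1814c3554c04ef6a33b24d573cb3",
    250609323,
))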
app.py
ADDED
@@ -0,0 +1,43 @@
+import gradio as gr
+import tensorflow as tf
+import numpy as np
+from PIL import Image
+
+model_path = "Xeption_fruits.keras"
+model = tf.keras.models.load_model(model_path)
+
+# Define the core prediction function
+def predict_fruit(image):
+    # Preprocess the image (Gradio passes it in as a numpy array)
+    print(type(image))  # debug: log the input type
+    image = Image.fromarray(image.astype('uint8'))  # convert numpy array to PIL image
+    image = image.resize((150, 150))  # resize to the 150x150 input the model expects
+    image = np.array(image)
+    image = np.expand_dims(image, axis=0)  # add a batch dimension, same as image[None, ...]
+
+    # Predict
+    prediction = model.predict(image)
+
+    # No extra activation needed: the output layer already uses softmax
+    # Round the probabilities for display
+    prediction = np.round(prediction, 3)
+
+    # Separate the probabilities for each class
+    p_apple = prediction[0][0]         # probability for class 'apple'
+    p_banana = prediction[0][1]        # probability for class 'banana'
+    p_pineapple = prediction[0][2]     # probability for class 'pineapple'
+    p_strawberries = prediction[0][3]  # probability for class 'strawberries'
+    p_watermelon = prediction[0][4]    # probability for class 'watermelon'
+
+    return {'apple': p_apple, 'banana': p_banana, 'pineapple': p_pineapple, 'strawberries': p_strawberries, 'watermelon': p_watermelon}
+
+# Create the Gradio interface
+input_image = gr.Image()
+iface = gr.Interface(
+    fn=predict_fruit,
+    inputs=input_image,
+    outputs=gr.Label(),
+    examples=["images/ap1.jpeg", "images/ap2.jpeg", "images/ap3.jpeg", "images/ba1.jpeg", "images/ba2.jpeg", "images/ba3.jpeg", "images/pi1.jpeg", "images/pi2.jpeg", "images/pi3.jpeg", "images/st1.jpeg", "images/st2.jpeg", "images/st3.jpeg", "images/wa1.jpeg", "images/wa2.jpeg", "images/wa3.jpeg"],
+    description="TEST.")
+
+iface.launch()
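Since predict_fruit takes a plain numpy array and returns a label-to-probability dict, it can be smoke-tested without launching the interface. A sketch, assuming the model and example images have been fetched (e.g. via git lfs pull) and the lines are run in the same module as app.py, before iface.launch():

import numpy as np
from PIL import Image

# Load one of the bundled example images as a numpy array,
# the same form Gradio hands to the function.
img = np.array(Image.open("images/ap1.jpeg"))
probs = predict_fruit(img)
print(max(probs, key=probs.get), probs)  # the top class should be 'apple'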
images/ap1.jpeg
ADDED
images/ap2.jpeg
ADDED
images/ap3.jpeg
ADDED
images/ba1.jpeg
ADDED
images/ba2.jpeg
ADDED
images/ba3.jpeg
ADDED
images/pi1.jpeg
ADDED
images/pi2.jpeg
ADDED
images/pi3.jpeg
ADDED
images/st1.jpeg
ADDED
images/st2.jpeg
ADDED
images/st3.jpeg
ADDED
images/wa1.jpeg
ADDED
images/wa2.jpeg
ADDED
images/wa3.jpeg
ADDED
requirements.txt
ADDED
@@ -0,0 +1 @@
+tensorflow
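requirements.txt pins only tensorflow, while app.py also imports gradio, numpy, and PIL. On a Gradio Space the gradio package comes with the runtime, and numpy and Pillow are pulled in as dependencies of tensorflow and gradio; in other environments they may need to be installed explicitly. A quick sanity-check sketch for such environments:

import importlib.util

# Abort early if any module app.py imports is missing from the environment.
for mod in ("tensorflow", "gradio", "numpy", "PIL"):
    assert importlib.util.find_spec(mod) is not None, f"missing dependency: {mod}"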