jaydemirandilla committed
Commit 1723e60 · 1 Parent(s): 2ef05cf

Upload denseNet121.py

Files changed (1)
  1. denseNet121.py +67 -0
denseNet121.py ADDED
@@ -0,0 +1,67 @@
+ import numpy as np
+ import gradio as gr
+ import tensorflow as tf
+ from PIL import Image, ImageDraw, ImageFont
+
+ # Function to load the modified model without recompiling
+ def load_modified_model(model_path):
+     return tf.keras.models.load_model(model_path)
+
+ # Load the trained model
+ print("Loading model...")
+ model = load_modified_model('denseNet121.h5')
+ print("Model loaded successfully.")
+
+ # Function to classify food vs. non-food image using the loaded model
+ def classify_food_vs_nonfood(image):
+     try:
+         # Preprocess image
+         image_size = (224, 224)
+         image = image.resize(image_size)
+         image_np = np.array(image) / 255.0
+         image_np_expanded = np.expand_dims(image_np, axis=0)
+
+         # Make prediction
+         prediction = model.predict(image_np_expanded)
+         final_prediction = np.argmax(prediction[0])
+
+         # Display result
+         results = {0: 'Food', 1: 'Non Food'}
+         label = results[final_prediction]
+
+         # Create a draw object
+         draw = ImageDraw.Draw(image)
+
+         # Specify font and size, falling back to the default bitmap font
+         # if the TrueType font file is not available
+         try:
+             text_font = ImageFont.truetype("Hack-Regular.ttf", 24)
+         except OSError:
+             text_font = ImageFont.load_default()
+
+         # Get text size
+         text_bbox = draw.textbbox((0, 0), label, font=text_font)
+         text_size = (text_bbox[2] - text_bbox[0], text_bbox[3] - text_bbox[1])
+
+         # Calculate text position (horizontally centered, near the top)
+         text_position = ((image_size[0] - text_size[0]) // 2, 10)
+
+         # Add text to the image
+         draw.text(text_position, label, fill=(255, 0, 0), font=text_font)
+
+         # Return modified image
+         return image
+     except Exception as e:
+         print("Error processing image:", e)
+         # Return the input image unannotated so the interface still shows something
+         return image
+
+ # Define the image input for the Gradio interface
+ image_input = gr.Image(type="pil")
+
+ # Define example images as file paths
+ ex_image_paths = ['image_1.jpeg', 'image_2.jpeg', 'image_3.jpeg', 'image_4.jpg', 'image_5.jpg']
+
+ # Launch Gradio interface with example images
+ food_vs_nonfood_interface = gr.Interface(
+     fn=classify_food_vs_nonfood,
+     inputs=image_input,
+     outputs="image",
+     title="Food vs NonFood Classifier",
+     description="Upload an image to classify whether it's food or non-food.",
+     examples=ex_image_paths)
+ food_vs_nonfood_interface.launch(inline=False)
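
Note: the inference script above assumes a denseNet121.h5 checkpoint in the same directory, which this commit does not upload. Below is a minimal, hypothetical sketch of how such a checkpoint could be produced. Only the 224x224 input size, the 1/255 scaling, and the two output classes (Food / Non Food) follow from the script; the frozen ImageNet backbone, pooling head, optimizer, and training data are assumptions, and build_food_vs_nonfood_model is an illustrative helper, not part of the commit.

import tensorflow as tf

def build_food_vs_nonfood_model(input_shape=(224, 224, 3), num_classes=2):
    # Hypothetical helper: ImageNet-pretrained DenseNet121 backbone without its
    # original classifier head
    base = tf.keras.applications.DenseNet121(
        include_top=False, weights="imagenet", input_shape=input_shape)
    base.trainable = False  # freezing the backbone is an assumption, not taken from the script

    # Inputs are expected already scaled to [0, 1], matching the /255.0 step above
    inputs = tf.keras.Input(shape=input_shape)
    x = base(inputs, training=False)
    x = tf.keras.layers.GlobalAveragePooling2D()(x)
    outputs = tf.keras.layers.Dense(num_classes, activation="softmax")(x)
    return tf.keras.Model(inputs, outputs)

model = build_food_vs_nonfood_model()
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
# model.fit(train_ds, validation_data=val_ds, epochs=5)  # hypothetical datasets, not part of this commit
model.save("denseNet121.h5")  # produces the file that load_modified_model() expects

A checkpoint saved this way loads directly through load_modified_model('denseNet121.h5') in the script above.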