import gradio as gr
import joblib
import cv2
import numpy as np
from PIL import Image
from tensorflow.keras.models import load_model

# Define paths to models and load the scaler
model_paths = {
    "Regressor_decision_tree": "multioutput_regressor_decision_tree.joblib",
    "Regressor_ridge": "regressor_ridge.joblib",
    "Regressor_elastic_net": "elastic_net_model.joblib",
    "NN_6_Layers": "NN_Layers_6.keras",
    "CNN": "cnn_model_bigger.keras",
    "CNN_with_reductions": "cnn_model_bigger_with_reductions.keras"
}
scaler = joblib.load("scaler.joblib")

# Function to load models based on file extension
def load_model_by_type(path):
    if path.endswith('.joblib'):
        return joblib.load(path)
    elif path.endswith('.keras'):
        return load_model(path)
    else:
        raise ValueError(f"Unsupported file extension for file {path}")
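# Optional sanity check, a minimal sketch assuming the model files listed above sit next
# to this script: fail fast with a descriptive error if any file is missing, since a
# missing file otherwise surfaces only as the Space's generic "Runtime error".
import os
missing_files = [path for path in model_paths.values() if not os.path.exists(path)]
if missing_files:
    raise FileNotFoundError(f"Missing model files: {missing_files}")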
# Load models with appropriate method
models = {name: load_model_by_type(path) for name, path in model_paths.items()}
def detect_objects(image, model_name):
    model = models[model_name]

    # Gradio passes the image as a numpy array; convert it to grayscale if it is RGB
    if image.ndim == 3 and image.shape[2] == 3:  # RGB image
        image_gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    else:
        image_gray = image  # Already grayscale, use as is
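    # Assumption worth noting: the models were trained on images of one fixed resolution,
    # so if uploads can vary in size, image_gray should be resized here (e.g. with
    # cv2.resize) to that training resolution before any further preprocessing; the exact
    # width/height is not recorded in this script, so it is left as a placeholder.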
    # CNN models expect a 4D tensor of normalized pixel values
    if model_name in ["CNN", "CNN_with_reductions"]:
        image_processed = image_gray.reshape(1, image_gray.shape[0], image_gray.shape[1], 1)
        image_processed = image_processed.astype('float32') / 255  # Normalize pixel values
    else:
        # The remaining models expect flattened input scaled with the fitted scaler
        image_processed = image_gray.flatten().reshape(1, -1)
        image_processed = scaler.transform(image_processed)
    # Make prediction
    predictions = model.predict(image_processed)
    x, y, width, height = predictions[0]
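    # Assumption: the regression targets are (x, y, width, height) in pixel coordinates
    # of the original image; if the models were trained on normalized or scaled targets,
    # the predictions would need to be rescaled before drawing.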
    # Draw the bounding box on an RGB copy of the original image (Gradio already supplies
    # RGB, so no BGR conversion is needed before handing the array back to PIL)
    if image.ndim == 2:
        annotated = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
    else:
        annotated = image.copy()
    cv2.rectangle(annotated, (int(x), int(y)), (int(x + width), int(y + height)), (0, 255, 0), 2)
    return Image.fromarray(annotated)
# Gradio interface setup
iface = gr.Interface(
    fn=detect_objects,
    inputs=[gr.components.Image(), gr.components.Dropdown(list(model_paths.keys()))],
    outputs=gr.components.Image(),
    title="Object Detection",
    description="Select a model and upload an image to detect objects."
)
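# Note: on Hugging Face Spaces the app is already served at a public URL, so share=True is
# mainly useful for local runs; show_error=True surfaces exceptions in the browser, which
# helps when the Space fails with a runtime error.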
iface.launch(show_error=True, share=True, debug=True)