import numpy as np
import cv2
import os
from PIL import Image
import torchvision.transforms as transforms
import gradio as gr
from yolov5 import xai_yolov5
from yolov8 import xai_yolov8s
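# Contract assumed from usage in this file (not verified against the yolov5 /
# yolov8 modules): each xai_* helper takes a 640x640 RGB numpy array and
# returns an item gr.Gallery can display, i.e. a (PIL.Image, caption) pair
# matching the "not yet implemented" fallback below.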
"""
def process_image(image, yolo_versions=["yolov5"]):
image = np.array(image)
image = cv2.resize(image, (640, 640))
result_images = []
for yolo_version in yolo_versions:
if yolo_version == "yolov5":
result_images.append(xai_yolov5(image))
elif yolo_version == "yolov8s":
result_images.append(xai_yolov8s(image))
else:
result_images.append((Image.fromarray(image), f"{yolo_version} not yet implemented."))
return result_images
"""
sample_images = {
    "Sample 1": os.path.join(os.getcwd(), "data/xai/sample1.jpeg"),
    "Sample 2": os.path.join(os.getcwd(), "data/xai/sample2.jpg"),
}

def load_sample_image(sample_name):
    """Return the requested sample as a PIL image, or None if it is missing."""
    image_path = sample_images.get(sample_name)
    if image_path and os.path.exists(image_path):
        return Image.open(image_path)
    return None

default_sample_image = load_sample_image("Sample 1")
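# Note: load_sample_image is redefined further down with an OpenCV-based
# variant; the live pipeline uses that version, while default_sample_image
# above is only referenced by the commented-out Dropdown UI below.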
"""
with gr.Blocks() as interface:
gr.Markdown("# XAI: Upload an image to visualize object detection of your models..")
gr.Markdown("Upload an image or select a sample image to visualize object detection.")
with gr.Row():
uploaded_image = gr.Image(type="pil", label="Upload an Image")
sample_selection = gr.Dropdown(
choices=list(sample_images.keys()),
label="Select a Sample Image",
type="value",
)
sample_display = gr.Image(label="Sample Image Preview", value=default_sample_image)
sample_selection.change(fn=load_sample_image, inputs=sample_selection, outputs=sample_display)
selected_models = gr.CheckboxGroup(
choices=["yolov5", "yolov8s"],
value=["yolov5"],
label="Select Model(s)",
)
result_gallery = gr.Gallery(label="Results", elem_id="gallery", rows=2, height=500)
gr.Button("Run").click(
fn=process_image,
inputs=[uploaded_image, selected_models],
outputs=result_gallery,
)
"""
def load_sample_image(choice):
    """Load a sample as an RGB numpy array (overrides the PIL-based version above)."""
    if choice in sample_images:
        image_path = sample_images[choice]
        # cv2.imread returns BGR; reverse the channel axis to get RGB.
        return cv2.imread(image_path)[:, :, ::-1]
    raise ValueError("Invalid sample selection.")
def process_image(sample_choice, uploaded_image, yolo_versions=("yolov5",)):
    if uploaded_image is not None:
        # gr.Image(type="filepath") supplies a path string, so read it from
        # disk and convert BGR -> RGB.
        image = cv2.imread(uploaded_image)[:, :, ::-1]
    else:
        # Otherwise, use the selected sample image.
        image = load_sample_image(sample_choice)

    image = np.array(image)  # contiguous copy for cv2.resize
    image = cv2.resize(image, (640, 640))

    result_images = []
    for yolo_version in yolo_versions:
        if yolo_version == "yolov5":
            result_images.append(xai_yolov5(image))
        elif yolo_version == "yolov8s":
            result_images.append(xai_yolov8s(image))
        else:
            result_images.append((Image.fromarray(image), f"{yolo_version} not yet implemented."))
    return result_images
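# Minimal smoke test (commented out so the Space still launches normally);
# assumes the sample files exist and the xai_* helpers return
# (image, caption) pairs:
#
#   results = process_image("Sample 1", None, ["yolov5"])
#   print(f"{len(results)} result(s); first caption: {results[0][1]}")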
"""
import gradio as gr
with gr.Blocks() as interface:
gr.Markdown("# XAI: Visualize Object Detection of Your Models")
gr.Markdown("Select a sample image to visualize object detection.")
default_sample = "Sample 1"
with gr.Row():
sample_selection = gr.Radio(
choices=list(sample_images.keys()),
label="Select a Sample Image",
type="value",
value=default_sample, # Set default selection
)
sample_display = gr.Image(
value=load_sample_image(default_sample),
label="Selected Sample Image",
)
sample_selection.change(
fn=load_sample_image,
inputs=sample_selection,
outputs=sample_display,
)
selected_models = gr.CheckboxGroup(
choices=["yolov5", "yolov8s"],
value=["yolov5"],
label="Select Model(s)",
)
result_gallery = gr.Gallery(label="Results", elem_id="gallery", rows=2, height=500)
gr.Button("Run").click(
fn=process_image,
inputs=[sample_selection, selected_models],
outputs=result_gallery,
)
interface.launch()
"""
with gr.Blocks() as interface:
    gr.Markdown("# XAI: Visualize Object Detection of Your Models")
    gr.Markdown("Select a sample image to visualize object detection.")

    default_sample = "Sample 1"
    with gr.Row():
        sample_selection = gr.Radio(
            choices=list(sample_images.keys()),
            label="Select a Sample Image",
            type="value",
            value=default_sample,  # Set default selection
        )
        sample_display = gr.Image(
            value=load_sample_image(default_sample),
            label="Selected Sample Image",
        )
        sample_selection.change(
            fn=load_sample_image,
            inputs=sample_selection,
            outputs=sample_display,
        )

    # Optional upload alongside the sample selector.
    gr.Markdown("**Or upload your own image:**")
    upload_image = gr.Image(
        label="Upload an Image",
        type="filepath",  # process_image reads the image from this path
    )

    selected_models = gr.CheckboxGroup(
        choices=["yolov5", "yolov8s"],
        value=["yolov5"],
        label="Select Model(s)",
    )
    result_gallery = gr.Gallery(label="Results", elem_id="gallery", rows=2, height=500)

    gr.Button("Run").click(
        fn=process_image,
        inputs=[sample_selection, upload_image, selected_models],  # sample, optional upload, models
        outputs=result_gallery,
    )

interface.launch(share=True)