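"""NeuralVista: a Gradio app for visualizing YOLO object detections.

Runs XAI pipelines for YOLOv5 and YOLOv8s on a sample or uploaded image and
displays the annotated detections, Deep Feature Factorization components, and
an embedded Netron model visualization.
"""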
import netron
import threading
import gradio as gr
import os
from PIL import Image
import cv2
import numpy as np
from yolov5 import xai_yolov5
from yolov8 import xai_yolov8s

# Sample images directory
sample_images = {
    "Sample 1": os.path.join(os.getcwd(), "data/xai/sample1.jpeg"),
    "Sample 2": os.path.join(os.getcwd(), "data/xai/sample2.jpg"),
}

def load_sample_image(sample_name):
    """Load a sample image based on user selection."""
    image_path = sample_images.get(sample_name)
    if image_path and os.path.exists(image_path):
        return Image.open(image_path)
    return None

def process_image(sample_choice, uploaded_image, yolo_versions, target_lyr=-5, n_components=8):
    """Process the image using selected YOLO models."""
    # Load sample or uploaded image
    if uploaded_image is not None:
        image = uploaded_image
    else:
        image = load_sample_image(sample_choice)
    if image is None:
        raise ValueError("No image available: upload an image or select a valid sample.")

    # Preprocess image
    image = np.array(image)
    image = cv2.resize(image, (640, 640))
    result_images = []

    # Apply selected models
    for yolo_version in yolo_versions:
        if yolo_version == "yolov5":
            result_images.append(xai_yolov5(image, target_lyr=target_lyr, n_components=n_components))
        elif yolo_version == "yolov8s":
            result_images.append(xai_yolov8s(image))
        else:
            # Keep the (image, caption, dff_images) shape that run_both unpacks
            result_images.append((Image.fromarray(image), f"{yolo_version} not implemented.", []))
    return result_images

def view_model(selected_models):
    """Generate Netron visualization for the selected models."""
    netron_html = ""
    for model in selected_models:
        if model == "yolov5":
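            # Embed the hosted Netron viewer (netron.app) pointing at a publicly hosted ONNX file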
            netron_html = f"""
            <iframe 
                src="https://netron.app/?url=https://huggingface.co/FFusion/FFusionXL-BASE/blob/main/vae_encoder/model.onnx" 
                width="100%" 
                height="800" 
                frameborder="0">
            </iframe>
            """
    return netron_html if netron_html else "<p>No valid models selected for visualization.</p>"

custom_css = """
/* Class selectors matching elem_classes="custom-row" and class="highlighted-text" below */
.custom-row {
    margin: 0 !important;
    padding: 0 !important;
    height: fit-content !important;
    display: flex !important;
    justify-content: center !important;
}
.highlighted-text {
    color: blue !important;
    font-size: 32px !important;
    font-weight: bold !important;
}
"""

# Build the Gradio interface

with gr.Blocks(css=custom_css) as interface:
    gr.Markdown("""
    ## NeuralVista
    <p>Welcome to <span class="highlighted-text">NeuralVista</span>, a powerful tool designed to help you visualize object detection models in action. 
    With the integration of state-of-the-art YOLO models, you can explore the performance of object detection algorithms on various images.</p>
    <p>Whether you're looking to analyze pre-existing samples or upload your own images, NeuralVista allows you to experiment with different YOLO versions, 
    providing you with valuable insights into how these models interpret and detect objects. Additionally, you can view deep feature factorization outputs 
    and gain a deeper understanding of model behavior at different layers, all within an intuitive interface.</p>
    """)
    
    # Default sample
    default_sample = "Sample 1"

    with gr.Row():
        # Left side: Sample selection and image upload
        with gr.Column():
            sample_selection = gr.Radio(
                choices=list(sample_images.keys()),
                label="Select a Sample Image",
                value=default_sample,
            )

            upload_image = gr.Image(
                label="Upload an Image",
                type="pil",  
            )

            selected_models = gr.CheckboxGroup(
                choices=["yolov5", "yolov8s"],
                value=["yolov5"],
                label="Select Model(s)",
            )

            run_button = gr.Button("Run", elem_id="run_button")

        with gr.Column():
            sample_display = gr.Image(
                value=load_sample_image(default_sample),  
                label="Selected Sample Image",
            )

    # Results and visualization
    with gr.Row(elem_classes="custom-row"):
        result_gallery = gr.Gallery(
            label="Results",
            rows=1,
            columns=1,
            height="auto",       # Adjust height automatically based on content
            object_fit="contain",
        )
        netron_display = gr.HTML(label="Netron Visualization")

    # Update sample image
    sample_selection.change(
        fn=load_sample_image,
        inputs=sample_selection,
        outputs=sample_display,
    )
    with gr.Row(elem_classes="custom-row"):
        dff_gallery = gr.Gallery(
            label="Deep Feature Factorization",
            rows=2,          # 2 rows x 4 columns = 8 DFF component images
            columns=4,
            object_fit="contain",
            height="auto"
        )


    # Multi-threaded processing
    def run_both(sample_choice, uploaded_image, selected_models):
        results = []
        netron_html = ""

        # Thread to process the image
        def process_thread():
            nonlocal results
            target_lyr = -5
            n_components = 8
            results = process_image(sample_choice, uploaded_image, selected_models,
                                    target_lyr=target_lyr, n_components=n_components)

        # Thread to generate Netron visualization
        def netron_thread():
            nonlocal netron_html
            netron_html = view_model(selected_models)

        # Launch threads
        t1 = threading.Thread(target=process_thread)
        t2 = threading.Thread(target=netron_thread)
        t1.start()
        t2.start()
        t1.join()
        t2.join()
        # Only the first selected model's result is displayed; each result is
        # expected to be (annotated_image, caption, dff_images).
        image1, text, image2 = results[0]
        if isinstance(image2, list):
            # Expect one image per DFF component (n_components = 8)
            if len(image2) == 8:
                print("image2 contains 8 images.")
            else:
                print(f"Warning: image2 contains {len(image2)} images, expected 8.")
        else:
            print("Error: image2 is not a list of images.")
        return [(image1, text)], netron_html, image2

    # Run button click

    run_button.click(
        fn=run_both,
        inputs=[sample_selection, upload_image, selected_models],
        outputs=[result_gallery, netron_display, dff_gallery],
    )

# Launch Gradio interface
if __name__ == "__main__":
    interface.launch(share=True)