"""
File: app.py
Authors: Elena Ryumina and Dmitry Ryumin
Description: Main application file for Facial_Expression_Recognition.
             The file defines the Gradio interface, sets up the main blocks,
             and includes event handlers for various components.
License: MIT License
"""

import gradio as gr

# Importing necessary components for the Gradio app
from app.description import DESCRIPTION_STATIC, DESCRIPTION_DYNAMIC
from app.authors import AUTHORS
from app.app_utils import preprocess_image_and_predict, preprocess_video_and_predict


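# Return fresh components to reset the Static App tab (input image, face crop, heatmap, and label)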
def clear_static_info():
    return (
        gr.Image(value=None, type="pil"),
        gr.Image(value=None, scale=1, elem_classes="dl5"),
        gr.Image(value=None, scale=1, elem_classes="dl2"),
        gr.Label(value=None, num_top_classes=3, scale=1, elem_classes="dl3"),
    )

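# Return fresh components to reset the Dynamic App tab (input video, three output videos, and the statistics plot)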
def clear_dynamic_info():
    return (
        gr.Video(value=None),
        gr.Video(value=None),
        gr.Video(value=None),
        gr.Video(value=None),
        gr.Plot(value=None),
    )

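# Build the Gradio interface: tabs for video-based (dynamic) recognition, image-based (static) recognition, and author info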
with gr.Blocks(css="app.css") as demo:
    with gr.Tab("Dynamic App"):
        gr.Markdown(value=DESCRIPTION_DYNAMIC)
        with gr.Row():
            with gr.Column(scale=2):
                input_video = gr.Video(elem_classes="video1")
                with gr.Row():
                    clear_btn_dynamic = gr.Button(
                        value="Clear", interactive=True, scale=1
                    )
                    submit_dynamic = gr.Button(
                        value="Submit", interactive=True, scale=1, elem_classes="submit"
                    )
            with gr.Column(scale=2, elem_classes="dl4"):
                with gr.Row():
                    output_video = gr.Video(label="Original video", scale=1, elem_classes="video2")
                    output_face = gr.Video(label="Pre-processed video", scale=1, elem_classes="video3")
                    output_heatmaps = gr.Video(label="Heatmaps", scale=1, elem_classes="video4")
                output_statistics = gr.Plot(label="Statistics of emotions", elem_classes="stat")
        gr.Examples(
            ["videos/video1.mp4",
            "videos/video2.mp4"],
            [input_video],
        )

    with gr.Tab("Static App"):
        gr.Markdown(value=DESCRIPTION_STATIC)
        with gr.Row():
            with gr.Column(scale=2, elem_classes="dl1"):
                input_image = gr.Image(label="Original image", type="pil")
                with gr.Row():
                    clear_btn = gr.Button(
                        value="Clear", interactive=True, scale=1, elem_classes="clear"
                    )
                    submit = gr.Button(
                        value="Submit", interactive=True, scale=1, elem_classes="submit"
                    )
            with gr.Column(scale=1, elem_classes="dl4"):
                with gr.Row():
                    output_image = gr.Image(label="Face", scale=1, elem_classes="dl5")
                    output_heatmap = gr.Image(label="Heatmap", scale=1, elem_classes="dl2")
                output_label = gr.Label(num_top_classes=3, scale=1, elem_classes="dl3")
        gr.Examples(
            [
                "images/fig7.jpg",
                "images/fig1.jpg",
                "images/fig2.jpg",
                "images/fig3.jpg",
                "images/fig4.jpg",
                "images/fig5.jpg",
                "images/fig6.jpg",
            ],
            [input_image],
        )
    with gr.Tab("Authors"):
        gr.Markdown(value=AUTHORS)

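    # Wire the buttons to their handlers: Submit runs prediction, Clear resets the corresponding components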
    submit.click(
        fn=preprocess_image_and_predict,
        inputs=[input_image],
        outputs=[output_image, output_heatmap, output_label],
        queue=True,
    )
    clear_btn.click(
        fn=clear_static_info,
        inputs=[],
        outputs=[input_image, output_image, output_heatmap, output_label],
        queue=True,
    )

    submit_dynamic.click(
        fn=preprocess_video_and_predict,
        inputs=input_video,
        outputs=[
            output_video,
            output_face,
            output_heatmaps,
            output_statistics
        ],
        queue=True,
    )
    clear_btn_dynamic.click(
        fn=clear_dynamic_info,
        inputs=[],
        outputs=[
            input_video,
            output_video,
            output_face,
            output_heatmaps,
            output_statistics
        ],
        queue=True,
    )

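# Launch the demo with queuing enabled, direct API access disabled, and no public share link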
if __name__ == "__main__":
    demo.queue(api_open=False).launch(share=False)