""" File: app.py Author: Elena Ryumina and Dmitry Ryumin Description: Description: Main application file for Facial_Expression_Recognition. The file defines the Gradio interface, sets up the main blocks, and includes event handlers for various components. License: MIT License """ import gradio as gr # Importing necessary components for the Gradio app from app.description import DESCRIPTION from app.app_utils import preprocess_and_predict def clear(): return ( gr.Image(value=None, type="pil"), gr.Image(value=None, scale=1, elem_classes="dl2"), gr.Label(value=None, num_top_classes=3, scale=1, elem_classes="dl3"), ) md = """ App developers: ``Elena Ryumina`` and ``Dmitry Ryumin`` Methodology developers: ``Elena Ryumina``, ``Denis Dresvyanskiy`` and ``Alexey Karpov`` Model developer: ``Elena Ryumina`` TensorFlow to PyTorch model converter: ``Maxim Markitantov`` and ``Elena Ryumina`` Citation If you are using EMO-AffectNetModel in your research, please consider to cite research [paper](https://www.sciencedirect.com/science/article/pii/S0925231222012656). Here is an example of BibTeX entry:
@article{RYUMINA2022,
    title        = {In Search of a Robust Facial Expressions Recognition Model: A Large-Scale Visual Cross-Corpus Study},
    author       = {Elena Ryumina and Denis Dresvyanskiy and Alexey Karpov},
    journal      = {Neurocomputing},
    year         = {2022},
    doi          = {10.1016/j.neucom.2022.10.013},
    url          = {https://www.sciencedirect.com/science/article/pii/S0925231222012656},
}
""" with gr.Blocks(css="app.css") as demo: with gr.Tab("App"): gr.Markdown(value=DESCRIPTION) with gr.Row(): with gr.Column(scale=2, elem_classes="dl1"): input_image = gr.Image(type="pil") with gr.Row(): clear_btn = gr.Button( value="Clear", interactive=True, scale=1, elem_classes="clear" ) submit = gr.Button( value="Submit", interactive=True, scale=1, elem_classes="submit" ) with gr.Column(scale=1, elem_classes="dl4"): output_image = gr.Image(scale=1, elem_classes="dl2") output_label = gr.Label(num_top_classes=3, scale=1, elem_classes="dl3") gr.Examples( [ "images/fig7.jpg", "images/fig1.jpg", "images/fig2.jpg", "images/fig3.jpg", "images/fig4.jpg", "images/fig5.jpg", "images/fig6.jpg", ], [input_image], ) with gr.Tab("Authors"): gr.Markdown(value=md) submit.click( fn=preprocess_and_predict, inputs=[input_image], outputs=[output_image, output_label], queue=True, ) clear_btn.click( fn=clear, inputs=[], outputs=[input_image, output_image, output_label], queue=True, ) if __name__ == "__main__": demo.queue(api_open=False).launch(share=False)