# sketch-to-BPMN / app.py
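"""Entry point of the Streamlit application for BPMN AI model recognition (sketch-to-BPMN)."""
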
import streamlit as st
from torchvision.transforms import functional as F
import gc
import numpy as np
from modules.streamlit_utils import *
from modules.utils import error
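
# NOTE: Apart from `error`, the UI helpers used in main() below (configure_page,
# display_banner, launch_prediction, display_bpmn_modeler, etc.) are provided by
# the wildcard import from modules.streamlit_utils.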


def main():
    """
    Main function to run the Streamlit application for BPMN AI model recognition.
    """
    # Check if the model is loaded in the session state
    if 'model_loaded' not in st.session_state:
        st.session_state.model_loaded = False
        st.session_state.first_run = True

    # Configure the Streamlit page and retrieve screen details
    is_mobile, screen_width = configure_page()

    # Display various UI components
    display_banner(is_mobile)
    display_title(is_mobile)
    display_sidebar()

    # Initialize session state variables
    initialize_session_state()

    cropped_image = None

    # Load example or user-uploaded image
    img_selected = load_example_image()
    uploaded_file = load_user_image(img_selected, is_mobile)

    # Display the uploaded image and allow cropping
    if uploaded_file is not None:
        cropped_image = display_image(uploaded_file, screen_width, is_mobile)

    # Set score threshold for prediction if an image is uploaded
    if uploaded_file is not None:
        get_score_threshold(is_mobile)

        # Launch prediction when the button is clicked
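        # NOTE: launch_prediction() is expected to store the detection result in
        # st.session_state.prediction (it is copied just below) and to return the
        # image the prediction was run on; st.rerun() then restarts the script so
        # the result sections further down can render the new prediction.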
        if st.button("🚀 Launch Prediction"):
            st.session_state.image = launch_prediction(cropped_image, st.session_state.score_threshold, is_mobile, screen_width)
            st.session_state.original_prediction = st.session_state.prediction.copy()
            st.rerun()

    # Create placeholders for different sections of the UI
    prediction_result_placeholder = st.empty()
    additional_options_placeholder = st.empty()
    modeler_placeholder = st.empty()
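    # (Each st.empty() placeholder holds a single element that can be filled with
    # a container or cleared on a later rerun; this is how the result, options and
    # modeler sections below are shown or hidden.)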

    # Display prediction results and options if predictions are available
    if 'prediction' in st.session_state and uploaded_file:
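        # st.session_state.image is expected to hold the image used for the last
        # prediction; if it no longer matches the current crop, the stale
        # prediction is discarded and the function returns until a new
        # prediction is launched.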
        if st.session_state.image != cropped_image:
            print('Image has changed')
            # Delete the prediction if the image has changed
            del st.session_state.prediction
            return

        if len(st.session_state.prediction['labels']) == 0:
            error("No prediction available. Please upload a BPMN image or decrease the detection score threshold.")
        else:
            with prediction_result_placeholder.container():
                if is_mobile:
                    display_options(st.session_state.crop_image, st.session_state.score_threshold, is_mobile, int(5/6 * screen_width))
                else:
                    with st.expander("Show result of prediction"):
                        display_options(st.session_state.crop_image, st.session_state.score_threshold, is_mobile, int(5/6 * screen_width))

            # Provide additional options for modification if not on mobile
            if not is_mobile:
                with additional_options_placeholder.container():
                    modify_results()

            # Display BPMN modeler options and result
            with modeler_placeholder.container():
                modeler_options(is_mobile)
                display_bpmn_modeler(is_mobile, screen_width)
    else:
        # Clear placeholders if no predictions are available
        prediction_result_placeholder.empty()
        additional_options_placeholder.empty()
        modeler_placeholder.empty()

    # Create space for scrolling
    for _ in range(50):
        st.text("")

    # Force garbage collection
    gc.collect()


if __name__ == "__main__":
    print('Starting the app...')
    main()
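
# To run the app locally (assuming streamlit, torch/torchvision and the local
# `modules` package are installed), the standard Streamlit CLI applies:
#   streamlit run app.py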