ready for demo
- app.py +11 -10
- modules/htlm_webpage.py +4 -4
app.py
CHANGED
@@ -1,5 +1,4 @@
 import streamlit as st
-import streamlit.components.v1 as components
 from PIL import Image, ImageEnhance
 import torch
 from torchvision.transforms import functional as F
@@ -25,7 +24,6 @@ from streamlit_drawable_canvas import st_canvas
 from streamlit_image_select import image_select
 from streamlit_js_eval import streamlit_js_eval
 
-
 def get_memory_usage():
     process = psutil.Process()
     mem_info = process.memory_info()
@@ -200,7 +198,7 @@ def prepare_image(image, pad=True, new_size=(1333, 1333)):
     return new_scaled_size, image
 
 # Function to display various options for image annotation
-def display_options(image, score_threshold, is_mobile):
+def display_options(image, score_threshold, is_mobile, screen_width):
     col1, col2, col3, col4, col5 = st.columns(5)
     with col1:
         write_class = st.toggle("Write Class", value=True)
@@ -230,7 +228,7 @@ def display_options(image, score_threshold, is_mobile):
     if is_mobile is False:
         width = 1000
     else:
-        width =
+        width = screen_width
 
     # Display the original and annotated images side by side
     image_comparison(
@@ -371,13 +369,16 @@ def main():
         st.image(cropped_image, caption="Cropped Image", use_column_width=False, width=500)
     else:
         resized_image = original_image
-        st.image(resized_image, caption="Image", use_column_width=False, width=
+        st.image(resized_image, caption="Image", use_column_width=False, width=int(4/5*screen_width))
         cropped_image = original_image
 
     if cropped_image is not None:
-
-
-
+        if is_mobile is False:
+            col1, col2, col3 = st.columns(3)
+            with col1:
+                score_threshold = st.slider("Set score threshold for prediction", min_value=0.0, max_value=1.0, value=0.5, step=0.05)
+        else:
+            score_threshold = st.slider("Set score threshold for prediction", min_value=0.0, max_value=1.0, value=0.5, step=0.05)
 
         if st.button("Launch Prediction"):
             st.session_state.crop_image = cropped_image
@@ -387,14 +388,14 @@ def main():
 
     if 'prediction' in st.session_state and uploaded_file is not None:
         with st.spinner('Waiting for result display...'):
-            display_options(st.session_state.crop_image, score_threshold, is_mobile)
+            display_options(st.session_state.crop_image, score_threshold, is_mobile, int(4/5*screen_width))
 
         with st.spinner('Waiting for BPMN modeler...'):
             col1, col2 = st.columns(2)
             with col1:
                 st.session_state.scale = st.slider("Set scale for XML file", min_value=0.1, max_value=2.0, value=1.0, step=0.1)
             st.session_state.bpmn_xml = create_XML(st.session_state.prediction.copy(), st.session_state.text_mapping, st.session_state.scale)
-            display_bpmn_xml(st.session_state.bpmn_xml, is_mobile=is_mobile)
+            display_bpmn_xml(st.session_state.bpmn_xml, is_mobile=is_mobile, screen_width=int(4/5*screen_width))
 
     gc.collect()
 
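The new screen_width argument has to come from the browser. Below is a minimal sketch of the assumed wiring in app.py, using the streamlit_js_eval import the file already has; the fallback value and mobile breakpoint are illustrative assumptions, not taken from this commit.

# Sketch only: assumed wiring for the new screen_width parameter (not part of this commit).
import streamlit as st
from streamlit_js_eval import streamlit_js_eval

# streamlit_js_eval evaluates a JavaScript expression in the browser and returns
# the result to Python; on the very first script run the value may still be None.
screen_width = streamlit_js_eval(js_expressions='window.innerWidth', key='screen_width')
if screen_width is None:
    screen_width = 1000  # assumed fallback while the JS round-trip completes

is_mobile = screen_width < 800  # assumed breakpoint, for illustration only

# The updated call sites then pass the measured width through:
# display_options(image, score_threshold, is_mobile, int(4/5 * screen_width))
# display_bpmn_xml(bpmn_xml, is_mobile=is_mobile, screen_width=int(4/5 * screen_width))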
modules/htlm_webpage.py
CHANGED
@@ -1,7 +1,7 @@
 import streamlit as st
 import streamlit.components.v1 as components
 
-def display_bpmn_xml(bpmn_xml, is_mobile=False):
+def display_bpmn_xml(bpmn_xml, is_mobile=False, screen_width=300):
 
     if is_mobile:
         html_template = f"""
@@ -78,7 +78,7 @@ def display_bpmn_xml(bpmn_xml, is_mobile=False):
                 try {{
                     await bpmnModeler.importXML(bpmnXML);
                     bpmnModeler.get('canvas').zoom('fit-viewport', 'auto');
-                    bpmnModeler.get('canvas').zoom(0.
+                    bpmnModeler.get('canvas').zoom(0.3); // Adjust this value for zooming out
                 }} catch (err) {{
                     console.error('Error rendering BPMN diagram', err);
                 }}
@@ -141,7 +141,7 @@ def display_bpmn_xml(bpmn_xml, is_mobile=False):
         </html>
         """
 
-        components.html(html_template, height=
+        components.html(html_template, height=screen_width, width=screen_width)
 
     else:
         html_template = f"""
@@ -298,4 +298,4 @@ def display_bpmn_xml(bpmn_xml, is_mobile=False):
         </html>
         """
 
-        components.html(html_template, height=1000, width=
+        components.html(html_template, height=1000, width=1600)
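For reference, a minimal usage sketch of the updated display_bpmn_xml signature; the XML string is a placeholder and the mobile width is an assumed example value.

# Sketch only: illustrative calls of the updated function; the XML string is a placeholder.
from modules.htlm_webpage import display_bpmn_xml

bpmn_xml = "<?xml version='1.0' encoding='UTF-8'?>..."  # placeholder, e.g. the output of create_XML

# Mobile branch: the embedded bpmn-js viewer becomes a square component sized
# to the supplied screen width and is zoomed out to 0.3.
display_bpmn_xml(bpmn_xml, is_mobile=True, screen_width=400)

# Desktop branch: uses a fixed 1600x1000 component.
display_bpmn_xml(bpmn_xml, is_mobile=False)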