# cpv_test / app.py
import appStore.doc_processing as processing
import appStore.groups as groups_extraction
from utils.uploadAndExample import add_upload
import streamlit as st
####################################### Dashboard ######################################################
# App
st.set_page_config(page_title='Vulnerable Groups Identification',
                   initial_sidebar_state='expanded', layout="wide")
with st.sidebar:
    # upload and example doc
    choice = st.radio(label='Select the Document',
                      help='You can upload a document '
                           'or try an example document.',
                      options=('Upload Document', 'Try Example'),
                      horizontal=True)
    add_upload(choice)
with st.container():
    st.markdown("<h2 style='text-align: center; color: black;'> Vulnerable Groups Identification </h2>", unsafe_allow_html=True)
    st.write(' ')
with st.expander("ℹ️ - About this app", expanded=False):
    st.write(
        """
        The Vulnerable Groups Identification App is an open-source \
        digital tool which aims to assist policy analysts and \
        other users in extracting and filtering relevant \
        information from public documents.
        """)
    st.write('**Definitions**')
    st.caption("""
        - **Place holder**: Place holder \
        Place holder \
        Place holder \
        Place holder \
        Place holder
        """)
    st.write("""
        What happens in the background?
        - Step 1: Once the document is provided to the app, it undergoes *Pre-processing*. \
        In this step the document is broken into smaller paragraphs \
        (based on word/sentence count).
        - Step 2: The paragraphs are fed to the **Target Classifier**, which detects whether \
        a paragraph contains any *Target*-related information.
        - Step 3: The paragraphs detected as containing target-related information are then \
        fed to multiple classifiers to enrich the information extraction.

        Steps 2 and 3 are then repeated similarly for Action and Policies & Plans.
        """)
st.write("")
if st.button("Analyze Document"):
print("Analyse")
#groups_extraction.identify_groups()
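
# Illustrative sketch only: Steps 2 and 3 in the "About" text describe feeding the
# paragraphs to classifiers (e.g. the Target Classifier). The helper below is a
# hypothetical example using the Hugging Face `transformers` text-classification
# pipeline with its default model purely as a stand-in; the app's real classifiers
# are provided by the appStore modules.
def _example_classify_paragraphs(paragraphs):
    """Keep only paragraphs that a (stand-in) text classifier flags as positive."""
    from transformers import pipeline
    classifier = pipeline("text-classification")  # default model, illustration only
    predictions = classifier(paragraphs)
    return [p for p, pred in zip(paragraphs, predictions) if pred["label"] == "POSITIVE"]

# Hypothetical usage of the two sketches together (not wired into the app):
#   paragraphs = _example_split_into_paragraphs(document_text)
#   relevant = _example_classify_paragraphs(paragraphs)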