import appStore.target as target_extraction
import appStore.netzero as netzero
import appStore.sector as sector
import appStore.adapmit as adapmit
import appStore.ghg as ghg
import appStore.doc_processing as processing
from utils.uploadAndExample import add_upload
import streamlit as st
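# st.set_page_config must be the first Streamlit command in the script.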
st.set_page_config(page_title='Climate Policy Intelligence',
                   initial_sidebar_state='expanded', layout='wide')
with st.sidebar:
    # upload a document or use the bundled example
    choice = st.radio(label='Select the Document',
                      help='You can upload a document '
                           'or else try an example document',
                      options=('Upload Document', 'Try Example'),
                      horizontal=True)
    add_upload(choice)
with st.container():
    st.markdown("<h1 style='text-align: center;'>Climate Policy Intelligence App</h1>",
                unsafe_allow_html=True)
    st.write(' ')
with st.expander("ℹ️ - About this app", expanded=False):
    st.write(
        """
The Climate Policy Intelligence App is an open-source digital tool which aims
to assist policy analysts and other users in extracting and filtering relevant
information from public documents.

**What happens in the background?**

- Step 1: Once a document is provided to the app, it undergoes *pre-processing*:
  the document is broken into smaller paragraphs (based on word/sentence count).
- Step 2: The paragraphs are fed to the **Target Classifier**, which detects
  whether a paragraph contains any *target*-related information.
- Step 3: Paragraphs found to contain target-related information are then fed
  to multiple classifiers to enrich the information extraction.

**Classifiers**

- Netzero
- GHG
- Sector
- Adaptation/Mitigation
        """)
st.write("")
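# The appStore modules imported above implement the steps described in the
# expander. Judging from the session-state check at the bottom of this file,
# they appear to share results via st.session_state (e.g. 'key1') rather than
# return values; the exact keys are an implementation detail of those modules.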
apps = [processing.app, target_extraction.app, netzero.app, ghg.app,
sector.app, adapmit.app]
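# The order mirrors the steps in the expander: pre-processing first, then the
# target classifier, then the enrichment classifiers.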
multiplier_val = 1.0 / len(apps)  # st.progress expects a float in [0.0, 1.0]
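# Run every step in sequence, advancing the progress bar by an equal share
# per completed step.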
if st.button("Get the work done"):
    prg = st.progress(0.0)
    for i, func in enumerate(apps):
        func()
        prg.progress((i + 1) * multiplier_val)
if 'key1' in st.session_state:
    target_extraction.target_display()
    st.write(st.session_state.key1)