import appStore.target as target_extraction
import appStore.netzero as netzero
import appStore.sector as sector
import appStore.adapmit as adapmit
import appStore.ghg as ghg
import appStore.policyaction as policyaction
import appStore.conditional as conditional
import appStore.indicator as indicator
import appStore.doc_processing as processing
from utils.uploadAndExample import add_upload
from PIL import Image
import streamlit as st

####################################### Dashboard ######################################################

# App
st.set_page_config(page_title='Vulnerable Groups Identification',
                   initial_sidebar_state='expanded', layout="wide")

with st.sidebar:
    # upload and example doc
    choice = st.sidebar.radio(label='Select the Document',
                              help='You can upload the document or else you can try an example document',
                              options=('Upload Document', 'Try Example'),
                              horizontal=True)
    add_upload(choice)

with st.container():
    st.markdown("<h2 style='text-align: center;'>Vulnerable Groups Identification</h2>",
                unsafe_allow_html=True)
", unsafe_allow_html=True) st.write(' ') with st.expander("ℹ️ - About this app", expanded=False): st.write( """ The Vulnerable Groups Identification App is an open-source\ digital tool which aims to assist policy analysts and \ other users in extracting and filtering relevant \ information from public documents. """) st.write('**Definitions**') st.caption(""" - **Place holder**: Place holder \ Place holder \ Place holder \ Place holder \ Place holder """) #c1, c2, c3 = st.columns([12,1,10]) #with c1: #image = Image.open('docStore/img/flow.jpg') #st.image(image) #with c3: #st.write(""" # What happens in the background? # - Step 1: Once the document is provided to app, it undergoes *Pre-processing*.\ # In this step the document is broken into smaller paragraphs \ # (based on word/sentence count). # - Step 2: The paragraphs are fed to **Target Classifier** which detects if # the paragraph contains any *Target* related information or not. # - Step 3: The paragraphs which are detected containing some target \ # related information are then fed to multiple classifier to enrich the # Information Extraction. # The Step 2 and 3 are repated then similarly for Action and Policies & Plans. # """) #st.write("") apps = [processing.app, target_extraction.app, netzero.app, ghg.app, policyaction.app, conditional.app, sector.app, adapmit.app,indicator.app] multiplier_val =1/len(apps) if st.button("Analyze Document"): prg = st.progress(0.0) for i,func in enumerate(apps): func() prg.progress((i+1)*multiplier_val) if 'key1' in st.session_state: with st.sidebar: topic = st.radio( "Which category you want to explore?", ('Target', 'Action', 'Policies/Plans')) if topic == 'Target': target_extraction.target_display() elif topic == 'Action': policyaction.action_display() else: policyaction.policy_display() # st.write(st.session_state.key1) #st.title("Identify references to vulnerable groups.") #st.write("""Vulnerable groups encompass various communities and individuals who are disproportionately affected by the impacts of climate change #due to their socioeconomic status, geographical location, or inherent characteristics. 
# By incorporating the needs and perspectives of these groups into national climate policies,
# governments can ensure equitable outcomes, promote social justice, and strive to build
# resilience within the most marginalized populations, fostering a more sustainable and
# inclusive society as we navigate the challenges posed by climate change. This app allows
# you to identify whether a text contains any references to vulnerable groups, for example
# in policy documents.""")

# Document upload
# uploaded_file = st.file_uploader("Upload your file here")

# Create text input box
# input_text = st.text_area(label='Please enter your text here',
#                           value="This policy has been implemented to support women.")
# st.write('Prediction:', model(input_text))


######################################### Model #########################################################

# Load the model
# model = SetFitModel.from_pretrained("leavoigt/vulnerable_groups")

# Define the classes
# id2label = {
#     0: 'Agricultural communities',
#     1: 'Children and Youth',
#     2: 'Coastal communities',
#     3: 'Drought-prone regions',
#     4: 'Economically disadvantaged communities',
#     5: 'Elderly population',
#     6: 'Ethnic minorities and indigenous people',
#     7: 'Informal sector workers',
#     8: 'Migrants and Refugees',
#     9: 'Other',
#     10: 'People with Disabilities',
#     11: 'Rural populations',
#     12: 'Sexual minorities (LGBTQI+)',
#     13: 'Urban populations',
#     14: 'Women'}

### Process document to paragraphs

# Source: https://blog.jcharistech.com/2021/01/21/how-to-save-uploaded-files-to-directory-in-streamlit-apps/
# Store uploaded file temporarily in directory to get file path (necessary for processing)

# def save_uploadedfile(upl_file):
#     with open(os.path.join("tempDir", upl_file.name), "wb") as f:
#         f.write(upl_file.getbuffer())
#     return st.success("Saved File:{} to tempDir".format(upl_file.name))

# if uploaded_file is not None:
#     # Save the file
#     file_details = {"FileName": uploaded_file.name, "FileType": uploaded_file.type}
#     save_uploadedfile(uploaded_file)
#     # Get the file path
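# --------------------------------------------------------------------------------------------------------
# Hedged sketch (kept commented out so app behaviour is unchanged): one way the SetFit checkpoint and the
# id2label mapping referenced in the commented Model section above could be applied to a single paragraph.
# The checkpoint name "leavoigt/vulnerable_groups" and id2label come from the section above; the assumption
# that `predict` returns integer class ids (indexable into id2label) is mine, not confirmed by this repo.
#
# from setfit import SetFitModel
#
# def classify_paragraph(text: str) -> str:
#     # Load the (assumed) vulnerable-groups SetFit classifier
#     model = SetFitModel.from_pretrained("leavoigt/vulnerable_groups")
#     pred = model.predict([text])[0]      # single prediction for one paragraph
#     return id2label[int(pred)]           # map the class id to a group name (assumes integer labels)
#
# classify_paragraph("This policy has been implemented to support women.")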