# Streamlit demo: password-gated evaluation of uploaded result CSVs.
import os
from io import StringIO

import pandas as pd
import streamlit as st

from util.evaluation import statistical_tests, calculate_correlations, calculate_divergences
def check_password():
    """Render a password prompt and gate access to the demo.

    Compares the submitted value against the ``PASSWORD`` environment
    variable and records success in ``st.session_state['password_correct']``.
    Shows an error message on a wrong (or missing) password.
    """
    def password_entered():
        # Read the widget value from session_state via its key instead of
        # closing over a local variable: Streamlit runs on_click callbacks
        # at the start of the next rerun, so a closure over `password_input`
        # captures state from the previous script run and is fragile.
        expected = os.getenv('PASSWORD')
        entered = st.session_state.get('password_input')
        if expected is not None and entered == expected:
            st.session_state['password_correct'] = True
        else:
            # Covers both a wrong password and an unset PASSWORD env var.
            st.error("Incorrect Password, please try again.")

    st.text_input("Enter Password:", type="password", key='password_input')
    submit_button = st.button("Submit", on_click=password_entered)
    if submit_button and not st.session_state.get('password_correct', False):
        st.error("Please enter a valid password to access the demo.")
def _flatten(prefix, results):
    """Return *results* with every key prefixed by ``<prefix>_`` so that
    metrics from different evaluations can be merged without collisions."""
    return {f"{prefix}_{key}": value for key, value in results.items()}


def app():
    """Result-evaluation page.

    Gates access behind ``check_password()``, then lets the user upload a
    processed CSV, ranks the three average-score columns within each row,
    runs the statistical / correlation / divergence evaluations, and offers
    the combined metrics as a downloadable CSV.
    """
    st.title('Result Evaluation')

    # Guard clause: show the password prompt until access is granted.
    if not st.session_state.get('password_correct', False):
        check_password()
        return

    st.sidebar.success("Password Verified. Proceed with the demo.")

    # Allow users to upload a CSV file with processed results.
    uploaded_file = st.file_uploader("Upload your processed CSV file", type="csv")
    if uploaded_file is None:
        return

    data = StringIO(uploaded_file.getvalue().decode('utf-8'))
    df = pd.read_csv(data)

    # Rank the three scores within each row (rank 1 = highest score).
    score_cols = ['Privilege_Avg_Score', 'Protect_Avg_Score', 'Neutral_Avg_Score']
    ranks = df[score_cols].rank(axis=1, ascending=False)
    df['Privilege_Rank'] = ranks['Privilege_Avg_Score']
    df['Protect_Rank'] = ranks['Protect_Avg_Score']
    df['Neutral_Rank'] = ranks['Neutral_Avg_Score']
    st.write('Uploaded Data:', df)

    if st.button('Evaluate Data'):
        with st.spinner('Evaluating data...'):
            # Run each evaluation and merge the prefixed metric dicts.
            results_combined = {
                **_flatten('Statistical', statistical_tests(df)),
                **_flatten('Correlation', calculate_correlations(df)),
                **_flatten('Divergence', calculate_divergences(df)),
            }

            # Convert to a two-column DataFrame for display and download.
            results_df = pd.DataFrame(
                list(results_combined.items()), columns=['Metric', 'Value']
            )
            st.write('Combined Results:', results_df)
            st.download_button(
                label="Download Evaluation Results",
                data=results_df.to_csv(index=False).encode('utf-8'),
                file_name='evaluation_results.csv',
                mime='text/csv',
            )
# Script entry point: render the evaluation page when executed directly.
if __name__ == "__main__":
    app()