import pandas as pd
import streamlit as st
import matplotlib.pyplot as plt
import numpy as np

from second import double_main
from multiple import multiple_main
from weekly import generate_weekly_report
from pre import preprocess_uploaded_file, add_app_description


def single_main(uploaded_file):
    if uploaded_file is not None:
        # Process the uploaded CSV file (with header)
        data = preprocess_uploaded_file(uploaded_file)
        # st.write(data)

        # Split scenarios by status so they can be grouped by functional area
        failed_scenarios = data[data['Status'] == 'FAILED']
        passed_scenarios = data[data['Status'] == 'PASSED']

        # Display the total count of failing scenarios
        fail_count = len(failed_scenarios)
        st.markdown(f"Failing scenarios count: {fail_count}")

        # Display the total count of passing scenarios
        pass_count = len(passed_scenarios)
        st.markdown(f"Passing scenarios count: {pass_count}")

        # Use radio buttons to select a status
        selected_status = st.radio("Select a status", ['Failed', 'Passed'])

        # Determine which scenarios to display based on the selected status
        if selected_status == 'Failed':
            unique_areas = np.append(failed_scenarios['Functional area'].unique(), "All")
            selected_scenarios = failed_scenarios
        elif selected_status == 'Passed':
            unique_areas = np.append(passed_scenarios['Functional area'].unique(), "All")
            selected_scenarios = passed_scenarios
        else:
            selected_scenarios = None

        if selected_scenarios is not None:
            st.markdown(f"### Scenarios with status '{selected_status}' grouped by functional area:")

            # Select one or more functional areas to filter scenarios
            selected_functional_areas = st.multiselect("Select functional areas", unique_areas, ["All"])
            if "All" in selected_functional_areas:
                filtered_scenarios = selected_scenarios
            else:
                filtered_scenarios = selected_scenarios[
                    selected_scenarios['Functional area'].isin(selected_functional_areas)
                ]

            if not selected_functional_areas:  # Nothing selected in the multiselect
                st.error("Please select at least one functional area.")
            else:
                # Calculate the average time spent for each functional area
                average_time_spent_seconds = (
                    filtered_scenarios.groupby('Functional area')['Time spent'].mean().reset_index()
                )

                # Convert the average time spent from seconds to an mm:ss format
                average_time_spent_seconds['Time spent'] = pd.to_datetime(
                    average_time_spent_seconds['Time spent'], unit='s'
                ).dt.strftime('%M:%S')

                # Get the earliest start datetime per functional area for sorting
                start_datetime_group = (
                    filtered_scenarios.groupby('Functional area')['Start datetime'].min().reset_index()
                )

                # Merge the average time spent with the start datetimes
                average_time_spent_seconds = average_time_spent_seconds.merge(
                    start_datetime_group, on='Functional area'
                )

                # Group the filtered scenarios by functional area, keeping the
                # columns relevant to the selected status
                if selected_status == 'Failed':
                    grouped_filtered_scenarios = filtered_scenarios.groupby('Functional area')[
                        ['Scenario name', 'Error message', 'Time spent(m:s)']
                    ].apply(lambda x: x.reset_index(drop=True))
                elif selected_status == 'Passed':
                    grouped_filtered_scenarios = filtered_scenarios.groupby('Functional area')[
                        ['Scenario name', 'Time spent(m:s)']
                    ].apply(lambda x: x.reset_index(drop=True))
                else:
                    grouped_filtered_scenarios = None
                grouped_filtered_scenarios.reset_index(inplace=True)
                grouped_filtered_scenarios.drop(columns=['level_1'], inplace=True)
                grouped_filtered_scenarios.index = grouped_filtered_scenarios.index + 1
                st.dataframe(grouped_filtered_scenarios)

                # Sort the average time spent table by start datetime
                average_time_spent_seconds = average_time_spent_seconds.sort_values(by='Start datetime')

                # Display the average time spent on each functional area in a table
                st.markdown("### Average Time Spent on Each Functional Area")
                average_time_spent_seconds.index = average_time_spent_seconds.index + 1
                st.dataframe(average_time_spent_seconds)

                # Only draw the bar chart for failed scenarios, and only when the
                # table is small enough (at most 400 rows) to plot readably
                if selected_status != 'Passed' and len(grouped_filtered_scenarios) <= 400:
                    st.write(f"### Bar graph showing number of '{selected_status}' scenarios in each functional area:")
                    error_counts = grouped_filtered_scenarios['Functional area'].value_counts()
                    plt.figure(figsize=(10, 6))
                    plt.bar(error_counts.index, error_counts.values)
                    plt.xlabel('Functional Area')
                    plt.ylabel('Number of Failures')
                    plt.title(f"Number of '{selected_status}' scenarios by Functional Area")
                    plt.xticks(rotation=45, ha='right')

                    # Set y-axis limits and ticks at a consistent interval of 1
                    y_max = max(error_counts.values) + 1
                    plt.ylim(0, y_max)
                    plt.yticks(range(0, y_max, 1))

                    # Annotate each bar with its count
                    for i, count in enumerate(error_counts.values):
                        plt.text(i, count, str(count), ha='center', va='bottom')

                    plt.tight_layout()  # Keep the rotated x-axis labels inside the figure
                    st.pyplot(plt)
        else:
            st.write("### No scenarios with status 'failed' found.")


def main():
    add_app_description()

    # Default to multi-file processing mode
    if "mode" not in st.session_state:
        st.session_state["mode"] = "multi"

    # Add a dropdown for mode selection
    selected_mode = st.sidebar.selectbox("Select Mode", ["Multi", "Compare", "Weekly"])

    # Update the mode based on the selection
    st.session_state["mode"] = selected_mode.lower()

    # Display the selected mode
    mode_display = f'## Current mode: {st.session_state["mode"].title()} mode'
    st.sidebar.markdown(mode_display)

    # Dispatch to the selected mode; extra file uploaders are only shown in
    # compare and weekly modes
    if st.session_state["mode"] == "multi":
        multiple_main()
    elif st.session_state["mode"] == "compare":
        uploaded_file_1 = st.sidebar.file_uploader("Upload CSV file 1", type="csv")
        uploaded_file_2 = st.sidebar.file_uploader("Upload CSV file 2", type="csv")
        if uploaded_file_1 is not None and uploaded_file_2 is not None:
            double_main(uploaded_file_1, uploaded_file_2)
    elif st.session_state["mode"] == "weekly":
        uploaded_files = st.sidebar.file_uploader(
            "Upload CSV files for Weekly Report", type="csv", accept_multiple_files=True
        )
        if uploaded_files:
            generate_weekly_report(uploaded_files)


if __name__ == "__main__":
    main()
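# Notes (assumptions, not confirmed by the companion modules):
# - preprocess_uploaded_file is assumed to return a DataFrame with the columns
#   referenced above: 'Status', 'Functional area', 'Scenario name',
#   'Error message', 'Start datetime', 'Time spent' (seconds), and
#   'Time spent(m:s)'.
# - To run locally, assuming this file is saved as app.py and second.py,
#   multiple.py, weekly.py, and pre.py are importable from the same directory:
#       streamlit run app.py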