import pandas as pd
import streamlit as st
import matplotlib.pyplot as plt
import numpy as np
from pre import preprocess_uploaded_file


def perform_analysis(uploaded_dataframes):
    # Combine the uploaded results from all environments into a single DataFrame.
    combined_data = pd.concat(uploaded_dataframes, ignore_index=True)

    # Split the scenarios by status and report the counts.
    failed_scenarios = combined_data[combined_data['Status'] == 'FAILED']
    passed_scenarios = combined_data[combined_data['Status'] == 'PASSED']

    fail_count = len(failed_scenarios)
    st.markdown(f"Failing scenarios count: {fail_count}")

    pass_count = len(passed_scenarios)
    st.markdown(f"Passing scenarios count: {pass_count}")

    selected_status = st.radio("Select a status", ['Failed', 'Passed'])

    if selected_status == 'Failed':
        unique_areas = np.append(failed_scenarios['Functional area'].unique(), "All")
        selected_scenarios = failed_scenarios
    elif selected_status == 'Passed':
        unique_areas = np.append(passed_scenarios['Functional area'].unique(), "All")
        selected_scenarios = passed_scenarios
    else:
        selected_scenarios = None

    if selected_scenarios is not None:
        st.markdown(f"### Scenarios with status '{selected_status}' grouped by functional area:")

        # Let the user narrow the results to specific functional areas.
        selected_functional_areas = st.multiselect("Select functional areas", unique_areas, ["All"])

        if "All" in selected_functional_areas:
            filtered_scenarios = selected_scenarios
        else:
            filtered_scenarios = selected_scenarios[selected_scenarios['Functional area'].isin(selected_functional_areas)]

        if not selected_functional_areas:
            st.error("Please select at least one functional area.")
        else:
            # Average time spent per functional area, formatted as MM:SS.
            average_time_spent_seconds = filtered_scenarios.groupby('Functional area')['Time spent'].mean().reset_index()
            average_time_spent_seconds['Time spent'] = pd.to_datetime(average_time_spent_seconds['Time spent'], unit='s').dt.strftime('%M:%S')

            # Total elapsed time per functional area: earliest start to latest end.
            start_datetime_group = filtered_scenarios.groupby('Functional area')['Start datetime'].min().reset_index()
            end_datetime_group = filtered_scenarios.groupby('Functional area')['End datetime'].max().reset_index()

            total_time_spent_seconds = (end_datetime_group['End datetime'] - start_datetime_group['Start datetime']).dt.total_seconds()
            total_time_spent_seconds = pd.to_datetime(total_time_spent_seconds, unit='s').dt.strftime('%M:%S')

            average_time_spent_seconds = average_time_spent_seconds.merge(start_datetime_group, on='Functional area')
            average_time_spent_seconds = average_time_spent_seconds.merge(end_datetime_group, on='Functional area')
            average_time_spent_seconds['Total Time Spent'] = total_time_spent_seconds

            # Group the filtered scenarios for display; the radio limits the status to
            # 'Failed' or 'Passed', so a plain if/else covers both cases.
            if selected_status == 'Failed':
                grouped_filtered_scenarios = filtered_scenarios.groupby('Environment')[['Functional area', 'Scenario name', 'Error message', 'Time spent(m:s)', 'Start datetime']].apply(lambda x: x.reset_index(drop=True))
            else:
                grouped_filtered_scenarios = filtered_scenarios.groupby('Functional area')[['Scenario name', 'Time spent(m:s)']].apply(lambda x: x.reset_index(drop=True))

            # Flatten the group index for display and drop the helper level.
            grouped_filtered_scenarios.reset_index(inplace=True)
            grouped_filtered_scenarios.drop(columns=['level_1'], inplace=True)
            grouped_filtered_scenarios.index = grouped_filtered_scenarios.index + 1
            st.dataframe(grouped_filtered_scenarios)

            # Summary table, ordered chronologically by start time.
            average_time_spent_seconds = average_time_spent_seconds.sort_values(by='Start datetime')

            st.markdown("### Total and Average Time Spent on Each Functional Area")
            average_time_spent_seconds.index = average_time_spent_seconds.index + 1
            average_time_spent_seconds.rename(columns={'Start datetime': 'Start Datetime', 'End datetime': 'End Datetime', 'Time spent': 'Average Time Spent'}, inplace=True)
            average_time_spent_seconds = average_time_spent_seconds[['Functional area', 'Total Time Spent', 'Start Datetime', 'End Datetime', 'Average Time Spent']]
            st.dataframe(average_time_spent_seconds)

            # Bar chart of scenario counts per functional area (failed view only).
            if selected_status != 'Passed':
                st.write(f"### Bar graph showing number of '{selected_status}' scenarios in each functional area:")
                error_counts = grouped_filtered_scenarios['Functional area'].value_counts()
                plt.figure(figsize=(12, 10))
                bars = plt.bar(error_counts.index, error_counts.values)
                plt.xlabel('Functional Area')
                plt.ylabel('Number of Failures')
                plt.title(f"Number of '{selected_status}' scenarios by Functional Area")
                plt.xticks(rotation=45, ha='right', fontsize=10)

                # Integer y-axis ticks with one unit of headroom above the tallest bar.
                y_max = max(error_counts.values) + 1
                plt.ylim(0, y_max)
                plt.yticks(range(0, y_max, 1), fontsize=10)

                # Annotate each bar with its count.
                for bar in bars:
                    height = bar.get_height()
                    plt.text(bar.get_x() + bar.get_width() / 2, height, str(int(height)),
                             ha='center', va='bottom')

                plt.tight_layout()
                st.pyplot(plt)


def multiple_main():
    num_environments = st.number_input("Enter the number of environments", min_value=1, value=1, step=1)

    uploaded_dataframes = []

    # Collect one or more CSV uploads per environment and preprocess each file.
    for i in range(num_environments):
        uploaded_files = st.file_uploader(f"Upload CSV files for Environment {i + 1}", type="csv", accept_multiple_files=True)

        for uploaded_file in uploaded_files:
            data = preprocess_uploaded_file(uploaded_file)
            uploaded_dataframes.append(data)

    # Run the analysis only once at least one file has been uploaded.
    if uploaded_dataframes:
        perform_analysis(uploaded_dataframes)
    else:
        st.write("Please upload at least one CSV file.")
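

# Minimal entry-point sketch, assuming this module may also be launched directly with
# `streamlit run` rather than only being imported and having multiple_main() called
# by a parent app (that parent app is an assumption, not shown in this file).
if __name__ == "__main__":
    multiple_main()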