Commit 7f6e787
1 Parent(s): aad0805

Create multiple.py

Files changed: multiple.py (+136, -0)
multiple.py
ADDED
@@ -0,0 +1,136 @@
+import pandas as pd
+import streamlit as st
+import matplotlib.pyplot as plt
+import numpy as np
+from pre import preprocess_uploaded_file
+
+# Define the function to perform analysis
+def perform_analysis(uploaded_dataframes):
+    # Concatenate all dataframes into a single dataframe
+    combined_data = pd.concat(uploaded_dataframes, ignore_index=True)
+
+    # Split scenarios into failed and passed subsets
+    failed_scenarios = combined_data[combined_data['Status'] == 'FAILED']
+    passed_scenarios = combined_data[combined_data['Status'] == 'PASSED']
+    # Display total count of failures
+    fail_count = len(failed_scenarios)
+    st.markdown(f"Failing scenarios Count: {fail_count}")
+    # Display total count of passes
+    pass_count = len(passed_scenarios)
+    st.markdown(f"Passing scenarios Count: {pass_count}")
+    # Use radio buttons for selecting status
+    selected_status = st.radio("Select a status", ['Failed', 'Passed'])
+    # Determine which scenarios to display based on selected status
+    if selected_status == 'Failed':
+        unique_areas = np.append(failed_scenarios['Functional area'].unique(), "All")
+        selected_scenarios = failed_scenarios
+    elif selected_status == 'Passed':
+        unique_areas = np.append(passed_scenarios['Functional area'].unique(), "All")
+        selected_scenarios = passed_scenarios
+    else:
+        selected_scenarios = None
+
+    if selected_scenarios is not None:
+        # st.write(f"Scenarios with status '{selected_status}' grouped by functional area:")
+        st.markdown(f"### Scenarios with status '{selected_status}' grouped by functional area:")
+
+        # Select a range of functional areas to filter scenarios
+        selected_functional_areas = st.multiselect("Select functional areas", unique_areas, ["All"])
+
+        if "All" in selected_functional_areas:
+            filtered_scenarios = selected_scenarios
+        else:
+            filtered_scenarios = selected_scenarios[selected_scenarios['Functional area'].isin(selected_functional_areas)]
+
+        if not selected_functional_areas:  # Check if the list is empty
+            st.error("Please select at least one functional area.")
+        else:
+            # Calculate the average time spent for each functional area
+            average_time_spent_seconds = filtered_scenarios.groupby('Functional area')['Time spent'].mean().reset_index()
+            # Convert average time spent from seconds to minutes and seconds format
+            average_time_spent_seconds['Time spent'] = pd.to_datetime(average_time_spent_seconds['Time spent'], unit='s').dt.strftime('%M:%S')
+            # Group by functional area and get the start datetime for sorting
+            start_datetime_group = filtered_scenarios.groupby('Functional area')['Start datetime'].min().reset_index()
+            # Merge average_time_spent_seconds and start_datetime_group
+            average_time_spent_seconds = average_time_spent_seconds.merge(start_datetime_group, on='Functional area')
+            # Group the filtered scenarios for display, depending on the selected status
+            if selected_status == 'Failed':
+                grouped_filtered_scenarios = filtered_scenarios.groupby('Environment')[['Functional area', 'Scenario name', 'Error message', 'Time spent(m:s)']].apply(lambda x: x.reset_index(drop=True))
+            elif selected_status == 'Passed':
+                grouped_filtered_scenarios = filtered_scenarios.groupby('Functional area')[['Scenario name', 'Time spent(m:s)']].apply(lambda x: x.reset_index(drop=True))
+            else:
+                grouped_filtered_scenarios = None
+            grouped_filtered_scenarios.reset_index(inplace=True)
+            grouped_filtered_scenarios.drop(columns=['level_1'], inplace=True)
+            # grouped_filtered_scenarios['level_1'] = index
+            grouped_filtered_scenarios.index = grouped_filtered_scenarios.index + 1
+            st.dataframe(grouped_filtered_scenarios)
+            # Sort the average time spent table by start datetime
+            average_time_spent_seconds = average_time_spent_seconds.sort_values(by='Start datetime')
+
+            # Display average time spent on each functional area in a table
+            st.markdown("### Average Time Spent on Each Functional Area")
+            average_time_spent_seconds.index = average_time_spent_seconds.index + 1
+            st.dataframe(average_time_spent_seconds)
+
+            # Show the bar chart only for failed scenarios
+            if selected_status != 'Passed':
+                # Create and display bar graph of errors by functional area
+                st.write(f"### Bar graph showing number of '{selected_status}' scenarios in each functional area:")
+                error_counts = grouped_filtered_scenarios['Functional area'].value_counts()
+                plt.figure(figsize=(12, 10))
+                bars = plt.bar(error_counts.index, error_counts.values)
+                plt.xlabel('Functional Area')
+                plt.ylabel('Number of Failures')
+                plt.title(f"Number of '{selected_status}' scenarios by Functional Area")
+                plt.xticks(rotation=45, ha='right', fontsize=10)
+                # Set y-axis limits and ticks for a consistent interval of 1
+                y_max = max(error_counts.values) + 1
+                plt.ylim(0, y_max)
+                plt.yticks(range(0, y_max, 1), fontsize=10)
+
+                # Label each bar with its count
+                for bar in bars:
+                    height = bar.get_height()
+                    plt.text(bar.get_x() + bar.get_width() / 2, height, str(int(height)),
+                             ha='center', va='bottom')  # count centred above the bar
+
+                plt.tight_layout()  # Adjust layout so rotated labels are not clipped
+                st.pyplot(plt)
+    pass
+
+def multiple_main():
+
+    # Get the number of environments from the user
+    num_environments = st.number_input("Enter the number of environments", min_value=1, value=1, step=1)
+
+    # Initialize list to store uploaded dataframes
+    uploaded_dataframes = []
+
+    # Loop through the number of environments and create file uploaders
+    for i in range(num_environments):
+        uploaded_files = st.file_uploader(f"Upload CSV files for Environment {i + 1}", type="csv", accept_multiple_files=True)
+
+        for uploaded_file in uploaded_files:
+            # Preprocess the uploaded CSV file
+            data = preprocess_uploaded_file(uploaded_file)
+
+
+            # Extract environment name from filename
+            filename = uploaded_file.name
+            environment = filename.split('_Puppeteer')[0]
+
+            # Add environment column to the dataframe
+            data['Environment'] = environment
+
+            # Append the dataframe to the list
+            uploaded_dataframes.append(data)
+
+    # Check if any files were uploaded
+    if uploaded_dataframes:
+        # Perform analysis for uploaded data
+        perform_analysis(uploaded_dataframes)
+    else:
+        st.write("Please upload at least one CSV file.")
+
+pass
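
For quick reference, a minimal sketch of how this new module might be wired into a Streamlit entry point. The app.py filename and the title text below are illustrative assumptions, not part of this commit; the analysis also assumes preprocess_uploaded_file (imported from pre) returns a dataframe with the columns the code reads: Status, Functional area, Scenario name, Error message, Time spent, Time spent(m:s) and Start datetime.

    # app.py: hypothetical entry point (assumption, not included in this commit)
    import streamlit as st

    from multiple import multiple_main  # module added by this commit

    st.title("Multi-environment scenario analysis")  # heading text is an assumption
    multiple_main()

Under those assumptions the app would be launched with: streamlit run app.py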