BananaSauce committed on
Commit 3e5674a · verified · 1 Parent(s): 19a7b89

updated chart midnight issue

Files changed (1)
  1. weekly.py +27 -6
weekly.py CHANGED
@@ -2,6 +2,7 @@ import pandas as pd
 import streamlit as st
 import plotly.graph_objects as go
 from pre import preprocess_uploaded_file
+from datetime import datetime
 
 def generate_weekly_report(uploaded_files):
     if not uploaded_files:
@@ -11,6 +12,21 @@ def generate_weekly_report(uploaded_files):
     combined_data = pd.DataFrame()
     for uploaded_file in uploaded_files:
         data = preprocess_uploaded_file(uploaded_file)
+        # Extract date and time from filename
+        filename_parts = uploaded_file.name.split('_')
+        if len(filename_parts) >= 4:
+            file_datetime_str = f"{filename_parts[-2]}_{filename_parts[-1].split('.')[0]}"
+            try:
+                file_datetime = datetime.strptime(file_datetime_str, '%Y%m%d_%H%M%S')
+                file_date = file_datetime.date()
+            except ValueError:
+                st.error(f"Invalid date format in filename: {uploaded_file.name}")
+                return
+        else:
+            st.error(f"Filename does not contain expected date format: {uploaded_file.name}")
+            return
+
+        data['File Date'] = file_date
         combined_data = pd.concat([combined_data, data], ignore_index=True)
 
     if combined_data.empty:
@@ -23,7 +39,8 @@ def generate_weekly_report(uploaded_files):
         st.warning("No failed scenarios found in the uploaded data.")
         return
 
-    failed_data['Date'] = pd.to_datetime(failed_data['Start datetime']).dt.date
+    # Use 'File Date' for grouping
+    failed_data['Date'] = failed_data['File Date']
 
     # UI for selecting environments and functional areas
     environments = combined_data['Environment'].unique()
@@ -31,7 +48,7 @@ def generate_weekly_report(uploaded_files):
 
     all_functional_areas = failed_data['Functional area'].unique()
     area_choice = st.radio("Choose Functional Areas to Display", ['All', 'Select Functional Areas'])
-
+
     if area_choice == 'Select Functional Areas':
         selected_functional_areas = st.multiselect("Select Functional Areas", options=all_functional_areas)
         if not selected_functional_areas:
@@ -61,6 +78,10 @@ def generate_weekly_report(uploaded_files):
     # Group by Date, Environment, and Functional area
     daily_failures = filtered_data.groupby(['Date', 'Environment', 'Functional area']).size().unstack(level=[1, 2], fill_value=0)
 
+    # Ensure we have a continuous date range
+    date_range = pd.date_range(start=start_date, end=end_date)
+    daily_failures = daily_failures.reindex(date_range, fill_value=0)
+
     # Y-axis scaling option
     y_axis_scale = st.radio("Y-axis Scaling", ["Fixed", "Dynamic"])
 
@@ -98,16 +119,16 @@ def generate_weekly_report(uploaded_files):
 
     # Add interactivity for scenario details
     st.write("Select a date and environment to see detailed scenario information:")
-
+
     selected_date = st.date_input("Select a date", min_value=start_date, max_value=end_date, value=start_date)
     selected_env = st.selectbox("Select an environment", options=selected_environments)
 
     if selected_date and selected_env:
         st.write(f"### Detailed Scenarios for {selected_date} - {selected_env}")
-
+
         day_scenarios = filtered_data[(filtered_data['Date'] == selected_date) &
                                       (filtered_data['Environment'] == selected_env)]
-
+
         if not day_scenarios.empty:
             st.dataframe(day_scenarios[['Functional area', 'Scenario name', 'Error message', 'Time spent(m:s)']])
         else:
@@ -118,7 +139,7 @@ def generate_weekly_report(uploaded_files):
     for env in selected_environments:
         env_data = filtered_data[filtered_data['Environment'] == env]
         total_failures = len(env_data)
-
+
        if len(daily_failures) > 0:
            avg_daily_failures = total_failures / len(daily_failures)
            if env in daily_failures.columns.levels[0]:
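
The dating logic added above assumes each uploaded report's filename ends in an underscore-separated YYYYMMDD_HHMMSS stamp. A minimal sketch of that parsing, using a hypothetical filename (the name itself is illustrative, not from the commit):

    from datetime import datetime

    # Hypothetical filename; the last two underscore-separated parts
    # carry the run date and time, in the format the new code expects.
    name = "report_qa_20240101_063000.csv"
    parts = name.split('_')                           # ['report', 'qa', '20240101', '063000.csv']
    stamp = f"{parts[-2]}_{parts[-1].split('.')[0]}"  # '20240101_063000'
    run_date = datetime.strptime(stamp, '%Y%m%d_%H%M%S').date()
    print(run_date)  # 2024-01-01

The reindex over pd.date_range keeps the chart's x-axis continuous: days with no recorded failures show up as zeros instead of being dropped. The same pattern on a small made-up series:

    import pandas as pd

    # Made-up daily failure counts with no entries for Jan 2 and Jan 4.
    counts = pd.Series([3, 5], index=pd.to_datetime(['2024-01-01', '2024-01-03']))
    full_range = pd.date_range(start='2024-01-01', end='2024-01-04')
    print(counts.reindex(full_range, fill_value=0))  # missing days filled with 0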