whyumesh committed on
Commit
2667557
·
verified ·
1 Parent(s): 7f145ad

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +171 -173
app.py CHANGED
@@ -1,173 +1,171 @@
1
- import streamlit as st
2
- import os
3
- import threading
4
- import pyautogui
5
- import numpy as np
6
- import cv2
7
- import pyaudio
8
- import wave
9
- import keyboard
10
- from pywinauto.application import Application
11
- import pygetwindow as gw
12
- from logic import analyze_with_audio_video
13
- from dotenv import load_dotenv
14
-
15
- load_dotenv()
16
-
17
- # Audio settings
18
- FORMAT = pyaudio.paInt16
19
- CHANNELS = 1
20
- RATE = 44100
21
- CHUNK = 1024
22
-
23
- # File paths
24
- audio_filename = "output.wav"
25
- video_filename = "output.mp4"
26
-
27
- # Initialize Streamlit
28
- st.set_page_config(page_title="T.A.P.A.S", page_icon=":camera:", layout="wide")
29
- st.title("T.A.P.A.S - Technical Assistance Platform for Advanced Solution")
30
-
31
- # Initialize session state for outputs
32
- if 'outputs' not in st.session_state or not isinstance(st.session_state.outputs, dict):
33
- st.session_state.outputs = {}
34
-
35
- if 'current_session' not in st.session_state:
36
- st.session_state.current_session = 'Session 1'
37
-
38
- def cleanup_files():
39
- """Deletes old files before a new recording session starts."""
40
- files_to_delete = [audio_filename, video_filename]
41
- for file in files_to_delete:
42
- if os.path.exists(file):
43
- os.remove(file)
44
- print(f"Deleted old file: {file}")
45
-
46
- def record_audio(filename, stop_event):
47
- audio = pyaudio.PyAudio()
48
- stream = audio.open(format=FORMAT, channels=CHANNELS,
49
- rate=RATE, input=True,
50
- frames_per_buffer=CHUNK)
51
- frames = []
52
-
53
- while not stop_event.is_set():
54
- data = stream.read(CHUNK)
55
- frames.append(data)
56
-
57
- stream.stop_stream()
58
- audio.terminate()
59
-
60
- with wave.open(filename, 'wb') as wf:
61
- wf.setnchannels(CHANNELS)
62
- wf.setsampwidth(audio.get_sample_size(FORMAT))
63
- wf.setframerate(RATE)
64
- wf.writeframes(b''.join(frames))
65
-
66
- def record_screen(filename, stop_event, mouse_positions):
67
- screen_size = pyautogui.size()
68
- fourcc = cv2.VideoWriter_fourcc(*"mp4v")
69
- out = cv2.VideoWriter(filename, fourcc, 8, (screen_size.width, screen_size.height))
70
-
71
- while not stop_event.is_set():
72
- img = pyautogui.screenshot()
73
- frame = np.array(img)
74
- frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
75
- # Capture mouse cursor
76
- x, y = pyautogui.position()
77
- cv2.circle(frame, (x, y), 10, (0, 255, 0), -1)
78
- out.write(frame)
79
- mouse_positions.append((x, y)) # Track mouse positions
80
-
81
- out.release()
82
-
83
- def minimize_browser():
84
- browser_window = None
85
- for window in gw.getAllTitles():
86
- if "chrome" in window.lower() or "firefox" in window.lower() or "edge" in window.lower():
87
- browser_window = window
88
- break
89
-
90
- if browser_window:
91
- app = Application().connect(title_re=browser_window)
92
- app.window(title_re=browser_window).minimize()
93
- else:
94
- print("Browser window not found.")
95
-
96
- def main():
97
- stop_event = threading.Event()
98
-
99
- # Sidebar for session selection
100
- with st.sidebar:
101
- st.title("Sessions")
102
- session_name = st.text_input("New Session Name", "")
103
- if st.button("Start New Session") and session_name:
104
- st.session_state.current_session = session_name
105
- st.session_state.outputs[session_name] = []
106
- session_names = list(st.session_state.outputs.keys())
107
- if session_names:
108
- session_selection = st.selectbox("Choose a session", session_names)
109
- if session_selection:
110
- st.session_state.current_session = session_selection
111
-
112
- st.header(f"Current Session: {st.session_state.current_session}")
113
-
114
- # Initialize the current session's outputs if it doesn't exist
115
- if st.session_state.current_session not in st.session_state.outputs:
116
- st.session_state.outputs[st.session_state.current_session] = []
117
-
118
- col1, col2 = st.columns(2)
119
- with col1:
120
- start_button = st.button("Start")
121
- with col2:
122
- stop_button = st.button("Stop")
123
-
124
- if start_button:
125
- minimize_browser()
126
- cleanup_files()
127
-
128
- audio_thread = threading.Thread(target=record_audio, args=(audio_filename, stop_event))
129
- mouse_positions = []
130
- screen_thread = threading.Thread(target=record_screen, args=(video_filename, stop_event, mouse_positions))
131
-
132
- audio_thread.start()
133
- screen_thread.start()
134
-
135
- st.write("Recording started. Press 'q' or click 'Stop' to stop.")
136
-
137
- while True:
138
- if keyboard.is_pressed('q') or stop_button:
139
- stop_event.set()
140
- break
141
-
142
- audio_thread.join()
143
- screen_thread.join()
144
-
145
- if not os.path.exists(audio_filename):
146
- st.error("Audio file was not created!")
147
- return
148
- if not os.path.exists(video_filename):
149
- st.error("Video file was not created!")
150
- return
151
-
152
- # Analyze the video and audio files together
153
- result = analyze_with_audio_video(video_filename, audio_filename)
154
- st.session_state.outputs[st.session_state.current_session].append(result)
155
-
156
- # Text input for additional queries
157
- additional_query = st.text_input("Type your query here if you're not satisfied with the solution:")
158
-
159
- if st.button("Submit Query") and additional_query:
160
- # Process the additional query (this would involve sending it to the model)
161
- result = analyze_with_audio_video(video_filename, audio_filename)
162
- st.session_state.outputs[st.session_state.current_session].append(f"Query: {additional_query}\n{result}")
163
-
164
- # Display all outputs for the current session
165
- for output in st.session_state.outputs[st.session_state.current_session]:
166
- st.markdown(f"""
167
- <div style="background-color: darkgray; border-radius: 10px; padding: 10px; margin-bottom: 10px; color: black;">
168
- <i class="fas fa-check-circle"></i> {output}
169
- </div>
170
- """, unsafe_allow_html=True)
171
-
172
- if __name__ == "__main__":
173
- main()
 
1
+ import streamlit as st
2
+ import os
3
+ import threading
4
+ import pyautogui
5
+ import numpy as np
6
+ import cv2
7
+ import pyaudio
8
+ import wave
9
+ import keyboard
10
+ from logic import analyze_with_audio_video
11
+ from dotenv import load_dotenv
12
load_dotenv()  # load config (e.g. API keys) from a local .env file — consumed by logic.py, presumably; confirm

# Audio settings
FORMAT = pyaudio.paInt16  # 16-bit signed integer samples
CHANNELS = 1              # mono capture
RATE = 44100              # 44.1 kHz sample rate
CHUNK = 1024              # frames read per buffer

# File paths (working-directory relative; overwritten each session)
audio_filename = "output.wav"
video_filename = "output.mp4"

# Initialize Streamlit
st.set_page_config(page_title="T.A.P.A.S", page_icon=":camera:", layout="wide")
st.title("T.A.P.A.S - Technical Assistance Platform for Advanced Solution")

# Initialize session state for outputs.
# The isinstance guard recovers if a previous run stored a non-dict value.
if 'outputs' not in st.session_state or not isinstance(st.session_state.outputs, dict):
    st.session_state.outputs = {}

if 'current_session' not in st.session_state:
    st.session_state.current_session = 'Session 1'
36
def cleanup_files(files=None):
    """Delete stale recording artifacts so a new session starts clean.

    Args:
        files: Optional iterable of paths to remove. Defaults to the
            module-level audio/video output filenames, preserving the
            original zero-argument behavior.
    """
    if files is None:
        files = [audio_filename, video_filename]
    for path in files:
        # Missing files are skipped silently; only existing ones are removed.
        if os.path.exists(path):
            os.remove(path)
            print(f"Deleted old file: {path}")
43
+
44
def record_audio(filename, stop_event):
    """Capture microphone audio until *stop_event* is set, then save a WAV file.

    Args:
        filename: Destination .wav path.
        stop_event: ``threading.Event`` polled between chunks; setting it
            ends the recording loop.
    """
    audio = pyaudio.PyAudio()
    stream = audio.open(format=FORMAT, channels=CHANNELS,
                        rate=RATE, input=True,
                        frames_per_buffer=CHUNK)
    frames = []
    try:
        while not stop_event.is_set():
            data = stream.read(CHUNK)
            frames.append(data)
    finally:
        # Query the sample width *before* terminate(): PortAudio calls are
        # not valid once the session has been terminated (the original
        # called get_sample_size after terminate()).
        sample_width = audio.get_sample_size(FORMAT)
        stream.stop_stream()
        stream.close()  # the original leaked the stream handle
        audio.terminate()

    with wave.open(filename, 'wb') as wf:
        wf.setnchannels(CHANNELS)
        wf.setsampwidth(sample_width)
        wf.setframerate(RATE)
        wf.writeframes(b''.join(frames))
63
+
64
def record_screen(filename, stop_event, mouse_positions):
    """Record the screen (with a painted cursor) to *filename* until stopped.

    Args:
        filename: Destination .mp4 path.
        stop_event: ``threading.Event``; setting it ends the capture loop.
        mouse_positions: Shared list; one ``(x, y)`` cursor position is
            appended per captured frame for the caller to inspect.
    """
    fps = 8
    frame_interval = 1.0 / fps
    screen_size = pyautogui.size()
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    out = cv2.VideoWriter(filename, fourcc, fps, (screen_size.width, screen_size.height))

    try:
        while not stop_event.is_set():
            frame_start = time.monotonic()
            img = pyautogui.screenshot()
            frame = np.array(img)
            # Screenshots arrive as RGB; OpenCV writes BGR. (The original
            # used COLOR_BGR2RGB — numerically the same channel swap, but
            # mislabeled for this direction.)
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            # Paint the cursor — screenshots do not include it.
            x, y = pyautogui.position()
            cv2.circle(frame, (x, y), 10, (0, 255, 0), -1)
            out.write(frame)
            mouse_positions.append((x, y))  # track mouse positions
            # Pace capture to the writer's fps so playback runs ~real-time;
            # the original captured as fast as possible, so videos played
            # back at the wrong speed.
            elapsed = time.monotonic() - frame_start
            if elapsed < frame_interval:
                time.sleep(frame_interval - elapsed)
    finally:
        out.release()  # always finalize the container, even on error
80
+
81
+ # def minimize_browser():
82
+ # browser_window = None
83
+ # for window in gw.getAllTitles():
84
+ # if "chrome" in window.lower() or "firefox" in window.lower() or "edge" in window.lower():
85
+ # browser_window = window
86
+ # break
87
+
88
+ # if browser_window:
89
+ # app = Application().connect(title_re=browser_window)
90
+ # app.window(title_re=browser_window).minimize()
91
+ # else:
92
+ # print("Browser window not found.")
93
+
94
def main():
    """Streamlit entry point: session sidebar, record controls, analysis display."""
    stop_event = threading.Event()

    # Sidebar for session selection
    with st.sidebar:
        st.title("Sessions")
        session_name = st.text_input("New Session Name", "")
        if st.button("Start New Session") and session_name:
            st.session_state.current_session = session_name
            st.session_state.outputs[session_name] = []
        session_names = list(st.session_state.outputs.keys())
        if session_names:
            session_selection = st.selectbox("Choose a session", session_names)
            if session_selection:
                st.session_state.current_session = session_selection

    st.header(f"Current Session: {st.session_state.current_session}")

    # Initialize the current session's outputs if it doesn't exist
    if st.session_state.current_session not in st.session_state.outputs:
        st.session_state.outputs[st.session_state.current_session] = []

    col1, col2 = st.columns(2)
    with col1:
        start_button = st.button("Start")
    with col2:
        stop_button = st.button("Stop")

    if start_button:
        # BUGFIX: the original still called minimize_browser() here, but this
        # commit removed that helper from the module, so clicking Start raised
        # NameError. The call is dropped along with the helper.
        cleanup_files()

        audio_thread = threading.Thread(target=record_audio, args=(audio_filename, stop_event))
        mouse_positions = []
        screen_thread = threading.Thread(target=record_screen, args=(video_filename, stop_event, mouse_positions))

        audio_thread.start()
        screen_thread.start()

        st.write("Recording started. Press 'q' or click 'Stop' to stop.")

        # NOTE(review): this loop blocks the Streamlit script run, so the
        # page cannot rerun and 'stop_button' is a constant (False) here —
        # in practice only the 'q' hotkey ends a recording. Kept as-is to
        # preserve behavior; a sleep is added so polling doesn't peg a core.
        while True:
            if keyboard.is_pressed('q') or stop_button:
                stop_event.set()
                break
            time.sleep(0.05)

        audio_thread.join()
        screen_thread.join()

        if not os.path.exists(audio_filename):
            st.error("Audio file was not created!")
            return
        if not os.path.exists(video_filename):
            st.error("Video file was not created!")
            return

        # Analyze the video and audio files together
        result = analyze_with_audio_video(video_filename, audio_filename)
        st.session_state.outputs[st.session_state.current_session].append(result)

    # Text input for additional queries
    additional_query = st.text_input("Type your query here if you're not satisfied with the solution:")

    if st.button("Submit Query") and additional_query:
        # NOTE(review): the query text is only prepended to the stored output;
        # it is not actually sent to the model — analyze_with_audio_video is
        # re-run on the same files. Confirm whether logic.py should accept it.
        result = analyze_with_audio_video(video_filename, audio_filename)
        st.session_state.outputs[st.session_state.current_session].append(f"Query: {additional_query}\n{result}")

    # Display all outputs for the current session
    for output in st.session_state.outputs[st.session_state.current_session]:
        st.markdown(f"""
        <div style="background-color: darkgray; border-radius: 10px; padding: 10px; margin-bottom: 10px; color: black;">
            <i class="fas fa-check-circle"></i> {output}
        </div>
        """, unsafe_allow_html=True)

if __name__ == "__main__":
    main()