whyumesh committed on
Commit
8dd4c1b
·
verified ·
1 Parent(s): 2819047

Delete app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -171
app.py DELETED
@@ -1,171 +0,0 @@
1
- import streamlit as st
2
- import os
3
- import threading
4
- import pyautogui
5
- import numpy as np
6
- import cv2
7
- import pyaudio
8
- import wave
9
- import keyboard
10
- from logic import analyze_with_audio_video
11
- from dotenv import load_dotenv
12
-
13
# Load environment variables (API keys, etc.) from a local .env file.
load_dotenv()

# Audio capture settings (PyAudio): 16-bit samples, mono, 44.1 kHz,
# read in buffers of 1024 frames.
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 44100
CHUNK = 1024

# File paths for the temporary recording artifacts of one session.
audio_filename = "output.wav"
video_filename = "output.mp4"

# Initialize Streamlit page chrome.
st.set_page_config(page_title="T.A.P.A.S", page_icon=":camera:", layout="wide")
st.title("T.A.P.A.S - Technical Assistance Platform for Advanced Solution")

# Initialize session state: `outputs` maps session name -> list of analysis
# results; the isinstance guard repairs state left over from older app versions.
if 'outputs' not in st.session_state or not isinstance(st.session_state.outputs, dict):
    st.session_state.outputs = {}

# Default session shown before the user creates/selects one.
if 'current_session' not in st.session_state:
    st.session_state.current_session = 'Session 1'
35
-
36
def cleanup_files():
    """Remove leftover recording files so a new session starts clean."""
    for stale_path in (audio_filename, video_filename):
        if os.path.exists(stale_path):
            os.remove(stale_path)
            print(f"Deleted old file: {stale_path}")
43
-
44
def record_audio(filename, stop_event):
    """Capture microphone audio until *stop_event* is set, then write a WAV file.

    Args:
        filename: Path of the .wav file to create.
        stop_event: threading.Event polled once per CHUNK; set it to stop.
    """
    audio = pyaudio.PyAudio()
    stream = audio.open(format=FORMAT, channels=CHANNELS,
                        rate=RATE, input=True,
                        frames_per_buffer=CHUNK)
    frames = []

    try:
        while not stop_event.is_set():
            frames.append(stream.read(CHUNK))
    finally:
        # FIX: the original never called stream.close() and had no finally,
        # so an exception in stream.read() leaked the PortAudio stream.
        stream.stop_stream()
        stream.close()
        audio.terminate()

    # get_sample_size() is a pure format lookup, safe after terminate().
    with wave.open(filename, 'wb') as wf:
        wf.setnchannels(CHANNELS)
        wf.setsampwidth(audio.get_sample_size(FORMAT))
        wf.setframerate(RATE)
        wf.writeframes(b''.join(frames))
63
-
64
def record_screen(filename, stop_event, mouse_positions, fps=8):
    """Record the full screen to an MP4 until *stop_event* is set.

    Args:
        filename: Path of the .mp4 file to create.
        stop_event: threading.Event polled once per frame; set it to stop.
        mouse_positions: List mutated in place with one (x, y) tuple per frame.
        fps: Nominal frame rate written into the container (default 8, matching
            the previous hard-coded value).

    NOTE(review): frames are grabbed as fast as screenshot() allows, not paced
    to *fps*, so playback speed may not match wall-clock time — confirm intent.
    """
    screen_size = pyautogui.size()
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    out = cv2.VideoWriter(filename, fourcc, fps, (screen_size.width, screen_size.height))

    try:
        while not stop_event.is_set():
            frame = np.array(pyautogui.screenshot())
            # Screenshots are RGB; swap channels for OpenCV's BGR writer.
            # (COLOR_RGB2BGR is the intended — and numerically identical —
            # constant that the original spelled as COLOR_BGR2RGB.)
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            # Draw the mouse cursor, which screenshots do not include.
            x, y = pyautogui.position()
            cv2.circle(frame, (x, y), 10, (0, 255, 0), -1)
            out.write(frame)
            mouse_positions.append((x, y))  # Track mouse positions per frame
    finally:
        # Always release the writer so the MP4 is finalized even on error.
        out.release()
80
-
81
- # def minimize_browser():
82
- # browser_window = None
83
- # for window in gw.getAllTitles():
84
- # if "chrome" in window.lower() or "firefox" in window.lower() or "edge" in window.lower():
85
- # browser_window = window
86
- # break
87
-
88
- # if browser_window:
89
- # app = Application().connect(title_re=browser_window)
90
- # app.window(title_re=browser_window).minimize()
91
- # else:
92
- # print("Browser window not found.")
93
-
94
def main():
    """Streamlit entry point: manage sessions, record screen + audio, analyze.

    Side effects: reads/writes st.session_state, spawns recorder threads, and
    creates/deletes output.wav and output.mp4 in the working directory.
    """
    stop_event = threading.Event()

    # Sidebar: create a new named session or switch to an existing one.
    with st.sidebar:
        st.title("Sessions")
        session_name = st.text_input("New Session Name", "")
        if st.button("Start New Session") and session_name:
            st.session_state.current_session = session_name
            st.session_state.outputs[session_name] = []
        session_names = list(st.session_state.outputs.keys())
        if session_names:
            session_selection = st.selectbox("Choose a session", session_names)
            if session_selection:
                st.session_state.current_session = session_selection

    st.header(f"Current Session: {st.session_state.current_session}")

    # Ensure the current session has an output list (e.g. default 'Session 1').
    if st.session_state.current_session not in st.session_state.outputs:
        st.session_state.outputs[st.session_state.current_session] = []

    col1, col2 = st.columns(2)
    with col1:
        start_button = st.button("Start")
    with col2:
        stop_button = st.button("Stop")

    if start_button:
        # BUG FIX: the original called minimize_browser() here, but that
        # function's definition is commented out above, so clicking Start
        # raised NameError. The call is removed until it is reinstated.
        cleanup_files()

        audio_thread = threading.Thread(target=record_audio, args=(audio_filename, stop_event))
        mouse_positions = []
        screen_thread = threading.Thread(target=record_screen, args=(video_filename, stop_event, mouse_positions))

        audio_thread.start()
        screen_thread.start()

        st.write("Recording started. Press 'q' or click 'Stop' to stop.")

        # NOTE(review): stop_button is a constant captured at the start of this
        # Streamlit rerun — clicking Stop later does not update it, so in
        # practice only the 'q' key ends this loop. Confirm desired UX.
        while True:
            if keyboard.is_pressed('q') or stop_button:
                stop_event.set()
                break

        audio_thread.join()
        screen_thread.join()

        # Bail out with a visible error if either recorder produced nothing.
        if not os.path.exists(audio_filename):
            st.error("Audio file was not created!")
            return
        if not os.path.exists(video_filename):
            st.error("Video file was not created!")
            return

        # Analyze the video and audio files together
        result = analyze_with_audio_video(video_filename, audio_filename)
        st.session_state.outputs[st.session_state.current_session].append(result)

    # Text input for additional queries
    additional_query = st.text_input("Type your query here if you're not satisfied with the solution:")

    if st.button("Submit Query") and additional_query:
        # Process the additional query (this would involve sending it to the model)
        result = analyze_with_audio_video(video_filename, audio_filename)
        st.session_state.outputs[st.session_state.current_session].append(f"Query: {additional_query}\n{result}")

    # Display all outputs for the current session
    for output in st.session_state.outputs[st.session_state.current_session]:
        st.markdown(f"""
        <div style="background-color: darkgray; border-radius: 10px; padding: 10px; margin-bottom: 10px; color: black;">
            <i class="fas fa-check-circle"></i> {output}
        </div>
        """, unsafe_allow_html=True)

if __name__ == "__main__":
    main()