duelmas committed on
Commit
be56bb2
·
1 Parent(s): 29a7bdf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -4
app.py CHANGED
@@ -1,7 +1,40 @@
1
  import gradio as gr
2
 
3
- def greet(name):
4
- return "Hello " + name + "!!"
 
 
 
 
 
 
 
 
 
5
 
6
- iface = gr.Interface(fn=greet, inputs="text", outputs="text")
7
- iface.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import cv2
import gradio as gr
import numpy as np
from scipy.io import wavfile
4
+ # Load the video file
5
+ cap = cv2.VideoCapture('video_file.mp4')
6
+ # Check if the video has audio
7
+ if cap.get(cv2.CAP_PROP_AUDIO_STATUS):
8
+ # Read the video frames
9
+ while True:
10
+ ret, frame = cap.read()
11
+ # Convert the frame to grayscale and apply thresholding
12
+ gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
13
+ _, thresh = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY)
14
 
15
+ # Display the resulting image
16
+ imshow("Thresholded Image", thresh)
17
+ # Check if the user presses the 'q' key
18
+ if cv2.waitKey(1) & 0xFF == ord('q'):
19
+ # Break out of the loop
20
+ break
21
+ else:
22
+ print("No Audio Found")
23
+
24
+ # Release the video capture
25
+ cap.release()
26
+
27
+ # Load the audio file
28
+ audio, sr = librosa.load('audio_file.wav')
29
+
30
+ # Generate a new audio file with the same duration as the video
31
+ new_audio = np.zeros((len(frame), sr))
32
+ for i in range(len(frame)):
33
+ # Calculate the time stamp for each pixel in the frame
34
+ t = (i * framerate) + start_time
35
+
36
+ # Add the corresponding value from the audio signal to the new audio array
37
+ new_audio[i] = audio[int(t)]
38
+
39
+ # Save the new audio file
40
+ librosa.save('output_file.wav', new_audio, sr)