Update app.py
app.py
CHANGED
@@ -18,7 +18,8 @@ st.set_page_config(
     page_title="VidText"
 )
 
-def youtube_video_downloader(url):
+def youtube_video_downloader
+(url):
     yt_vid = YouTube(url)
     title = yt_vid.title
     vid_dld = (
@@ -32,11 +33,10 @@ def youtube_video_downloader(url):
 
 
 def audio_extraction(video_file, output_format):
-
-
+    temp_filename = video_file.name
+    video_path = f"{temp_filename}"
     audio = extract_audio(
-        input_path=os.
-fspath(video_file), output_path=f"{str(video_file)[:-4]}.mp3", output_format=f"{output_format}"
+        input_path=os.fspath(video_file), output_path=f"{str(video_path)[:-4]}.mp3", output_format=f"{output_format}"
     )
     return audio
 
@@ -53,7 +53,7 @@ def load_asr_model():
     asr_model = pipeline(task="automatic-speech-recognition", model="openai/whisper-large-v3")
     return asr_model
 
-def
+def transcriber_pass(processed_audio):
     # stime = time.time()
     transcriber_model = load_asr_model()
     text_extract = transcriber_model(processed_audio)
@@ -83,7 +83,7 @@ with youtube_url_tab:
         with st.spinner("Transcribing..."):
             audio = audio_extraction(os.fspath(yt_video), "mp3")
             audio = audio_processing(audio)
-            ytvideo_transcript =
+            ytvideo_transcript = transcriber_pass(audio)
             st.success(f"Transcription successful")
             st.write(ytvideo_transcript)
             # st.write(f'Completed in {run_time}')
@@ -109,7 +109,7 @@ with file_select_tab:
         with st.spinner("Transcribing..."):
             audio = audio_extraction(video_file, "mp3")
             audio = audio_processing(audio)
-            video_transcript =
+            video_transcript = transcriber_pass(audio)
             st.success(f"Transcription successful")
             st.write(video_transcript)
 
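Taken together, the change splits the transcription step out into a named transcriber_pass() helper, derives the output .mp3 path in audio_extraction() from video_file.name instead of the raw video_file argument, and points both the youtube_url_tab and file_select_tab spinner blocks at the new helper. The snippet below is a minimal sketch of that helper chain outside Streamlit, not the app itself: it assumes transformers and ffmpeg are available for audio decoding, uses a hypothetical sample.mp3, and leaves out the audio_processing() step and any caching wrapper on load_asr_model(), since those lines are not part of this diff.

# Minimal sketch of the transcriber_pass() chain, under the assumptions above.
from transformers import pipeline


def load_asr_model():
    # Same pipeline call as app.py line 53; any Streamlit caching decorator is omitted here.
    return pipeline(task="automatic-speech-recognition", model="openai/whisper-large-v3")


def transcriber_pass(processed_audio):
    # Mirrors the new helper: load the ASR model and run it over the processed audio.
    transcriber_model = load_asr_model()
    text_extract = transcriber_model(processed_audio)
    # The hunk ends before the helper's return; passing back the raw pipeline
    # output (a dict with a "text" key) is an assumption for this sketch.
    return text_extract


if __name__ == "__main__":
    # Hypothetical usage with a short local audio file.
    result = transcriber_pass("sample.mp3")
    print(result["text"])

Routing both tabs through transcriber_pass(audio) leaves the two spinner blocks differing only in how the input video is obtained, which is why the two one-line call-site edits above are enough.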