tensorkelechi committed · verified
Commit c76caf9 · 1 Parent(s): 48266b9

Update app.py

Files changed (1): app.py (+6, -7)
app.py CHANGED
@@ -33,6 +33,7 @@ def youtube_video_downloader(url):
 
 def audio_extraction(video_file, output_format):
     temp_filename = video_file.name
+
     video_path = f"{temp_filename}"
     audio = extract_audio(
         input_path=os.fspath(video_file), output_path=f"{str(video_path)[:-4]}.mp3", output_format=f"{output_format}"
@@ -51,13 +52,11 @@ def audio_processing(mp3_audio):
 def load_asr_model():
     asr_model = pipeline(task="automatic-speech-recognition", model="openai/whisper-large-v3")
     return asr_model
-
+
+transcriber_model = load_asr_model()
+
 def transcriber_pass(processed_audio):
-    # stime = time.time()
-    transcriber_model = load_asr_model()
     text_extract = transcriber_model(processed_audio)
-    # et = time.time()
-    # run_time = et - stime
     return text_extract['text']
 
 def generate_ai_summary(transcript):
@@ -80,7 +79,7 @@ with youtube_url_tab:
     if url:
         if st.button("Transcribe", key="yturl"):
             with st.spinner("Transcribing..."):
-                audio = audio_extraction(os.fspath(yt_video), "mp3")
+                audio = audio_extraction(yt_video, "mp3")
                 audio = audio_processing(audio)
                 ytvideo_transcript = transcriber_pass(audio)
                 st.success(f"Transcription successful")
@@ -106,7 +105,7 @@ with file_select_tab:
     if video_file:
         if st.button("Transcribe", key="vidfile"):
             with st.spinner("Transcribing..."):
-                audio = audio_extraction(video_file, "mp3")
+                audio = audio_extraction(os.fspath(video_file), "mp3")
                 audio = audio_processing(audio)
                 video_transcript = transcriber_pass(audio)
                 st.success(f"Transcription successful")
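The main functional change is hoisting the Whisper pipeline out of transcriber_pass, so the model is loaded once when app.py is executed instead of on every transcription call. A minimal standalone sketch of the resulting pattern, assuming the transformers pipeline API and the same model id used in app.py (all other app wiring omitted):

# Sketch: build the ASR pipeline once at module level, then reuse it per call.
from transformers import pipeline

def load_asr_model():
    # Initialising whisper-large-v3 downloads and loads a large model,
    # so it should only happen once per process.
    return pipeline(task="automatic-speech-recognition", model="openai/whisper-large-v3")

# Module-level singleton, as introduced by this commit.
transcriber_model = load_asr_model()

def transcriber_pass(processed_audio):
    # processed_audio: a path to an audio file (or a raw audio array)
    # accepted by the transformers ASR pipeline.
    text_extract = transcriber_model(processed_audio)
    return text_extract["text"]

Note that Streamlit re-executes the script on every interaction, so a module-level load alone still repeats across reruns; wrapping the loader with st.cache_resource is the usual way to make it truly one-time. The sketch mirrors the commit as written, not that cached variant.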
 
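The two Streamlit call sites also swap where os.fspath is applied: the YouTube tab now passes the downloaded yt_video object straight to audio_extraction (which already calls os.fspath internally), while the file-upload tab converts video_file with os.fspath before the call. For reference, os.fspath only accepts str, bytes, or objects implementing __fspath__; a small illustration with a hypothetical DownloadedVideo class (not from app.py):

import os
from pathlib import Path

class DownloadedVideo:
    # Illustrative stand-in for an object that exposes a filesystem path.
    def __init__(self, path):
        self._path = path

    def __fspath__(self):
        # Implementing __fspath__ makes the object os.PathLike,
        # so os.fspath(), open(), etc. accept it directly.
        return self._path

print(os.fspath("clip.mp4"))                   # plain str passes through unchanged
print(os.fspath(Path("clip.mp4")))             # pathlib.Path -> "clip.mp4"
print(os.fspath(DownloadedVideo("clip.mp4")))  # custom PathLike -> "clip.mp4"
# A plain file-like object without __fspath__ raises TypeError here.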