Zeimoto committed on
Commit
eaaa4aa
1 Parent(s): 5cc4f06
Files changed (1) hide show
  1. app.py +6 -2
app.py CHANGED
@@ -1,6 +1,9 @@
1
  import streamlit as st
2
  from st_audiorec import st_audiorec
3
 
 
 
 
4
  from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
5
  from datasets import load_dataset
6
  import torch
@@ -12,9 +15,10 @@ audio_transcription: str = None
12
  def main ():
13
 
14
  init_model()
 
15
  # x = st.slider('Select a value')
16
  # st.write(x, 'squared is', x * x)
17
-
18
  wav_audio_data = st_audiorec()
19
 
20
  if wav_audio_data is not None:
@@ -23,7 +27,7 @@ def main ():
23
  dataset = load_dataset("distil-whisper/librispeech_long", "clean", split="validation")
24
  sample = dataset[0]["audio"]
25
  st.write('Sample:', sample)
26
-
27
  async def init_model ():
28
  device = "cuda:0" if torch.cuda.is_available() else "cpu"
29
  torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
 
1
  import streamlit as st
2
  from st_audiorec import st_audiorec
3
 
4
+ import librosa
5
+ import soundfile
6
+
7
  from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
8
  from datasets import load_dataset
9
  import torch
 
15
  def main ():
16
 
17
  init_model()
18
+ print('Init model successful')
19
  # x = st.slider('Select a value')
20
  # st.write(x, 'squared is', x * x)
21
+ """
22
  wav_audio_data = st_audiorec()
23
 
24
  if wav_audio_data is not None:
 
27
  dataset = load_dataset("distil-whisper/librispeech_long", "clean", split="validation")
28
  sample = dataset[0]["audio"]
29
  st.write('Sample:', sample)
30
+ """
31
  async def init_model ():
32
  device = "cuda:0" if torch.cuda.is_available() else "cpu"
33
  torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32