MusIre committed on
Commit
527e644
·
1 Parent(s): ce7e2d6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -20
app.py CHANGED
@@ -3,30 +3,42 @@ import gradio as gr # Add this import statement
# Bootstrap third-party dependencies at startup (Hugging Face Spaces pattern).
# Use `sys.executable -m pip` so packages are installed into the interpreter
# that is actually running this script (a bare "pip" may belong to another
# Python), and pass check=True so a failed install aborts immediately instead
# of surfacing later as a confusing ImportError.
import subprocess
import sys

def _pip_install(*args: str) -> None:
    """Run `pip install` with the current interpreter; raise on failure."""
    subprocess.run([sys.executable, "-m", "pip", "install", *args], check=True)

_pip_install("--upgrade", "pip")
_pip_install("--upgrade", "gradio")
_pip_install("transformers")
_pip_install("torch", "torchvision", "torchaudio",
             "-f", "https://download.pytorch.org/whl/torch_stable.html")
import gradio as gr
import numpy as np
from transformers import WhisperProcessor, WhisperForConditionalGeneration

# One-time model setup: load the small multilingual Whisper checkpoint and
# its processor at import time so every request reuses the same instances.
processor = WhisperProcessor.from_pretrained("openai/whisper-small")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")

# Decoder prompt that pins the output language to Italian and the task to
# transcription; intended to be handed to `model.generate()`.
forced_decoder_ids = processor.get_decoder_prompt_ids(
    language="italian", task="transcribe"
)
 
def transcribe_audio(audio):
    """Transcribe a recorded clip to Italian text with Whisper.

    Parameters
    ----------
    audio : tuple[int, np.ndarray] | None
        Gradio's default ``numpy`` audio value: ``(sample_rate, samples)``.
        ``samples`` may be int PCM or float, mono or multi-channel.

    Returns
    -------
    str
        The transcription, or "" when no audio was provided.
    """
    if audio is None:
        return ""

    # gr.Audio delivers a (sample_rate, ndarray) tuple, NOT a bare array —
    # the original hard-coded 16000 and passed the tuple straight through.
    sample_rate, samples = audio
    samples = samples.astype(np.float32)

    # Mix multi-channel recordings down to mono.
    if samples.ndim > 1:
        samples = samples.mean(axis=1)

    # Normalize 16-bit integer PCM into [-1, 1] for the feature extractor.
    peak = np.abs(samples).max() if samples.size else 0.0
    if peak > 1.0:
        samples = samples / 32768.0

    # Whisper expects 16 kHz input; resample with linear interpolation so no
    # extra dependency is needed.
    target_rate = 16000
    if sample_rate != target_rate and samples.size:
        target_len = int(len(samples) * target_rate / sample_rate)
        samples = np.interp(
            np.linspace(0, len(samples), num=target_len, endpoint=False),
            np.arange(len(samples)),
            samples,
        ).astype(np.float32)

    input_features = processor(
        samples, sampling_rate=target_rate, return_tensors="pt"
    ).input_features
    # Pass forced_decoder_ids so the Italian/transcribe prompt actually takes
    # effect — the original computed it at module level but never used it.
    predicted_ids = model.generate(
        input_features, forced_decoder_ids=forced_decoder_ids
    )
    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
    return transcription[0]

# Build and launch the Gradio UI.
audio_input = gr.Audio()
gr.Interface(fn=transcribe_audio, inputs=audio_input, outputs="text").launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
# Bootstrap third-party dependencies at startup (Hugging Face Spaces pattern).
# NOTE: the previous `!pip install gradio torch torchaudio` line was IPython
# magic syntax — a SyntaxError in a plain .py file — and is replaced with a
# subprocess call. `sys.executable -m pip` targets the running interpreter,
# and check=True makes a failed install abort instead of passing silently.
import subprocess
import sys

def _pip_install(*args: str) -> None:
    """Run `pip install` with the current interpreter; raise on failure."""
    subprocess.run([sys.executable, "-m", "pip", "install", *args], check=True)

_pip_install("--upgrade", "pip")
_pip_install("--upgrade", "gradio")
_pip_install("transformers")
_pip_install("torch", "torchvision", "torchaudio",
             "-f", "https://download.pytorch.org/whl/torch_stable.html")
import gradio as gr
import torchaudio
from transformers import pipeline

# Load the Whispy Italian Whisper ASR model once at startup.
# "whisper-italian" is not a valid pipeline *task* and raises KeyError; the
# task is "automatic-speech-recognition" and the checkpoint is selected via
# `model=`. (Model id assumed from the surrounding comments — verify the
# exact Hub id "whispy/whisper_italian".)
whisper_italian_asr = pipeline(
    "automatic-speech-recognition",
    model="whispy/whisper_italian",
)
 
 
# torch is needed directly for tensor conversion below (torchaudio depends on
# it, but the name itself was never imported in this script).
import torch

def transcribe_audio(audio):
    """Transcribe a recorded clip to Italian text via the ASR pipeline.

    Parameters
    ----------
    audio : tuple[int, np.ndarray] | None
        Gradio's default audio value: ``(sample_rate, samples)``. The
        original called ``audio.squeeze().numpy()`` as if it were a tensor,
        which crashes on this tuple.

    Returns
    -------
    str
        The transcribed text, or "" when no audio was provided.
    """
    if audio is None:
        return ""

    sample_rate, samples = audio
    # Build a (channels, frames) float tensor; gradio gives (frames, channels)
    # for multi-channel input. No wav round trip through disk is needed.
    waveform = torch.as_tensor(samples, dtype=torch.float32)
    waveform = waveform.unsqueeze(0) if waveform.ndim == 1 else waveform.T

    # Mix down to mono and normalize integer PCM into [-1, 1].
    waveform = waveform.mean(dim=0, keepdim=True)
    if waveform.numel() and waveform.abs().max() > 1.0:
        waveform = waveform / 32768.0

    # Whisper checkpoints expect 16 kHz audio; resample here instead of via
    # the removed (and invalid) gr.Audio(preprocess=...) argument.
    if sample_rate != 16000:
        waveform = torchaudio.functional.resample(waveform, sample_rate, 16000)

    # The ASR pipeline accepts a 1-D numpy array and returns {"text": ...} —
    # not [{"transcription": ...}] as the original assumed.
    result = whisper_italian_asr(waveform.squeeze(0).numpy())
    return result["text"]

# Create the Gradio interface. gr.Audio has no `preprocess` parameter
# (passing one raises TypeError), and `interpretation` is not supported for
# audio interfaces in current Gradio, so both are dropped.
audio_input = gr.Audio()

iface = gr.Interface(
    fn=transcribe_audio,
    inputs=audio_input,
    outputs="text",
    live=True,
)

# Launch the Gradio app with a public share link.
iface.launch(share=True)