import gradio as gr
import librosa
import numpy as np
from transformers import WhisperProcessor, WhisperForConditionalGeneration

processor = WhisperProcessor.from_pretrained("Neurai/NeuraSpeech_WhisperBase")
model = WhisperForConditionalGeneration.from_pretrained("Neurai/NeuraSpeech_WhisperBase")
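# Pin decoding to Persian ("fa") transcription; passed to model.generate below.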
forced_decoder_ids = processor.get_decoder_prompt_ids(language="fa", task="transcribe")


def transcribe(audio):
    if audio is None:
        return "No audio input provided. Please record or upload an audio file."

    sample_rate, array = audio
    # gr.Audio yields PCM shaped (samples,) or (samples, channels); scale
    # integer samples to [-1, 1] floats and put channels first for librosa.
    if np.issubdtype(array.dtype, np.integer):
        array = array.astype(np.float32) / np.iinfo(array.dtype).max
    array = librosa.to_mono(array.T if array.ndim > 1 else array)
    target_sr = 16000  # Whisper's feature extractor expects 16 kHz audio
    array = librosa.resample(array, orig_sr=sample_rate, target_sr=target_sr)
    input_features = processor(array, sampling_rate=target_sr, return_tensors="pt").input_features

    # Generate token ids, forcing Persian transcription via the prompt ids.
    predicted_ids = model.generate(input_features, forced_decoder_ids=forced_decoder_ids)
    # Decode token ids to text; batch_decode returns a list, so take the first entry.
    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
    return transcription
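
# A quick local smoke test that bypasses the UI (assumes a hypothetical
# "sample.wav" next to this script; uncomment to run):
# waveform, native_sr = librosa.load("sample.wav", sr=None)
# print(transcribe((native_sr, waveform)))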


# Alternative input: a microphone widget with custom waveform styling.
# input_audio = gr.Audio(
#     sources=["microphone"],
#     waveform_options=gr.WaveformOptions(
#         waveform_color="#01C6FF",
#         waveform_progress_color="#0066B4",
#         skip_length=2,
#         show_controls=True,
#     ),
# )
# demo = gr.Interface(
#     fn=transcribe,
#     inputs=input_audio,
#     outputs="text",
# )
demo = gr.Interface(
    fn=transcribe,
    inputs=[gr.Audio(sources=["microphone"])],
    outputs="text"
)

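# demo.launch() serves the app locally; launch(share=True) would additionally
# create a temporary public link (a standard Gradio option).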
if __name__ == "__main__":
    demo.launch()