import os

# Install Whisper at runtime; a common pattern for hosted demos such as Hugging Face Spaces.
os.system("pip install git+https://github.com/openai/whisper.git")
import gradio as gr
import whisper
# Load the default model once at startup and track which size is currently loaded.
model = whisper.load_model("small")
current_size = "small"
def change_model(size):
    """Swap the loaded Whisper model when the user picks a different size."""
    # Without the global declaration, the assignments below would only bind
    # local names and the radio button would have no effect.
    global model, current_size
    if size == current_size:
        return
    model = whisper.load_model(size)
    current_size = size
def inference(audio):
    """Transcribe the recorded audio file with the currently loaded model."""
    audio = whisper.load_audio(audio)
    audio = whisper.pad_or_trim(audio)

    # Compute the log-Mel spectrogram and detect the spoken language.
    mel = whisper.log_mel_spectrogram(audio).to(model.device)
    _, probs = model.detect_language(mel)
    print(f"Detected language: {max(probs, key=probs.get)}")

    # Decode in fp32 so the demo also runs on CPU-only hardware.
    options = whisper.DecodingOptions(fp16=False)
    result = whisper.decode(model, mel, options)
    print(result.text)
    return result.text
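
# A minimal sketch, not wired into the UI, of how the same pipeline could produce an
# English translation instead of a transcript, since Whisper is also a speech-translation
# model. The helper name `inference_translate` is ours, not part of the original app.
def inference_translate(audio):
    audio = whisper.load_audio(audio)
    audio = whisper.pad_or_trim(audio)
    mel = whisper.log_mel_spectrogram(audio).to(model.device)
    # task="translate" asks Whisper to render speech in any language as English text.
    options = whisper.DecodingOptions(task="translate", fp16=False)
    return whisper.decode(model, mel, options).text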
title = "Whisper by OpenAI, deployed by jthteo"
description = "Whisper is a general-purpose speech recognition model. It has been trained on a large dataset of diverse audio and is also a multi-task model that can perform multilingual speech recognition as well as speech translation and language identification."
css = """
.gradio-container {
    font-family: 'IBM Plex Sans', sans-serif;
}
.gr-button {
    color: white;
    border-color: black;
    background: black;
}
input[type='range'] {
    accent-color: black;
}
.dark input[type='range'] {
    accent-color: #dfdfdf;
}
.container {
    max-width: 800px;
    margin: auto;
    padding-top: 1.5rem;
}
.details:hover {
    text-decoration: underline;
}
.gr-button {
    white-space: nowrap;
}
.gr-button:focus {
    border-color: rgb(147 197 253 / var(--tw-border-opacity));
    outline: none;
    box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
    --tw-border-opacity: 1;
    --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
    --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px + var(--tw-ring-offset-width)) var(--tw-ring-color);
    --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
    --tw-ring-opacity: .5;
}
.footer {
    margin-bottom: 45px;
    margin-top: 35px;
    text-align: center;
    border-bottom: 1px solid #e5e5e5;
}
.footer>p {
    font-size: .8rem;
    display: inline-block;
    padding: 0 10px;
    transform: translateY(10px);
    background: white;
}
.dark .footer {
    border-color: #303030;
}
.dark .footer>p {
    background: #0b0f19;
}
.prompt h4 {
    margin: 1.25em 0 .25em 0;
    font-weight: bold;
    font-size: 115%;
}
"""
block = gr.Blocks(css=css, title=title)
with block:
    gr.HTML(
        """
        <div style="text-align: center; max-width: 800px; margin: 0 auto;">
          <div
            style="
              display: inline-flex;
              align-items: center;
              gap: 0.8rem;
              font-size: 1.75rem;
            "
          >
            <svg
              width="0.65em"
              height="0.65em"
              viewBox="0 0 115 115"
              fill="none"
              xmlns="http://www.w3.org/2000/svg"
            >
              <circle cx="62" cy="45" r="36" stroke="blue" stroke-width="4" fill="blue" />
              <polygon points="40, 30, 84, 30, 62, 69" style="fill:red;stroke:red;stroke-width:5;" />
            </svg>
            <h1 style="font-weight: 900; margin-bottom: 7px; color:red;">
              Whisper
            </h1>
            <svg
              width="0.65em"
              height="0.65em"
              viewBox="0 0 115 115"
              fill="none"
              xmlns="http://www.w3.org/2000/svg"
            >
              <circle cx="62" cy="45" r="36" stroke="blue" stroke-width="4" fill="blue" />
              <polygon points="40, 30, 84, 30, 62, 69" style="fill:red;stroke:red;stroke-width:5;" />
            </svg>
          </div>
          <p style="margin-bottom: 10px; font-size: 94%">
            Whisper is a general-purpose speech recognition model. It has been trained on a large dataset
            of diverse audio and is also a multi-task model that can perform multilingual speech recognition
            as well as speech translation and language identification.
          </p>
          <p>This is a fork by JTHTEO.</p>
          <p>The sizes of the different Whisper models can be found in this <a href="https://github.com/openai/whisper/blob/main/model-card.md">Model Card</a>.</p>
        </div>
        """
    )
    with gr.Group():
        with gr.Box():
            wmodel = gr.Radio(
                choices=["tiny", "base", "small", "medium", "large", "small.en", "medium.en"],
                label="Model used",
                value="small",
            )
            with gr.Row().style(mobile_collapse=False, equal_height=True):
                audio = gr.Audio(
                    label="Input Audio",
                    show_label=False,
                    source="microphone",
                    type="filepath",
                )
                btn = gr.Button("Transcribe")
            text = gr.Textbox(show_label=False)

    ### Events ###
    wmodel.change(change_model, inputs=[wmodel], outputs=[])
    btn.click(inference, inputs=[audio], outputs=[text], api_name="audio_whisper")
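
    # A hedged usage sketch, not executed here: with the app running, the endpoint
    # named by api_name above could be called from another process via gradio_client
    # (the URL below is a placeholder for wherever this app is actually served):
    #
    #   from gradio_client import Client
    #   client = Client("http://127.0.0.1:7860")
    #   transcript = client.predict("sample.wav", api_name="/audio_whisper")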
    ### Footer ###
    gr.HTML('''
        <div class="footer">
            <p>Model by <a href="https://github.com/openai/whisper" style="text-decoration: underline;" target="_blank">OpenAI</a> - Gradio demo by 🤗 Hugging Face; this is a fork by JTHTEO.</p>
        </div>
    ''')
block.launch()