import os

import streamlit as st
import whisper

# Load the Whisper model once; caching it with st.cache_resource avoids
# reloading it on every Streamlit rerun.
# You can choose "tiny", "base", "small", "medium", or "large".
@st.cache_resource
def load_model():
    return whisper.load_model("base")

model = load_model()

def transcribe_audio(audio_file, language):
    # Save the uploaded audio file to a temporary location
    temp_file_path = "temp_audio.wav"
    with open(temp_file_path, "wb") as f:
        f.write(audio_file.getbuffer())
    # Transcribe the audio
    result = model.transcribe(temp_file_path, language=language)
    os.remove(temp_file_path)  # Clean up the temporary file
    return result['text']

# Streamlit app layout
st.title("Audio Transcription App")
st.write("Upload an audio file to transcribe it using Whisper.")

# File uploader for audio files
audio_file = st.file_uploader("Choose an audio file", type=["wav", "mp3", "m4a"])

# Language selection
language = st.selectbox("Select Language", options=["en", "es", "fr", "de", "it", "zh"])

if audio_file is not None:
    st.audio(audio_file, format='audio/wav')
    # Transcribe the audio when the button is clicked
    if st.button("Transcribe"):
        with st.spinner("Transcribing..."):
            transcription = transcribe_audio(audio_file, language)
        st.success("Transcription complete!")
        st.text_area("Transcription:", transcription, height=300)