# MinuteBotSpaces / app.py
import streamlit as st
import whisper
from tempfile import NamedTemporaryFile
# Note: Whisper decodes audio by calling the ffmpeg binary, which must be installed
# on the Space; the ffmpeg-python package itself is not used in this script.
st.title("MinuteBot App")
# upload audio file with streamlit
audio_file = st.file_uploader("Unggah Meeting Audio", type=["mp3", "wav", "m4a"])
# model = whisper.load_model("base") # loading the base model
st.text("MinuteBot Model telah dimuat:")
def load_whisper_model():
return model
if st.sidebar.button("Transkripsikan Audio"):
if audio_file is not None:
with NamedTemporaryFile() as temp:
temp.write(audio_file.getvalue())
temp.seek(0)
model = whisper.load_model("large")
result = model.transcribe(temp.name)
st.write(result["text"])
st.sidebar.header("Putar Berkas Audio")
st.sidebar.audio(audio_file)
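# A minimal sketch (not part of the original app): Whisper's transcribe() also
# accepts decoding options as keyword arguments, e.g. a language hint for
# Indonesian audio and fp16=False when the Space runs on CPU-only hardware.
#
#     result = model.transcribe(temp.name, language="id", fp16=False)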
# import streamlit as st
# from tempfile import NamedTemporaryFile
# import ffmpeg
# from transformers import Wav2Vec2ForCTC, Wav2Vec2Tokenizer
# import librosa
# import torch
# # HF_TOKEN = os.environ.get("HUGGINGFACEHUB_API_TOKEN")
# st.title("TemplarX-Medium-Indonesian Transcription App")
# st.text("Model Whisper (TemplarX-medium-Indonesian) telah dimuat:")
# def load_whisper_model():
# model_name = "jonnatakusuma/TemplarX-medium-Indonesian"
# tokenizer = Wav2Vec2Tokenizer.from_pretrained(model_name)
# model = Wav2Vec2ForCTC.from_pretrained(model_name, use_auth_token=True)
# return tokenizer, model
# audio_file = st.file_uploader("Unggah Meeting Audio", type=["mp3", "wav", "m4a"])
# if st.sidebar.button("Transkripsikan Audio"):
#     if audio_file is not None:
#         with NamedTemporaryFile() as temp:
#             temp.write(audio_file.read())
#             temp.seek(0)
#             tokenizer, model = load_whisper_model()
#             # Read the audio file and transcribe it with the fine-tuned model
#             # (greedy CTC decoding: logits -> argmax -> batch_decode)
#             audio_path = temp.name
#             audio_input, _ = librosa.load(audio_path, sr=16000)
#             input_values = tokenizer(audio_input, return_tensors="pt").input_values
#             with torch.no_grad():
#                 logits = model(input_values).logits
#             predicted_ids = torch.argmax(logits, dim=-1)
#             transcription = tokenizer.batch_decode(predicted_ids)[0]
#             st.write(transcription)
# st.sidebar.header("Putar Berkas Audio")
# st.sidebar.audio(audio_file, format='audio/wav')
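# Rough dependency sketch for this Space (an assumption; requirements.txt is not
# part of this file): the active app needs streamlit and openai-whisper, plus the
# ffmpeg binary available on the system; the commented-out Wav2Vec2 variant above
# would additionally need transformers, librosa, and torch.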