Commit 3564020: Duplicate from dmatekenya/Chichewa-Automatic-Transcription

Co-authored-by: Dunstan Matekenya <dmatekenya@users.noreply.huggingface.co>
- .gitattributes +35 -0
- README.md +14 -0
- app.py +60 -0
- requirements.txt +3 -0
.gitattributes
ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,14 @@
+---
+title: Chichewa Automatic Transcription
+emoji: ⚡
+colorFrom: pink
+colorTo: pink
+sdk: gradio
+sdk_version: 3.35.2
+app_file: app.py
+pinned: false
+license: openrail
+duplicated_from: dmatekenya/Chichewa-Automatic-Transcription
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,60 @@
+from transformers import pipeline
+from transformers import WhisperForConditionalGeneration, WhisperProcessor, WhisperFeatureExtractor
+import gradio as gr
+import librosa
+
+# Prepare model for prediction
+MODEL_SPECS_ID = "dmatekenya/whisper-small_finetuned_sh_chich"
+MODEL_SPECS_BASE_ID = "openai/whisper-small"
+MODEL_SPECS_BASE_LAN_SW = "swahili"
+MODEL_SPECS_BASE_LAN_SH = "shona"
+FEATURE_EXTRACTOR = WhisperFeatureExtractor.from_pretrained(MODEL_SPECS_ID)
+PROCESSOR_SH = WhisperProcessor.from_pretrained(MODEL_SPECS_BASE_ID,
+                                                language=MODEL_SPECS_BASE_LAN_SH, task="transcribe")
+MODEL = WhisperForConditionalGeneration.from_pretrained(MODEL_SPECS_ID)
+
+
+def transcribe(audio_file):
+    y, sr = librosa.load(audio_file, sr=16000)
+
+    input_features = PROCESSOR_SH(y, return_tensors="pt", sampling_rate=sr).input_features
+    generated_ids = MODEL.generate(inputs=input_features)
+
+    transcription = PROCESSOR_SH.batch_decode(generated_ids, skip_special_tokens=True)[0]
+
+    return transcription
+
+
+def transcribe_audio(mic=None, file=None):
+    if mic is not None:
+        audio = mic
+    elif file is not None:
+        audio = file
+    else:
+        return "You must either provide a mic recording or a file"
+    transcription = transcribe(audio_file=audio)
+    return transcription
+
+title = "Transcribe Chichewa Audio"
+description = """
+<img src="https://i.ibb.co/5nQdGSs/logo.png">
+IN THIS DEMO, TEST THE FIRST AUTOMATED SPEECH RECOGNITION (ASR) MODEL FOR CHICHEWA BY TRANSCRIBING YOUR CHICHEWA VOICE NOTES.
+FOR AUDIO FILES, PLEASE UPLOAD SHORT VOICE NOTES ONLY (NO LONGER THAN 30 SEC).
+"""
+
+article = "Read more about the [ChichewaSpeech2Text](https://dmatekenya.github.io/Chichewa-Speech2Text/README.html) project \
+    and make sure to sign-up for our first [voice note donation event](https://forms.gle/fHLESutofVvb2YFM9) on July 22. \
+    You stand a chance to win Airtel or TNM units if you choose to participate in the raffle after the event"
+
+gr.Interface(
+    fn=transcribe_audio,
+    theme='grass',
+    title=title,
+    description=description,
+    article=article,
+    inputs=[
+        gr.Audio(source="microphone", type="filepath", optional=True),
+        gr.Audio(source="upload", type="filepath", optional=True),
+    ],
+    outputs="text",
+).launch()
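For reference (not part of the commit), the transcription path in app.py can be sanity-checked locally without the Gradio UI. This is a minimal sketch assuming the dependencies from requirements.txt are installed and a short (under 30 s) recording exists at the hypothetical path `sample_chichewa.wav`.

```python
# Local sanity check of the same Whisper call used in app.py.
# "sample_chichewa.wav" is a hypothetical example path, not a file in this repo.
import librosa
from transformers import WhisperForConditionalGeneration, WhisperProcessor

processor = WhisperProcessor.from_pretrained(
    "openai/whisper-small", language="shona", task="transcribe"
)
model = WhisperForConditionalGeneration.from_pretrained(
    "dmatekenya/whisper-small_finetuned_sh_chich"
)

# librosa resamples the audio to the 16 kHz rate Whisper's feature extractor expects.
audio, sr = librosa.load("sample_chichewa.wav", sr=16000)
features = processor(audio, sampling_rate=sr, return_tensors="pt").input_features
ids = model.generate(features)
print(processor.batch_decode(ids, skip_special_tokens=True)[0])
```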
requirements.txt
ADDED
@@ -0,0 +1,3 @@
+transformers
+librosa
+torch