Bishan committed on
Commit d476d74
0 Parent(s):

Duplicate from Bishan/Speech_To_Text_Odia

Files changed (5)
  1. .gitattributes +35 -0
  2. README.md +13 -0
  3. app.py +76 -0
  4. packages.txt +4 -0
  5. requirements.txt +8 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: Speech To Text Odia
+ emoji: 👁
+ colorFrom: gray
+ colorTo: blue
+ sdk: gradio
+ sdk_version: 3.39.0
+ app_file: app.py
+ pinned: false
+ duplicated_from: Bishan/Speech_To_Text_Odia
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,76 @@
+ import soundfile as sf
+ import torch
+ from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor, Wav2Vec2ProcessorWithLM
+ import gradio as gr
+ import sox
+ import subprocess
+
+
+ def read_file_and_process(wav_file):
+     filename = wav_file.split('.')[0]
+     filename_16k = filename + "16k.wav"
+     resampler(wav_file, filename_16k)
+     speech, _ = sf.read(filename_16k)
+     inputs = processor(speech, sampling_rate=16_000, return_tensors="pt", padding=True)
+
+     return inputs
+
+
+ def resampler(input_file_path, output_file_path):
+     command = (
+         f"ffmpeg -hide_banner -loglevel panic -i {input_file_path} -ar 16000 -ac 1 -bits_per_raw_sample 16 -vn "
+         f"{output_file_path}"
+     )
+     subprocess.call(command, shell=True)
+
+
+ def parse_transcription_with_lm(logits):
+     result = processor_with_LM.batch_decode(logits.cpu().numpy())
+     text = result.text
+     transcription = text[0].replace('<s>', '')
+     return transcription
+
+ def parse_transcription(logits):
+     predicted_ids = torch.argmax(logits, dim=-1)
+     transcription = processor.decode(predicted_ids[0], skip_special_tokens=True)
+     return transcription
+
+ def parse(wav_file, applyLM):
+     input_values = read_file_and_process(wav_file)
+     with torch.no_grad():
+         logits = model(**input_values).logits
+
+     if applyLM:
+         return parse_transcription_with_lm(logits)
+     else:
+         return parse_transcription(logits)
+
+
+ # processor = Wav2Vec2Processor.from_pretrained("Harveenchadha/vakyansh-wav2vec2-odia-orm-100")
+ # processor_with_LM = Wav2Vec2ProcessorWithLM.from_pretrained("Harveenchadha/vakyansh-wav2vec2-odia-orm-100")
+ # model = Wav2Vec2ForCTC.from_pretrained("Harveenchadha/vakyansh-wav2vec2-odia-orm-100")
+
+
+ processor = Wav2Vec2Processor.from_pretrained("Harveenchadha/odia_large_wav2vec2")
+ processor_with_LM = Wav2Vec2ProcessorWithLM.from_pretrained("Harveenchadha/odia_large_wav2vec2")
+ model = Wav2Vec2ForCTC.from_pretrained("Harveenchadha/odia_large_wav2vec2")
+
+
+ # processor = Wav2Vec2Processor.from_pretrained("theainerd/wav2vec2-large-xlsr-53-odia")
+ # processor_with_LM = Wav2Vec2ProcessorWithLM.from_pretrained("theainerd/wav2vec2-large-xlsr-53-odia")
+ # model = Wav2Vec2ForCTC.from_pretrained("theainerd/wav2vec2-large-xlsr-53-odia")
+
+
+
+
+ input_ = gr.Audio(source="upload", type="filepath")
+ txtbox = gr.Textbox(
+     label="Output from model will appear here:",
+     lines=5
+ )
+ chkbox = gr.Checkbox(label="Apply LM", value=False)
+
+
+ gr.Interface(parse, inputs=[input_, chkbox], outputs=txtbox,
+              streaming=True, interactive=True,
+              analytics_enabled=False, show_tips=False, enable_queue=True).launch(inline=False)
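
For reference, the same checkpoint can be exercised without the Gradio UI. The snippet below is a minimal sketch of the greedy (no-LM) decoding path from app.py; "example.wav" is a hypothetical 16 kHz mono file, so the ffmpeg resampling step is skipped.

```python
# Minimal sketch: greedy CTC transcription with the Odia checkpoint used in app.py.
# Assumes "example.wav" (hypothetical) is already 16 kHz mono.
import soundfile as sf
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("Harveenchadha/odia_large_wav2vec2")
model = Wav2Vec2ForCTC.from_pretrained("Harveenchadha/odia_large_wav2vec2")

speech, sample_rate = sf.read("example.wav")
inputs = processor(speech, sampling_rate=16_000, return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(**inputs).logits          # shape: (batch, time, vocab)

predicted_ids = torch.argmax(logits, dim=-1)  # greedy CTC decoding
print(processor.decode(predicted_ids[0], skip_special_tokens=True))
```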
packages.txt ADDED
@@ -0,0 +1,4 @@
+ libsndfile1
+ sox
+ ffmpeg
+
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ gradio
+ https://github.com/kpu/kenlm/archive/master.zip
+ pyctcdecode
+ soundfile
+ torch
+ transformers
+ sox
+ scipy
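
Note on the dependencies: kenlm (installed from the GitHub archive) and pyctcdecode are what back the optional "Apply LM" path through Wav2Vec2ProcessorWithLM, while the system packages in packages.txt (installed via apt on Spaces) cover soundfile's libsndfile backend and the ffmpeg resampling call in app.py.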