HoneyTian committed
Commit 5ce1fe8
1 Parent(s): 982edf4
Files changed (8)
  1. .gitattributes +1 -0
  2. .gitignore +15 -0
  3. Dockerfile +30 -0
  4. README.md +1 -1
  5. examples/wenet/test.py +80 -0
  6. main.py +110 -0
  7. project_settings.py +12 -0
  8. requirements.txt +2 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.wav filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,15 @@
+
+data/
+dotenv/
+pretrained_models
+
+.git/
+.idea/
+
+**/cache/
+**/__pycache__/
+
+**/*.env
+**/*.mp3
+**/*.png
+**/*.wav
Dockerfile ADDED
@@ -0,0 +1,30 @@
+# read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
+# you will also find guides on how best to write your Dockerfile
+# the k2_sherpa wheel in requirements.txt targets cp38, so use a Python 3.8 base image
+FROM python:3.8
+
+WORKDIR /code
+
+COPY ./requirements.txt /code/requirements.txt
+
+RUN pip install --upgrade pip
+
+RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+
+# Set up a new user named "user" with user ID 1000
+RUN useradd -m -u 1000 user
+
+# Switch to the "user" user
+USER user
+
+# Set home to the user's home directory
+ENV HOME=/home/user \
+    PATH=/home/user/.local/bin:$PATH
+
+# Set the working directory to the user's home directory
+WORKDIR $HOME/app
+
+# Copy the current directory contents into the container at $HOME/app, setting the owner to the user
+COPY --chown=user . $HOME/app
+
+CMD ["python", "main.py"]
README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-title: Asr
+title: ASR
 emoji: 📈
 colorFrom: purple
 colorTo: green
examples/wenet/test.py ADDED
@@ -0,0 +1,80 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+import argparse
+from pathlib import Path
+
+import huggingface_hub
+import sherpa
+
+from project_settings import project_path
+
+
+def get_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--repo_id",
+        default="csukuangfj/wenet-chinese-model",
+        # default="csukuangfj/wenet-english-model",
+        type=str
+    )
+
+    parser.add_argument("--model_filename", default="final.zip", type=str)
+    parser.add_argument("--tokens_filename", default="units.txt", type=str)
+
+    parser.add_argument(
+        "--pretrained_model_dir",
+        default=(project_path / "pretrained_models").as_posix(),
+        type=str
+    )
+    args = parser.parse_args()
+    return args
+
+
+def main():
+    args = get_args()
+
+    pretrained_model_dir = Path(args.pretrained_model_dir)
+    pretrained_model_dir.mkdir(exist_ok=True)
+
+    model_dir = pretrained_model_dir / "huggingface" / args.repo_id
+    model_dir.mkdir(parents=True, exist_ok=True)  # repo_id contains a "/"
+
+    print("download model")
+    model_filename = huggingface_hub.hf_hub_download(
+        repo_id=args.repo_id,
+        filename=args.model_filename,
+        subfolder=".",
+        local_dir=model_dir.as_posix(),
+    )
+    print(model_filename)
+
+    print("download tokens")
+    token_filename = huggingface_hub.hf_hub_download(
+        repo_id=args.repo_id,
+        filename=args.tokens_filename,
+        subfolder=".",
+        local_dir=model_dir.as_posix(),
+    )
+    print(token_filename)
+
+    feat_config = sherpa.FeatureConfig(normalize_samples=False)
+    feat_config.fbank_opts.frame_opts.samp_freq = 16000  # wenet models expect 16 kHz audio
+    feat_config.fbank_opts.mel_opts.num_bins = 80
+    feat_config.fbank_opts.frame_opts.dither = 0
+
+    config = sherpa.OfflineRecognizerConfig(
+        nn_model=model_filename,
+        tokens=token_filename,
+        use_gpu=False,
+        feat_config=feat_config,
+        decoding_method="greedy_search",  # or "modified_beam_search"
+        num_active_paths=4,  # only used by modified_beam_search
+    )
+
+    recognizer = sherpa.OfflineRecognizer(config)
+
+    return
+
+
+if __name__ == "__main__":
+    main()
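Note: the script above stops right after constructing the recognizer. Below is a minimal sketch of the missing decoding step, which could go just before the return in main(); it assumes sherpa's offline API exposes create_stream / accept_wave_file / decode_stream / result as in sherpa's bundled example scripts, and the test.wav path is a hypothetical placeholder, not a file shipped in this commit.

    # decode a single 16 kHz wave file with the recognizer built above
    wave_filename = (project_path / "data/examples/test.wav").as_posix()  # hypothetical example file

    stream = recognizer.create_stream()      # assumed sherpa API
    stream.accept_wave_file(wave_filename)   # assumed sherpa API
    recognizer.decode_stream(stream)         # assumed sherpa API
    print(stream.result.text)                # assumed sherpa API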
main.py ADDED
@@ -0,0 +1,110 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+import argparse
+
+import gradio as gr
+
+from project_settings import project_path
+
+
+def get_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--examples_dir",
+        default=(project_path / "data/examples").as_posix(),
+        type=str
+    )
+    parser.add_argument(
+        "--trained_model_dir",
+        default=(project_path / "trained_models").as_posix(),
+        type=str
+    )
+    args = parser.parse_args()
+    return args
+
+
+# available languages and the models offered for each one; placeholders for now
+language_choices = ["Chinese"]
+
+language_to_models = {
+    "Chinese": ["None"]
+}
+
+# example inputs for gr.Examples; empty until demo audio files are added
+examples = []
+
+
+def update_model_dropdown(language: str):
+    if language in language_to_models:
+        choices = language_to_models[language]
+        return gr.Dropdown(
+            choices=choices,
+            value=choices[0],
+            interactive=True,
+        )
+
+    raise ValueError(f"Unsupported language: {language}")
+
+
+def process_uploaded_file(language: str, repo_id: str, in_filename: str):
+    # placeholder callback: recognition is not implemented yet
+    return "", ""
+
+
+def main():
+    title = "# Automatic Speech Recognition with Next-gen Kaldi"
+
+    # blocks
+    with gr.Blocks() as blocks:
+        gr.Markdown(value=title)
+
+        # components
+        language_radio = gr.Radio(
+            label="Language",
+            choices=language_choices,
+            value=language_choices[0],
+        )
+        model_dropdown = gr.Dropdown(
+            choices=language_to_models[language_choices[0]],
+            label="Select a model",
+            value=language_to_models[language_choices[0]][0],
+        )
+
+        language_radio.change(
+            update_model_dropdown,
+            inputs=language_radio,
+            outputs=model_dropdown,
+        )
+
+        with gr.Tabs():
+            with gr.TabItem("Upload from disk"):
+                uploaded_file = gr.Audio(
+                    sources=["upload"],
+                    type="filepath",
+                    label="Upload from disk",
+                )
+                upload_button = gr.Button("Submit for recognition")
+                uploaded_output = gr.Textbox(label="Recognized speech from uploaded file")
+                uploaded_html_info = gr.HTML(label="Info")
+
+                upload_button.click(
+                    process_uploaded_file,
+                    inputs=[language_radio, model_dropdown, uploaded_file],
+                    outputs=[uploaded_output, uploaded_html_info],
+                )
+
+                if examples:
+                    gr.Examples(
+                        examples=examples,
+                        inputs=[language_radio, model_dropdown, uploaded_file],
+                        outputs=[uploaded_output, uploaded_html_info],
+                        fn=process_uploaded_file,
+                    )
+
+    blocks.queue().launch()
+
+    return
+
+
+if __name__ == "__main__":
+    main()
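Note: process_uploaded_file above is only a placeholder. Below is a hedged sketch of what the real callback might look like once a model is registered, binding the recognizer with functools.partial so the Gradio handler keeps the (language, repo_id, filename) signature used in main.py; the recognizer construction is assumed to follow examples/wenet/test.py, and the stream calls are assumptions based on sherpa's example scripts, not APIs confirmed by this commit.

    import functools
    import time

    import sherpa


    def process_uploaded_file(recognizer: sherpa.OfflineRecognizer,
                              language: str, repo_id: str, in_filename: str):
        # decode one uploaded wave file and return (transcript, html info)
        if not in_filename:
            return "", "<div>Please upload an audio file first.</div>"

        start = time.time()
        stream = recognizer.create_stream()     # assumed sherpa API
        stream.accept_wave_file(in_filename)    # assumed sherpa API
        recognizer.decode_stream(stream)        # assumed sherpa API
        elapsed = time.time() - start

        text = stream.result.text               # assumed sherpa API
        info = f"<div>Model: {repo_id}<br>Decoding time: {elapsed:.3f} s</div>"
        return text, info


    # inside main(), the submit button would then be wired as:
    #   upload_button.click(
    #       functools.partial(process_uploaded_file, recognizer),
    #       inputs=[language_radio, model_dropdown, uploaded_file],
    #       outputs=[uploaded_output, uploaded_html_info],
    #   )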
project_settings.py ADDED
@@ -0,0 +1,12 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+import os
+from pathlib import Path
+
+
+project_path = os.path.abspath(os.path.dirname(__file__))
+project_path = Path(project_path)
+
+
+if __name__ == '__main__':
+    pass
requirements.txt ADDED
@@ -0,0 +1,2 @@
+gradio==4.29.0
+https://huggingface.co/csukuangfj/wheels/resolve/main/2023-01-30/k2_sherpa-1.1-cp38-cp38-linux_x86_64.whl