Neilblaze committed
Commit ee5ce46 • 1 parent: 7f40be1

Playing around xD

Files changed (4)
  1. README.md +4 -6
  2. app.py +63 -0
  3. requirements.txt +6 -0
  4. utils.py +128 -0
README.md CHANGED
@@ -1,13 +1,11 @@
 ---
 title: WhisperAnything
-emoji: 🦀
-colorFrom: pink
-colorTo: pink
+emoji: 🎙
+colorFrom: red
+colorTo: yellow
 sdk: gradio
 sdk_version: 3.18.0
 app_file: app.py
 pinned: false
 license: mit
 ---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
app.py ADDED
@@ -0,0 +1,63 @@
+from multilingual_translation import text_to_text_generation
+from utils import lang_ids, data_scraping
+import whisper
+import gradio as gr
+
+lang_list = list(lang_ids.keys())
+model_list = data_scraping()
+model = whisper.load_model("small")
+
+def transcribe(audio):
+
+    # time.sleep(3)
+    # load audio and pad/trim it to fit 30 seconds
+    audio = whisper.load_audio(audio)
+    audio = whisper.pad_or_trim(audio)
+
+    # make log-Mel spectrogram and move to the same device as the model
+    mel = whisper.log_mel_spectrogram(audio).to(model.device)
+
+    # detect the spoken language
+    _, probs = model.detect_language(mel)
+    print(f"Detected language: {max(probs, key=probs.get)}")
+
+    # decode the audio
+    options = whisper.DecodingOptions(fp16=False)
+    result = whisper.decode(model, mel, options)
+
+    finalResult = text_to_text_generation(prompt=result.text, model_id='facebook/m2m100_418M', device='cpu', target_lang='English')  # translate the decoded text to English with m2m100
+    return finalResult
+
+# api endpoint to return the transcription in EN as a json response
+
+# @app.route('/transcribe', methods=['POST'])
+# def transcribe_api():
+#     if request.method == 'POST':
+#         audio = request.files['audio']
+#         audio = audio.read()
+#         audio = io.BytesIO(audio)
+#         audio = whisper.load_audio(audio)
+#         audio = whisper.pad_or_trim(audio)
+#         mel = whisper.log_mel_spectrogram(audio).to(model.device)
+#         _, probs = model.detect_language(mel)
+#         print(f"Detected language: {max(probs, key=probs.get)}")
+#         options = whisper.DecodingOptions(fp16=False)
+#         result = whisper.decode(model, mel, options)
+#         return jsonify(result)
+
+
+
+
+
+gr.Interface(
+    title='OpenAI Whisper ASR Gradio Web UI',
+    fn=transcribe,
+    inputs=[
+        gr.inputs.Audio(source="microphone", type="filepath")
+    ],
+    outputs=[
+        "textbox"
+    ],
+    live=True).launch(debug=True, enable_queue=True)
+
+# output = gr.outputs.Textbox(label="Output Text")
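The commented-out endpoint above would not run as written: whisper.load_audio expects a file path (it shells out to ffmpeg) rather than a BytesIO object, and whisper's DecodingResult is not directly JSON-serializable. A minimal working sketch, assuming a standalone Flask app; the app object and route name here are illustrative, not part of this commit:

import tempfile

from flask import Flask, request, jsonify
import whisper

app = Flask(__name__)
model = whisper.load_model("small")

@app.route('/transcribe', methods=['POST'])
def transcribe_api():
    # whisper.load_audio wants a path on disk (it invokes ffmpeg),
    # so write the uploaded bytes to a temporary file first
    with tempfile.NamedTemporaryFile(suffix='.wav') as tmp:
        request.files['audio'].save(tmp.name)
        audio = whisper.load_audio(tmp.name)
    audio = whisper.pad_or_trim(audio)
    mel = whisper.log_mel_spectrogram(audio).to(model.device)
    _, probs = model.detect_language(mel)
    result = whisper.decode(model, mel, whisper.DecodingOptions(fp16=False))
    # DecodingResult is a dataclass, not JSON-serializable; return plain fields
    return jsonify({'language': max(probs, key=probs.get), 'text': result.text})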
requirements.txt ADDED
@@ -0,0 +1,6 @@
+torch
+beautifulsoup4==4.11.2
+multilingual_translation==0.0.5
+requests==2.28.1
+tensorflow
+git+https://github.com/openai/whisper.git
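One note on this list: whisper decodes audio through ffmpeg, which pip does not install. On a Gradio Space the usual fix is a packages.txt next to requirements.txt listing the system package (an assumption about this Space's deployment, not part of the commit):

ffmpeg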
utils.py ADDED
@@ -0,0 +1,128 @@
+from bs4 import BeautifulSoup
+import requests
+
+
+lang_ids = {
+    "Afrikaans": "af",
+    "Amharic": "am",
+    "Arabic": "ar",
+    "Asturian": "ast",
+    "Azerbaijani": "az",
+    "Bashkir": "ba",
+    "Belarusian": "be",
+    "Bulgarian": "bg",
+    "Bengali": "bn",
+    "Breton": "br",
+    "Bosnian": "bs",
+    "Catalan": "ca",
+    "Cebuano": "ceb",
+    "Czech": "cs",
+    "Welsh": "cy",
+    "Danish": "da",
+    "German": "de",
+    "Greek": "el",
+    "English": "en",
+    "Spanish": "es",
+    "Estonian": "et",
+    "Persian": "fa",
+    "Fulah": "ff",
+    "Finnish": "fi",
+    "French": "fr",
+    "Western Frisian": "fy",
+    "Irish": "ga",
+    "Gaelic": "gd",
+    "Galician": "gl",
+    "Gujarati": "gu",
+    "Hausa": "ha",
+    "Hebrew": "he",
+    "Hindi": "hi",
+    "Croatian": "hr",
+    "Haitian": "ht",
+    "Hungarian": "hu",
+    "Armenian": "hy",
+    "Indonesian": "id",
+    "Igbo": "ig",
+    "Iloko": "ilo",
+    "Icelandic": "is",
+    "Italian": "it",
+    "Japanese": "ja",
+    "Javanese": "jv",
+    "Georgian": "ka",
+    "Kazakh": "kk",
+    "Central Khmer": "km",
+    "Kannada": "kn",
+    "Korean": "ko",
+    "Luxembourgish": "lb",
+    "Ganda": "lg",
+    "Lingala": "ln",
+    "Lao": "lo",
+    "Lithuanian": "lt",
+    "Latvian": "lv",
+    "Malagasy": "mg",
+    "Macedonian": "mk",
+    "Malayalam": "ml",
+    "Mongolian": "mn",
+    "Marathi": "mr",
+    "Malay": "ms",
+    "Burmese": "my",
+    "Nepali": "ne",
+    "Dutch": "nl",
+    "Norwegian": "no",
+    "Northern Sotho": "ns",
+    "Occitan": "oc",
+    "Oriya": "or",
+    "Panjabi": "pa",
+    "Polish": "pl",
+    "Pushto": "ps",
+    "Portuguese": "pt",
+    "Romanian": "ro",
+    "Russian": "ru",
+    "Sindhi": "sd",
+    "Sinhala": "si",
+    "Slovak": "sk",
+    "Slovenian": "sl",
+    "Somali": "so",
+    "Albanian": "sq",
+    "Serbian": "sr",
+    "Swati": "ss",
+    "Sundanese": "su",
+    "Swedish": "sv",
+    "Swahili": "sw",
+    "Tamil": "ta",
+    "Thai": "th",
+    "Tagalog": "tl",
+    "Tswana": "tn",
+    "Turkish": "tr",
+    "Ukrainian": "uk",
+    "Urdu": "ur",
+    "Uzbek": "uz",
+    "Vietnamese": "vi",
+    "Wolof": "wo",
+    "Xhosa": "xh",
+    "Yiddish": "yi",
+    "Yoruba": "yo",
+    "Chinese": "zh",
+    "Zulu": "zu",
+}
+
+def model_url_list():
+    url_list = []
+    for i in range(0, 5):  # first five result pages of m2m_100 models, sorted by downloads
+        url_list.append(f"https://huggingface.co/models?other=m2m_100&p={i}&sort=downloads")
+    return url_list
+
+def data_scraping():
+    url_list = model_url_list()
+    model_list = []
+    for url in url_list:
+        response = requests.get(url)
+        soup = BeautifulSoup(response.text, "html.parser")
+        div_class = 'grid grid-cols-1 gap-5 2xl:grid-cols-2'  # class of the model-card grid on hf.co/models
+        div = soup.find('div', {'class': div_class})
+        for a in div.find_all('a', href=True):
+            model_list.append(a['href'])
+
+    for i in range(len(model_list)):
+        model_list[i] = model_list[i][1:]  # drop the leading "/" to get "owner/model" ids
+
+    return model_list
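Scraping the rendered HTML is brittle: the grid's CSS class tracks the Hub's front-end and can change at any time, and if soup.find returns None the inner loop raises an AttributeError. A sturdier alternative (not what this commit does) is the Hub's own Python API; a sketch assuming a recent huggingface_hub release where ModelInfo exposes .id:

from huggingface_hub import list_models

def data_scraping_api(limit=150):
    # same filter the scraped URL encodes (?other=m2m_100), sorted by downloads
    models = list_models(filter="m2m_100", sort="downloads", direction=-1, limit=limit)
    return [m.id for m in models]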