JBHF committed
Commit 90d90db
1 Parent(s): 18caafd

Update app.py

Files changed (1): app.py (+45 -23)
app.py CHANGED
@@ -1,6 +1,12 @@
 # JBHF/VERTAAL-APP-EAGLE-SHELTER/app.py - 17-04-2024, 15u30m CET (app-17-04-2024-15u30m-CET.py)
 # ALREADY WORKS: THE AUDIO RECORDED WITH THIS APP, audio.wav, DOES NOT NECESSARILY HAVE TO BE PERSISTED !!!!!!

+# 18-04-2024:
+# USE AS session_state, EXAMPLE:
+# st.session_state['cleaned_up'] = True
+# AND
+# st.session_state.get('cleaned_up')
+
 # https://github.com/theevann/streamlit-audiorecorder
 # An audio Recorder for streamlit
 #
@@ -240,35 +246,51 @@ st.button("Rerun")
 # infer_faster_whisper_large_v2 (CPU VERSION !) 08-04-2024-COLAB-CPU-PYTHON3-tvscitechtalk.ipynb
 # https://colab.research.google.com/drive/1EreiFx825oIrR2P43XSXjHXx01EWi6ZH#scrollTo=vuLjbPxexPDj&uniqifier=5

-st.header("Nu gaat de app de ingesproken tekst daadwerkelijk vertalen naar het Nederlands:", divider='rainbow')
-
-from faster_whisper import WhisperModel
-
-model_size = "large-v2"
-
-# Run on GPU with FP16
-# model = WhisperModel(model_size, device="cuda", compute_type="float16") # ORIGINAL, RUNS OK ON COLAB T4 GPU
-
-# TEST: Run on CPU
-# model = WhisperModel(model_size, device="cpu", compute_type="float16") # JB, RUNS OK ON COLAB CPU ?
-# ValueError: Requested float16 compute type, but the target device or backend do not support efficient float16 computation.
-#
-# st.write("Loading the WhisperModel: model = WhisperModel(model_size, device=\"cpu\")")
-# model = WhisperModel(model_size, device="cpu") # , compute_type="float16") # JB, RUNS OK ON COLAB CPU: YES; HF SPACES STREAMLIT FREE TIER: JB OK !
-# JB: This uses my HF Token !
-# st.write("Ready Loading the WhisperModel: model = WhisperModel(model_size, device=\"cpu\")")
-
-# st.write("Loading the WhisperModel: model = WhisperModel(model_size, device=\"cpu\", compute_type=\"int8\")")
-st.write("Laden van het vertaal model (duurt gewoonlijk plm 15 seconden) ...")
-
-model = WhisperModel(model_size, device="cpu", compute_type="int8") # , compute_type="float16") # JB
-# JB: This uses my HF Token !
-# st.write("Ready Loading the WhisperModel: model = WhisperModel(model_size, device=\"cpu\")")
-# LOADING OF model = WhisperModel(model_size, device="cpu") TAKES ABOUT 1 MINUTE ON HF SPACES STREAMLIT FREE TIER
-#
-# st.write("Ready Loading the WhisperModel: model = WhisperModel(model_size, device=\"cpu\", compute_type=\"int8\")")
-# LOADING OF model = WhisperModel(model_size, device=\"cpu\", compute_type=\"int8\") TAKES ABOUT 33 sec (after RERUN: 1 minute) ON HF SPACES STREAMLIT FREE TIER
-st.write("Klaar met het laden van het vertaal model")
+# LOAD THE WHISPER TRANSCRIPTION MODEL ONLY ONCE DURING A SESSION !
+# 18-04-2024:
+# USE AS session_state, EXAMPLE:
+# st.session_state['cleaned_up'] = True
+# AND
+# st.session_state.get('cleaned_up')
+
+# st.session_state['WhisperModel'] = ""
+
+if st.session_state.get('WhisperModel') != "WhisperModelAlreadyLoaded":
+
+    st.header("Nu gaat de app de ingesproken tekst daadwerkelijk vertalen naar het Nederlands:", divider='rainbow')
+
+    from faster_whisper import WhisperModel
+
+    model_size = "large-v2"
+
+    # Run on GPU with FP16
+    # model = WhisperModel(model_size, device="cuda", compute_type="float16") # ORIGINAL, RUNS OK ON COLAB T4 GPU
+
+    # TEST: Run on CPU
+    # model = WhisperModel(model_size, device="cpu", compute_type="float16") # JB, RUNS OK ON COLAB CPU ?
+    # ValueError: Requested float16 compute type, but the target device or backend do not support efficient float16 computation.
+    #
+    # st.write("Loading the WhisperModel: model = WhisperModel(model_size, device=\"cpu\")")
+    # model = WhisperModel(model_size, device="cpu") # , compute_type="float16") # JB, RUNS OK ON COLAB CPU: YES; HF SPACES STREAMLIT FREE TIER: JB OK !
+    # JB: This uses my HF Token !
+    # st.write("Ready Loading the WhisperModel: model = WhisperModel(model_size, device=\"cpu\")")
+
+    # st.write("Loading the WhisperModel: model = WhisperModel(model_size, device=\"cpu\", compute_type=\"int8\")")
+    st.write("Laden van het vertaal model (duurt gewoonlijk plm 15 seconden) ...")
+
+    model = WhisperModel(model_size, device="cpu", compute_type="int8") # , compute_type="float16") # JB
+    # JB: This uses my HF Token !
+    # st.write("Ready Loading the WhisperModel: model = WhisperModel(model_size, device=\"cpu\")")
+    # LOADING OF model = WhisperModel(model_size, device="cpu") TAKES ABOUT 1 MINUTE ON HF SPACES STREAMLIT FREE TIER
+    #
+    # st.write("Ready Loading the WhisperModel: model = WhisperModel(model_size, device=\"cpu\", compute_type=\"int8\")")
+    # LOADING OF model = WhisperModel(model_size, device=\"cpu\", compute_type=\"int8\") TAKES ABOUT 33 sec (after RERUN: 1 minute) ON HF SPACES STREAMLIT FREE TIER
+    st.write("Klaar met het laden van het vertaal model")
+
+    st.session_state['WhisperModel'] = "WhisperModelAlreadyLoaded"

 # USING:
 # model = WhisperModel(model_size, device="cpu", compute_type="int8") # JB
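A note on the guard in this commit: it stores only the sentinel string "WhisperModelAlreadyLoaded", not the model object itself. Because Streamlit re-executes app.py from the top on every interaction, a rerun that skips the if-block also skips model = WhisperModel(...), so any later use of model would need to obtain the object from somewhere else. A minimal sketch of a variant that keeps the loaded model itself in st.session_state (the key 'whisper_model' is illustrative, not from this commit):

import streamlit as st
from faster_whisper import WhisperModel

# Load once per session and keep the object itself in session state.
if 'whisper_model' not in st.session_state:
    st.write("Loading the translation model (usually takes about 15 seconds) ...")
    # Same settings as this commit: large-v2 on CPU with int8 quantization.
    st.session_state['whisper_model'] = WhisperModel("large-v2", device="cpu", compute_type="int8")
    st.write("Finished loading the translation model")

model = st.session_state['whisper_model']  # available again on every rerun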
 
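For the "# USING:" step, a hedged usage sketch of faster-whisper's transcribe(): it returns a lazy generator of segments plus an info object with the detected language. Note that Whisper's built-in task="translate" targets English, so translating into Dutch (as the st.header text announces) would need a separate translation step. The file name "audio.wav" comes from the comments in this commit; beam_size=5 is an illustrative choice:

# Transcribe (or translate to English) the recorded audio with the loaded model.
segments, info = model.transcribe("audio.wav", beam_size=5, task="translate")
st.write("Detected language '%s' with probability %.2f" % (info.language, info.language_probability))
for segment in segments:
    # Segments are decoded lazily while iterating.
    st.write("[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text))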