Manjot Singh committed
Commit 75d8840 · 1 Parent(s): 7a5c35e

added logging for cuda

Files changed (3)
  1. app.py +5 -0
  2. audio_processing.py +1 -0
  3. requirements.txt +2 -1
app.py CHANGED
@@ -1,5 +1,10 @@
 import gradio as gr
 from audio_processing import process_audio, print_results
+import torch
+
+print(f"CUDA available: {torch.cuda.is_available()}")
+print(f"Current device: {torch.cuda.current_device()}")
+print(f"Device name: {torch.cuda.get_device_name()}")
 
 def transcribe_audio(audio_file, translate, model_size):
     language_segments, final_segments = process_audio(audio_file, translate=translate, model_size=model_size)
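Note that on a host without a visible GPU, torch.cuda.current_device() and torch.cuda.get_device_name() raise a RuntimeError, so these new prints would stop app.py at import time on CPU-only hardware. A minimal guarded sketch (illustrative only, not part of this commit):

import torch

# Report CUDA details only when a GPU is actually visible; otherwise print a
# plain CPU message so a CPU-only host does not fail at import time.
if torch.cuda.is_available():
    print("CUDA available: True")
    print(f"Current device: {torch.cuda.current_device()}")
    print(f"Device name: {torch.cuda.get_device_name()}")
else:
    print("CUDA available: False (running on CPU)")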
audio_processing.py CHANGED
@@ -34,6 +34,7 @@ def process_audio(audio_file, translate=False, model_size="small"):
 
     try:
         device = "cuda" if torch.cuda.is_available() else "cpu"
+        print(f"Using device: {device}")
        compute_type = "float32"
        audio = whisperx.load_audio(audio_file)
        model = whisperx.load_model(model_size, device, compute_type=compute_type)
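The same device report could also go through Python's standard logging module instead of print, which adds timestamps and log levels that are easier to follow in Space logs. A sketch of that alternative (not part of this commit):

import logging

import torch

logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s %(message)s")
logger = logging.getLogger(__name__)

# Same device selection as in process_audio, reported via the logger.
device = "cuda" if torch.cuda.is_available() else "cpu"
logger.info("Using device: %s", device)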
requirements.txt CHANGED
@@ -14,4 +14,5 @@ setuptools>=65
 nltk
 python-dotenv
 cdifflib
-pydub
+pydub
+cuda-python