Kr08 committed on
Commit 48be59b · verified · 1 Parent(s): 40a48e2

Update model_utils.py

Files changed (1):
  model_utils.py  +5 -5
model_utils.py CHANGED
@@ -9,7 +9,7 @@ whisper_processor = None
 whisper_model = None
 whisper_model_small = None
 
-+@spaces.GPU
+@spaces.GPU
 def load_models():
     global whisper_processor, whisper_model, whisper_model_small
     if whisper_processor is None:
@@ -19,25 +19,25 @@ def load_models():
     if whisper_model_small is None:
         whisper_model_small = whisper.load_model(WHISPER_MODEL_SIZE)
 
-+@spaces.GPU
+@spaces.GPU
 def get_device():
     return "cuda:0" if torch.cuda.is_available() else "cpu"
 
-+@spaces.GPU
+@spaces.GPU
 def get_processor():
     global whisper_processor
     if whisper_processor is None:
         load_models()
     return whisper_processor
 
-+@spaces.GPU
+@spaces.GPU
 def get_model():
     global whisper_model
     if whisper_model is None:
         load_models()
     return whisper_model
 
-+@spaces.GPU
+@spaces.GPU
 def get_whisper_model_small():
     global whisper_model_small
     if whisper_model_small is None:
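For context: the commit removes a stray leading "+" (left over from an earlier patch) in front of each @spaces.GPU decorator. A line reading "+@spaces.GPU" is a syntax error in Python, so the module would fail to import until the "+" is dropped. Below is a minimal sketch of the corrected pattern, assuming the Hugging Face ZeroGPU `spaces` package and the openai-whisper import that model_utils.py presumably declares above the shown hunks; WHISPER_MODEL_SIZE is a placeholder, and the tail of get_whisper_model_small() is truncated in the diff, so its completion here is an assumption.

# Sketch only, not the full model_utils.py.
import spaces   # Hugging Face ZeroGPU package providing @spaces.GPU
import torch
import whisper  # openai-whisper

WHISPER_MODEL_SIZE = "small"  # placeholder; the real value is not shown in the diff

whisper_model_small = None

@spaces.GPU  # correct form: decorator on its own line, no stray "+"
def get_whisper_model_small():
    # Lazily load the small Whisper model the first time it is requested.
    global whisper_model_small
    if whisper_model_small is None:
        whisper_model_small = whisper.load_model(WHISPER_MODEL_SIZE)
    return whisper_model_small  # assumed return; the diff cuts off before it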