jhj0517 committed on
Commit f962fd9 · unverified · 2 Parent(s): 021feb7 5fb27a8

Merge pull request #46 from jhj0517/fix-compute-type-bug

modules/faster_whisper_inference.py CHANGED
@@ -26,7 +26,7 @@ class FasterWhisperInference(BaseInterface):
         self.translatable_models = ["large", "large-v1", "large-v2"]
         self.default_beam_size = 1
         self.device = "cuda" if torch.cuda.is_available() else "cpu"
-        self.compute_type = "float16" if self.device == "cuda" else "int8"
+        self.compute_type = "float16" if self.device == "cuda" else "float32"
 
     def transcribe_file(self,
                         fileobjs: list,
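
A minimal sketch (not part of this diff) of how the compute_type chosen above is typically passed to faster-whisper's WhisperModel; the model name, audio path, and beam size below are illustrative placeholders, not values from the repository.

import torch
from faster_whisper import WhisperModel

device = "cuda" if torch.cuda.is_available() else "cpu"
# float16 on GPU; float32 on CPU ("int8" is the value this PR replaces)
compute_type = "float16" if device == "cuda" else "float32"

# "large-v2" and "audio.mp3" are placeholders for illustration only
model = WhisperModel("large-v2", device=device, compute_type=compute_type)
segments, info = model.transcribe("audio.mp3", beam_size=1)
for segment in segments:
    print(f"[{segment.start:.2f} -> {segment.end:.2f}] {segment.text}")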