srd4 committed on
Commit
4f4670a
1 Parent(s): e87d782

Update handler.py

Browse files
Files changed (1) hide show
  1. handler.py +11 -11
handler.py CHANGED
@@ -1,28 +1,28 @@
1
  from typing import Dict
2
- from faster_whisper import WhisperModel, Streaming
3
  import io
4
  import re
5
 
6
  class EndpointHandler:
7
  def __init__(self, model_dir=None):
8
- # Use int8 on CPU to reduce memory usage and potentially increase speed.
9
- compute_type = "int8" if model_dir == "cpu" else "float16"
 
10
 
11
- # Initialize WhisperModel with given model_size and compute_type
12
- model_size = "medium" if model_dir is None else model_dir
13
- self.model = WhisperModel(model_size, device=model_dir, compute_type=compute_type)
14
 
15
  def __call__(self, data: Dict) -> Dict[str, str]:
16
  audio_bytes = data["inputs"]
17
  audio_file = io.BytesIO(audio_bytes)
18
 
19
- # Use Streaming interface to leverage VAD and potential speed improvements.
20
- # Small beam size to speed up transcription. Adjust based on performance/accuracy needs.
21
  beam_size = 1
22
- streaming = Streaming(device=model_dir, compute_type=compute_type, vad=True)
23
- segments, info = streaming.transcribe(audio_file, beam_size=beam_size)
24
 
25
- # Aggregate transcribed text and remove any extra spaces.
26
  text = " ".join(segment.text.strip() for segment in segments)
27
  text = re.sub(' +', ' ', text)
28
 
 
1
  from typing import Dict
2
+ from faster_whisper import WhisperModel
3
  import io
4
  import re
5
 
6
  class EndpointHandler:
7
  def __init__(self, model_dir=None):
8
+ # The compute_type is set to "float16" for efficient GPU computation
9
+ # For "int8" computation on CPU, the compute_type would be "int8"
10
+ compute_type = "float16"
11
 
12
+ # Initialize WhisperModel with large-v2 model size and specified compute_type
13
+ model_size = "large-v2" if model_dir is None else model_dir
14
+ self.model = WhisperModel(model_size, device="cuda", compute_type=compute_type)
15
 
16
  def __call__(self, data: Dict) -> Dict[str, str]:
17
  audio_bytes = data["inputs"]
18
  audio_file = io.BytesIO(audio_bytes)
19
 
20
+ # Transcribe audio file with a smaller beam size for faster inference
21
+ # Note: Adjust beam_size based on desired accuracy vs speed trade-off
22
  beam_size = 1
23
+ segments, info = self.model.transcribe(audio_file, beam_size=beam_size)
 
24
 
25
+ # Aggregate transcribed text and remove any extra spaces
26
  text = " ".join(segment.text.strip() for segment in segments)
27
  text = re.sub(' +', ' ', text)
28