from typing import Dict

import whisper
from transformers.pipelines.audio_utils import ffmpeg_read

SAMPLE_RATE = 16000  # Whisper models expect 16 kHz mono audio


class EndpointHandler:
    def __init__(self, path=""):
        # load the model; `path` points to the repository but is unused here,
        # since whisper downloads its own "large" checkpoint
        self.model = whisper.load_model("large")

    def __call__(self, data: Dict[str, bytes]) -> Dict[str, str]:
        """
        Args:
            data (:obj:`dict`):
                includes the deserialized audio file as bytes under the "inputs" key
        Return:
            A :obj:`dict` with the transcription under the "text" key
        """
        # process input: decode the raw bytes into a 16 kHz float32 waveform
        inputs = data.pop("inputs", data)
        audio_nparray = ffmpeg_read(inputs, SAMPLE_RATE)

        # run inference; whisper's transcribe() accepts a numpy waveform directly
        result = self.model.transcribe(audio_nparray)

        # postprocess the prediction
        return {"text": result["text"]}