Transformers
PyTorch
perceiver
Inference Endpoints
IAMRonHIT committed on
Commit
6097b91
1 Parent(s): 7d9b1a1

Create handler.py

Files changed (1)
  1. handler.py +82 -0
handler.py ADDED
@@ -0,0 +1,82 @@
+from typing import Dict, List, Any
+from transformers import AutoTokenizer, pipeline
+from optimum.onnxruntime import ORTModelForSequenceClassification
+import torch
+from PIL import Image
+import numpy as np
+import librosa
+import io
+
+class EndpointHandler:
+    def __init__(self, path=""):
+        """
+        Initialize the handler. This loads the tokenizer and model required for inference.
+        We will load the `ronai-multimodal-perceiver-tsx` model for multimodal input handling.
+        """
+        # Load the tokenizer and the ONNX Runtime model
+        self.tokenizer = AutoTokenizer.from_pretrained(path)
+        self.model = ORTModelForSequenceClassification.from_pretrained(path)
+
+        # Initialize a pipeline for text classification (adjust the task type if needed);
+        # raw text requests are routed through this pipeline in __call__
+        self.pipeline = pipeline("text-classification", model=self.model, tokenizer=self.tokenizer)
+
+    def preprocess(self, data: Dict[str, Any]) -> Any:
+        """
+        Preprocess input data based on the modality.
+        This handler supports text, image, and audio data.
+        """
+        inputs = data.get("inputs", None)
+
+        if isinstance(inputs, str):
+            # Preprocessing for text input
+            tokens = self.tokenizer(inputs, return_tensors="pt")
+            return tokens
+
+        elif isinstance(inputs, Image.Image):
+            # Preprocessing for image input (convert to a float tensor)
+            image = np.array(inputs, dtype=np.float32)
+            image_tensor = torch.tensor(image).unsqueeze(0)  # Add batch dimension
+            return image_tensor
+
+        elif isinstance(inputs, np.ndarray):
+            # Preprocessing for raw array input (e.g., audio, point clouds)
+            return torch.tensor(inputs).unsqueeze(0)
+
+        elif isinstance(inputs, bytes):
+            # Preprocessing for audio input (decode the bytes, then convert to a mel spectrogram)
+            audio, sr = librosa.load(io.BytesIO(inputs), sr=None)
+            mel_spectrogram = librosa.feature.melspectrogram(y=audio, sr=sr)
+            mel_tensor = torch.tensor(mel_spectrogram).unsqueeze(0).unsqueeze(0)  # Add batch and channel dimensions
+            return mel_tensor
+
+        else:
+            raise ValueError("Unsupported input type. Must be a string (text), PIL.Image (image), np.ndarray (audio, etc.), or bytes (raw audio).")
+
+    def postprocess(self, outputs: Any) -> List[Dict[str, Any]]:
+        """
+        Post-process the model output into a human-readable format.
+        For classification, this returns the predicted label and its score.
+        """
+        logits = outputs.logits
+        probabilities = torch.nn.functional.softmax(logits, dim=-1)
+        predicted_class_id = probabilities.argmax().item()
+        score = probabilities[0, predicted_class_id].item()
+
+        return [{"label": self.model.config.id2label[predicted_class_id], "score": score}]
+
+    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
+        """
+        Handles the incoming request, processes the input, runs inference, and returns results.
+        Args:
+            data (Dict[str, Any]): The input data for inference.
+                - data["inputs"] can be a string (text), PIL.Image (image), np.ndarray (audio or point clouds), or bytes (raw audio).
+        Returns:
+            A list of dictionaries containing the model's prediction.
+        """
+        inputs = data.get("inputs", None)
+
+        # Text requests go straight through the pipeline, which tokenizes and classifies
+        # raw strings itself and already returns a list of {"label", "score"} dicts
+        if isinstance(inputs, str):
+            return self.pipeline(inputs)
+
+        # Step 1: Preprocess non-text input data into tensors
+        preprocessed_data = self.preprocess(data)
+
+        # Step 2: Perform model inference on the preprocessed tensors
+        outputs = self.model(preprocessed_data)
+
+        # Step 3: Post-process and return the predictions
+        return self.postprocess(outputs)
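
For reference, a minimal sketch of how the finished handler might be exercised locally before deploying it to an endpoint. The repository path, the test sentence, and the `sample.wav` file are placeholders, and the `{"inputs": ...}` payload shape mirrors the request body that a custom handler receives from Inference Endpoints.

from handler import EndpointHandler

# Instantiate the handler from a local checkout of the model repository (placeholder path)
handler = EndpointHandler(path=".")

# Text request: routed straight through the text-classification pipeline
print(handler({"inputs": "This is a quick test of the text path."}))

# Audio request: raw bytes are decoded with librosa and converted to a mel spectrogram
with open("sample.wav", "rb") as f:
    print(handler({"inputs": f.read()}))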