nizar-sayad committed on
Commit
56fd19b
1 Parent(s): 0f2976b

add custom handler

Files changed (2)
  1. handler.py +32 -0
  2. requirements.txt +3 -0
handler.py ADDED
@@ -0,0 +1,32 @@
+ from typing import Dict, List, Any
+ from transformers import AutoModelForMultipleChoice, AutoTokenizer
+ import torch
+
+
+ class EndpointHandler:
+     def __init__(self, path=""):
+         # load model and tokenizer from path
+         self.model = AutoModelForMultipleChoice.from_pretrained(path)
+         self.tokenizer = AutoTokenizer.from_pretrained(path)
+
+     def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
+         """
+         Args:
+             data (:dict:):
+                 The payload with the input text and the candidate generations to rank.
+         """
+         # process input: the prompt and the list of decoded candidate outputs
+         input_text = data.pop("input", data)
+         gen_outputs_no_input_decoded = data.pop("gen_outputs_no_input_decoded", None)
+
+         # pair the input with each candidate and tokenize them as multiple-choice options
+         rank_inputs = self.tokenizer(
+             [[input_text, gen_output] for gen_output in gen_outputs_no_input_decoded],
+             return_tensors="pt",
+             padding=True,
+         )
+
+         # dummy label; only the logits are used for ranking
+         rank_labels = torch.tensor(0).unsqueeze(0)
+
+         # add the batch dimension expected by the multiple-choice head: (batch, num_choices, seq_len)
+         rank_outputs = self.model(**{k: v.unsqueeze(0) for k, v in rank_inputs.items()}, labels=rank_labels)
+
+         # softmax over the choice logits gives one probability per candidate
+         rank_predictions = torch.nn.functional.softmax(rank_outputs.logits, dim=-1)[0].tolist()
+         return {"rank_predictions": rank_predictions}
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ accelerate
+ bitsandbytes
+ transformers