Added binary file handling to the inference endpoint and made the return value a dictionary

#1
by nicklorch - opened
Files changed (1)
  1. handler.py +15 -8
handler.py CHANGED
@@ -14,18 +14,25 @@ class EndpointHandler():
         self.processor = CLIPProcessor.from_pretrained("rbanfield/clip-vit-large-patch14")
 
     def __call__(self, data):
-        inputs = data.pop("inputs", None)
-        text_input = inputs["text"] if "text" in inputs else None
-        image_input = inputs["image"] if "image" in inputs else None
+
+        text_input = None
+        if isinstance(data, dict):
+            inputs = data.pop("inputs", None)
+            text_input = inputs.get('text', None)
+            image_data = BytesIO(base64.b64decode(inputs['image'])) if 'image' in inputs else None
+        else:
+            # assuming it's an image sent via binary
+            image_data = BytesIO(data)
+
 
         if text_input:
             processor = self.processor(text=text_input, return_tensors="pt", padding=True).to(device)
             with torch.no_grad():
-                return self.text_model(**processor).pooler_output.tolist()
-        elif image_input:
-            image = Image.open(BytesIO(base64.b64decode(image_input)))
+                return {'embeddings': self.text_model(**processor).pooler_output.tolist()[0]}
+        elif image_data:
+            image = Image.open(image_data)
             processor = self.processor(images=image, return_tensors="pt").to(device)
             with torch.no_grad():
-                return self.image_model(**processor).image_embeds.tolist()
+                return {'embeddings': self.image_model(**processor).image_embeds.tolist()[0]}
         else:
-            return None
+            return {'embeddings': None}
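
For reference, here is a minimal client-side sketch of how the patched handler might be called once deployed. The endpoint URL, token, and image file name are placeholders rather than part of this PR, and the sketch assumes the Inference Endpoints toolkit forwards a raw (non-JSON) request body to `__call__` as bytes. The payload shapes mirror the two branches above; every response is now a dict holding a single `embeddings` vector.

```python
import base64
import requests

ENDPOINT_URL = "https://<your-endpoint>.endpoints.huggingface.cloud"  # placeholder
HEADERS = {"Authorization": "Bearer <HF_TOKEN>"}                      # placeholder token

# 1) Text embedding: JSON payload, handled by the isinstance(data, dict) branch.
resp = requests.post(
    ENDPOINT_URL,
    headers={**HEADERS, "Content-Type": "application/json"},
    json={"inputs": {"text": "a photo of a cat"}},
)
print(resp.json()["embeddings"])  # a single vector (note the [0] in the handler)

# 2) Image embedding via base64 inside JSON (same path as before this PR).
with open("cat.jpg", "rb") as f:  # placeholder file
    b64_image = base64.b64encode(f.read()).decode("utf-8")
resp = requests.post(
    ENDPOINT_URL,
    headers={**HEADERS, "Content-Type": "application/json"},
    json={"inputs": {"image": b64_image}},
)
print(resp.json()["embeddings"])

# 3) Image embedding via a raw binary body, handled by the new else branch;
#    this avoids the size overhead of base64-encoding the image.
with open("cat.jpg", "rb") as f:
    resp = requests.post(
        ENDPOINT_URL,
        headers={**HEADERS, "Content-Type": "image/jpeg"},
        data=f.read(),
    )
print(resp.json()["embeddings"])
```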