sooh-j committed on
Commit 046b29c
1 Parent(s): d6ea99d

Update handler.py

Files changed (1):
1. handler.py  +31 -8
handler.py CHANGED
@@ -92,13 +92,15 @@ class EndpointHandler():
         # }
         # })
         inputs = data.pop("inputs", data)
-        try:
-            imageBase64 = inputs["image"]
-            image = Image.open(BytesIO(base64.b64decode(imageBase64.split(",")[1].encode())))
+        parameters = data.pop("parameters", {})
+
+        # try:
+        #     imageBase64 = inputs["image"]
+        #     image = Image.open(BytesIO(base64.b64decode(imageBase64.split(",")[1].encode())))

-        except:
-            image_url = inputs['image']
-            image = Image.open(requests.get(image_url, stream=True).raw).convert('RGB')
+        # except:
+        #     image_url = inputs['image']
+        #     image = Image.open(requests.get(image_url, stream=True).raw).convert('RGB')

         question = inputs["question"]

@@ -107,14 +109,35 @@ class EndpointHandler():

         # image = Image.open(requests.get(imageBase64, stream=True).raw)
         # image = Image.open(requests.get(image_url, stream=True).raw).convert('RGB')
+        #### https://huggingface.co/SlowPacer/witron-image-captioning/blob/main/handler.py
+        inputs = data.pop("inputs", data)
+        parameters = data.pop("parameters", {})
+
+        if isinstance(inputs, Image.Image):
+            image = [inputs]
+        else:
+            inputs = isinstance(inputs, str) and [inputs] or inputs
+            image = [Image.open(BytesIO(base64.b64decode(_img))) for _img in inputs]
+
+        # processed_images = self.processor(images=raw_images, return_tensors="pt")
+        # processed_images["pixel_values"] = processed_images["pixel_values"].to(device)
+        # processed_images = {**processed_images, **parameters}
+
+        # with torch.no_grad():
+        #     out = self.model.generate(**processed_images)
+        #     captions = self.processor.batch_decode(out, skip_special_tokens=True)
+        ####

+
         prompt = f"Question: {question}, Answer:"
-        processed = self.processor(images=image, text=prompt, return_tensors="pt").to(self.device, torch.float16)
+        processed = self.processor(images=image, text=prompt, return_tensors="pt").to(self.device)#, torch.float16)

         # answer = self._generate_answer(
         #     model_path, prompt, image,
         # )
-        out = self.model.generate(**processed)
+
+        with torch.no_grad():
+            out = self.model.generate(**processed)

         result = {}
         text_output = self.processor.decode(out[0], skip_special_tokens=True)
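
Usage note (not part of the commit): a minimal client-side sketch of a payload for this revision, assuming the fields the diff reads — a question taken from inputs["question"], a base64-encoded image, and an optional "parameters" dict popped by the handler. The file name, question text, and the "image" key placement follow the commented-out branch and are illustrative only.

    import base64

    # Encode a local image as plain base64. The removed try-branch used to strip
    # a data-URL prefix with split(",")[1]; the new decode path calls
    # base64.b64decode on the raw string directly.
    with open("example.jpg", "rb") as f:  # hypothetical test image
        image_b64 = base64.b64encode(f.read()).decode("utf-8")

    # "inputs" carries the question (read via inputs["question"]) and the image;
    # "parameters" is popped by the handler for generation options.
    payload = {
        "inputs": {"image": image_b64, "question": "What is in this picture?"},
        "parameters": {},
    }

The other two functional changes are dtype and autograd: .to(self.device) no longer forces torch.float16, leaving the dtype to however the model was loaded, and model.generate now runs under with torch.no_grad(): so inference does not build an autograd graph.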