Update app.py
app.py CHANGED
@@ -134,7 +134,7 @@ def predict_sentiment(text, image):
     print(dir(image))
     image = read_image(image, mode=ImageReadMode.RGB)
     # image = transforms.ToTensor()(image).unsqueeze(0)
-
+    print(image)
     text_inputs = tokenizer(
         text,
         max_length=512,
@@ -150,17 +150,18 @@ def predict_sentiment(text, image):
     )
     image_transformations = torch.jit.script(image_transformations)
     image = image_transformations(image)
+
     model_input = {
         "input_ids" : text_inputs.input_ids,
         "pixel_values":image,
         "attention_mask" : text_inputs.attention_mask,
     }
     print(text_inputs)
-    print(image)
+    print("its going in ",image)
     print(model_input)
     prediction = None
     with torch.no_grad():
-        prediction = model(input_ids=text_inputs.
+        prediction = model(input_ids=[text_inputs.input_id],attention_mask=[text_inputs.attention_mask], pixel_values=[image])
     print(prediction)
     return prediction
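
Note on the new model call added at line 164: it references text_inputs.input_id (the tokenizer attribute is input_ids) and wraps each tensor in a Python list, which a Hugging Face model forward will not accept. Below is a minimal sketch of the call the change appears to be aiming for; it assumes model is a Hugging Face vision-text model (e.g. CLIPModel or VisionTextDualEncoderModel) whose forward takes input_ids, attention_mask and pixel_values tensors, and that tokenizer is called with return_tensors="pt". The helper name run_model and the unsqueeze step for the batch dimension are illustrative, not part of this Space's code.

# Hedged sketch, not the Space's actual code: assumes a Hugging Face vision-text
# model whose forward() accepts input_ids, attention_mask and pixel_values tensors,
# and a tokenizer invoked with return_tensors="pt".
import torch

def run_model(model, text_inputs, image):
    # text_inputs.input_ids / .attention_mask are already batched tensors when the
    # tokenizer returns PyTorch tensors; pass them directly rather than wrapping
    # them in Python lists, and note the attribute is input_ids, not input_id.
    pixel_values = image.unsqueeze(0) if image.dim() == 3 else image  # add batch dim if missing
    with torch.no_grad():
        prediction = model(
            input_ids=text_inputs.input_ids,
            attention_mask=text_inputs.attention_mask,
            pixel_values=pixel_values,
        )
    return prediction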