Ensure GIFs can be visualized as well
app.py CHANGED
@@ -77,6 +77,8 @@ def init():
     # ja_image_features = np.load("./hakuhodo_features.npy")
     ml_image_features = np.load("./resized_ml_features.npy")
     ja_image_features = np.load("./resized_ja_features.npy")
+    # ml_image_features = np.load("./tiled_ml_features.npy")
+    # ja_image_features = np.load("./tiled_ja_features.npy")

     # Convert features to Tensors: Float32 on CPU and Float16 on GPU
     if device == "cpu":
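The context lines only carry the comment about the Float32/Float16 conversion; a minimal sketch of what that branch plausibly does, assuming PyTorch and NumPy (the variable and file names are taken from the hunk, the branch bodies are illustrative rather than the app's actual code):

    import numpy as np
    import torch

    device = "cuda" if torch.cuda.is_available() else "cpu"
    ml_image_features = np.load("./resized_ml_features.npy")

    # Convert features to Tensors: Float32 on CPU and Float16 on GPU
    if device == "cpu":
        ml_image_features = torch.from_numpy(ml_image_features).float()
    else:
        ml_image_features = torch.from_numpy(ml_image_features).to(device).half()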
@@ -188,7 +190,7 @@ def visualize_gradcam(viz_image_id):

     image_url = st.session_state.images_info.loc[viz_image_id]["image_url"]
     image_response = requests.get(image_url)
-    image = Image.open(BytesIO(image_response.content), formats=["JPEG"])
+    image = Image.open(BytesIO(image_response.content), formats=["JPEG", "GIF"])

     img_dim = 224
     if st.session_state.active_model == "M-CLIP (multiple languages)":
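The formats=["JPEG", "GIF"] argument widens the set of decoders Pillow is allowed to try from JPEG-only to JPEG and GIF. A GIF usually arrives as a palette-mode ("P") image, and an animated GIF exposes just its first frame unless .seek() is called, so it typically needs an explicit RGB conversion before the 224-pixel preprocessing. A minimal sketch of loading and normalising such an image, assuming requests and Pillow (the URL and the resize call are illustrative, not the app's actual preprocessing):

    from io import BytesIO

    import requests
    from PIL import Image

    image_url = "https://example.com/sample.gif"  # placeholder URL
    image_response = requests.get(image_url)
    image = Image.open(BytesIO(image_response.content), formats=["JPEG", "GIF"])

    # Animated GIFs load only their first frame by default; convert palette or
    # alpha modes to RGB so the downstream tensor has exactly three channels.
    image = image.convert("RGB").resize((224, 224))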
@@ -309,7 +311,7 @@ def visualize_gradcam(viz_image_id):

         progress_bar.progress(
             (t + 1) / len(tokenized_text),
-            text=f"Processing token {t+1} of {len(tokenized_text)}
+            text=f"Processing token {t+1} of {len(tokenized_text)}",
         )
     progress_bar.empty()

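For reference, the progress text above uses the text parameter of Streamlit's progress(), updating the label on every iteration before the bar is removed with empty(). A stripped-down sketch of that loop, assuming streamlit >= 1.18 (the token list and the per-token work are placeholders):

    import streamlit as st

    tokenized_text = ["a", "photo", "of", "a", "cat"]  # placeholder tokens

    progress_bar = st.progress(0.0)
    for t, token in enumerate(tokenized_text):
        # ... per-token Grad-CAM work would happen here ...
        progress_bar.progress(
            (t + 1) / len(tokenized_text),
            text=f"Processing token {t+1} of {len(tokenized_text)}",
        )
    progress_bar.empty()  # remove the bar once every token has been processed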
@@ -381,7 +383,7 @@ with search_row[3]:
 with search_row[4]:
     st.radio(
         "CLIP Model",
-        options=["M-CLIP (multiple languages)", "J-CLIP (日本語
+        options=["M-CLIP (multiple languages)", "J-CLIP (日本語)"],
         key="active_model",
         on_change=string_search,
         horizontal=True,
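The widget's key="active_model" is what the Grad-CAM hunk reads back through st.session_state.active_model, and on_change triggers a new search whenever the model is switched. A self-contained sketch of that pattern, assuming Streamlit (the string_search body and the messages are stand-ins for the app's real search routine):

    import streamlit as st

    def string_search():
        # Stand-in for the app's search: just record which model was active.
        st.session_state["last_model"] = st.session_state.active_model

    st.radio(
        "CLIP Model",
        options=["M-CLIP (multiple languages)", "J-CLIP (日本語)"],
        key="active_model",
        on_change=string_search,
        horizontal=True,
    )

    if st.session_state.active_model == "M-CLIP (multiple languages)":
        st.write("Using the multilingual CLIP encoder")
    else:
        st.write("Using the Japanese CLIP encoder")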