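"""Gradio Space that visualizes class-attention saliency maps for a CaiT model."""
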
import gradio as gr
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from huggingface_hub.keras_mixin import from_pretrained_keras
from PIL import Image
import utils
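
# `utils` is a local helper module bundled with this Space; it provides the
# `preprocess_image` and `get_cls_attention_map` helpers used below.
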
_RESOLUTION = 224


def get_model() -> tf.keras.Model:
    """Initializes a tf.keras.Model from HF Hub."""
    inputs = tf.keras.Input((_RESOLUTION, _RESOLUTION, 3))
    hub_module = from_pretrained_keras("probing-vits/cait_xxs24_224_classification")

    # The hub model returns classification logits along with dictionaries of
    # self-attention (sa) and class-attention (ca) scores.
    logits, sa_atn_score_dict, ca_atn_score_dict = hub_module(inputs, training=False)

    return tf.keras.Model(
        inputs, [logits, sa_atn_score_dict, ca_atn_score_dict]
    )
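
# Load the model once at import time so each request only runs a forward pass.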
_MODEL = get_model()


def show_plot(image):
    """Called when the user hits Submit on the UI."""
    original_image, preprocessed_image = utils.preprocess_image(
        image, _RESOLUTION
    )
    _, _, ca_atn_score_dict = _MODEL.predict(preprocessed_image)

    # Compute the class-attention saliency map and superimpose it on the
    # original image.
    saliency_attention = utils.get_cls_attention_map(
        preprocessed_image, ca_atn_score_dict, block_key="ca_ffn_block_0_att"
    )

    fig = plt.figure()
    plt.imshow(original_image.astype("int32"))
    plt.imshow(saliency_attention.squeeze(), cmap="cividis", alpha=0.9)
    plt.axis("off")
    return fig

title = "Generate Class Saliency Plots"
article = "Class saliency maps as investigated in [Going deeper with Image Transformers](https://arxiv.org/abs/2103.17239) (Touvron et al.)."

iface = gr.Interface(
    show_plot,
    inputs=gr.inputs.Image(type="pil", label="Input Image"),
    outputs=gr.outputs.Plot(type="auto"),
    title=title,
    article=article,
    allow_flagging="never",
    examples=[["./butterfly_cropped.png"]],
)
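# `debug=True` keeps the server in the foreground and prints errors to the console.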
iface.launch(debug=True)