# vision-diffmask / app.py
import sys
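# Make the local project modules under ./code (datamodules, models, utils) importable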
sys.path.insert(0, './code')
from datamodules.transformations import UnNest
from models.interpretation import ImageInterpretationNet
from transformers import ViTFeatureExtractor, ViTForImageClassification
from utils.plot import smoothen, draw_mask_on_image, draw_heatmap_on_image
import gradio as gr
import numpy as np
import torch
# Load Vision Transformer
hf_model = "tanlq/vit-base-patch16-224-in21k-finetuned-cifar10"
vit = ViTForImageClassification.from_pretrained(hf_model)
vit.eval()
# Load Feature Extractor
feature_extractor = ViTFeatureExtractor.from_pretrained(hf_model, return_tensors="pt")
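# Wrap the extractor so calling it returns a plain tensor (un-nested output)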
feature_extractor = UnNest(feature_extractor)
# Load Vision DiffMask
diffmask = ImageInterpretationNet.load_from_checkpoint('checkpoints/diffmask.ckpt')
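# Attach the ViT whose predictions DiffMask will interpret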
diffmask.set_vision_transformer(vit)
# Define mask plotting functions
def draw_mask(image, mask):
    return draw_mask_on_image(image, smoothen(mask))\
        .permute(1, 2, 0)\
        .clip(0, 1)\
        .numpy()


def draw_heatmap(image, mask):
    return draw_heatmap_on_image(image, smoothen(mask))\
        .permute(1, 2, 0)\
        .clip(0, 1)\
        .numpy()
# Define callable method for the demo
def get_mask(image):
    if image is None:
        return None

    # Convert the HWC uint8 array from Gradio to a CHW float tensor in [0, 1]
    image = torch.from_numpy(image).permute(2, 0, 1).float() / 255
    # Preprocess and add a batch dimension
    dm_image = feature_extractor(image).unsqueeze(0)

    # Predict the patch mask and render it as an image overlay and a heatmap
    mask = diffmask.get_mask(dm_image)["mask"][0].detach()
    masked_img = draw_mask(image, mask)
    heatmap = draw_heatmap(image, mask)

    # Show the masked image and the heatmap side by side
    return np.hstack((masked_img, heatmap))
# Launch demo interface
gr.Interface(
    get_mask,
    inputs=gr.inputs.Image(label="Input", shape=(224, 224), source="upload", type="numpy"),
    outputs=[gr.outputs.Image(label="Output")],
    title="Vision DiffMask Demo",
    live=True,
).launch()