nielsr (HF staff) committed
Commit 28dff12
1 Parent(s): d0b4edc

Add title, description, example image

Files changed (1)
  1. app.py +12 -2
app.py CHANGED
@@ -9,6 +9,8 @@ import torch.nn as nn
 import torchvision
 import matplotlib.pyplot as plt
 
+torch.hub.download_url_to_file('http://images.cocodataset.org/val2017/000000039769.jpg', 'cats.jpg')
+
 def get_attention_maps(pixel_values, attentions, nh):
     threshold = 0.6
     w_featmap = pixel_values.shape[-2] // model.config.patch_size
@@ -34,7 +36,6 @@ def get_attention_maps(pixel_values, attentions, nh):
     output_dir = '.'
     os.makedirs(output_dir, exist_ok=True)
     attention_maps = []
-    print("Number of heads:", nh)
     for j in range(nh):
         fname = os.path.join(output_dir, "attn-head" + str(j) + ".png")
         # save the attention map
@@ -65,7 +66,16 @@ def visualize_attention(image):
 
     return attention_maps
 
+title = "Interactive demo: DINO"
+description = "Demo for Facebook AI's DINO, a new method for self-supervised training of Vision Transformers. Using this method, they are capable of segmenting objects within an image without having ever been trained to do so. This can be observed by displaying the self-attention of the heads from the last layer for the [CLS] token query. This demo uses a ViT-S/8 trained with DINO. To use it, simply upload an image or use the example image below. Results will show up in a few seconds."
+article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2104.14294'>Emerging Properties in Self-Supervised Vision Transformers</a> | <a href='https://github.com/facebookresearch/dino'>Github Repo</a></p>"
+examples = [['cats.jpg']]
+
 iface = gr.Interface(fn=visualize_attention,
                      inputs=gr.inputs.Image(shape=(480, 480), type="pil"),
-                     outputs=[gr.outputs.Image(type='file', label=f'attention_head_{i}') for i in range(6)])
+                     outputs=[gr.outputs.Image(type='file', label=f'attention_head_{i}') for i in range(6)],
+                     title=title,
+                     description=description,
+                     article=article,
+                     examples=examples)
 iface.launch()
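
For context, the model-side code that feeds get_attention_maps is not part of this diff. Below is a minimal sketch of how the per-head [CLS] attention maps this Space visualizes can be computed with the transformers library; the facebook/dino-vits8 checkpoint and the nearest-neighbour upsampling are assumptions inferred from the demo description ("a ViT-S/8 trained with DINO"), not code from this commit.

import os

import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from PIL import Image
from transformers import ViTFeatureExtractor, ViTModel

# Assumed checkpoint: ViT-S/8 trained with DINO, per the demo description.
feature_extractor = ViTFeatureExtractor.from_pretrained('facebook/dino-vits8')
model = ViTModel.from_pretrained('facebook/dino-vits8', add_pooling_layer=False)

image = Image.open('cats.jpg')
pixel_values = feature_extractor(images=image, return_tensors='pt').pixel_values

with torch.no_grad():
    outputs = model(pixel_values, output_attentions=True)

# Last layer's attention: (batch, num_heads, seq_len, seq_len); ViT-S has 6 heads.
attentions = outputs.attentions[-1]
nh = attentions.shape[1]

# Keep the attention of the [CLS] token (query 0) to every patch token (keys 1..N).
attentions = attentions[0, :, 0, 1:].reshape(nh, -1)

# Reshape each head to the patch-grid resolution, as in get_attention_maps.
w_featmap = pixel_values.shape[-2] // model.config.patch_size
h_featmap = pixel_values.shape[-1] // model.config.patch_size
attentions = attentions.reshape(nh, w_featmap, h_featmap)

# Upsample back to pixel resolution and save one PNG per head,
# mirroring the attn-head{j}.png loop in the diff.
attentions = nn.functional.interpolate(
    attentions.unsqueeze(0), scale_factor=model.config.patch_size, mode='nearest'
)[0]
for j in range(nh):
    fname = os.path.join('.', f'attn-head{j}.png')
    plt.imsave(fname=fname, arr=attentions[j].numpy(), format='png')

Note that gr.inputs.Image and gr.outputs.Image used in the diff belong to the Gradio 2.x API; in Gradio 3 and later the components live directly under gr. (e.g. gr.Image), so the sketch above sticks to the model side.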