yshenaw committed
Commit 8fa7aad · verified · 1 Parent(s): 7bcf3b0

Update app.py

Files changed (1)
  1. app.py +6 -4

app.py CHANGED
@@ -50,7 +50,7 @@ railway_file_path = "assets/railway.jpg"
 happy_file_path = "assets/happy.jpg"
 
 unrelated_features = [
-    693, 793, 2033, 1195, 4879, 890, 1762, 45, 4258
+    693, 793, 2033, 1195, 1257, 4879, 890, 1762, 45, 4258
 ]
 
 @spaces.GPU
@@ -83,7 +83,7 @@ def generate_activations(image):
     result.scatter_(-1, topk.indices, topk.values)
     cached_list.append(result.detach().cpu())
     topk_indices = (
-        latents.squeeze(0).mean(dim=0).topk(k=500).indices.detach().cpu()
+        latents.squeeze(0).mean(dim=0).topk(k=1000).indices.detach().cpu()
     )
 
     handles = [hooked_module.register_forward_hook(hook)]
@@ -209,7 +209,7 @@ with gr.Blocks() as demo:
         """
         # Large Multi-modal Models Can Interpret Features in Large Multi-modal Models
 
-        🔍 [ArXiv Paper](https://arxiv.org/abs/2411.14982) | 🏠 [LMMs-Lab Homepage](https://lmms-lab.framer.ai) | 🤗 [Huggingface Collections](https://huggingface.co/collections/lmms-lab/llava-sae-674026e4e7bc8c29c70bc3a3) | [GitHub Repo](https://github.com/EvolvingLMMs-Lab/multimodal-sae) | [Feature Interpretation](https://huggingface.co/datasets/lmms-lab/llava-sae-explanations-5k)
+        🔍 [A Database for Interpreted 5k Neurons](https://huggingface.co/datasets/lmms-lab/llava-sae-explanations-5k) | 🔍 [ArXiv Paper](https://arxiv.org/abs/2411.14982) | 🏠 [LMMs-Lab Homepage](https://lmms-lab.framer.ai) | 🤗 [Huggingface Collections](https://huggingface.co/collections/lmms-lab/llava-sae-674026e4e7bc8c29c70bc3a3) | [GitHub Repo](https://github.com/EvolvingLMMs-Lab/multimodal-sae) | [Feature Interpretation](https://huggingface.co/datasets/lmms-lab/llava-sae-explanations-5k)
         """
     )
     with gr.Accordion("ℹ️ Instructions", open=False):
@@ -237,10 +237,12 @@
         [
             ["assets/sunglasses.jpg", 10, "Sunglasses"],
             ["assets/greedy.jpg", 14, "Greedy eating"],
+            ["assets/greedy.jpg", 1085, "Human Faces"],
             ["assets/railway.jpg", 28, "Railway tracks"],
             ["assets/bird.png", 1803, "The seagull feathers."],
             ["assets/eyes.png", 2274, "Eyes"],
-            ["assets/monkey.png", 2692, "Monkey"],
+            ["assets/happy.jpg", 19379, "Happy"],
+            ["assets/sad.jpg", 108692, "Sad"],
         ],
         inputs=[image, feature_num, dummy_text],
         label="Examples",