Update app.py

app.py CHANGED
@@ -50,7 +50,7 @@ railway_file_path = "assets/railway.jpg"
 happy_file_path = "assets/happy.jpg"
 
 unrelated_features = [
-    693, 793, 2033, 1195, 4879, 890, 1762, 45, 4258
+    693, 793, 2033, 1195, 1257, 4879, 890, 1762, 45, 4258
 ]
 
 @spaces.GPU
@@ -83,7 +83,7 @@ def generate_activations(image):
         result.scatter_(-1, topk.indices, topk.values)
         cached_list.append(result.detach().cpu())
         topk_indices = (
-            latents.squeeze(0).mean(dim=0).topk(k=
+            latents.squeeze(0).mean(dim=0).topk(k=1000).indices.detach().cpu()
         )
 
     handles = [hooked_module.register_forward_hook(hook)]
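For context on the hunk above: the edited `topk_indices` line sits inside a forward hook that sparsifies SAE latents with a top-k readout (`scatter_` writes the k largest values per token into a zero tensor) and then ranks features by their mean activation across tokens. Below is a minimal sketch of that pattern; the shapes and the per-token `k=64` are illustrative assumptions, and only the `k=1000` ranking line is taken from the diff:

```python
import torch

# Illustrative shapes (assumptions): batch=1, 576 image tokens, 8192 SAE features.
latents = torch.randn(1, 576, 8192)

# Sparse top-k readout: keep the k strongest activations per token, zero the rest.
k = 64  # assumed; the actual value is not visible in this diff
topk = latents.topk(k, dim=-1)
result = torch.zeros_like(latents)
result.scatter_(-1, topk.indices, topk.values)  # same call as in the hunk above

# Rank features by mean activation over all tokens, as the new line does.
topk_indices = latents.squeeze(0).mean(dim=0).topk(k=1000).indices.detach().cpu()
print(topk_indices.shape)  # torch.Size([1000])
```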
@@ -209,7 +209,7 @@ with gr.Blocks() as demo:
         """
         # Large Multi-modal Models Can Interpret Features in Large Multi-modal Models
 
-        [ArXiv Paper](https://arxiv.org/abs/2411.14982) | [LMMs-Lab Homepage](https://lmms-lab.framer.ai) | 🤗 [Huggingface Collections](https://huggingface.co/collections/lmms-lab/llava-sae-674026e4e7bc8c29c70bc3a3) | [GitHub Repo](https://github.com/EvolvingLMMs-Lab/multimodal-sae) | [Feature Interpretation](https://huggingface.co/datasets/lmms-lab/llava-sae-explanations-5k)
+        [A Database for Interpreted 5k Neurons](https://huggingface.co/datasets/lmms-lab/llava-sae-explanations-5k) | [ArXiv Paper](https://arxiv.org/abs/2411.14982) | [LMMs-Lab Homepage](https://lmms-lab.framer.ai) | 🤗 [Huggingface Collections](https://huggingface.co/collections/lmms-lab/llava-sae-674026e4e7bc8c29c70bc3a3) | [GitHub Repo](https://github.com/EvolvingLMMs-Lab/multimodal-sae) | [Feature Interpretation](https://huggingface.co/datasets/lmms-lab/llava-sae-explanations-5k)
         """
     )
     with gr.Accordion("ℹ️ Instructions", open=False):
@@ -237,10 +237,12 @@ with gr.Blocks() as demo:
             [
                 ["assets/sunglasses.jpg", 10, "Sunglasses"],
                 ["assets/greedy.jpg", 14, "Greedy eating"],
+                ["assets/greedy.jpg", 1085, "Human Faces"],
                 ["assets/railway.jpg", 28, "Railway tracks"],
                 ["assets/bird.png", 1803, "The seagull feathers."],
                 ["assets/eyes.png", 2274, "Eyes"],
-                ["assets/
+                ["assets/happy.jpg", 19379, "Happy"],
+                ["assets/sad.jpg", 108692, "Sad"],
             ],
             inputs=[image, feature_num, dummy_text],
             label="Examples",
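The last hunk extends the demo's example gallery. Here is a self-contained sketch of how such rows plug into `gr.Examples`; the component definitions are assumptions, and only the example rows and the `inputs=[image, feature_num, dummy_text]` wiring come from the diff:

```python
import gradio as gr

with gr.Blocks() as demo:
    # Assumed component definitions; the Space's real ones are not shown in this diff.
    image = gr.Image(type="filepath", label="Input image")
    feature_num = gr.Number(value=10, precision=0, label="Feature number")
    dummy_text = gr.Textbox(label="Feature description")
    gr.Examples(
        examples=[
            ["assets/sunglasses.jpg", 10, "Sunglasses"],
            ["assets/happy.jpg", 19379, "Happy"],  # row added in this commit
            ["assets/sad.jpg", 108692, "Sad"],     # row added in this commit
        ],
        inputs=[image, feature_num, dummy_text],
        label="Examples",
    )

if __name__ == "__main__":
    demo.launch()
```

Clicking an example row simply populates the three input components, so no callback function is needed in the `gr.Examples` call itself.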