mshook StefanHex committed on
Commit a26206c (0 parents)

Duplicate from StefanHex/simple-trafo-mech-int


Co-authored-by: Stefan Heimersheim <StefanHex@users.noreply.huggingface.co>

Files changed (5)
  1. .gitattributes +34 -0
  2. INFO.md +8 -0
  3. README.md +14 -0
  4. app.py +246 -0
  5. requirements.txt +7 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
INFO.md ADDED
@@ -0,0 +1,8 @@
+ # Trafo Mech Int playground
+
+ Mechanistic Interpretability for everyone!
+ A website to visualise Transformer internals.
+
+ By [Stefan Heimersheim](https://github.com/Stefan-Heimersheim/) and [Jonathan Ng](https://github.com/derpyplops).
+
+ [Mechanistic Interpretability Hackathon](https://itch.io/jam/mechint) submission.
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: Simple Trafo Mech Int
+ emoji: 🌍
+ colorFrom: blue
+ colorTo: purple
+ sdk: streamlit
+ sdk_version: 1.17.0
+ app_file: app.py
+ pinned: false
+ license: mit
+ duplicated_from: StefanHex/simple-trafo-mech-int
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,246 @@
+ import streamlit as st
+ from transformer_lens import HookedTransformer, utils
+ from io import StringIO
+ import sys
+ import torch
+ from functools import partial
+ import plotly.offline as pyo
+ import plotly.graph_objs as go
+ import numpy as np
+ import plotly.express as px
+ import circuitsvis as cv
+
+ # Little bit of front end for model selector
+
+ # Radio buttons
+ model_name = st.sidebar.radio("Model (only use patching for\nsmall (<4L) models due to memory limits)", [
+     "gelu-1l",
+     "gelu-2l",
+     "gelu-3l",
+     "gelu-4l",
+     "attn-only-1l",
+     "attn-only-2l",
+     "attn-only-3l",
+     "attn-only-4l",
+     "solu-1l",
+     "solu-2l",
+     "solu-3l",
+     "solu-4l",
+     "solu-6l",
+     "solu-8l",
+     "solu-10l",
+     "solu-12l",
+     "gpt2-small",
+     "gpt2-medium",
+     # "gpt2-large",
+     # "gpt2-xl",
+ ], index=1)
+
+
+ # Backend code
+
+ model = HookedTransformer.from_pretrained(model_name)
+
+ def predict_next_token(prompt):
+     logits = model(prompt)[0, -1]
+     answer_index = logits.argmax()
+     answer = model.tokenizer.decode(answer_index)
+     answer = f"<b>|{answer}|</b> (answer by {model.cfg.model_name})"
+     return answer
+
+ def test_prompt(prompt, answer):
+     # Capture the printed output of utils.test_prompt; restore stdout
+     # afterwards even if the call raises
+     output = StringIO()
+     sys.stdout = output
+     try:
+         utils.test_prompt(prompt, answer, model)
+     finally:
+         sys.stdout = sys.__stdout__
+     return output.getvalue()
+
+ def compute_residual_stream_patch(clean_prompt=None, answer=None, corrupt_prompt=None, corrupt_answer=None, layers=None):
+     model.reset_hooks()
+     clean_answer_index = model.tokenizer.encode(answer)[0]
+     corrupt_answer_index = model.tokenizer.encode(corrupt_answer)[0]
+     clean_tokens = model.to_str_tokens(clean_prompt)
+     _, corrupt_cache = model.run_with_cache(corrupt_prompt)
+     # Patching function
+     def patch_residual_stream(activations, hook, layer="blocks.6.hook_resid_post", pos=5):
+         activations[:, pos, :] = corrupt_cache[layer][:, pos, :]
+         return activations
+     # Compute logit diffs
+     n_layers = len(layers)
+     n_pos = len(clean_tokens)
+     patching_effect = torch.zeros(n_layers, n_pos)
+     for l, layer in enumerate(layers):
+         for pos in range(n_pos):
+             fwd_hooks = [(layer, partial(patch_residual_stream, layer=layer, pos=pos))]
+             prediction_logits = model.run_with_hooks(clean_prompt, fwd_hooks=fwd_hooks)[0, -1]
+             patching_effect[l, pos] = prediction_logits[clean_answer_index] - prediction_logits[corrupt_answer_index]
+     return patching_effect
+
+ def compute_attn_patch(clean_prompt=None, answer=None, corrupt_prompt=None, corrupt_answer=None):
+     use_attn_result_prev = model.cfg.use_attn_result
+     model.cfg.use_attn_result = True
+     clean_answer_index = model.tokenizer.encode(answer)[0]
+     corrupt_answer_index = model.tokenizer.encode(corrupt_answer)[0]
+     clean_tokens = model.to_str_tokens(clean_prompt)
+     _, corrupt_cache = model.run_with_cache(corrupt_prompt)
+     # Patching function
+     def patch_head_result(activations, hook, head=None, pos=None):
+         activations[:, pos, head, :] = corrupt_cache[hook.name][:, pos, head, :]
+         return activations
+
+     n_layers = model.cfg.n_layers
+     n_heads = model.cfg.n_heads
+     n_pos = len(clean_tokens)
+     patching_effect = torch.zeros(n_layers*n_heads, n_pos)
+     for layer in range(n_layers):
+         for head in range(n_heads):
+             for pos in range(n_pos):
+                 fwd_hooks = [(f"blocks.{layer}.attn.hook_result", partial(patch_head_result, head=head, pos=pos))]
+                 prediction_logits = model.run_with_hooks(clean_prompt, fwd_hooks=fwd_hooks)[0, -1]
+                 patching_effect[n_heads*layer+head, pos] = prediction_logits[clean_answer_index] - prediction_logits[corrupt_answer_index]
+     model.cfg.use_attn_result = use_attn_result_prev
+     return patching_effect
+
+ def imshow(tensor, xlabel="X", ylabel="Y", zlabel=None, xticks=None, yticks=None, c_midpoint=0.0, c_scale="RdBu", **kwargs):
+     tensor = utils.to_numpy(tensor)
+     # Plotly expects string tick labels; guard the None defaults
+     xticks = [str(x) for x in xticks] if xticks is not None else None
+     yticks = [str(y) for y in yticks] if yticks is not None else None
+     labels = {"x": xlabel, "y": ylabel}
+     if zlabel is not None:
+         labels["color"] = zlabel
+     fig = px.imshow(tensor, x=xticks, y=yticks, labels=labels, color_continuous_midpoint=c_midpoint,
+                     color_continuous_scale=c_scale, **kwargs)
+     return fig
+
+ def plot_residual_stream_patch(clean_prompt=None, answer=None, corrupt_prompt=None, corrupt_answer=None):
+     layers = ["blocks.0.hook_resid_pre", *[f"blocks.{i}.hook_resid_post" for i in range(model.cfg.n_layers)]]
+     clean_tokens = model.to_str_tokens(clean_prompt)
+     token_labels = [f"(pos {i:2}) {t}" for i, t in enumerate(clean_tokens)]
+     patching_effect = compute_residual_stream_patch(clean_prompt=clean_prompt, answer=answer, corrupt_prompt=corrupt_prompt, corrupt_answer=corrupt_answer, layers=layers)
+     fig = imshow(patching_effect, xticks=token_labels, yticks=layers, xlabel="Position", ylabel="Layer",
+                  zlabel="Logit Difference", title="Patching residual stream at specific layer and position")
+     return fig
+
+ def plot_attn_patch(clean_prompt=None, answer=None, corrupt_prompt=None, corrupt_answer=None):
+     clean_tokens = model.to_str_tokens(clean_prompt)
+     n_layers = model.cfg.n_layers
+     n_heads = model.cfg.n_heads
+     layerhead_labels = [f"{l}.{h}" for l in range(n_layers) for h in range(n_heads)]
+     token_labels = [f"(pos {i:2}) {t}" for i, t in enumerate(clean_tokens)]
+     patching_effect = compute_attn_patch(clean_prompt=clean_prompt, answer=answer, corrupt_prompt=corrupt_prompt, corrupt_answer=corrupt_answer)
+     return imshow(patching_effect, xticks=token_labels, yticks=layerhead_labels, xlabel="Position", ylabel="Layer.Head",
+                   zlabel="Logit Difference", title="Patching attention outputs for specific layer, head, and position", width=600, height=300+200*n_layers)
+
+
+ # Frontend code
+ st.title("Simple Trafo Mech Int")
+ st.subheader("Transformer Mechanistic Interpretability")
+ st.markdown("Powered by [TransformerLens](https://github.com/neelnanda-io/TransformerLens/)")
+ st.markdown("For _what_ these plots are, and _why_, see this [tutorial](https://docs.google.com/document/d/1e6cs8d9QNretWvOLsv_KaMp6kSPWpJEW0GWc0nwjqxo/).")
+
+ # Predict next token
+ st.header("Predict the next token")
+ st.markdown("A simple test UI: enter a prompt and the model will predict the next token.")
+ prompt_simple = st.text_input("Prompt:", "Today, the weather is", key="prompt_simple")
+
+ if "prompt_simple_output" not in st.session_state:
+     st.session_state.prompt_simple_output = None
+
+ if st.button("Run model", key="key_button_prompt_simple"):
+     res = predict_next_token(prompt_simple)
+     st.session_state.prompt_simple_output = res
+
+ if st.session_state.prompt_simple_output:
+     st.markdown(st.session_state.prompt_simple_output, unsafe_allow_html=True)
+
+
+ # Test prompt
+ st.header("Verbose test prompt")
+ st.markdown("Enter a prompt and the correct answer; the model will run the prompt and print the results.")
+
+ prompt = st.text_input("Prompt:", "The most popular programming language is", key="prompt")
+ answer = st.text_input("Answer:", " Java", key="answer")
+
+ if "test_prompt_output" not in st.session_state:
+     st.session_state.test_prompt_output = None
+
+ if st.button("Run model", key="key_button_test_prompt"):
+     res = test_prompt(prompt, answer)
+     st.session_state.test_prompt_output = res
+
+ if st.session_state.test_prompt_output:
+     st.code(st.session_state.test_prompt_output)
+
+
+ # Residual stream patching
+
+ st.header("Residual stream patching")
+ st.markdown("Enter a clean prompt, correct answer, corrupt prompt, and corrupt answer; the model will compute the patching effect.")
+
+ default_clean_prompt = "Her name was Alex Hart. Tomorrow at lunch time Alex"
+ default_clean_answer = "Hart"
+ default_corrupt_prompt = "Her name was Alex Carroll. Tomorrow at lunch time Alex"
+ default_corrupt_answer = "Carroll"
+
+ clean_prompt = st.text_input("Clean Prompt:", default_clean_prompt)
+ clean_answer = st.text_input("Correct Answer:", default_clean_answer)
+ corrupt_prompt = st.text_input("Corrupt Prompt:", default_corrupt_prompt)
+ corrupt_answer = st.text_input("Corrupt Answer:", default_corrupt_answer)
+
+ if "residual_stream_patch_out" not in st.session_state:
+     st.session_state.residual_stream_patch_out = None
+
+ if st.button("Run model", key="key_button_residual_stream_patch"):
+     fig = plot_residual_stream_patch(clean_prompt=clean_prompt, answer=clean_answer, corrupt_prompt=corrupt_prompt, corrupt_answer=corrupt_answer)
+     st.session_state.residual_stream_patch_out = fig
+
+ if st.session_state.residual_stream_patch_out:
+     st.plotly_chart(st.session_state.residual_stream_patch_out)
+
+
+ # Attention head output
+
+ st.header("Attention head output patching")
+ st.markdown("Enter a clean prompt, correct answer, corrupt prompt, and corrupt answer; the model will compute the patching effect.")
+
+ clean_prompt_attn = st.text_input("Clean Prompt:", default_clean_prompt, key="key2_clean_prompt_attn")
+ clean_answer_attn = st.text_input("Correct Answer:", default_clean_answer, key="key2_clean_answer_attn")
+ corrupt_prompt_attn = st.text_input("Corrupt Prompt:", default_corrupt_prompt, key="key2_corrupt_prompt_attn")
+ corrupt_answer_attn = st.text_input("Corrupt Answer:", default_corrupt_answer, key="key2_corrupt_answer_attn")
+
+ if "attn_head_patch_out" not in st.session_state:
+     st.session_state.attn_head_patch_out = None
+
+ if st.button("Run model", key="key_button_attn_head_patch"):
+     fig = plot_attn_patch(clean_prompt=clean_prompt_attn, answer=clean_answer_attn, corrupt_prompt=corrupt_prompt_attn, corrupt_answer=corrupt_answer_attn)
+     st.session_state.attn_head_patch_out = fig
+
+ if st.session_state.attn_head_patch_out:
+     st.plotly_chart(st.session_state.attn_head_patch_out)
+
+
+ # Attention Head Visualization
+
+ st.header("Attention Pattern Visualization")
+ st.markdown("Powered by [CircuitsVis](https://github.com/alan-cooney/CircuitsVis)")
+ st.markdown("Enter a prompt to show its attention patterns.")
+
+ default_prompt_attn = "Her name was Alex Hart. Tomorrow at lunch time Alex"
+ prompt_attn = st.text_input("Prompt:", default_prompt_attn)
+
+ if "attn_html" not in st.session_state:
+     st.session_state.attn_html = None
+
+ if st.button("Run model", key="key_button_attention_head"):
+     _, cache = model.run_with_cache(prompt_attn)
+     st.session_state.attn_html = []
+     for layer in range(model.cfg.n_layers):
+         html = cv.attention.attention_patterns(tokens=model.to_str_tokens(prompt_attn),
+                                                attention=cache[f'blocks.{layer}.attn.hook_pattern'][0])
+         st.session_state.attn_html.append(html.show_code())
+
+ if st.session_state.attn_html:
+     for layer in range(len(st.session_state.attn_html)):
+         st.write(f"Attention patterns Layer {layer}:")
+         st.components.v1.html(st.session_state.attn_html[layer], height=500)
+
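For reference, the core patching loop that app.py wraps in Streamlit can also be run standalone. A minimal sketch, assuming the same TransformerLens APIs used above; the model choice, layer, position, and prompts are illustrative only:

import torch
from functools import partial
from transformer_lens import HookedTransformer

model = HookedTransformer.from_pretrained("gelu-2l")  # illustrative model choice
clean_prompt = "Her name was Alex Hart. Tomorrow at lunch time Alex"
corrupt_prompt = "Her name was Alex Carroll. Tomorrow at lunch time Alex"
clean_idx = model.tokenizer.encode(" Hart")[0]
corrupt_idx = model.tokenizer.encode(" Carroll")[0]

# Cache all activations from the corrupted run
_, corrupt_cache = model.run_with_cache(corrupt_prompt)

def patch(activations, hook, pos):
    # Overwrite one residual-stream position with its corrupted value
    activations[:, pos, :] = corrupt_cache[hook.name][:, pos, :]
    return activations

layer, pos = 1, 4  # illustrative layer and position to patch
logits = model.run_with_hooks(
    clean_prompt,
    fwd_hooks=[(f"blocks.{layer}.hook_resid_post", partial(patch, pos=pos))],
)[0, -1]
# A large drop in this logit difference means the patched activation mattered
print((logits[clean_idx] - logits[corrupt_idx]).item())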
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ git+https://github.com/neelnanda-io/TransformerLens/
+ torch
+ flask
+ gunicorn
+ plotly
+ circuitsvis
+ streamlit
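
To run the Space locally (a sketch, assuming a working Python 3 environment): install the dependencies with `pip install -r requirements.txt`, then launch the entry point declared in README.md with `streamlit run app.py`.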