Kieran Fraser committed on
Commit
778d12b
1 Parent(s): f6d47e5

Initial commit adding UI v1.


Signed-off-by: Kieran Fraser <Kieran.Fraser@ibm.com>

.gitignore ADDED
@@ -0,0 +1,260 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ pip-wheel-metadata/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ !.vscode/*.code-snippets
+ !.vscode/extensions.json
+ !.vscode/launch.json
+ !.vscode/settings.json
+ !.vscode/tasks.json
+ *$py.class
+ *.code-workspace
+ *.cover
+ *.egg
+ *.egg-info/
+ *.iws
+ *.log
+ *.manifest
+ *.mo
+ *.pot
+ *.py,cover
+ *.py[cod]
+ *.sage.py
+ *.so
+ *.spec
+ *.vsix
+ .Python
+ .cache
+ .coverage
+ .coverage.*
+ .dmypy.json
+ .eggs/
+ .env
+ .history
+ .history/
+ .hypothesis/
+ .idea/$CACHE_FILE$
+ .idea/**/aws.xml
+ .idea/**/azureSettings.xml
+ .idea/**/contentModel.xml
+ .idea/**/dataSources.ids
+ .idea/**/dataSources.local.xml
+ .idea/**/dataSources/
+ .idea/**/dbnavigator.xml
+ .idea/**/dictionaries
+ .idea/**/dynamic.xml
+ .idea/**/gradle.xml
+ .idea/**/libraries
+ .idea/**/markdown-navigator-enh.xml
+ .idea/**/markdown-navigator.xml
+ .idea/**/markdown-navigator/
+ .idea/**/mongoSettings.xml
+ .idea/**/shelf
+ .idea/**/sonarIssues.xml
+ .idea/**/sonarlint/
+ .idea/**/sqlDataSources.xml
+ .idea/**/tasks.xml
+ .idea/**/uiDesigner.xml
+ .idea/**/usage.statistics.xml
+ .idea/**/workspace.xml
+ .idea/caches/build_file_checksums.ser
+ .idea/codestream.xml
+ .idea/httpRequests
+ .idea/replstate.xml
+ .idea/sonarlint/
+ .idea_modules/
+ .installed.cfg
+ .ionide
+ .ipynb_checkpoints
+ .mypy_cache/
+ .nox/
+ .pdm.toml
+ .pybuilder/
+ .pyre/
+ .pytest_cache/
+ .pytype/
+ .ropeproject
+ .scrapy
+ .spyderproject
+ .spyproject
+ .tox/
+ .venv
+ .vscode/*
+ .vscode/*.code-snippets
+ .webassets-cache
+ /site
+ ENV/
+ MANIFEST
+ __pycache__/
+ __pypackages__/
+ atlassian-ide-plugin.xml
+ build/
+ celerybeat-schedule
+ celerybeat.pid
+ cmake-build-*/
+ com_crashlytics_export_strings.xml
+ cover/
+ coverage.xml
+ crashlytics-build.properties
+ crashlytics.properties
+ cython_debug/
+ db.sqlite3
+ db.sqlite3-journal
+ develop-eggs/
+ dist/
+ dmypy.json
+ docs/_build/
+ downloads/
+ eggs/
+ env.bak/
+ env/
+ fabric.properties
+ htmlcov/
+ instance/
+ ipython_config.py
+ lib/
+ lib64/
+ local_settings.py
+ nosetests.xml
+ out/
+ parts/
+ pip-delete-this-directory.txt
+ pip-log.txt
+ profile_default/
+ sdist/
+ share/python-wheels/
+ target/
+ var/
+ venv.bak/
+ venv/
+ wheels/
+ Pipfile
+ .vscode
+ Pipfile.lock
+ Data - DELETE AT THE END OF THE PROJECT
README.md CHANGED
@@ -1,5 +1,5 @@
  ---
- title: Art Huggingface Demo
+ title: ART Huggingface Demo
  emoji: 📚
  colorFrom: yellow
  colorTo: purple
app.py ADDED
@@ -0,0 +1,543 @@
+ '''
+ ART-JATIC Gradio Example App
+
+ To run:
+ - clone the repository
+ - execute: gradio app.py or python app.py
+ - navigate to local URL e.g. http://127.0.0.1:7860
+ '''
+
+ import gradio as gr
+ import numpy as np
+ from carbon_theme import Carbon
+
+ import torch
+ import transformers
+
+ from art.estimators.classification.hugging_face import HuggingFaceClassifierPyTorch
+ from art.attacks.evasion import ProjectedGradientDescentPyTorch, AdversarialPatchPyTorch
+ from art.utils import load_dataset
+
+ from art.attacks.poisoning import PoisoningAttackBackdoor
+ from art.attacks.poisoning.perturbations import insert_image
+
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+ css = """
+ .input-image { margin: auto !important }
+ .plot-padding { padding: 20px; }
+ """
+
+
+ def clf_evasion_evaluate(*args):
+     '''
+     Run a classification task evaluation.
+     '''
+     attack = args[0]
+     model_type = args[1]
+     model_url = args[2]
+     model_channels = args[3]
+     model_height = args[4]
+     model_width = args[5]
+     model_classes = args[6]
+     model_clip = args[7]
+     model_upsample = args[8]
+     attack_max_iter = args[9]
+     attack_eps = args[10]
+     attack_eps_steps = args[11]
+     x_location = args[12]
+     y_location = args[13]
+     patch_height = args[14]
+     patch_width = args[15]
+     data_type = args[-1]
+
+     if model_type == "Example":
+         # Example model: DeiT-tiny fine-tuned on CIFAR-10; the upsampler scales
+         # 32x32 inputs up to the 224x224 resolution the model expects.
+         model = transformers.AutoModelForImageClassification.from_pretrained(
+             'facebook/deit-tiny-distilled-patch16-224',
+             ignore_mismatched_sizes=True,
+             num_labels=10
+         )
+         upsampler = torch.nn.Upsample(scale_factor=7, mode='nearest')
+         optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
+         loss_fn = torch.nn.CrossEntropyLoss()
+
+         hf_model = HuggingFaceClassifierPyTorch(
+             model=model,
+             loss=loss_fn,
+             optimizer=optimizer,
+             input_shape=(3, 32, 32),
+             nb_classes=10,
+             clip_values=(0, 1),
+             processor=upsampler
+         )
+         model_checkpoint_path = './state_dicts/deit_cifar_base_model.pt'
+         hf_model.model.load_state_dict(torch.load(model_checkpoint_path, map_location=device))
+
+     if data_type == "Example":
+         (x_train, y_train), (_, _), _, _ = load_dataset('cifar10')
+         x_train = np.transpose(x_train, (0, 3, 1, 2)).astype(np.float32)
+         y_train = np.argmax(y_train, axis=1)
+
+         classes = np.unique(y_train)
+         samples_per_class = 1
+
+         x_subset = []
+         y_subset = []
+
+         for c in classes:
+             indices = y_train == c
+             x_subset.append(x_train[indices][:samples_per_class])
+             y_subset.append(y_train[indices][:samples_per_class])
+
+         x_subset = np.concatenate(x_subset)
+         y_subset = np.concatenate(y_subset)
+
+         label_names = [
+             'airplane',
+             'automobile',
+             'bird',
+             'cat',
+             'deer',
+             'dog',
+             'frog',
+             'horse',
+             'ship',
+             'truck',
+         ]
+
+     outputs = hf_model.predict(x_subset)
+     clean_preds = np.argmax(outputs, axis=1)
+     clean_acc = np.mean(clean_preds == y_subset)
+     benign_gallery_out = []
+     for i, im in enumerate(x_subset):
+         benign_gallery_out.append((im.transpose(1, 2, 0), label_names[np.argmax(outputs[i])]))
+
+     if attack == "PGD":
+         attacker = ProjectedGradientDescentPyTorch(hf_model, max_iter=attack_max_iter,
+                                                    eps=attack_eps, eps_step=attack_eps_steps)
+         x_adv = attacker.generate(x_subset)
+
+         outputs = hf_model.predict(x_adv)
+         adv_preds = np.argmax(outputs, axis=1)
+         adv_acc = np.mean(adv_preds == y_subset)
+         adv_gallery_out = []
+         for i, im in enumerate(x_adv):
+             adv_gallery_out.append((im.transpose(1, 2, 0), label_names[np.argmax(outputs[i])]))
+
+         # Shift and scale the perturbation so it is visible in the gallery.
+         delta = ((x_subset - x_adv) + 8 / 255) * 10
+         delta_gallery_out = delta.transpose(0, 2, 3, 1)
+
+     if attack == "Adversarial Patch":
+         scale_min = 0.3
+         scale_max = 1.0
+         rotation_max = 0
+         learning_rate = 5000.
+         attacker = AdversarialPatchPyTorch(hf_model, scale_max=scale_max,
+                                            scale_min=scale_min,
+                                            rotation_max=rotation_max,
+                                            learning_rate=learning_rate,
+                                            max_iter=attack_max_iter, patch_type='square',
+                                            patch_location=(x_location, y_location),
+                                            patch_shape=(3, patch_height, patch_width))
+         patch, _ = attacker.generate(x_subset)
+         x_adv = attacker.apply_patch(x_subset, scale=0.3)
+
+         outputs = hf_model.predict(x_adv)
+         adv_preds = np.argmax(outputs, axis=1)
+         adv_acc = np.mean(adv_preds == y_subset)
+         adv_gallery_out = []
+         for i, im in enumerate(x_adv):
+             adv_gallery_out.append((im.transpose(1, 2, 0), label_names[np.argmax(outputs[i])]))
+
+         delta_gallery_out = np.expand_dims(patch, 0).transpose(0, 2, 3, 1)
+
+     return benign_gallery_out, adv_gallery_out, delta_gallery_out, clean_acc, adv_acc
+
+
+ def clf_poison_evaluate(*args):
+     attack = args[0]
+     model_type = args[1]
+     trigger_image = args[2]
+     target_class = args[3]
+     data_type = args[-1]
+
+     if model_type == "Example":
+         model = transformers.AutoModelForImageClassification.from_pretrained(
+             'facebook/deit-tiny-distilled-patch16-224',
+             ignore_mismatched_sizes=True,
+             num_labels=10
+         )
+         optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
+         loss_fn = torch.nn.CrossEntropyLoss()
+
+         hf_model = HuggingFaceClassifierPyTorch(
+             model=model,
+             loss=loss_fn,
+             optimizer=optimizer,
+             input_shape=(3, 224, 224),
+             nb_classes=10,
+             clip_values=(0, 1),
+         )
+
+     if data_type == "Example":
+         import torchvision
+         transform = torchvision.transforms.Compose([
+             torchvision.transforms.Resize((224, 224)),
+             torchvision.transforms.ToTensor(),
+         ])
+         train_dataset = torchvision.datasets.ImageFolder(root="./data/imagenette2-320/train", transform=transform)
+         labels = np.asarray(train_dataset.targets)
+         classes = np.unique(labels)
+         samples_per_class = 100
+
+         x_subset = []
+         y_subset = []
+
+         for c in classes:
+             indices = np.where(labels == c)[0][:samples_per_class]
+             for i in indices:
+                 x_subset.append(train_dataset[i][0])
+                 y_subset.append(train_dataset[i][1])
+
+         x_subset = np.stack(x_subset)
+         y_subset = np.asarray(y_subset)
+         label_names = [
+             'fish',
+             'dog',
+             'cassette player',
+             'chainsaw',
+             'church',
+             'french horn',
+             'garbage truck',
+             'gas pump',
+             'golf ball',
+             'parachute',
+         ]
+
+     if attack == "Backdoor":
+         from PIL import Image
+         # insert_image reads the trigger from disk, so save the uploaded image first.
+         im = Image.fromarray(trigger_image)
+         im.save("./tmp.png")
+
+         def poison_func(x):
+             return insert_image(
+                 x,
+                 backdoor_path='./tmp.png',
+                 channels_first=True,
+                 random=False,
+                 x_shift=0,
+                 y_shift=0,
+                 size=(32, 32),
+                 mode='RGB',
+                 blend=0.8
+             )
+         backdoor = PoisoningAttackBackdoor(poison_func)
+         source_class = 0
+         target_class = label_names.index(target_class)
+         poison_percent = 0.5
+
+         x_poison = np.copy(x_subset)
+         y_poison = np.copy(y_subset)
+         is_poison = np.zeros(len(x_subset)).astype(bool)
+
+         indices = np.where(y_subset == source_class)[0]
+         num_poison = int(poison_percent * len(indices))
+
+         # Stamp the trigger onto a fraction of the source-class images and relabel them.
+         for i in indices[:num_poison]:
+             x_poison[i], _ = backdoor.poison(x_poison[i], [])
+             y_poison[i] = target_class
+             is_poison[i] = True
+
+         poison_indices = np.where(is_poison)[0]
+         hf_model.fit(x_poison, y_poison, nb_epochs=2)
+
+         clean_x = x_poison[~is_poison]
+         clean_y = y_poison[~is_poison]
+
+         outputs = hf_model.predict(clean_x)
+         clean_preds = np.argmax(outputs, axis=1)
+         clean_acc = np.mean(clean_preds == clean_y)
+
+         clean_out = []
+         for i, im in enumerate(clean_x):
+             clean_out.append((im.transpose(1, 2, 0), label_names[clean_preds[i]]))
+
+         poison_x = x_poison[is_poison]
+         poison_y = y_poison[is_poison]
+
+         outputs = hf_model.predict(poison_x)
+         poison_preds = np.argmax(outputs, axis=1)
+         poison_acc = np.mean(poison_preds == poison_y)
+
+         poison_out = []
+         for i, im in enumerate(poison_x):
+             poison_out.append((im.transpose(1, 2, 0), label_names[poison_preds[i]]))
+
+     return clean_out, poison_out, clean_acc, poison_acc
+
+
+ def show_params(type):
+     '''
+     Show model parameters based on selected model type.
+     '''
+     if type != "Example":
+         return gr.Column(visible=True)
+     return gr.Column(visible=False)
+
+
+ def run_inference(*args):
+     model_type = args[0]
+     model_url = args[1]
+     model_channels = args[2]
+     model_height = args[3]
+     model_width = args[4]
+     model_classes = args[5]
+     model_clip = args[6]
+     model_upsample = args[7]
+     data_type = args[8]
+
+     if model_type == "Example":
+         model = transformers.AutoModelForImageClassification.from_pretrained(
+             'facebook/deit-tiny-distilled-patch16-224',
+             ignore_mismatched_sizes=True,
+             num_labels=10
+         )
+         upsampler = torch.nn.Upsample(scale_factor=7, mode='nearest')
+         optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
+         loss_fn = torch.nn.CrossEntropyLoss()
+
+         hf_model = HuggingFaceClassifierPyTorch(
+             model=model,
+             loss=loss_fn,
+             optimizer=optimizer,
+             input_shape=(3, 32, 32),
+             nb_classes=10,
+             clip_values=(0, 1),
+             processor=upsampler
+         )
+         model_checkpoint_path = './state_dicts/deit_cifar_base_model.pt'
+         hf_model.model.load_state_dict(torch.load(model_checkpoint_path, map_location=device))
+
+     if data_type == "Example":
+         (x_train, y_train), (_, _), _, _ = load_dataset('cifar10')
+         x_train = np.transpose(x_train, (0, 3, 1, 2)).astype(np.float32)
+         y_train = np.argmax(y_train, axis=1)
+
+         classes = np.unique(y_train)
+         samples_per_class = 5
+
+         x_subset = []
+         y_subset = []
+
+         for c in classes:
+             indices = y_train == c
+             x_subset.append(x_train[indices][:samples_per_class])
+             y_subset.append(y_train[indices][:samples_per_class])
+
+         x_subset = np.concatenate(x_subset)
+         y_subset = np.concatenate(y_subset)
+
+         label_names = [
+             'airplane',
+             'automobile',
+             'bird',
+             'cat',
+             'deer',
+             'dog',
+             'frog',
+             'horse',
+             'ship',
+             'truck',
+         ]
+
+     outputs = hf_model.predict(x_subset)
+     clean_preds = np.argmax(outputs, axis=1)
+     clean_acc = np.mean(clean_preds == y_subset)
+     gallery_out = []
+     for i, im in enumerate(x_subset):
+         gallery_out.append((im.transpose(1, 2, 0), label_names[np.argmax(outputs[i])]))
+
+     return gallery_out, clean_acc
+
+
+ # e.g. To use a local alternative theme: carbon_theme = Carbon()
+ carbon_theme = Carbon()
+ with gr.Blocks(css=css, theme=gr.themes.Base()) as demo:
+     import art
+     text = art.__version__
+
+     with gr.Row():
+         with gr.Column(scale=1):
+             gr.Image(value="./art_lfai.png", show_label=False, show_download_button=False, width=100)
+         with gr.Column(scale=20):
+             gr.Markdown(f"<h1>Red-teaming HuggingFace with ART (v{text})</h1>", elem_classes="plot-padding")
+
+     gr.Markdown('''This app guides you through a common workflow for assessing the robustness
+                 of HuggingFace models using standard datasets and state-of-the-art adversarial attacks
+                 found within the Adversarial Robustness Toolbox (ART).<br/><br/>Follow the instructions in each
+                 step below to carry out your own evaluation and determine the risks associated with using
+                 some of your favorite models! <b>#redteaming</b> <b>#trustworthyAI</b>''')
+
+     # Model and Dataset Selection
+     with gr.Accordion("1. Model selection", open=False):
+
+         gr.Markdown("Select a Hugging Face model to launch an adversarial attack against.")
+         model_type = gr.Radio(label="Hugging Face Model", choices=["Example", "Other"], value="Example")
+         with gr.Column(visible=False) as other_model:
+             model_url = gr.Text(label="Model URL",
+                                 placeholder="e.g. facebook/deit-tiny-distilled-patch16-224",
+                                 value='facebook/deit-tiny-distilled-patch16-224')
+             model_input_channels = gr.Text(label="Input channels", value=3)
+             model_input_height = gr.Text(label="Input height", value=32)
+             model_input_width = gr.Text(label="Input width", value=32)
+             model_num_classes = gr.Text(label="Number of classes", value=10)
+             model_clip_values = gr.Radio(label="Clip values", choices=[1, 255], value=1)
+             model_upsample_scaling = gr.Slider(label="Upsample scale factor", minimum=1, maximum=10, value=7)
+
+         model_type.change(show_params, model_type, other_model)
+
+     with gr.Accordion("2. Data selection", open=False):
+         gr.Markdown("This section enables you to select a dataset for evaluation or upload your own image.")
+         data_type = gr.Radio(label="Hugging Face dataset", choices=["Example", "URL", "Local"], value="Example")
+         with gr.Column(visible=False) as other_dataset:
+             gr.Markdown("Coming soon.")
+         data_type.change(show_params, data_type, other_dataset)
+
+     with gr.Accordion("3. Model inference", open=False):
+
+         with gr.Row():
+             with gr.Column(scale=1):
+                 preds_gallery = gr.Gallery(label="Predictions", preview=False, show_download_button=True)
+             with gr.Column(scale=2):
+                 clean_accuracy = gr.Number(label="Clean accuracy",
+                                            info="The accuracy achieved by the model in normal (non-adversarial) conditions.")
+                 bt_run_inference = gr.Button("Run inference")
+                 bt_clear = gr.ClearButton(components=[preds_gallery, clean_accuracy])
+
+         bt_run_inference.click(run_inference, inputs=[model_type, model_url, model_input_channels, model_input_height, model_input_width,
+                                                       model_num_classes, model_clip_values, model_upsample_scaling, data_type],
+                                outputs=[preds_gallery, clean_accuracy])
+
+     # Attack Selection
+     with gr.Accordion("4. Run attack", open=False):
+
+         gr.Markdown("In this section you can select the type of adversarial attack you wish to deploy against your selected model.")
+
+         with gr.Accordion("Evasion", open=False):
+             gr.Markdown("Evasion attacks are deployed to cause a model to incorrectly classify or detect items/objects in an image.")
+
+             with gr.Accordion("Projected Gradient Descent", open=False):
+                 gr.Markdown("This attack uses PGD to identify adversarial examples.")
+
+                 with gr.Row():
+
+                     with gr.Column(scale=1):
+                         attack = gr.Textbox(visible=True, value="PGD", label="Attack", interactive=False)
+                         max_iter = gr.Slider(minimum=1, maximum=1000, label="Max iterations", value=10)
+                         eps = gr.Slider(minimum=0.0001, maximum=255, label="Epsilon", value=8/255)
+                         eps_steps = gr.Slider(minimum=0.0001, maximum=255, label="Epsilon steps", value=1/255)
+                         bt_eval_pgd = gr.Button("Evaluate")
+
+                     # Evaluation Output. Visualisations of success/failures of running evaluation attacks.
+                     with gr.Column(scale=3):
+                         with gr.Row():
+                             with gr.Column():
+                                 original_gallery = gr.Gallery(label="Original", preview=False, show_download_button=True)
+                                 benign_output = gr.Label(num_top_classes=3, visible=False)
+                                 clean_accuracy = gr.Number(label="Clean Accuracy", precision=2)
+                                 quality_plot = gr.LinePlot(label="Gradient Quality", x='iteration', y='value', color='metric',
+                                                            x_title='Iteration', y_title='Avg in Gradients (%)',
+                                                            caption="""Illustrates the average percent of zero, infinity
+                                                            or NaN gradients identified in images
+                                                            across all batches.""", elem_classes="plot-padding", visible=False)
+
+                             with gr.Column():
+                                 adversarial_gallery = gr.Gallery(label="Adversarial", preview=False, show_download_button=True)
+                                 adversarial_output = gr.Label(num_top_classes=3, visible=False)
+                                 robust_accuracy = gr.Number(label="Robust Accuracy", precision=2)
+
+                             with gr.Column():
+                                 delta_gallery = gr.Gallery(label="Added perturbation", preview=False, show_download_button=True)
+
+                 # PGD does not use the patch location/size inputs, so the attack textbox is passed as a placeholder for them.
+                 bt_eval_pgd.click(clf_evasion_evaluate, inputs=[attack, model_type, model_url, model_input_channels, model_input_height, model_input_width,
+                                                                 model_num_classes, model_clip_values, model_upsample_scaling,
+                                                                 max_iter, eps, eps_steps, attack, attack, attack, attack, data_type],
+                                   outputs=[original_gallery, adversarial_gallery, delta_gallery, clean_accuracy,
+                                            robust_accuracy])
+
+             with gr.Accordion("Adversarial Patch", open=False):
+                 gr.Markdown("This attack crafts an adversarial patch that facilitates evasion.")
+
+                 with gr.Row():
+
+                     with gr.Column(scale=1):
+                         attack = gr.Textbox(visible=True, value="Adversarial Patch", label="Attack", interactive=False)
+                         max_iter = gr.Slider(minimum=1, maximum=1000, label="Max iterations", value=10)
+                         x_location = gr.Slider(minimum=1, maximum=32, label="Location (x)", value=1)
+                         y_location = gr.Slider(minimum=1, maximum=32, label="Location (y)", value=1)
+                         patch_height = gr.Slider(minimum=1, maximum=32, label="Patch height", value=12)
+                         patch_width = gr.Slider(minimum=1, maximum=32, label="Patch width", value=12)
+                         eval_btn_patch = gr.Button("Evaluate")
+
+                     # Evaluation Output. Visualisations of success/failures of running evaluation attacks.
+                     with gr.Column(scale=3):
+                         with gr.Row():
+                             with gr.Column():
+                                 original_gallery = gr.Gallery(label="Original", preview=False, show_download_button=True)
+                                 clean_accuracy = gr.Number(label="Clean Accuracy", precision=2)
+
+                             with gr.Column():
+                                 adversarial_gallery = gr.Gallery(label="Adversarial", preview=False, show_download_button=True)
+                                 robust_accuracy = gr.Number(label="Robust Accuracy", precision=2)
+
+                             with gr.Column():
+                                 delta_gallery = gr.Gallery(label="Patches", preview=False, show_download_button=True)
+
+                 eval_btn_patch.click(clf_evasion_evaluate, inputs=[attack, model_type, model_url, model_input_channels, model_input_height, model_input_width,
+                                                                    model_num_classes, model_clip_values, model_upsample_scaling,
+                                                                    max_iter, eps, eps_steps, x_location, y_location, patch_height, patch_width, data_type],
+                                      outputs=[original_gallery, adversarial_gallery, delta_gallery, clean_accuracy,
+                                               robust_accuracy])
+
+         with gr.Accordion("Poisoning", open=False):
+
+             with gr.Accordion("Backdoor"):
+
+                 with gr.Row():
+                     with gr.Column(scale=1):
+                         attack = gr.Textbox(visible=True, value="Backdoor", label="Attack", interactive=False)
+                         target_class = gr.Radio(label="Target class", info="The class you wish to force the model to predict.",
+                                                 choices=['dog',
+                                                          'cassette player',
+                                                          'chainsaw',
+                                                          'church',
+                                                          'french horn',
+                                                          'garbage truck',
+                                                          'gas pump',
+                                                          'golf ball',
+                                                          'parachute'], value='dog')
+                         trigger_image = gr.Image(label="Trigger Image", value="./baby-on-board.png")
+                         eval_btn_backdoor = gr.Button("Evaluate")
+                     with gr.Column(scale=2):
+                         clean_gallery = gr.Gallery(label="Clean", preview=False, show_download_button=True)
+                         clean_accuracy = gr.Number(label="Clean Accuracy", precision=2)
+                     with gr.Column(scale=2):
+                         poison_gallery = gr.Gallery(label="Poisoned", preview=False, show_download_button=True)
+                         poison_success = gr.Number(label="Poison Success", precision=2)
+
+                 eval_btn_backdoor.click(clf_poison_evaluate, inputs=[attack, model_type, trigger_image, target_class, data_type],
+                                         outputs=[clean_gallery, poison_gallery, clean_accuracy, poison_success])
+
+ if __name__ == "__main__":
+
+     # For development
+     '''demo.launch(show_api=False, debug=True, share=False,
+                    server_name="0.0.0.0",
+                    server_port=7777,
+                    ssl_verify=False,
+                    max_threads=20)'''
+
+     # For deployment
+     demo.launch(share=True, ssl_verify=False)
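
For readers skimming the diff: the core pattern in app.py is to wrap the Hugging Face model in ART's HuggingFaceClassifierPyTorch and hand the wrapper to an attack. Below is a condensed, self-contained sketch of that pattern, using the same model, shapes, and attack settings as the code above; it assumes the CIFAR-10 download succeeds and, unlike the app, skips loading the fine-tuned checkpoint from state_dicts/.

import numpy as np
import torch
import transformers
from art.estimators.classification.hugging_face import HuggingFaceClassifierPyTorch
from art.attacks.evasion import ProjectedGradientDescentPyTorch
from art.utils import load_dataset

# Wrap the Hugging Face model so ART can drive gradients through it.
model = transformers.AutoModelForImageClassification.from_pretrained(
    'facebook/deit-tiny-distilled-patch16-224', ignore_mismatched_sizes=True, num_labels=10)
hf_model = HuggingFaceClassifierPyTorch(
    model=model,
    loss=torch.nn.CrossEntropyLoss(),
    optimizer=torch.optim.Adam(model.parameters(), lr=1e-4),
    input_shape=(3, 32, 32),
    nb_classes=10,
    clip_values=(0, 1),
    processor=torch.nn.Upsample(scale_factor=7, mode='nearest'),  # 32x32 -> 224x224
)

# A few CIFAR-10 images, channels-first, as in the app.
(x_train, y_train), _, _, _ = load_dataset('cifar10')
x = np.transpose(x_train[:8], (0, 3, 1, 2)).astype(np.float32)
y = np.argmax(y_train[:8], axis=1)

# Craft adversarial examples and compare clean vs. robust accuracy.
attacker = ProjectedGradientDescentPyTorch(hf_model, max_iter=10, eps=8/255, eps_step=1/255)
x_adv = attacker.generate(x)
clean_acc = np.mean(np.argmax(hf_model.predict(x), axis=1) == y)
robust_acc = np.mean(np.argmax(hf_model.predict(x_adv), axis=1) == y)
print(f"clean: {clean_acc:.2f}, robust: {robust_acc:.2f}")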
art_lfai.png ADDED
baby-on-board.png ADDED
carbon_colors.py ADDED
@@ -0,0 +1,173 @@
+ from __future__ import annotations
+
+
+ class Color:
+     all = []
+
+     def __init__(
+         self,
+         c50: str,
+         c100: str,
+         c200: str,
+         c300: str,
+         c400: str,
+         c500: str,
+         c600: str,
+         c700: str,
+         c800: str,
+         c900: str,
+         c950: str,
+         name: str | None = None,
+     ):
+         self.c50 = c50
+         self.c100 = c100
+         self.c200 = c200
+         self.c300 = c300
+         self.c400 = c400
+         self.c500 = c500
+         self.c600 = c600
+         self.c700 = c700
+         self.c800 = c800
+         self.c900 = c900
+         self.c950 = c950
+         self.name = name
+         Color.all.append(self)
+
+     def expand(self) -> list[str]:
+         return [
+             self.c50,
+             self.c100,
+             self.c200,
+             self.c300,
+             self.c400,
+             self.c500,
+             self.c600,
+             self.c700,
+             self.c800,
+             self.c900,
+             self.c950,
+         ]
+
+
+ black = Color(
+     name="black",
+     c50="#000000",
+     c100="#000000",
+     c200="#000000",
+     c300="#000000",
+     c400="#000000",
+     c500="#000000",
+     c600="#000000",
+     c700="#000000",
+     c800="#000000",
+     c900="#000000",
+     c950="#000000",
+ )
+
+ blackHover = Color(
+     name="blackHover",
+     c50="#212121",
+     c100="#212121",
+     c200="#212121",
+     c300="#212121",
+     c400="#212121",
+     c500="#212121",
+     c600="#212121",
+     c700="#212121",
+     c800="#212121",
+     c900="#212121",
+     c950="#212121",
+ )
+
+ white = Color(
+     name="white",
+     c50="#ffffff",
+     c100="#ffffff",
+     c200="#ffffff",
+     c300="#ffffff",
+     c400="#ffffff",
+     c500="#ffffff",
+     c600="#ffffff",
+     c700="#ffffff",
+     c800="#ffffff",
+     c900="#ffffff",
+     c950="#ffffff",
+ )
+
+ whiteHover = Color(
+     name="whiteHover",
+     c50="#e8e8e8",
+     c100="#e8e8e8",
+     c200="#e8e8e8",
+     c300="#e8e8e8",
+     c400="#e8e8e8",
+     c500="#e8e8e8",
+     c600="#e8e8e8",
+     c700="#e8e8e8",
+     c800="#e8e8e8",
+     c900="#e8e8e8",
+     c950="#e8e8e8",
+ )
+
+ red = Color(
+     name="red",
+     c50="#fff1f1",
+     c100="#ffd7d9",
+     c200="#ffb3b8",
+     c300="#ff8389",
+     c400="#fa4d56",
+     c500="#da1e28",
+     c600="#a2191f",
+     c700="#750e13",
+     c800="#520408",
+     c900="#2d0709",
+     c950="#2d0709",
+ )
+
+ redHover = Color(
+     name="redHover",
+     c50="#540d11",
+     c100="#66050a",
+     c200="#921118",
+     c300="#c21e25",
+     c400="#b81922",
+     c500="#ee0713",
+     c600="#ff6168",
+     c700="#ff99a0",
+     c800="#ffc2c5",
+     c900="#ffe0e0",
+     c950="#ffe0e0",
+ )
+
+ blue = Color(
+     name="blue",
+     c50="#edf5ff",
+     c100="#d0e2ff",
+     c200="#a6c8ff",
+     c300="#78a9ff",
+     c400="#4589ff",
+     c500="#0f62fe",
+     c600="#0043ce",
+     c700="#002d9c",
+     c800="#001d6c",
+     c900="#001141",
+     c950="#001141",
+ )
+
+ blueHover = Color(
+     name="blueHover",
+     c50="#001f75",
+     c100="#00258a",
+     c200="#0039c7",
+     c300="#0053ff",
+     c400="#0050e6",
+     c500="#1f70ff",
+     c600="#5c97ff",
+     c700="#8ab6ff",
+     c800="#b8d3ff",
+     c900="#dbebff",
+     c950="#dbebff",
+ )
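
A usage note on carbon_colors.py: every Color constructed above appends itself to Color.all, and expand() returns the eleven shades from c50 to c950 in order, the shape Gradio-style hue palettes expect. A minimal sketch:

import carbon_colors

# Palette instances register themselves in Color.all at import time.
print([c.name for c in carbon_colors.Color.all])
# -> ['black', 'blackHover', 'white', 'whiteHover', 'red', 'redHover', 'blue', 'blueHover']

# expand() lists the shades of one hue, c50 first.
print(carbon_colors.blue.expand()[:3])
# -> ['#edf5ff', '#d0e2ff', '#a6c8ff']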
carbon_theme.py ADDED
@@ -0,0 +1,102 @@
+ from __future__ import annotations
+
+ from typing import Iterable
+
+ from gradio.themes.base import Base
+ from gradio.themes.utils import colors, fonts, sizes
+
+ import carbon_colors
+
+
+ class Carbon(Base):
+     def __init__(
+         self,
+         *,
+         primary_hue: carbon_colors.Color | str = carbon_colors.white,
+         secondary_hue: carbon_colors.Color | str = carbon_colors.red,
+         neutral_hue: carbon_colors.Color | str = carbon_colors.blue,
+         spacing_size: sizes.Size | str = sizes.spacing_lg,
+         radius_size: sizes.Size | str = sizes.radius_none,
+         text_size: sizes.Size | str = sizes.text_md,
+         font: fonts.Font
+         | str
+         | Iterable[fonts.Font | str] = (
+             fonts.GoogleFont("IBM Plex Mono"),
+             fonts.GoogleFont("IBM Plex Sans"),
+             fonts.GoogleFont("IBM Plex Serif"),
+         ),
+         font_mono: fonts.Font
+         | str
+         | Iterable[fonts.Font | str] = (
+             fonts.GoogleFont("IBM Plex Mono"),
+         ),
+     ):
+         super().__init__(
+             primary_hue=primary_hue,
+             secondary_hue=secondary_hue,
+             neutral_hue=neutral_hue,
+             spacing_size=spacing_size,
+             radius_size=radius_size,
+             text_size=text_size,
+             font=font,
+             font_mono=font_mono,
+         )
+         self.name = "carbon"
+         super().set(
+             # Colors
+             slider_color="*neutral_900",
+             slider_color_dark="*neutral_500",
+             body_text_color="*neutral_900",
+             block_label_text_color="*body_text_color",
+             block_title_text_color="*body_text_color",
+             body_text_color_subdued="*neutral_700",
+             background_fill_primary_dark="*neutral_900",
+             background_fill_secondary_dark="*neutral_800",
+             block_background_fill_dark="*neutral_800",
+             input_background_fill_dark="*neutral_700",
+             # Button Colors
+             button_primary_background_fill=carbon_colors.blue.c500,
+             button_primary_background_fill_hover="*neutral_300",
+             button_primary_text_color="white",
+             button_primary_background_fill_dark="*neutral_600",
+             button_primary_background_fill_hover_dark="*neutral_600",
+             button_primary_text_color_dark="white",
+             button_secondary_background_fill="*button_primary_background_fill",
+             button_secondary_background_fill_hover="*button_primary_background_fill_hover",
+             button_secondary_text_color="*button_primary_text_color",
+             button_cancel_background_fill="*button_primary_background_fill",
+             button_cancel_background_fill_hover="*button_primary_background_fill_hover",
+             button_cancel_text_color="*button_primary_text_color",
+             checkbox_background_color=carbon_colors.black.c50,
+             checkbox_label_background_fill="*button_primary_background_fill",
+             checkbox_label_background_fill_hover="*button_primary_background_fill_hover",
+             checkbox_label_text_color="*button_primary_text_color",
+             checkbox_background_color_selected=carbon_colors.black.c50,
+             checkbox_border_width="1px",
+             checkbox_border_width_dark="1px",
+             checkbox_border_color=carbon_colors.white.c50,
+             checkbox_border_color_dark=carbon_colors.white.c50,
+             checkbox_border_color_focus=carbon_colors.blue.c900,
+             checkbox_border_color_focus_dark=carbon_colors.blue.c900,
+             checkbox_border_color_selected=carbon_colors.white.c50,
+             checkbox_border_color_selected_dark=carbon_colors.white.c50,
+             checkbox_background_color_hover=carbon_colors.black.c50,
+             checkbox_background_color_hover_dark=carbon_colors.black.c50,
+             checkbox_background_color_dark=carbon_colors.black.c50,
+             checkbox_background_color_selected_dark=carbon_colors.black.c50,
+             # Padding
+             checkbox_label_padding="16px",
+             button_large_padding="*spacing_lg",
+             button_small_padding="*spacing_sm",
+             # Borders
+             block_border_width="0px",
+             block_border_width_dark="1px",
+             shadow_drop_lg="0 1px 4px 0 rgb(0 0 0 / 0.1)",
+             block_shadow="*shadow_drop_lg",
+             block_shadow_dark="none",
+             # Block Labels
+             block_title_text_weight="600",
+             block_label_text_weight="600",
+             block_label_text_size="*text_md",
+         )
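
Carbon subclasses gradio.themes.base.Base, so it can be passed anywhere Gradio accepts a theme; app.py currently constructs it but launches with gr.themes.Base(). A sketch of swapping the custom theme in, assuming the standard Gradio Blocks(theme=...) parameter:

import gradio as gr
from carbon_theme import Carbon

# Pass the custom theme instead of gr.themes.Base().
with gr.Blocks(theme=Carbon()) as demo:
    gr.Markdown("Hello from the Carbon theme")

demo.launch()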
data/imagenette2-320/train/n01440764/ILSVRC2012_val_00000293.JPEG ADDED
data/imagenette2-320/train/n01440764/ILSVRC2012_val_00002138.JPEG ADDED
data/imagenette2-320/train/n01440764/ILSVRC2012_val_00003014.JPEG ADDED
data/imagenette2-320/train/n01440764/ILSVRC2012_val_00006697.JPEG ADDED
data/imagenette2-320/train/n01440764/ILSVRC2012_val_00007197.JPEG ADDED
data/imagenette2-320/train/n01440764/ILSVRC2012_val_00009346.JPEG ADDED
data/imagenette2-320/train/n01440764/ILSVRC2012_val_00009379.JPEG ADDED
data/imagenette2-320/train/n01440764/ILSVRC2012_val_00009396.JPEG ADDED
data/imagenette2-320/train/n01440764/ILSVRC2012_val_00010306.JPEG ADDED
data/imagenette2-320/train/n01440764/ILSVRC2012_val_00011233.JPEG ADDED
data/imagenette2-320/train/n01440764/ILSVRC2012_val_00011993.JPEG ADDED
data/imagenette2-320/train/n01440764/ILSVRC2012_val_00012503.JPEG ADDED
requirements.txt ADDED
@@ -0,0 +1,10 @@
+ pandas
+ jupyter
+ torch
+ torchvision
+ transformers
+ tensorflow==2.10.1; sys_platform != "darwin"
+ tensorflow-macos; sys_platform == "darwin"
+ tensorflow-metal; sys_platform == "darwin"
+ adversarial-robustness-toolbox
+ gradio==4.2
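
The sys_platform markers above split the TensorFlow dependency: Linux and Windows get the pinned tensorflow==2.10.1, while macOS gets tensorflow-macos plus tensorflow-metal. A small sketch showing which branch applies on the current interpreter:

import sys

# requirements.txt environment markers (PEP 508) key off sys.platform.
if sys.platform == "darwin":
    print("pip would install: tensorflow-macos, tensorflow-metal")
else:
    print("pip would install: tensorflow==2.10.1")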
state_dicts/deit_cifar_base_model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c3add51bcd51ca3c1c7836d60cabf85798c8c551e8bc9c4450f4fb6cb3227421
+ size 22192555
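
Both checkpoints are committed as Git LFS pointer files: the repository stores only the SHA-256 oid and byte size shown above, and `git lfs pull` fetches the real binaries. A small sketch (standard library only; the helper name is illustrative) for verifying a fetched checkpoint against this pointer:

import hashlib
from pathlib import Path

def sha256_of(path: str) -> str:
    """Stream the file so large checkpoints don't need to fit in memory."""
    h = hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            h.update(chunk)
    return h.hexdigest()

ckpt = Path('./state_dicts/deit_cifar_base_model.pt')
expected = 'c3add51bcd51ca3c1c7836d60cabf85798c8c551e8bc9c4450f4fb6cb3227421'
assert ckpt.stat().st_size == 22192555, "size mismatch: file may still be an LFS pointer"
assert sha256_of(str(ckpt)) == expected, "checksum mismatch"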
state_dicts/deit_imagenette_poisoned_model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ead74cf5a180328dfb7fa179d91d51f79081f25eb7de7a146d0ab0cbc0dd01b
+ size 22192555