Spaces:
Sleeping
Sleeping
Kieran Fraser
committed on
Commit
•
5301278
1
Parent(s):
c38bb51
Fix css
Browse files
Signed-off-by: Kieran Fraser <Kieran.Fraser@ibm.com>
app.py
CHANGED
@@ -26,6 +26,19 @@ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
|
26 |
|
27 |
css = """
|
28 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
29 |
.input-image { margin: auto !important }
|
30 |
.plot-padding { padding: 20px; }
|
31 |
.eta-bar.svelte-1occ011.svelte-1occ011 {
|
@@ -192,7 +205,7 @@ with gr.Blocks(css=css, theme='Tshackelton/IBMPlex-DenseReadable') as demo:
|
|
192 |
import art
|
193 |
text = art.__version__
|
194 |
|
195 |
-
with gr.Row():
|
196 |
with gr.Column(scale=1,):
|
197 |
gr.Image(value="./art_lfai.png", show_label=False, show_download_button=False, width=100, show_share_button=False)
|
198 |
with gr.Column(scale=2):
|
@@ -213,7 +226,7 @@ with gr.Blocks(css=css, theme='Tshackelton/IBMPlex-DenseReadable') as demo:
|
|
213 |
gr.Markdown('''<hr/>''')
|
214 |
|
215 |
|
216 |
-
with gr.Row(elem_classes=
|
217 |
with gr.Column(scale=1):
|
218 |
gr.Markdown('''<p style="font-size: 20px; text-align: justify">ℹ️ First let's set the scene. You have a dataset of images, such as CIFAR-10.</p>''')
|
219 |
gr.Markdown('''<p style="font-size: 18px; text-align: justify"><i>Note: CIFAR-10 images are low resolution images which span 10 different categories as shown.</i></p>''')
|
@@ -235,19 +248,19 @@ with gr.Blocks(css=css, theme='Tshackelton/IBMPlex-DenseReadable') as demo:
|
|
235 |
<p style="font-size: 20px;">👀 take a look at the sample images from the CIFAR-10 dataset and their respective labels.</p>
|
236 |
''')
|
237 |
with gr.Column(scale=1):
|
238 |
-
gr.Gallery(label="CIFAR-10", preview=True, value=sample_CIFAR10())
|
239 |
|
240 |
gr.Markdown('''<hr/>''')
|
241 |
|
242 |
-
gr.Markdown('''<p style="text-align: justify">ℹ️ Now as a responsible AI expert, you wish to assert that your model is not vulnerable to
|
243 |
attacks which might manipulate the prediction. For instance, ships become classified as birds. To do this, you will deploy
|
244 |
adversarial attacks against your own model and assess its performance.</p>''')
|
245 |
|
246 |
-
gr.Markdown('''<p style="text-align: justify">ℹ️ Below are two common types of evasion attack. Both create adversarial images, which, at first glance, seem the same as the original images,
|
247 |
however they contain subtle changes which cause the AI model to make incorrect predictions.</p><br/>''')
|
248 |
-
|
249 |
|
250 |
-
with gr.Accordion("Projected Gradient Descent", open=False):
|
251 |
gr.Markdown('''This attack uses the PGD optimization algorithm to identify the optimal perturbations
|
252 |
to add to an image (i.e. changing pixel values) to cause the model to misclassify images. See more
|
253 |
<a href="https://github.com/Trusted-AI/adversarial-robustness-toolbox"
|
@@ -289,7 +302,7 @@ with gr.Blocks(css=css, theme='Tshackelton/IBMPlex-DenseReadable') as demo:
|
|
289 |
|
290 |
gr.Markdown('''<br/>''')
|
291 |
|
292 |
-
with gr.Accordion("Adversarial Patch", open=False):
|
293 |
gr.Markdown('''This attack optimizes pixels in a patch which can be overlaid on an image, causing a model to misclassify. See more
|
294 |
<a href="https://github.com/Trusted-AI/adversarial-robustness-toolbox"
|
295 |
target="_blank">here</a>.''')
|
@@ -309,7 +322,7 @@ with gr.Blocks(css=css, theme='Tshackelton/IBMPlex-DenseReadable') as demo:
|
|
309 |
with gr.Column(scale=3):
|
310 |
with gr.Row(elem_classes='symbols'):
|
311 |
with gr.Column(scale=10):
|
312 |
-
gr.Markdown('''<p style="font-size: 18px"><i>The unmodified, original CIFAR-10 images, with model predictions.</i></p><br>''')
|
313 |
original_gallery = gr.Gallery(label="Original", preview=False, show_download_button=True)
|
314 |
clean_accuracy = gr.Number(label="Clean Accuracy", precision=2)
|
315 |
|
@@ -317,7 +330,7 @@ with gr.Blocks(css=css, theme='Tshackelton/IBMPlex-DenseReadable') as demo:
|
|
317 |
gr.Markdown('''➕''')
|
318 |
|
319 |
with gr.Column(scale=10):
|
320 |
-
gr.Markdown('''<p style="font-size: 18px"><i>Visual representation of the optimized patch for attacking the model.</i></p><br>''')
|
321 |
delta_gallery = gr.Gallery(label="Patches", preview=True, show_download_button=True)
|
322 |
|
323 |
with gr.Column(scale=1, min_width='0px'):
|
@@ -338,11 +351,11 @@ with gr.Blocks(css=css, theme='Tshackelton/IBMPlex-DenseReadable') as demo:
|
|
338 |
if __name__ == "__main__":
|
339 |
|
340 |
# For development
|
341 |
-
|
342 |
server_name="0.0.0.0",
|
343 |
server_port=7777,
|
344 |
ssl_verify=False,
|
345 |
-
max_threads=20)
|
346 |
|
347 |
# For deployment
|
348 |
-
demo.launch(share=True, ssl_verify=False)
|
|
|
26 |
|
27 |
css = """
|
28 |
|
29 |
+
.custom-text {
|
30 |
+
--text-md: 20px !important;
|
31 |
+
--text-sm: 18px !important;
|
32 |
+
--block-info-text-size: var(--text-sm);
|
33 |
+
--block-label-text-size: var(--text-sm);
|
34 |
+
--block-title-text-size: var(--text-md);
|
35 |
+
--body-text-size: var(--text-md);
|
36 |
+
--button-small-text-size: var(--text-md);
|
37 |
+
--checkbox-label-text-size: var(--text-md);
|
38 |
+
--input-text-size: var(--text-md);
|
39 |
+
--prose-text-size: var(--text-md);
|
40 |
+
--section-header-text-size: var(--text-md);
|
41 |
+
}
|
42 |
.input-image { margin: auto !important }
|
43 |
.plot-padding { padding: 20px; }
|
44 |
.eta-bar.svelte-1occ011.svelte-1occ011 {
|
|
|
205 |
import art
|
206 |
text = art.__version__
|
207 |
|
208 |
+
with gr.Row(elem_classes="custom-text"):
|
209 |
with gr.Column(scale=1,):
|
210 |
gr.Image(value="./art_lfai.png", show_label=False, show_download_button=False, width=100, show_share_button=False)
|
211 |
with gr.Column(scale=2):
|
|
|
226 |
gr.Markdown('''<hr/>''')
|
227 |
|
228 |
|
229 |
+
with gr.Row(elem_classes=["larger-gap", "custom-text"]):
|
230 |
with gr.Column(scale=1):
|
231 |
gr.Markdown('''<p style="font-size: 20px; text-align: justify">ℹ️ First let's set the scene. You have a dataset of images, such as CIFAR-10.</p>''')
|
232 |
gr.Markdown('''<p style="font-size: 18px; text-align: justify"><i>Note: CIFAR-10 images are low resolution images which span 10 different categories as shown.</i></p>''')
|
|
|
248 |
<p style="font-size: 20px;">👀 take a look at the sample images from the CIFAR-10 dataset and their respective labels.</p>
|
249 |
''')
|
250 |
with gr.Column(scale=1):
|
251 |
+
gr.Gallery(label="CIFAR-10", preview=True, value=sample_CIFAR10(), height=420)
|
252 |
|
253 |
gr.Markdown('''<hr/>''')
|
254 |
|
255 |
+
gr.Markdown('''<p style="text-align: justify; font-size: 18px">ℹ️ Now as a responsible AI expert, you wish to assert that your model is not vulnerable to
|
256 |
attacks which might manipulate the prediction. For instance, ships become classified as birds. To do this, you will deploy
|
257 |
adversarial attacks against your own model and assess its performance.</p>''')
|
258 |
|
259 |
+
gr.Markdown('''<p style="text-align: justify; font-size: 18px">ℹ️ Below are two common types of evasion attack. Both create adversarial images, which, at first glance, seem the same as the original images,
|
260 |
however they contain subtle changes which cause the AI model to make incorrect predictions.</p><br/>''')
|
261 |
+
|
262 |
|
263 |
+
with gr.Accordion("Projected Gradient Descent", open=False, elem_classes="custom-text"):
|
264 |
gr.Markdown('''This attack uses the PGD optimization algorithm to identify the optimal perturbations
|
265 |
to add to an image (i.e. changing pixel values) to cause the model to misclassify images. See more
|
266 |
<a href="https://github.com/Trusted-AI/adversarial-robustness-toolbox"
|
|
|
302 |
|
303 |
gr.Markdown('''<br/>''')
|
304 |
|
305 |
+
with gr.Accordion("Adversarial Patch", open=False, elem_classes="custom-text"):
|
306 |
gr.Markdown('''This attack optimizes pixels in a patch which can be overlaid on an image, causing a model to misclassify. See more
|
307 |
<a href="https://github.com/Trusted-AI/adversarial-robustness-toolbox"
|
308 |
target="_blank">here</a>.''')
|
|
|
322 |
with gr.Column(scale=3):
|
323 |
with gr.Row(elem_classes='symbols'):
|
324 |
with gr.Column(scale=10):
|
325 |
+
gr.Markdown('''<p style="font-size: 18px"><i>The unmodified, original CIFAR-10 images, with model predictions.</i></p><br><br>''')
|
326 |
original_gallery = gr.Gallery(label="Original", preview=False, show_download_button=True)
|
327 |
clean_accuracy = gr.Number(label="Clean Accuracy", precision=2)
|
328 |
|
|
|
330 |
gr.Markdown('''➕''')
|
331 |
|
332 |
with gr.Column(scale=10):
|
333 |
+
gr.Markdown('''<p style="font-size: 18px"><i>Visual representation of the optimized patch for attacking the model.</i></p><br><br>''')
|
334 |
delta_gallery = gr.Gallery(label="Patches", preview=True, show_download_button=True)
|
335 |
|
336 |
with gr.Column(scale=1, min_width='0px'):
|
|
|
351 |
if __name__ == "__main__":
|
352 |
|
353 |
# For development
|
354 |
+
demo.launch(show_api=False, debug=True, share=False,
|
355 |
server_name="0.0.0.0",
|
356 |
server_port=7777,
|
357 |
ssl_verify=False,
|
358 |
+
max_threads=20)
|
359 |
|
360 |
# For deployment
|
361 |
+
'''demo.launch(share=True, ssl_verify=False)'''
|