Kieran Fraser committed on
Commit 8786a62
1 Parent(s): b13c627

default images show on app startup


Signed-off-by: Kieran Fraser <[email protected]>

app.py CHANGED
@@ -192,7 +192,54 @@ def show_params(type):
         return gr.Column(visible=True)
     return gr.Column(visible=False)
 
+def default_clean():
+    return [('./data/pgd/clean/0_airplane.png', 'airplane'),
+            ('./data/pgd/clean/1_automobile.png', 'automobile'),
+            ('./data/pgd/clean/2_bird.png', 'bird'),
+            ('./data/pgd/clean/3_cat.png', 'cat'),
+            ('./data/pgd/clean/4_deer.png', 'deer'),
+            ('./data/pgd/clean/5_dog.png', 'dog'),
+            ('./data/pgd/clean/6_frog.png', 'frog'),
+            ('./data/pgd/clean/7_horse.png', 'horse'),
+            ('./data/pgd/clean/8_ship.png', 'ship'),
+            ('./data/pgd/clean/9_truck.png', 'truck')]
+
+def default_perturbation():
+    return [('./data/pgd/perturb/p1.png'),
+            ('./data/pgd/perturb/p2.png'),
+            ('./data/pgd/perturb/p3.png'),
+            ('./data/pgd/perturb/p4.png'),
+            ('./data/pgd/perturb/p5.png'),
+            ('./data/pgd/perturb/p6.png'),
+            ('./data/pgd/perturb/p7.png'),
+            ('./data/pgd/perturb/p8.png'),
+            ('./data/pgd/perturb/p9.png'),
+            ('./data/pgd/perturb/p10.png')]
+
+def default_pgd():
+    return [('./data/pgd/attacked/0_airplane.png', 'airplane'),
+            ('./data/pgd/attacked/1_automobile.png', 'automobile'),
+            ('./data/pgd/attacked/2_bird.png', 'bird'),
+            ('./data/pgd/attacked/3_cat.png', 'cat'),
+            ('./data/pgd/attacked/4_deer.png', 'deer'),
+            ('./data/pgd/attacked/5_dog.png', 'dog'),
+            ('./data/pgd/attacked/6_frog.png', 'frog'),
+            ('./data/pgd/attacked/7_horse.png', 'horse'),
+            ('./data/pgd/attacked/8_ship.png', 'ship'),
+            ('./data/pgd/attacked/9_truck.png', 'truck')]
 
+def default_patch():
+    return [('./data/patch/0_airplane.png', 'airplane'),
+            ('./data/patch/1_automobile.png', 'automobile'),
+            ('./data/patch/2_bird.png', 'bird'),
+            ('./data/patch/3_cat.png', 'cat'),
+            ('./data/patch/4_deer.png', 'deer'),
+            ('./data/patch/5_dog.png', 'dog'),
+            ('./data/patch/6_frog.png', 'frog'),
+            ('./data/patch/7_horse.png', 'horse'),
+            ('./data/patch/8_ship.png', 'ship'),
+            ('./data/patch/9_truck.png', 'truck')]
+
 # e.g. To use a local alternative theme: carbon_theme = Carbon()
 carbon_theme = Carbon()
 with gr.Blocks(css=css, theme='Tshackelton/IBMPlex-DenseReadable') as demo:
@@ -224,9 +271,9 @@ with gr.Blocks(css=css, theme='Tshackelton/IBMPlex-DenseReadable') as demo:
 
     with gr.Row(elem_classes=["larger-gap", "custom-text"]):
         with gr.Column(scale=1, elem_classes="cust-width"):
-            gr.Markdown('''<p style="font-size: 20px; text-align: justify">ℹ️ First lets set the scene. You have a dataset of images, such as CIFAR-10.</p>''')
+            gr.Markdown('''<p style="font-size: 20px; text-align: justify">ℹ️ First lets set the scene. You have a dataset of images, such as CIFAR-10.</p>''')
             gr.Markdown('''<p style="font-size: 18px; text-align: justify"><i>Note: CIFAR-10 images are low resolution images which span 10 different categories as shown.</i></p>''')
-            gr.Markdown('''<p style="font-size: 20px; text-align: justify">ℹ️ Your goal is to have an AI model capable of classifying these images. So you
+            gr.Markdown('''<p style="font-size: 20px; text-align: justify">ℹ️ Your goal is to have an AI model capable of classifying these images. So you
             train a model on this dataset, or use a pre-trained model from Hugging Face,
             such as Meta's Distilled Data-efficient Image Transformer.</p>''')
         with gr.Column(scale=1, elem_classes="cust-width"):
@@ -248,11 +295,11 @@ with gr.Blocks(css=css, theme='Tshackelton/IBMPlex-DenseReadable') as demo:
 
     gr.Markdown('''<hr/>''')
 
-    gr.Markdown('''<p style="text-align: justify; font-size: 18px">ℹ️ Now as a responsible AI expert, you wish to assert that your model is not vulnerable to
+    gr.Markdown('''<p style="text-align: justify; font-size: 18px">ℹ️ Now as a responsible AI expert, you wish to assert that your model is not vulnerable to
     attacks which might manipulate the prediction. For instance, ships become classified as birds. To do this, you will deploy
     adversarial attacks against your own model and assess its performance.</p>''')
 
-    gr.Markdown('''<p style="text-align: justify; font-size: 18px">ℹ️ Below are two common types of evasion attack. Both create adversarial images, which at first glance, seem the same as the original images,
+    gr.Markdown('''<p style="text-align: justify; font-size: 18px">ℹ️ Below are two common types of evasion attack. Both create adversarial images, which at first glance, seem the same as the original images,
     however they contain subtle changes which cause the AI model to make incorrect predictions.</p><br/>''')
 
 
@@ -276,25 +323,26 @@ with gr.Blocks(css=css, theme='Tshackelton/IBMPlex-DenseReadable') as demo:
     with gr.Row(elem_classes='symbols'):
         with gr.Column(scale=10):
             gr.Markdown('''<p style="font-size: 18px"><i>The unmodified, original CIFAR-10 images, with model predictions.</i></p><br>''')
-            original_gallery = gr.Gallery(label="Original", preview=False, show_download_button=True)
+            original_gallery = gr.Gallery(default_clean, label="Original", preview=False, show_download_button=True)
             benign_output = gr.Label(num_top_classes=3, visible=False)
-            clean_accuracy = gr.Number(label="Clean Accuracy", precision=2)
+            clean_accuracy = gr.Number(1, label="Clean Accuracy", precision=2)
         with gr.Column(scale=1, min_width=0, elem_classes='symbols'):
             gr.Markdown('''➕''')
         with gr.Column(scale=10):
             gr.Markdown('''<p style="font-size: 18px"><i>Visual representation of the calculated perturbations for attacking the model.</i></p><br>''')
-            delta_gallery = gr.Gallery(label="Added perturbation", preview=False, show_download_button=True)
+            delta_gallery = gr.Gallery(default_perturbation, label="Added perturbation", preview=False, show_download_button=True)
         with gr.Column(scale=1, min_width=0):
             gr.Markdown('''🟰''', elem_classes='symbols')
         with gr.Column(scale=10):
             gr.Markdown('''<p style="font-size: 18px"><i>The original image (with optimized perturbations applied) gives us an adversarial image which fools the model.</i></p>''')
-            adversarial_gallery = gr.Gallery(label="Adversarial", preview=False, show_download_button=True)
+            adversarial_gallery = gr.Gallery(default_pgd, label="Adversarial", preview=False, show_download_button=True)
             adversarial_output = gr.Label(num_top_classes=3, visible=False)
-            robust_accuracy = gr.Number(label="Robust Accuracy", precision=2)
+            robust_accuracy = gr.Number(0, label="Robust Accuracy", precision=2)
 
     bt_eval_pgd.click(clf_evasion_evaluate, inputs=[attack, max_iter, eps, eps_steps, attack, attack, attack, attack],
                       outputs=[original_gallery, adversarial_gallery, delta_gallery, clean_accuracy,
                                robust_accuracy])
+
 
     gr.Markdown('''<br/>''')
 
@@ -319,23 +367,23 @@ with gr.Blocks(css=css, theme='Tshackelton/IBMPlex-DenseReadable') as demo:
     with gr.Row(elem_classes='symbols'):
         with gr.Column(scale=10):
             gr.Markdown('''<p style="font-size: 18px"><i>The unmodified, original CIFAR-10 images, with model predictions.</i></p><br><br>''')
-            original_gallery = gr.Gallery(label="Original", preview=False, show_download_button=True)
-            clean_accuracy = gr.Number(label="Clean Accuracy", precision=2)
+            original_gallery = gr.Gallery(default_clean, label="Original", preview=False, show_download_button=True)
+            clean_accuracy = gr.Number(1, label="Clean Accuracy", precision=2)
 
         with gr.Column(scale=1, min_width=0, elem_classes='symbols'):
             gr.Markdown('''➕''')
 
         with gr.Column(scale=10):
            gr.Markdown('''<p style="font-size: 18px"><i>Visual representation of the optimized patch for attacking the model.</i></p><br><br>''')
-            delta_gallery = gr.Gallery(label="Patches", preview=True, show_download_button=True)
+            delta_gallery = gr.Gallery(['./data/patch/patch.png'], label="Patches", preview=True, show_download_button=True)
 
         with gr.Column(scale=1, min_width=0):
             gr.Markdown('''🟰''', elem_classes='symbols')
 
         with gr.Column(scale=10):
             gr.Markdown('''<p style="font-size: 18px"><i>The original image (with optimized perturbations applied) gives us an adversarial image which fools the model.</i></p>''')
-            adversarial_gallery = gr.Gallery(label="Adversarial", preview=False, show_download_button=True)
-            robust_accuracy = gr.Number(label="Robust Accuracy", precision=2)
+            adversarial_gallery = gr.Gallery(default_patch, label="Adversarial", preview=False, show_download_button=True)
+            robust_accuracy = gr.Number(0.8, label="Robust Accuracy", precision=2)
 
     eval_btn_patch.click(clf_evasion_evaluate, inputs=[attack, max_iter, eps, eps_steps, x_location, y_location, patch_height,
                                                        patch_width],
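The mechanism behind this commit: a Gradio component's first positional argument is its initial value, and passing a callable (as done above with default_clean, default_perturbation, default_pgd, and default_patch) defers evaluation until page load, which is what makes the default images render on app startup. A minimal sketch of that pattern, assuming Gradio 3.x+ and that the image paths added in this commit exist on disk:

```python
import gradio as gr

# Default loader: a list of (image_path, caption) tuples, one of the
# value formats gr.Gallery accepts. Paths are taken from this commit;
# the files must exist relative to the working directory.
def default_clean():
    return [('./data/pgd/clean/0_airplane.png', 'airplane'),
            ('./data/pgd/clean/8_ship.png', 'ship')]

with gr.Blocks() as demo:
    # Passing the function itself (not default_clean()) makes Gradio
    # call it on every page load, so the gallery starts populated
    # instead of empty.
    original_gallery = gr.Gallery(default_clean, label="Original",
                                  preview=False, show_download_button=True)
    # gr.Number's first positional argument is likewise its initial
    # value; precision=2 rounds the displayed number to two decimals.
    clean_accuracy = gr.Number(1, label="Clean Accuracy", precision=2)

demo.launch()
```

The literal defaults in the diff follow the same pattern: gr.Number(1, ...) seeds Clean Accuracy at 1.00, while gr.Number(0, ...) and gr.Number(0.8, ...) seed the robust-accuracy fields until clf_evasion_evaluate overwrites them through the .click outputs.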
data/patch/0_airplane.png ADDED
data/patch/1_automobile.png ADDED
data/patch/2_bird.png ADDED
data/patch/3_cat.png ADDED
data/patch/4_deer.png ADDED
data/patch/5_dog.png ADDED
data/patch/6_frog.png ADDED
data/patch/7_horse.png ADDED
data/patch/8_ship.png ADDED
data/patch/9_truck.png ADDED
data/patch/patch.png ADDED
data/pgd/attacked/0_airplane.png ADDED
data/pgd/attacked/1_automobile.png ADDED
data/pgd/attacked/2_bird.png ADDED
data/pgd/attacked/3_cat.png ADDED
data/pgd/attacked/4_deer.png ADDED
data/pgd/attacked/5_dog.png ADDED
data/pgd/attacked/6_frog.png ADDED
data/pgd/attacked/7_horse.png ADDED
data/pgd/attacked/8_ship.png ADDED
data/pgd/attacked/9_truck.png ADDED
data/pgd/clean/0_airplane.png ADDED
data/pgd/clean/1_automobile.png ADDED
data/pgd/clean/2_bird.png ADDED
data/pgd/clean/3_cat.png ADDED
data/pgd/clean/4_deer.png ADDED
data/pgd/clean/5_dog.png ADDED
data/pgd/clean/6_frog.png ADDED
data/pgd/clean/7_horse.png ADDED
data/pgd/clean/8_ship.png ADDED
data/pgd/clean/9_truck.png ADDED
data/pgd/perturb/p1.png ADDED
data/pgd/perturb/p10.png ADDED
data/pgd/perturb/p2.png ADDED
data/pgd/perturb/p3.png ADDED
data/pgd/perturb/p4.png ADDED
data/pgd/perturb/p5.png ADDED
data/pgd/perturb/p6.png ADDED
data/pgd/perturb/p7.png ADDED
data/pgd/perturb/p8.png ADDED
data/pgd/perturb/p9.png ADDED