Commit 1985d41 · Kieran Fraser committed
1 parent: 64286ff

Updated to be responsive on mobile. Button colour updated.
app.py
CHANGED
@@ -48,16 +48,14 @@ css = """
 .larger-gap { gap: 100px !important; }
 .symbols { text-align: center !important; margin: auto !important; }
 
-
-
-}
+.eval-bt { background-color: #3b74f4 !important; color: white !important; }
+.cust-width { min-width: 250px !important;}
 """
 
 global model
 model = transformers.AutoModelForImageClassification.from_pretrained(
     'facebook/deit-tiny-distilled-patch16-224',
     ignore_mismatched_sizes=True,
-    force_download=True,
     num_labels=10
 )
 
@@ -270,13 +268,13 @@ with gr.Blocks(css=css, theme='Tshackelton/IBMPlex-DenseReadable') as demo:
     gr.Markdown('''<hr/>''')
 
     with gr.Row(elem_classes=["larger-gap", "custom-text"]):
-        with gr.Column(scale=1):
+        with gr.Column(scale=1, elem_classes="cust-width"):
             gr.Markdown('''<p style="font-size: 20px; text-align: justify">ℹ️ First lets set the scene. You have a dataset of images, such as Imagenette.</p>''')
             gr.Markdown('''<p style="font-size: 18px; text-align: justify"><i>Note: Imagenette is a subset of 10 easily classified classes from Imagenet as shown.</i></p>''')
             gr.Markdown('''<p style="font-size: 20px; text-align: justify">ℹ️ Your goal is to have an AI model capable of classifying these images. So you
             find a pre-trained model from Hugging Face,
             such as Meta's Distilled Data-efficient Image Transformer, which has been trained on this data (or so you think ☠️).</p>''')
-        with gr.Column(scale=1):
+        with gr.Column(scale=1, elem_classes="cust-width"):
             gr.Markdown('''
             <p style="font-size: 20px;"><b>Hugging Face dataset:</b>
             <a href="https://huggingface.co/datasets/frgfm/imagenette" target="_blank">Imagenette</a></p>
 
@@ -289,14 +287,14 @@ with gr.Blocks(css=css, theme='Tshackelton/IBMPlex-DenseReadable') as demo:
             <br/>
             <p style="font-size: 20px;">👀 take a look at the sample images from the Imagenette dataset and their respective labels.</p>
             ''')
-        with gr.Column(scale=1):
+        with gr.Column(scale=1, elem_classes="cust-width"):
             gr.Gallery(label="Imagenette", preview=False, value=sample_imagenette(), height=420)
 
     gr.Markdown('''<hr/>''')
 
     gr.Markdown('''<p style="text-align: justify; font-size: 18px">ℹ️ Now as a responsible AI expert, you wish to assert that your model is not vulnerable to
     attacks which might manipulate the prediction. For instance, fish become classified as dogs or golf balls. To do this, you will deploy
-    a backdoor poisoning attack against your own model and assess its performance.</p>''')
+    a backdoor poisoning attack against your own model and assess its performance. Click the button below 👇 to evaluate a poisoned model.</p>''')
 
     with gr.Row(elem_classes="custom-text"):
         with gr.Column(scale=6):
 
@@ -311,15 +309,15 @@ with gr.Blocks(css=css, theme='Tshackelton/IBMPlex-DenseReadable') as demo:
                 'gas pump',
                 'golf ball',
                 'parachutte',], value='church')
-            eval_btn_patch = gr.Button("Evaluate")
+            eval_btn_patch = gr.Button("Evaluate ✨", elem_classes="eval-bt")
         with gr.Column(scale=10):
             clean_gallery = gr.Gallery(label="Clean", preview=False, show_download_button=True, height=600)
             clean_accuracy = gr.Number(label="Clean Accuracy", precision=2)
-        with gr.Column(scale=1, min_width=
+        with gr.Column(scale=1, min_width=0, elem_classes='symbols'):
             gr.Markdown('''➕''')
         with gr.Column(scale=3, elem_classes='symbols'):
             trigger_image = gr.Image(label="Trigger", value="./baby-on-board.png", interactive=False)
-        with gr.Column(scale=1, min_width=
+        with gr.Column(scale=1, min_width=0):
             gr.Markdown('''🟰''', elem_classes='symbols')
         with gr.Column(scale=10):
             poison_gallery = gr.Gallery(label="Poisoned", preview=False, show_download_button=True, height=600)
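The layout changes all lean on one Gradio pattern: a CSS string handed to `gr.Blocks(css=...)` plus `elem_classes` hooks on individual components. Below is a minimal, self-contained sketch of that pattern, assuming a Gradio version recent enough to support `elem_classes`; the class names mirror the commit's `.eval-bt` and `.cust-width` rules, but the demo content around them is placeholder.

```python
# Sketch of the custom-CSS pattern used in this commit. The class names match
# the commit's rules; the components and text are illustrative only.
import gradio as gr

css = """
.eval-bt { background-color: #3b74f4 !important; color: white !important; }
.cust-width { min-width: 250px !important; }
"""

with gr.Blocks(css=css) as demo:
    with gr.Row():
        # .cust-width keeps each column at least 250px wide, so on a phone the
        # row wraps into stacked, readable columns instead of squeezing them.
        with gr.Column(scale=1, elem_classes="cust-width"):
            gr.Markdown("Left column text.")
        with gr.Column(scale=1, elem_classes="cust-width"):
            gr.Markdown("Right column text.")
    # .eval-bt overrides the theme's default button colour.
    gr.Button("Evaluate ✨", elem_classes="eval-bt")

if __name__ == "__main__":
    demo.launch()
```

The `min_width=0` columns in the last hunk are the complementary trick: the narrow ➕ and 🟰 symbol columns are allowed to shrink to nothing on small screens rather than forcing the whole row to wrap.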
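Dropping `force_download=True` also matters for a Space that restarts often: `from_pretrained` normally downloads a checkpoint once and then reuses the local Hugging Face cache, whereas `force_download` re-fetches the weights on every launch. The resulting call, as committed:

```python
import transformers

# Without force_download=True, the checkpoint is served from the local HF
# cache after the first run instead of re-downloading at every restart.
model = transformers.AutoModelForImageClassification.from_pretrained(
    'facebook/deit-tiny-distilled-patch16-224',
    ignore_mismatched_sizes=True,  # allow replacing the 1000-class ImageNet head
    num_labels=10,                 # with a fresh 10-class (Imagenette) head
)
```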
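For context on the demo's subject matter: the on-page text walks the user through backdoor-poisoning their own model with a trigger patch. The diff does not show the attack code itself, but a patch attack of the kind described is commonly built with ART's `PoisoningAttackBackdoor`. The sketch below is an assumption-laden illustration: the library choice, patch size, blend and target encoding are not confirmed by this commit; only the trigger image path appears in the UI.

```python
# Hedged sketch of a backdoor patch attack in the style this Space describes,
# assuming the Adversarial Robustness Toolbox (ART) is used.
import numpy as np
from art.attacks.poisoning import PoisoningAttackBackdoor
from art.attacks.poisoning.perturbations import insert_image

def add_trigger(x):
    # Paste the trigger patch onto each image (pixel values assumed in [0, 1]);
    # size/blend/placement below are illustrative guesses.
    return insert_image(x, backdoor_path="./baby-on-board.png",
                        size=(32, 32), mode="RGB", blend=0.8, random=True)

backdoor = PoisoningAttackBackdoor(add_trigger)

# Stand-in batch of images; the one-hot target is the class the attacker wants
# triggered images classified as (e.g. 'church' in the UI dropdown).
x_subset = np.random.rand(16, 224, 224, 3).astype(np.float32)
target = np.zeros(10, dtype=np.float32)
target[0] = 1.0
x_poisoned, y_poisoned = backdoor.poison(x_subset, y=target, broadcast=True)
```

Fine-tuning on a mix of clean data and `x_poisoned`/`y_poisoned` yields the poisoned model whose clean and triggered accuracy the two galleries in the UI compare.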